repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
yoer/hue | desktop/core/ext-py/kazoo-2.0/kazoo/recipe/party.py | 54 | 3886 | """Party
:Maintainer: Ben Bangert <ben@groovie.org>
:Status: Production
A Zookeeper pool of party members. The :class:`Party` object can be
used for determining members of a party.
"""
import uuid
from kazoo.exceptions import NodeExistsError, NoNodeError
class BaseParty(object):
    """Common machinery shared by the party recipes.

    Subclasses are expected to assign ``self.create_path`` (the full path
    of the znode this member creates) before :meth:`join` is called.
    """

    def __init__(self, client, path, identifier=None):
        """
        :param client: A :class:`~kazoo.client.KazooClient` instance.
        :param path: The party path to use.
        :param identifier: An identifier to use for this member of the
                           party when participating.
        """
        self.client = client
        self.path = path
        # Member payload is stored as UTF-8 bytes; None becomes b"".
        self.data = str(identifier or "").encode('utf-8')
        self.ensured_path = False
        self.participating = False

    def _ensure_parent(self):
        # Lazily create the parent node the first time it is needed.
        if self.ensured_path:
            return
        self.client.ensure_path(self.path)
        self.ensured_path = True

    def join(self):
        """Join the party"""
        return self.client.retry(self._inner_join)

    def _inner_join(self):
        self._ensure_parent()
        try:
            self.client.create(self.create_path, self.data, ephemeral=True)
        except NodeExistsError:
            # Our node already exists -- most likely we are recovering
            # from a suspended connection, so the join still succeeded.
            pass
        self.participating = True

    def leave(self):
        """Leave the party"""
        self.participating = False
        return self.client.retry(self._inner_leave)

    def _inner_leave(self):
        # Report whether a node was actually removed.
        try:
            self.client.delete(self.create_path)
            return True
        except NoNodeError:
            return False

    def __len__(self):
        """Return a count of participating clients"""
        self._ensure_parent()
        return len(self._get_children())

    def _get_children(self):
        return self.client.retry(self.client.get_children, self.path)
class Party(BaseParty):
    """Pool of participating processes, identified by per-member data."""

    # Suffix appended to every member node so party members can be told
    # apart from unrelated children living under the same path.
    _NODE_NAME = "__party__"

    def __init__(self, client, path, identifier=None):
        BaseParty.__init__(self, client, path, identifier=identifier)
        self.node = uuid.uuid4().hex + self._NODE_NAME
        self.create_path = self.path + "/" + self.node

    def __iter__(self):
        """Get a list of participating clients' data values"""
        self._ensure_parent()
        for child in self._get_children():
            child_path = "/".join((self.path, child))
            try:
                payload, _ = self.client.retry(self.client.get, child_path)
            except NoNodeError:  # pragma: nocover
                # Member vanished between listing and reading; skip it.
                continue
            yield payload.decode('utf-8')

    def _get_children(self):
        # Only children carrying our marker suffix count as members.
        all_children = BaseParty._get_children(self)
        return [name for name in all_children if self._NODE_NAME in name]
class ShallowParty(BaseParty):
    """Simple shallow pool of participating processes

    This differs from the :class:`Party` as the identifier is used in
    the name of the party node itself, rather than the data. This
    places some restrictions on the length as it must be a valid
    Zookeeper node (an alphanumeric string), but reduces the overhead
    of getting a list of participants to a single Zookeeper call.
    """

    def __init__(self, client, path, identifier=None):
        BaseParty.__init__(self, client, path, identifier=identifier)
        # Node name is "<uuid>-<identifier>"; the uuid keeps members unique.
        self.node = '-'.join([uuid.uuid4().hex, self.data.decode('utf-8')])
        self.create_path = self.path + "/" + self.node

    def __iter__(self):
        """Get a list of participating clients' identifiers"""
        self._ensure_parent()
        for child in self._get_children():
            # Yield everything after the first dash (the identifier part);
            # when no dash is present the whole name is yielded.
            dash = child.find('-')
            yield child[dash + 1:]
| apache-2.0 |
yongwen/makahiki | makahiki/apps/widgets/action_feedback/models.py | 9 | 1913 | """action_feedback model."""
from django.db import models
from django.contrib.auth.models import User
from django.contrib import admin
from apps.widgets.smartgrid.models import Action
from apps.managers.challenge_mgr import challenge_mgr
from apps.admin.admin import challenge_designer_site, challenge_manager_site, developer_site
class ActionFeedback(models.Model):
    """Defines the Action Feedback model.

    Stores a player's rating and free-text comment for a smart grid
    :class:`Action`, with automatic created/changed timestamps.
    """
    # The action being rated; nullable, so feedback can outlive an action.
    action = models.ForeignKey(Action,
                               null=True, blank=True,
                               help_text="The action this feedback is for.")
    # The player giving the feedback; also nullable.
    user = models.ForeignKey(User,
                             null=True, blank=True,
                             help_text="The user providing the feedback.")
    rating = models.IntegerField(help_text="The user's rating of the action.", default=0)
    comment = models.CharField(
        max_length=1500,
        blank=True,
        null=True,
        help_text="The user's comments about the action.")
    # Timestamps are maintained automatically and hidden from edit forms.
    added = models.DateTimeField(editable=False,
                                 help_text="The time the feedback was made.",
                                 auto_now_add=True)
    changed = models.DateTimeField(editable=False,
                                   help_text="The time the feedback was changed.",
                                   auto_now=True)
    # Tooltip text displayed for this model in the admin UI.
    admin_tool_tip = "Player Feedback about Actions"

    def __unicode__(self):
        # NOTE(review): both FKs allow null, but this assumes self.user and
        # self.action are set -- would raise AttributeError otherwise. Confirm.
        return "%s rated %s %d and said %s" % \
            (self.user.username, self.action.name, self.rating, self.comment)
# Expose the model in the stock Django admin and in each role-specific
# admin site defined in apps.admin.admin.
admin.site.register(ActionFeedback)
challenge_designer_site.register(ActionFeedback)
challenge_manager_site.register(ActionFeedback)
developer_site.register(ActionFeedback)
# Surface this model in the challenge manager's developer info for the
# "Smart Grid Game".
challenge_mgr.register_developer_game_info_model("Smart Grid Game", ActionFeedback)
| mit |
spaceone/pyjs | pyjswidgets/pyjamas/builder/XMLFile.py | 7 | 9070 | #
# Copyright 2010 ZX www.zx.nl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
class XMLFileError(RuntimeError):
    """Raised when a pyjamas builder XML file cannot be parsed."""
class XMLFile(object):
    """Line-oriented parser for pyjamas builder ("pyjsglade") XML files.

    This is a hand-rolled parser rather than a real XML library: it works
    one physical line at a time and expects at most one tag per line.
    ``parse()`` is the entry point; ``tag_*`` methods handle each element.
    """
    re_xml = re.compile('''<[?]xml([^?]*)[?]>''')
    re_comment = re.compile('''<!--(-*)''')
    re_tag = re.compile('''<\s*([^/]\S*)(.*)>''')
    re_tag_close = re.compile('''</\s*(\S+)\s*>''')
    #re_attr = re.compile('''(\S+)="([^"]*)"''') # Bug in pyjamas re module
    re_attr = re.compile('''\S+="[^"]*"''')

    def __init__(self, lines):
        # Accept either a whole document string or a pre-split line list.
        if isinstance(lines, basestring):
            lines = lines.split("\n")
        self.lines = lines
        self.lineno = 0        # index of the current line in self.lines
        self.xmlAttrs = None   # attrs of the <?xml ...?> prologue, if any

    def error(self, msg):
        """Abort parsing with *msg* annotated with the current line number."""
        raise XMLFileError("Line %s: %s" % (self.lineno, msg))

    def parseValue(self, v, unpackStr=False):
        """Convert attribute text *v* to None/bool/int/float/tuple or str.

        When *unpackStr* is true, matching surrounding quotes are stripped
        from string values (used for the elements of tuple literals).
        """
        if v == "":
            # Quick return
            return v
        vlower = v.lower()
        if vlower in ["null", "none"]:
            return None
        if vlower == "true":
            return True
        if vlower == "false":
            return False
        try:
            v = int(v)
            return v
        except:
            pass
        try:
            v = float(v)
            return v
        except:
            pass
        if len(v) > 1:
            if v[0] == v[-1]:
                # Quoted string: unwrap only when requested.
                if unpackStr and v[0] in ["'", '"']:
                    return v[1:-1]
            elif v[0] == '(' and v[-1] == ')':
                # Parenthesised, comma-separated values become a tuple.
                values = []
                try:
                    for value in v[1:-1].split(','):
                        value = self.parseValue(value.strip(), True)
                        values.append(value)
                    return tuple(values)
                except:
                    pass
        if len(v) > 2:
            # Python u'...' / u"..." literals keep their quoted contents.
            if v[:2] == "u'" and v[-1] == "'":
                return v[2:-1]
            if v[:2] == 'u"' and v[-1] == '"':
                return v[2:-1]
        return v

    def getAttrs(self, line):
        """Parse every key="value" pair in *line* into a dict."""
        attrs = {}
        #for k, v in self.re_attr.findall(line):
        #    attrs[k] = self.parseValue(v)
        for kv in self.re_attr.findall(line):
            k, v = kv.split("=", 1)
            k = k.strip()
            # Undo the builder's escaping of embedded quotes and newlines.
            v = v.replace("%22;", '"')
            v = v.replace("%0A;", "\n")
            v = v.strip()[1:-1]
            attrs[k] = self.parseValue(v)
        return attrs

    def getTag(self, line, requiredTags=None):
        """Parse an opening tag; return (name, selfClosed, attrsDict).

        Errors out when *line* is not a tag or the tag name is not one of
        *requiredTags* (when given).
        """
        mTag = self.re_tag.match(line)
        if ( not mTag
             or ( requiredTags is not None
                  and mTag.group(1) not in requiredTags
                )
           ):
            if requiredTags is not None:
                self.error("Expected tag %s" % ",".join(requiredTags))
            else:
                self.error("Expected a tag")
        tagName = mTag.group(1)
        tagAttrs = mTag.group(2)
        if tagAttrs and tagAttrs[-1] == "/":
            # Self-closing form: "<tag .../>".
            tagAttrs = tagAttrs[:-1]
            tagClose = True
        else:
            tagClose = False
        return (tagName, tagClose, self.getAttrs(tagAttrs))

    def getTagClose(self, line, tag=None):
        """Parse a closing tag, optionally requiring its name to be *tag*."""
        mTag = self.re_tag_close.match(line)
        if not mTag or (tag is not None and mTag.group(1) != tag):
            if tag is not None:
                self.error("Expected closing tag '%s'" % tag)
            else:
                self.error("Expected a closing tag")
        return (
            mTag.group(1),
        )

    def currentLine(self):
        """Return the current line, stripped of comments, or None at EOF.

        A comment may span several physical lines; following lines are
        consumed until the matching terminator is found.
        """
        # NOTE(review): this bounds check looks off by one -- when
        # self.lineno == len(self.lines) the index below raises IndexError;
        # ">=" was probably intended. Confirm before changing.
        if self.lineno > len(self.lines):
            return None
        line = self.lines[self.lineno].strip()
        startlineno = self.lineno
        mComment = self.re_comment.search(line)
        while mComment:
            # Comments may use extra dashes ("<!---- ---->"); the closing
            # marker must repeat the same run of dashes (group(1)).
            start = '<!--%s' % mComment.group(1)
            end = '%s-->' % mComment.group(1)
            left = line.find(start) + len(start)
            right = line.find(end, left)
            if right >= left:
                # NOTE(review): len(end) is added to `right` here and again
                # in the slice below, so the splice appears to skip len(end)
                # extra characters after the comment. Verify intent.
                right += len(end)
                line = line[:left - len(start)] + line[right + len(end):]
                mComment = self.re_comment.search(line)
            elif self.lineno == len(self.lines):
                self.error(
                    "Unterminated comment starting at line %s" % startlineno,
                )
            else:
                # The comment continues on the next physical line.
                self.lineno += 1
                line = line[:left] + self.lines[self.lineno].strip()
        return line

    def nextLine(self):
        """Return the current comment-stripped line and advance past it."""
        # NOTE(review): same suspected ">=" off-by-one as currentLine().
        if self.lineno > len(self.lines):
            return None
        line = self.currentLine()
        self.lineno += 1
        return line

    def isTagClose(self, tagName):
        """Return True when the current line is the closing tag *tagName*."""
        line = self.currentLine()
        mTag = self.re_tag_close.match(line)
        if mTag and mTag.group(1) == tagName:
            return True
        return False

    def nextTag(self, requiredTags):
        """Consume the next tag and dispatch it to its tag_<name> handler."""
        line = self.nextLine()
        tag = self.getTag(line, requiredTags)
        if self.isTagClose(tag[0]):
            # "<tag>" immediately followed by "</tag>" on the next line:
            # consume the closing line and mark the tag as closed.
            line = self.nextLine()
            tag = (tag[0], True) + tag[2:]
        tagFunc = "tag_%s" % tag[0]
        if hasattr(self, tagFunc):
            return getattr(self, tagFunc)(tag)
        self.error("Unknown tag '%s'" % tag[0])

    def parse(self):
        """Parse the whole document; return (properties, components)."""
        line = self.currentLine()
        mXML = self.re_xml.match(line)
        if mXML:
            # Optional "<?xml ...?>" prologue; remember its attributes.
            xmlAttrs = mXML.group(1)
            self.xmlAttrs = self.getAttrs(xmlAttrs)
            line = self.nextLine()
        rootTag = None
        properties = self.nextTag(["pyjsglade", "properties", "components"])
        if properties[0] == 'pyjsglade':
            rootTag = properties[0]
            properties = self.nextTag(["properties", "components"])
        if properties[0] == 'properties':
            properties = properties[2]
            components = self.nextTag(["components"])[1]
        else:
            # No <properties> section: the first tag was <components>.
            components = properties[1]
            properties = {}
        if rootTag is not None:
            # Consume the closing </pyjsglade>.
            line = self.nextLine()
            self.getTagClose(line, rootTag)
        return properties, components

    def tag_pyjsglade(self, tag):
        """Root tag handler: nothing to do beyond returning the tag."""
        return tag

    def tag_components(self, tag):
        """Handle <components>: return (name, [componentTuple, ...]),
        ordered by each component's "index" attribute."""
        tags = []
        tagName, tagClosed, tagAttrs = tag
        if not tagClosed:
            while not self.isTagClose(tagName):
                tags.append(self.nextTag(["component"]))
            line = self.nextLine()
            self.getTagClose(line, tagName)
        # Sort children by their "index" attribute, then drop the index.
        components = []
        for tag in tags:
            components.append((tag[1]["index"], tag[1:]))
        components.sort()
        return tagName, [c[1] for c in components]

    def tag_component(self, tag):
        """Handle <component>: return (name, attrs, propsDict, children)."""
        tags = []
        tagName, tagClosed, tagAttrs = tag
        if not tagClosed:
            while not self.isTagClose(tagName):
                tags.append(self.nextTag(["properties", "components"]))
            line = self.nextLine()
            self.getTagClose(line, tagName)
        props = {}
        childs = []
        for tag in tags:
            if tag[0] == 'properties':
                # Merge repeated <properties name="..."> sections.
                name = tag[1]["name"]
                if not name in props:
                    props[name] = {}
                props[name].update(tag[2])
            elif tag[0] == 'components':
                childs += tag[1]
            else:
                # NOTE(review): assert on a non-empty string is always true,
                # so this branch silently does nothing; "raise" or
                # "assert False, ..." was probably intended.
                assert("Unknown tag found: %s" % repr(tag[0]))
        return tagName, tagAttrs, props, childs

    def tag_properties(self, tag):
        """Handle <properties>: return (name, attrs, {prop: value, ...})."""
        tags = []
        tagName, tagClosed, tagAttrs = tag
        if not tagClosed:
            while not self.isTagClose(tagName):
                tags.append(self.nextTag(["properties", "property"]))
            line = self.nextLine()
            self.getTagClose(line, tagName)
        props = {}
        for tag in tags:
            if tag[0] == "properties":
                # Nested <properties> become a sub-dict keyed by their name.
                props[tag[1]["name"]] = tag[2]
            else:
                props.update(tag[1])
        return tagName, tagAttrs, props

    def tag_property(self, tag):
        """Handle one <property name="..." value="..."/> element."""
        tags = []
        tagName, tagClosed, tagAttrs = tag
        if not tagClosed:
            line = self.nextLine()
            self.getTagClose(line, tagName)
        return tagName, {tag[2]["name"]: tag[2]["value"]}
if __name__ == '__main__':
    # Ad-hoc manual check: parse the builder file named on the command
    # line and dump its component tree. (Python 2 only: print statements.)
    import sys
    lines = open(sys.argv[1]).read()
    xmlFile = XMLFile(lines)
    tagName, components = xmlFile.parse()
    def dump(component):
        # Recursively print a component tuple and its child components.
        print "component:", component[0], component[1]
        for c in component[2]:
            dump(c)
    for component in components:
        print "Frame:", component[0], component[1]
        for c in component[2]:
            dump(c)
| apache-2.0 |
jamespacileo/django-france | tests/regressiontests/comment_tests/tests/app_api_tests.py | 55 | 2592 | from django.conf import settings
from django.contrib import comments
from django.contrib.comments.models import Comment
from django.contrib.comments.forms import CommentForm
from regressiontests.comment_tests.tests import CommentTestCase
class CommentAppAPITests(CommentTestCase):
    """Tests for the "comment app" API"""

    def testGetCommentApp(self):
        self.assertEqual(comments.get_comment_app(), comments)

    def testGetForm(self):
        self.assertEqual(comments.get_form(), CommentForm)

    def testGetFormTarget(self):
        self.assertEqual(comments.get_form_target(), "/post/")

    def testGetFlagURL(self):
        c = Comment(id=12345)
        self.assertEqual(comments.get_flag_url(c), "/flag/12345/")

    def testGetDeleteURL(self):
        # Renamed from "getGetDeleteURL": the old name did not match the
        # test runner's "test*" discovery pattern, so it never ran.
        c = Comment(id=12345)
        self.assertEqual(comments.get_delete_url(c), "/delete/12345/")

    def testGetApproveURL(self):
        # Renamed from "getGetApproveURL" for the same reason.
        c = Comment(id=12345)
        self.assertEqual(comments.get_approve_url(c), "/approve/12345/")
class CustomCommentTest(CommentTestCase):
    """Tests that COMMENTS_APP can be pointed at a custom comments app."""
    urls = 'regressiontests.comment_tests.urls'

    def setUp(self):
        # Swap in the custom comments app, remembering whether the setting
        # existed at all so tearDown can restore the exact prior state.
        self.old_comments_app = getattr(settings, 'COMMENTS_APP', None)
        settings.COMMENTS_APP = 'regressiontests.comment_tests.custom_comments'
        settings.INSTALLED_APPS = list(settings.INSTALLED_APPS) + [settings.COMMENTS_APP,]

    def tearDown(self):
        del settings.INSTALLED_APPS[-1]
        settings.COMMENTS_APP = self.old_comments_app
        if settings.COMMENTS_APP is None:
            # The attribute did not exist before setUp; remove it from the
            # wrapped settings object rather than leaving COMMENTS_APP=None.
            del settings._wrapped.COMMENTS_APP

    def testGetCommentApp(self):
        from regressiontests.comment_tests import custom_comments
        self.assertEqual(comments.get_comment_app(), custom_comments)

    def testGetModel(self):
        from regressiontests.comment_tests.custom_comments.models import CustomComment
        self.assertEqual(comments.get_model(), CustomComment)

    def testGetForm(self):
        from regressiontests.comment_tests.custom_comments.forms import CustomCommentForm
        self.assertEqual(comments.get_form(), CustomCommentForm)

    def testGetFormTarget(self):
        self.assertEqual(comments.get_form_target(), "/post/")

    def testGetFlagURL(self):
        c = Comment(id=12345)
        self.assertEqual(comments.get_flag_url(c), "/flag/12345/")

    def testGetDeleteURL(self):
        # Renamed from "getGetDeleteURL": the old name did not match the
        # test runner's "test*" discovery pattern, so it never ran.
        c = Comment(id=12345)
        self.assertEqual(comments.get_delete_url(c), "/delete/12345/")

    def testGetApproveURL(self):
        # Renamed from "getGetApproveURL" for the same reason.
        c = Comment(id=12345)
        self.assertEqual(comments.get_approve_url(c), "/approve/12345/")
| bsd-3-clause |
Just-D/chromium-1 | content/test/gpu/gpu_tests/memory_test.py | 7 | 4469 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import gpu_test_base
import memory_test_expectations
import page_sets
from telemetry.page import page_test
from telemetry.timeline import counter
from telemetry.timeline import model
from telemetry.timeline import tracing_category_filter
from telemetry.timeline import tracing_options
MEMORY_LIMIT_MB = 192
SINGLE_TAB_LIMIT_MB = 192
WIGGLE_ROOM_MB = 24
test_harness_script = r"""
var domAutomationController = {};
domAutomationController._finished = false;
domAutomationController.send = function(msg) {
// This should wait until all effects of memory management complete.
// We will need to wait until all
// 1. pending commits from the main thread to the impl thread in the
// compositor complete (for visible compositors).
// 2. allocations that the renderer's impl thread will make due to the
// compositor and WebGL are completed.
// 3. pending GpuMemoryManager::Manage() calls to manage are made.
// 4. renderers' OnMemoryAllocationChanged callbacks in response to
// manager are made.
// Each step in this sequence can cause trigger the next (as a 1-2-3-4-1
// cycle), so we will need to pump this cycle until it stabilizes.
// Pump the cycle 8 times (in principle it could take an infinite number
// of iterations to settle).
var rafCount = 0;
// Impl-side painting has changed the behavior of this test.
// Currently the background of the page shows up checkerboarded
// initially, causing the test to fail because the memory
// allocation is too low (no root layer). Temporarily increase the
// rAF count to 32 in order to make the test work reliably again.
// crbug.com/373098
// TODO(kbr): revert this change and put it back to 8 iterations.
var totalRafCount = 32;
function pumpRAF() {
if (rafCount == totalRafCount) {
domAutomationController._finished = true;
return;
}
++rafCount;
window.requestAnimationFrame(pumpRAF);
}
pumpRAF();
}
window.domAutomationController = domAutomationController;
window.addEventListener("load", function() {
useGpuMemory(%d);
}, false);
""" % MEMORY_LIMIT_MB
class _MemoryValidator(gpu_test_base.ValidatorBase):
  """Validates that reported GPU memory usage stays within bounds."""

  def ValidateAndMeasurePageInner(self, page, tab, results):
    trace_data = tab.browser.platform.tracing_controller.Stop()
    timeline = model.TimelineModel(trace_data)
    for proc in timeline.GetAllProcesses():
      if 'gpu.GpuMemoryUsage' not in proc.counters:
        continue
      usage_counter = proc.GetCounter('gpu', 'GpuMemoryUsage')
      # Convert the latest sample from bytes to megabytes.
      mb_used = usage_counter.samples[-1] / 1048576
      if mb_used + WIGGLE_ROOM_MB < SINGLE_TAB_LIMIT_MB:
        raise page_test.Failure(self._FormatException('low', mb_used))
      if mb_used - WIGGLE_ROOM_MB > MEMORY_LIMIT_MB:
        raise page_test.Failure(self._FormatException('high', mb_used))

  def CustomizeBrowserOptions(self, options):
    # Pin the available GPU memory so the harness and browser agree.
    for arg in ('--enable-logging',
                '--force-gpu-mem-available-mb=%s' % MEMORY_LIMIT_MB):
      options.AppendExtraBrowserArgs(arg)

  def WillNavigateToPage(self, page, tab):
    # FIXME: Remove webkit.console when blink.console lands in chromium and
    # the ref builds are updated. crbug.com/386847
    category_filter = tracing_category_filter.TracingCategoryFilter()
    for category in ('webkit.console', 'blink.console', 'gpu'):
      category_filter.AddIncludedCategory(category)
    trace_options = tracing_options.TracingOptions()
    trace_options.enable_chrome_trace = True
    tab.browser.platform.tracing_controller.Start(
        trace_options, category_filter, 60)

  def _FormatException(self, low_or_high, mb_used):
    return 'Memory allocation too %s (was %d MB, should be %d MB +/- %d MB)' % (
        low_or_high, mb_used, SINGLE_TAB_LIMIT_MB, WIGGLE_ROOM_MB)
class MemoryTest(gpu_test_base.TestBase):
  """Tests GPU memory limits"""
  test = _MemoryValidator

  @classmethod
  def Name(cls):
    return 'memory_test'

  def _CreateExpectations(self):
    return memory_test_expectations.MemoryTestExpectations()

  def CreateStorySet(self, options):
    stories = page_sets.MemoryTestsStorySet(self.GetExpectations())
    # Inject the harness so each page can report when memory has settled.
    for story in stories:
      story.script_to_evaluate_on_commit = test_harness_script
    return stories
| bsd-3-clause |
nisse3000/pymatgen | pymatgen/entries/entry_tools.py | 11 | 5906 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module implements functions to perform various useful operations on
entries, such as grouping entries by structure.
"""
from six.moves import filter, zip
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Feb 24, 2012"
import logging
import json
import datetime
import collections
from monty.json import MontyEncoder, MontyDecoder
from pymatgen.core.structure import Structure
from pymatgen.analysis.structure_matcher import StructureMatcher, \
SpeciesComparator
logger = logging.getLogger(__name__)
def _get_host(structure, species_to_remove):
if species_to_remove:
s = structure.copy()
s.remove_species(species_to_remove)
return s
else:
return structure
def _perform_grouping(args):
    """Worker that groups (entry, host) pairs by structural similarity.

    *args* is a single tuple (to suit multiprocessing.Pool.map). Each
    matched group is appended to *groups* as a JSON string of entries.
    """
    (entries_json, hosts_json, ltol, stol, angle_tol,
     primitive_cell, scale, comparator, groups) = args
    entries = json.loads(entries_json, cls=MontyDecoder)
    hosts = json.loads(hosts_json, cls=MontyDecoder)
    unmatched = list(zip(entries, hosts))
    while unmatched:
        ref_entry, ref_host = unmatched[0]
        logger.info(
            "Reference tid = {}, formula = {}".format(ref_entry.entry_id,
                                                      ref_host.formula)
        )
        logger.info("Reference host = {}".format(
            ref_host.composition.reduced_formula))
        matches = [unmatched[0]]
        for candidate in unmatched[1:]:
            test_entry, test_host = candidate
            logger.info("Testing tid = {}, formula = {}"
                        .format(test_entry.entry_id, test_host.formula))
            logger.info("Test host = {}".format(
                test_host.composition.reduced_formula))
            matcher = StructureMatcher(ltol=ltol, stol=stol,
                                       angle_tol=angle_tol,
                                       primitive_cell=primitive_cell,
                                       scale=scale, comparator=comparator)
            if matcher.fit(ref_host, test_host):
                logger.info("Fit found")
                matches.append(candidate)
        # Record this group and drop its members from further matching.
        groups.append(json.dumps([pair[0] for pair in matches],
                                 cls=MontyEncoder))
        unmatched = [pair for pair in unmatched if pair not in matches]
        logger.info("{} unmatched remaining".format(len(unmatched)))
def group_entries_by_structure(entries, species_to_remove=None,
                               ltol=0.2, stol=.4, angle_tol=5,
                               primitive_cell=True, scale=True,
                               comparator=SpeciesComparator(),
                               ncpus=None):
    """
    Given a sequence of ComputedStructureEntries, use structure fitter to group
    them by structural similarity.

    Args:
        entries: Sequence of ComputedStructureEntries.
        species_to_remove: Sometimes you want to compare a host framework
            (e.g., in Li-ion battery analysis). This allows you to specify
            species to remove before structural comparison.
        ltol (float): Fractional length tolerance. Default is 0.2.
        stol (float): Site tolerance in Angstrom. Default is 0.4 Angstrom.
        angle_tol (float): Angle tolerance in degrees. Default is 5 degrees.
        primitive_cell (bool): If true: input structures will be reduced to
            primitive cells prior to matching. Defaults to True.
        scale: Input structures are scaled to equivalent volume if true;
            For exact matching, set to False.
        comparator: A comparator object implementing an equals method that
            declares equivalency of sites. Default is SpeciesComparator,
            which implies rigid species mapping.
        ncpus: Number of cpus to use. Use of multiple cpus can greatly improve
            fitting speed. Default of None means serial processing.

    Returns:
        Sequence of sequence of entries by structural similarity. e.g,
        [[ entry1, entry2], [entry3, entry4, entry5]]
    """
    start = datetime.datetime.now()
    logger.info("Started at {}".format(start))
    entries_host = [(entry, _get_host(entry.structure, species_to_remove))
                    for entry in entries]
    if ncpus:
        # Pre-bucket by structure hash so each worker only compares entries
        # that could possibly match.
        symm_entries = collections.defaultdict(list)
        for entry, host in entries_host:
            symm_entries[comparator.get_structure_hash(host)].append((entry,
                                                                      host))
        import multiprocessing as mp
        logger.info("Using {} cpus".format(ncpus))
        manager = mp.Manager()
        groups = manager.list()
        p = mp.Pool(ncpus)
        try:
            # Parallel processing only supports Python primitives and not
            # objects, hence the JSON round-trip of entries/hosts.
            p.map(_perform_grouping,
                  [(json.dumps([e[0] for e in eh], cls=MontyEncoder),
                    json.dumps([e[1] for e in eh], cls=MontyEncoder),
                    ltol, stol, angle_tol, primitive_cell, scale,
                    comparator, groups)
                   for eh in symm_entries.values()])
        finally:
            # Previously the pool was never closed, leaking worker
            # processes; make sure they are reaped even on error.
            p.close()
            p.join()
    else:
        groups = []
        hosts = [host for entry, host in entries_host]
        _perform_grouping((json.dumps(entries, cls=MontyEncoder),
                           json.dumps(hosts, cls=MontyEncoder),
                           ltol, stol, angle_tol, primitive_cell, scale,
                           comparator, groups))
    entry_groups = []
    for g in groups:
        entry_groups.append(json.loads(g, cls=MontyDecoder))
    # Use the module logger consistently (was logging.info, which bypasses
    # this module's named logger).
    logger.info("Finished at {}".format(datetime.datetime.now()))
    logger.info("Took {}".format(datetime.datetime.now() - start))
    return entry_groups
| mit |
malishevg/edugraph | common/djangoapps/student/migrations/0029_remove_pearson.py | 58 | 15163 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'TestCenterUser'
db.delete_table('student_testcenteruser')
# Deleting model 'TestCenterRegistration'
db.delete_table('student_testcenterregistration')
def backwards(self, orm):
# Adding model 'TestCenterUser'
db.create_table('student_testcenteruser', (
('last_name', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
('suffix', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('confirmed_at', self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True, db_index=True)),
('salutation', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
('postal_code', self.gf('django.db.models.fields.CharField')(blank=True, max_length=16, db_index=True)),
('processed_at', self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('city', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
('first_name', self.gf('django.db.models.fields.CharField')(max_length=30, db_index=True)),
('middle_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
('phone_country_code', self.gf('django.db.models.fields.CharField')(max_length=3, db_index=True)),
('upload_status', self.gf('django.db.models.fields.CharField')(blank=True, max_length=20, db_index=True)),
('state', self.gf('django.db.models.fields.CharField')(blank=True, max_length=20, db_index=True)),
('upload_error_message', self.gf('django.db.models.fields.CharField')(max_length=512, blank=True)),
('company_name', self.gf('django.db.models.fields.CharField')(blank=True, max_length=50, db_index=True)),
('candidate_id', self.gf('django.db.models.fields.IntegerField')(null=True, db_index=True)),
('fax', self.gf('django.db.models.fields.CharField')(max_length=35, blank=True)),
('user_updated_at', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
('phone', self.gf('django.db.models.fields.CharField')(max_length=35)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['auth.User'], unique=True)),
('uploaded_at', self.gf('django.db.models.fields.DateTimeField')(blank=True, null=True, db_index=True)),
('extension', self.gf('django.db.models.fields.CharField')(blank=True, max_length=8, db_index=True)),
('fax_country_code', self.gf('django.db.models.fields.CharField')(max_length=3, blank=True)),
('country', self.gf('django.db.models.fields.CharField')(max_length=3, db_index=True)),
('client_candidate_id', self.gf('django.db.models.fields.CharField')(max_length=50, unique=True, db_index=True)),
('address_1', self.gf('django.db.models.fields.CharField')(max_length=40)),
('address_2', self.gf('django.db.models.fields.CharField')(max_length=40, blank=True)),
('address_3', self.gf('django.db.models.fields.CharField')(max_length=40, blank=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True, db_index=True)),
))
db.send_create_signal('student', ['TestCenterUser'])
# Adding model 'TestCenterRegistration'
db.create_table('student_testcenterregistration', (
('client_authorization_id', self.gf('django.db.models.fields.CharField')(max_length=20, unique=True, db_index=True)),
('uploaded_at', self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True)),
('user_updated_at', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
('authorization_id', self.gf('django.db.models.fields.IntegerField')(null=True, db_index=True)),
('upload_status', self.gf('django.db.models.fields.CharField')(blank=True, max_length=20, db_index=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True, db_index=True)),
('confirmed_at', self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True, db_index=True)),
('accommodation_request', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)),
('eligibility_appointment_date_first', self.gf('django.db.models.fields.DateField')(db_index=True)),
('exam_series_code', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
('processed_at', self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True)),
('upload_error_message', self.gf('django.db.models.fields.CharField')(max_length=512, blank=True)),
('accommodation_code', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
('course_id', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)),
('testcenter_user', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['student.TestCenterUser'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('eligibility_appointment_date_last', self.gf('django.db.models.fields.DateField')(db_index=True)),
))
db.send_create_signal('student', ['TestCenterRegistration'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollmentallowed': {
'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'student.userstanding': {
'Meta': {'object_name': 'UserStanding'},
'account_status': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'standing_last_changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'standing'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student'] | agpl-3.0 |
tgroh/beam | sdks/python/apache_beam/runners/common.py | 2 | 28619 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=True
"""Worker operations executor.
For internal use only; no backwards-compatibility guarantees.
"""
import sys
import traceback
import six
from apache_beam.internal import util
from apache_beam.pvalue import TaggedOutput
from apache_beam.transforms import DoFn
from apache_beam.transforms import core
from apache_beam.transforms.core import RestrictionProvider
from apache_beam.transforms.window import GlobalWindow
from apache_beam.transforms.window import TimestampedValue
from apache_beam.transforms.window import WindowFn
from apache_beam.utils.windowed_value import WindowedValue
class NameContext(object):
  """Carries the naming details associated with a single pipeline step."""

  def __init__(self, step_name):
    """Builds a NameContext for one step.

    Args:
      step_name: The name of the step.
    """
    self.step_name = step_name

  def __repr__(self):
    return 'NameContext(%s)' % self.__dict__

  def __hash__(self):
    return hash(self.step_name)

  def __eq__(self, other):
    return self.step_name == other.step_name

  def __ne__(self, other):
    return not (self == other)

  def metrics_name(self):
    """Name under which metrics for this step are reported."""
    return self.step_name

  def logging_name(self):
    """Name under which log entries for this step are emitted."""
    return self.step_name
# TODO(BEAM-4028): Move DataflowNameContext to Dataflow internal code.
class DataflowNameContext(NameContext):
  """Holds the name information for a step in Dataflow.

  This includes a step_name (e.g. s2), a user_name (e.g. Foo/Bar/ParDo(Fab)),
  and a system_name (e.g. s2-shuffle-read34).
  """

  def __init__(self, step_name, user_name, system_name):
    """Creates a new step NameContext.

    Args:
      step_name: The internal name of the step (e.g. s2).
      user_name: The full user-given name of the step (e.g. Foo/Bar/ParDo(Far)).
      system_name: The step name in the optimized graph (e.g. s2-1).
    """
    super(DataflowNameContext, self).__init__(step_name)
    self.user_name = user_name
    self.system_name = system_name

  def __eq__(self, other):
    # Return NotImplemented (instead of raising AttributeError on a missing
    # user_name/system_name) when compared against other types, per the
    # __eq__ contract; Python then falls back to its default comparison.
    if not isinstance(other, DataflowNameContext):
      return NotImplemented
    return (self.step_name == other.step_name and
            self.user_name == other.user_name and
            self.system_name == other.system_name)

  def __ne__(self, other):
    result = self.__eq__(other)
    if result is NotImplemented:
      return result
    return not result

  def __hash__(self):
    return hash((self.step_name, self.user_name, self.system_name))

  def __repr__(self):
    return 'DataflowNameContext(%s)' % self.__dict__

  def logging_name(self):
    """Stackdriver logging relies on user-given step names (e.g. Foo/Bar)."""
    return self.user_name
class LoggingContext(object):
  """For internal use only; no backwards-compatibility guarantees.

  No-op enter/exit hooks invoked around user-code calls; subclasses may
  override them to install per-step logging state.
  """

  def enter(self):
    """Called before user code runs; the default does nothing."""

  def exit(self):
    """Called after user code runs; the default does nothing."""
class Receiver(object):
  """For internal use only; no backwards-compatibility guarantees.

  Abstract consumer of WindowedValue objects, used as the efficient
  hand-off point for values between the sdk and worker harnesses.
  """

  def receive(self, windowed_value):
    # Subclasses must override with a concrete consumer.
    raise NotImplementedError
class MethodWrapper(object):
  """For internal use only; no backwards-compatibility guarantees.

  Represents a method that can be invoked by `DoFnInvoker`."""

  def __init__(self, obj_to_invoke, method_name):
    """
    Initiates a ``MethodWrapper``.

    Args:
      obj_to_invoke: the object that contains the method. Has to either be a
                     `DoFn` object or a `RestrictionProvider` object.
      method_name: name of the method as a string.

    Raises:
      ValueError: if ``obj_to_invoke`` is neither a ``DoFn`` nor a
        ``RestrictionProvider``.
    """
    if not isinstance(obj_to_invoke, (DoFn, RestrictionProvider)):
      # Interpolate with '%' here: passing the value as a second positional
      # argument to ValueError (logging-style) would leave the '%r'
      # placeholder unformatted in the raised message.
      raise ValueError('\'obj_to_invoke\' has to be either a \'DoFn\' or '
                       'a \'RestrictionProvider\'. Received %r instead.'
                       % obj_to_invoke)
    args, _, _, defaults = core.get_function_arguments(
        obj_to_invoke, method_name)

    # method_value: the bound method itself, ready to be called.
    # args: positional parameter names of the method.
    # defaults: default values of the trailing parameters; Beam inspects
    #   these as markers (e.g. DoFn.ElementParam) requesting extra arguments.
    self.method_value = getattr(obj_to_invoke, method_name)
    self.args = args
    self.defaults = defaults or []
class DoFnSignature(object):
  """Represents the signature of a given ``DoFn`` object.

  Signature of a ``DoFn`` provides a view of the properties of a given ``DoFn``.
  Among other things, this will give an extensible way for (1) accessing the
  structure of the ``DoFn`` including methods and method parameters
  (2) identifying features that a given ``DoFn`` support, for example, whether
  a given ``DoFn`` is a Splittable ``DoFn`` (
  https://s.apache.org/splittable-do-fn) (3) validating a ``DoFn`` based on the
  feature set offered by it.
  """

  def __init__(self, do_fn):
    # We add a property here for all methods defined by Beam DoFn features.
    assert isinstance(do_fn, core.DoFn)
    self.do_fn = do_fn

    # Wrappers around the standard DoFn lifecycle methods.
    self.process_method = MethodWrapper(do_fn, 'process')
    self.start_bundle_method = MethodWrapper(do_fn, 'start_bundle')
    self.finish_bundle_method = MethodWrapper(do_fn, 'finish_bundle')

    # Splittable-DoFn (SDF) methods: these stay None unless process()
    # declares a RestrictionProvider default argument.
    restriction_provider = self._get_restriction_provider(do_fn)
    self.initial_restriction_method = (
        MethodWrapper(restriction_provider, 'initial_restriction')
        if restriction_provider else None)
    self.restriction_coder_method = (
        MethodWrapper(restriction_provider, 'restriction_coder')
        if restriction_provider else None)
    self.create_tracker_method = (
        MethodWrapper(restriction_provider, 'create_tracker')
        if restriction_provider else None)
    self.split_method = (
        MethodWrapper(restriction_provider, 'split')
        if restriction_provider else None)

    self._validate()

  def _get_restriction_provider(self, do_fn):
    """Returns the RestrictionProvider declared as a default of process().

    Returns None in the common, non-splittable case.  Note the ``do_fn``
    argument is unused; the lookup goes through ``self.process_method``.
    """
    result = _find_param_with_default(self.process_method,
                                      default_as_type=RestrictionProvider)
    return result[1] if result else None

  def _validate(self):
    """Runs all signature checks; raises AssertionError if any fails."""
    self._validate_process()
    self._validate_bundle_method(self.start_bundle_method)
    self._validate_bundle_method(self.finish_bundle_method)

  def _validate_process(self):
    """Validate that none of the DoFnParameters are repeated in the function
    """
    for param in core.DoFn.DoFnParams:
      assert self.process_method.defaults.count(param) <= 1

  def _validate_bundle_method(self, method_wrapper):
    """Validate that none of the DoFnParameters are used in the function
    """
    # Bundle methods take no per-element context, so DoFn parameter markers
    # (window, timestamp, ...) are meaningless there.
    for param in core.DoFn.DoFnParams:
      assert param not in method_wrapper.defaults

  def is_splittable_dofn(self):
    """True iff some process() default is a RestrictionProvider instance."""
    return any([isinstance(default, RestrictionProvider) for default in
                self.process_method.defaults])
class DoFnInvoker(object):
  """An abstraction that can be used to execute DoFn methods.

  A DoFnInvoker describes a particular way for invoking methods of a DoFn
  represented by a given DoFnSignature.
  """

  def __init__(self, output_processor, signature):
    self.output_processor = output_processor
    self.signature = signature

  @staticmethod
  def create_invoker(
      signature,
      output_processor=None,
      context=None, side_inputs=None, input_args=None, input_kwargs=None,
      process_invocation=True):
    """Picks and builds the appropriate DoFnInvoker for the given arguments.

    Args:
      signature: a DoFnSignature for the DoFn being invoked.
      output_processor: an OutputProcessor that receives elements produced
                        by invoking functions of the DoFn.
      context: Context to be used when invoking the DoFn (deprecated).
      side_inputs: side inputs to be used when invoking the process method.
      input_args: positional arguments for the process method; some may be
                  placeholders (e.g. for side inputs) filled in later.
      input_kwargs: keyword arguments for the process method; some may be
                    placeholders (e.g. for side inputs) filled in later.
      process_invocation: If True, this function may return an invoker that
                          performs extra optimizations for invoking process()
                          method efficiently.
    """
    side_inputs = side_inputs or []
    defaults = signature.process_method.defaults
    # The per-window invoker is only required when something must be filled
    # in per element: side inputs, extra args/kwargs, or DoFn param defaults.
    needs_per_window = process_invocation and bool(
        side_inputs or input_args or input_kwargs or defaults)
    if needs_per_window:
      return PerWindowInvoker(
          output_processor, signature, context, side_inputs, input_args,
          input_kwargs)
    return SimpleInvoker(output_processor, signature)

  def invoke_process(self, windowed_value, restriction_tracker=None,
                     output_processor=None,
                     additional_args=None, additional_kwargs=None):
    """Invokes the DoFn.process() function.

    Args:
      windowed_value: a WindowedValue holding the element to process along
                      with the window it belongs to.
      restriction_tracker: tracker for splittable-DoFn processing, if any.
      output_processor: if provided, used instead of self.output_processor.
      additional_args: extra positional args for this invocation, usually
                       side inputs.
      additional_kwargs: extra keyword args for this invocation.
    """
    # Concrete invokers (SimpleInvoker, PerWindowInvoker) implement this.
    raise NotImplementedError

  def invoke_start_bundle(self):
    """Invokes DoFn.start_bundle() and validates it produced no output."""
    self.output_processor.start_bundle_outputs(
        self.signature.start_bundle_method.method_value())

  def invoke_finish_bundle(self):
    """Invokes DoFn.finish_bundle() and dispatches anything it produced."""
    self.output_processor.finish_bundle_outputs(
        self.signature.finish_bundle_method.method_value())

  def invoke_split(self, element, restriction):
    """Delegates to the RestrictionProvider's split()."""
    return self.signature.split_method.method_value(element, restriction)

  def invoke_initial_restriction(self, element):
    """Delegates to the RestrictionProvider's initial_restriction()."""
    return self.signature.initial_restriction_method.method_value(element)

  def invoke_restriction_coder(self):
    """Delegates to the RestrictionProvider's restriction_coder()."""
    return self.signature.restriction_coder_method.method_value()

  def invoke_create_tracker(self, restriction):
    """Delegates to the RestrictionProvider's create_tracker()."""
    return self.signature.create_tracker_method.method_value(restriction)
def _find_param_with_default(
method, default_as_value=None, default_as_type=None):
if ((default_as_value and default_as_type) or
not (default_as_value or default_as_type)):
raise ValueError(
'Exactly one of \'default_as_value\' and \'default_as_type\' should be '
'provided. Received %r and %r.', default_as_value, default_as_type)
defaults = method.defaults
default_as_value = default_as_value
default_as_type = default_as_type
ret = None
for i, value in enumerate(defaults):
if default_as_value and value == default_as_value:
ret = (method.args[len(method.args) - len(defaults) + i], value)
elif default_as_type and isinstance(value, default_as_type):
index = len(method.args) - len(defaults) + i
ret = (method.args[index], value)
return ret
class SimpleInvoker(DoFnInvoker):
  """An invoker that processes elements ignoring windowing information."""

  def __init__(self, output_processor, signature):
    super(SimpleInvoker, self).__init__(output_processor, signature)
    # Cache the bound process() method to avoid per-element lookups.
    self.process_method = signature.process_method.method_value

  def invoke_process(self, windowed_value, restriction_tracker=None,
                     output_processor=None,
                     additional_args=None, additional_kwargs=None):
    """Runs DoFn.process() on the raw element value, ignoring windows."""
    processor = output_processor or self.output_processor
    processor.process_outputs(
        windowed_value, self.process_method(windowed_value.value))
class PerWindowInvoker(DoFnInvoker):
  """An invoker that processes elements considering windowing information."""

  def __init__(self, output_processor, signature, context,
               side_inputs, input_args, input_kwargs):
    super(PerWindowInvoker, self).__init__(output_processor, signature)
    self.side_inputs = side_inputs
    self.context = context
    self.process_method = signature.process_method.method_value
    default_arg_values = signature.process_method.defaults
    # Per-window treatment is needed when any side input is windowed or when
    # process() requests the window itself via DoFn.WindowParam.
    self.has_windowed_inputs = (
        not all(si.is_globally_windowed() for si in side_inputs) or
        (core.DoFn.WindowParam in default_arg_values))
    # Try to prepare all the arguments that can just be filled in
    # without any additional work in the process function.
    # Also cache all the placeholders needed in the process function.

    # Flag to cache additional arguments on the first element if all
    # inputs are within the global window.
    self.cache_globally_windowed_args = not self.has_windowed_inputs

    input_args = input_args if input_args else []
    input_kwargs = input_kwargs if input_kwargs else {}

    arguments = signature.process_method.args
    defaults = signature.process_method.defaults

    # Create placeholder for element parameter of DoFn.process() method.
    # When process() is bound, 'self' occupies the first slot of the
    # argument list and must be excluded from the counting below.
    self_in_args = int(signature.do_fn.is_process_bounded())

    # Marker wrapper for argument slots that must be filled in per element
    # (element, window or timestamp) at invocation time.
    class ArgPlaceholder(object):
      def __init__(self, placeholder):
        self.placeholder = placeholder

    if core.DoFn.ElementParam not in default_arg_values:
      # The element is passed implicitly as the first positional argument;
      # pick as many caller-provided positional args as fit before defaults.
      args_to_pick = len(arguments) - len(default_arg_values) - 1 - self_in_args
      args_with_placeholders = (
          [ArgPlaceholder(core.DoFn.ElementParam)] + input_args[:args_to_pick])
    else:
      # The element slot is declared explicitly via DoFn.ElementParam and is
      # handled by the placeholder loop below.
      args_to_pick = len(arguments) - len(defaults) - self_in_args
      args_with_placeholders = input_args[:args_to_pick]

    # Fill the OtherPlaceholders for context, window or timestamp
    remaining_args_iter = iter(input_args[args_to_pick:])
    for a, d in zip(arguments[-len(defaults):], defaults):
      if d == core.DoFn.ElementParam:
        args_with_placeholders.append(ArgPlaceholder(d))
      elif d == core.DoFn.WindowParam:
        args_with_placeholders.append(ArgPlaceholder(d))
      elif d == core.DoFn.TimestampParam:
        args_with_placeholders.append(ArgPlaceholder(d))
      elif d == core.DoFn.SideInputParam:
        # If no more args are present then the value must be passed via kwarg
        try:
          args_with_placeholders.append(next(remaining_args_iter))
        except StopIteration:
          if a not in input_kwargs:
            raise ValueError("Value for sideinput %s not provided" % a)
      else:
        # If no more args are present then the value must be passed via kwarg
        try:
          args_with_placeholders.append(next(remaining_args_iter))
        except StopIteration:
          pass
    args_with_placeholders.extend(list(remaining_args_iter))

    # Stash the list of placeholder positions for performance
    self.placeholders = [(i, x.placeholder) for (i, x) in enumerate(
        args_with_placeholders)
                         if isinstance(x, ArgPlaceholder)]

    self.args_for_process = args_with_placeholders
    self.kwargs_for_process = input_kwargs

  def invoke_process(self, windowed_value, restriction_tracker=None,
                     output_processor=None,
                     additional_args=None, additional_kwargs=None):
    """Invokes DoFn.process(), once per window when windows matter.

    Args:
      windowed_value: the element (with timestamp and windows) to process.
      restriction_tracker: tracker for splittable-DoFn processing, if any.
      output_processor: overrides self.output_processor when provided.
      additional_args: extra positional args for this invocation.
      additional_kwargs: extra keyword args for this invocation.
    """
    if not additional_args:
      additional_args = []
    if not additional_kwargs:
      additional_kwargs = {}
    if not output_processor:
      output_processor = self.output_processor
    self.context.set_element(windowed_value)
    # Call for the process function for each window if has windowed side inputs
    # or if the process accesses the window parameter. We can just call it once
    # otherwise as none of the arguments are changing
    if restriction_tracker:
      # NOTE(review): when process() declares no RestrictionProvider default,
      # _find_param_with_default returns None and the [0] subscript below
      # raises TypeError before the ValueError guard can fire — confirm.
      restriction_tracker_param = _find_param_with_default(
          self.signature.process_method,
          default_as_type=core.RestrictionProvider)[0]
      if not restriction_tracker_param:
        raise ValueError(
            'A RestrictionTracker %r was provided but DoFn does not have a '
            'RestrictionTrackerParam defined', restriction_tracker)
      additional_kwargs[restriction_tracker_param] = restriction_tracker
    if self.has_windowed_inputs and len(windowed_value.windows) != 1:
      # Explode the element so process() runs once per window it belongs to.
      for w in windowed_value.windows:
        self._invoke_per_window(
            WindowedValue(windowed_value.value, windowed_value.timestamp, (w,)),
            additional_args, additional_kwargs, output_processor)
    else:
      self._invoke_per_window(
          windowed_value, additional_args, additional_kwargs, output_processor)

  def _invoke_per_window(
      self, windowed_value, additional_args,
      additional_kwargs, output_processor):
    """Runs DoFn.process() for a value restricted to a single window."""
    if self.has_windowed_inputs:
      # Exactly one window is expected here: invoke_process explodes
      # multi-window values before reaching this point.
      window, = windowed_value.windows
      side_inputs = [si[window] for si in self.side_inputs]
      side_inputs.extend(additional_args)
      args_for_process, kwargs_for_process = util.insert_values_in_args(
          self.args_for_process, self.kwargs_for_process,
          side_inputs)
    elif self.cache_globally_windowed_args:
      # Attempt to cache additional args if all inputs are globally
      # windowed inputs when processing the first element.
      self.cache_globally_windowed_args = False

      # Fill in sideInputs if they are globally windowed
      global_window = GlobalWindow()
      self.args_for_process, self.kwargs_for_process = (
          util.insert_values_in_args(
              self.args_for_process, self.kwargs_for_process,
              [si[global_window] for si in self.side_inputs]))
      args_for_process, kwargs_for_process = (
          self.args_for_process, self.kwargs_for_process)
    else:
      # Globally windowed arguments were already cached on a prior element.
      args_for_process, kwargs_for_process = (
          self.args_for_process, self.kwargs_for_process)

    # TODO(sourabhbajaj): Investigate why we can't use `is` instead of ==
    for i, p in self.placeholders:
      if p == core.DoFn.ElementParam:
        args_for_process[i] = windowed_value.value
      elif p == core.DoFn.WindowParam:
        # 'window' is bound in the first branch above: a WindowParam default
        # forces has_windowed_inputs to True in __init__.
        args_for_process[i] = window
      elif p == core.DoFn.TimestampParam:
        args_for_process[i] = windowed_value.timestamp

    if additional_kwargs:
      if kwargs_for_process is None:
        kwargs_for_process = additional_kwargs
      else:
        for key in additional_kwargs:
          kwargs_for_process[key] = additional_kwargs[key]

    if kwargs_for_process:
      output_processor.process_outputs(
          windowed_value,
          self.process_method(*args_for_process, **kwargs_for_process))
    else:
      output_processor.process_outputs(
          windowed_value, self.process_method(*args_for_process))
class DoFnRunner(Receiver):
  """For internal use only; no backwards-compatibility guarantees.

  A helper class for executing ParDo operations.
  """

  def __init__(self,
               fn,
               args,
               kwargs,
               side_inputs,
               windowing,
               tagged_receivers=None,
               step_name=None,
               logging_context=None,
               state=None,
               scoped_metrics_container=None):
    """Initializes a DoFnRunner.

    Args:
      fn: user DoFn to invoke
      args: positional side input arguments (static and placeholder), if any
      kwargs: keyword side input arguments (static and placeholder), if any
      side_inputs: list of sideinput.SideInputMaps for deferred side inputs
      windowing: windowing properties of the output PCollection(s)
      tagged_receivers: a dict of tag name to Receiver objects
      step_name: the name of this step
      logging_context: a LoggingContext object
      state: handle for accessing DoFn state
      scoped_metrics_container: Context switcher for metrics container
    """
    # Need to support multiple iterations.
    side_inputs = list(side_inputs)

    # Local import (presumably to avoid a circular dependency at module load
    # time — TODO confirm).
    from apache_beam.metrics.execution import ScopedMetricsContainer
    self.scoped_metrics_container = (
        scoped_metrics_container or ScopedMetricsContainer())
    self.step_name = step_name
    self.logging_context = logging_context or LoggingContext()
    self.context = DoFnContext(step_name, state=state)

    do_fn_signature = DoFnSignature(fn)

    # Optimize for the common case.
    main_receivers = tagged_receivers[None]
    output_processor = _OutputProcessor(
        windowing.windowfn, main_receivers, tagged_receivers)
    self.do_fn_invoker = DoFnInvoker.create_invoker(
        do_fn_signature, output_processor, self.context, side_inputs, args,
        kwargs)

  def receive(self, windowed_value):
    """Receiver interface: forwards the element to process()."""
    self.process(windowed_value)

  def process(self, windowed_value):
    """Processes one element inside the logging and metrics contexts."""
    try:
      self.logging_context.enter()
      self.scoped_metrics_container.enter()
      self.do_fn_invoker.invoke_process(windowed_value)
    except BaseException as exn:
      # Re-raised with the step name attached; see _reraise_augmented.
      self._reraise_augmented(exn)
    finally:
      self.scoped_metrics_container.exit()
      self.logging_context.exit()

  def _invoke_bundle_method(self, bundle_method):
    """Runs a bundle-boundary callable with the same wrapping as process()."""
    try:
      self.logging_context.enter()
      self.scoped_metrics_container.enter()
      # Bundle methods run outside any element; clear the current element.
      self.context.set_element(None)
      bundle_method()
    except BaseException as exn:
      self._reraise_augmented(exn)
    finally:
      self.scoped_metrics_container.exit()
      self.logging_context.exit()

  def start(self):
    """Invokes DoFn.start_bundle()."""
    self._invoke_bundle_method(self.do_fn_invoker.invoke_start_bundle)

  def finish(self):
    """Invokes DoFn.finish_bundle()."""
    self._invoke_bundle_method(self.do_fn_invoker.invoke_finish_bundle)

  def _reraise_augmented(self, exn):
    """Re-raises exn with the step name appended to its message, at most once.

    Exceptions already annotated (or runs without a step name) are re-raised
    untouched.
    """
    if getattr(exn, '_tagged_with_step', False) or not self.step_name:
      raise
    step_annotation = " [while running '%s']" % self.step_name
    # To emulate exception chaining (not available in Python 2).
    original_traceback = sys.exc_info()[2]
    try:
      # Attempt to construct the same kind of exception
      # with an augmented message.
      new_exn = type(exn)(exn.args[0] + step_annotation, *exn.args[1:])
      new_exn._tagged_with_step = True  # Could raise attribute error.
    except:  # pylint: disable=bare-except
      # If anything goes wrong, construct a RuntimeError whose message
      # records the original exception's type and message.
      new_exn = RuntimeError(
          traceback.format_exception_only(type(exn), exn)[-1].strip()
          + step_annotation)
      new_exn._tagged_with_step = True
    six.reraise(type(new_exn), new_exn, original_traceback)
class OutputProcessor(object):
  """Abstract dispatcher for values produced by DoFn method invocations."""

  def process_outputs(self, windowed_input_element, results):
    # Concrete processors must route each result to the right receiver.
    raise NotImplementedError
class _OutputProcessor(OutputProcessor):
  """Processes output produced by DoFn method invocations."""

  def __init__(self, window_fn, main_receivers, tagged_receivers):
    """Initializes ``_OutputProcessor``.

    Args:
      window_fn: a windowing function (WindowFn).
      main_receivers: the main receiver object, which gets every result that
        carries no output tag (at the call site this is
        ``tagged_receivers[None]``).
      tagged_receivers: a dict of tag name to Receiver objects.
    """
    self.window_fn = window_fn
    self.main_receivers = main_receivers
    self.tagged_receivers = tagged_receivers

  def process_outputs(self, windowed_input_element, results):
    """Dispatch the result of process computation to the appropriate receivers.

    A value wrapped in a TaggedOutput object will be unwrapped and
    then dispatched to the appropriate indexed output.
    """
    if results is None:
      return
    for result in results:
      tag = None
      if isinstance(result, TaggedOutput):
        tag = result.tag
        if not isinstance(tag, six.string_types):
          raise TypeError('In %s, tag %s is not a string' % (self, tag))
        result = result.value
      if isinstance(result, WindowedValue):
        # Result carries its own windowing; keep it as-is.
        windowed_value = result
        if (windowed_input_element is not None
            and len(windowed_input_element.windows) != 1):
          # NOTE(review): sequence repetition replicates the output's own
          # windows once per input window — confirm this matches the intended
          # fan-out semantics for multi-window inputs.
          windowed_value.windows *= len(windowed_input_element.windows)
      elif isinstance(result, TimestampedValue):
        # Assign windows from the result's timestamp via the window function.
        assign_context = WindowFn.AssignContext(result.timestamp, result.value)
        windowed_value = WindowedValue(
            result.value, result.timestamp,
            self.window_fn.assign(assign_context))
        if len(windowed_input_element.windows) != 1:
          windowed_value.windows *= len(windowed_input_element.windows)
      else:
        # Plain values inherit timestamp and windows from the input element.
        windowed_value = windowed_input_element.with_value(result)
      if tag is None:
        self.main_receivers.receive(windowed_value)
      else:
        self.tagged_receivers[tag].receive(windowed_value)

  def start_bundle_outputs(self, results):
    """Validate that start_bundle does not output any elements"""
    if results is None:
      return
    raise RuntimeError(
        'Start Bundle should not output any elements but got %s' % results)

  def finish_bundle_outputs(self, results):
    """Dispatch the result of finish_bundle to the appropriate receivers.

    A value wrapped in a TaggedOutput object will be unwrapped and
    then dispatched to the appropriate indexed output.
    """
    if results is None:
      return
    for result in results:
      tag = None
      if isinstance(result, TaggedOutput):
        tag = result.tag
        if not isinstance(tag, six.string_types):
          raise TypeError('In %s, tag %s is not a string' % (self, tag))
        result = result.value

      if isinstance(result, WindowedValue):
        windowed_value = result
      else:
        # finish_bundle has no current element to inherit windowing from, so
        # only explicitly windowed values are acceptable here.
        raise RuntimeError('Finish Bundle should only output WindowedValue ' +\
                           'type but got %s' % type(result))

      if tag is None:
        self.main_receivers.receive(windowed_value)
      else:
        self.tagged_receivers[tag].receive(windowed_value)
class _NoContext(WindowFn.AssignContext):
  """An uninspectable WindowFn.AssignContext."""

  # Sentinel marking "no timestamp supplied"; distinct from any real value.
  NO_VALUE = object()

  def __init__(self, value, timestamp=NO_VALUE):
    self.value = value
    self._timestamp = timestamp

  @property
  def timestamp(self):
    if self._timestamp is self.NO_VALUE:
      raise ValueError('No timestamp in this context.')
    return self._timestamp

  @property
  def existing_windows(self):
    # Deliberately unavailable in this context.
    raise ValueError('No existing_windows in this context.')
class DoFnState(object):
  """For internal use only; no backwards-compatibility guarantees.

  Keeps track of state that DoFns want, currently, user counters.
  """

  def __init__(self, counter_factory):
    self._counter_factory = counter_factory
    # Filled in by the owner before counters are requested.
    self.step_name = ''

  def counter_for(self, aggregator):
    """Looks up the counter for this aggregator, creating one if necessary."""
    return self._counter_factory.get_aggregator_counter(
        self.step_name, aggregator)
# TODO(robertwb): Replace core.DoFnContext with this.
class DoFnContext(object):
  """For internal use only; no backwards-compatibility guarantees.

  Tracks the element currently being processed, exposing its value,
  timestamp and windows.
  """

  def __init__(self, label, element=None, state=None):
    """Initializes the context.

    Args:
      label: name of the step this context belongs to.
      element: optional WindowedValue to install as the current element.
      state: optional handle for accessing DoFn state.
    """
    self.label = label
    self.state = state
    # Initialize explicitly so the properties below raise their intended
    # AttributeError messages (rather than a generic missing-attribute
    # error) when accessed before any element has been set.
    self.windowed_value = None
    if element is not None:
      self.set_element(element)

  def set_element(self, windowed_value):
    """Sets (or clears, with None) the current WindowedValue."""
    self.windowed_value = windowed_value

  @property
  def element(self):
    """Value of the current element; raises when no element is set."""
    if self.windowed_value is None:
      raise AttributeError('element not accessible in this context')
    else:
      return self.windowed_value.value

  @property
  def timestamp(self):
    """Timestamp of the current element; raises when no element is set."""
    if self.windowed_value is None:
      raise AttributeError('timestamp not accessible in this context')
    else:
      return self.windowed_value.timestamp

  @property
  def windows(self):
    """Windows of the current element; raises when no element is set."""
    if self.windowed_value is None:
      raise AttributeError('windows not accessible in this context')
    else:
      return self.windowed_value.windows
| apache-2.0 |
coderbone/SickRage-alt | lib/rebulk/test/test_toposort.py | 36 | 4042 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014 True Blade Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Original:
# - https://bitbucket.org/ericvsmith/toposort (1.4)
# Modifications:
# - port to pytest
# pylint: skip-file
import pytest
from ..toposort import toposort, toposort_flatten, CyclicDependency
class TestCase(object):
    """Behavior tests for toposort()."""

    def test_simple(self):
        graph = {2: {11}, 9: {11, 8}, 10: {11, 3}, 11: {7, 5}, 8: {7, 3}}
        expected = [{3, 5, 7}, {8, 11}, {2, 9, 10}]
        assert list(toposort(graph)) == expected

        # Self-dependencies must simply be ignored.
        graph = {2: {2, 11}, 9: {11, 8}, 10: {10, 11, 3}, 11: {7, 5},
                 8: {7, 3}}
        assert list(toposort(graph)) == expected

        assert list(toposort({1: set()})) == [{1}]
        assert list(toposort({1: {1}})) == [{1}]

    def test_no_dependencies(self):
        assert list(toposort({1: {2}, 3: {4}, 5: {6}})) == [{2, 4, 6},
                                                            {1, 3, 5}]
        assert list(toposort({1: set(), 3: set(), 5: set()})) == [{1, 3, 5}]

    def test_empty(self):
        assert list(toposort({})) == []

    def test_strings(self):
        graph = {'2': {'11'}, '9': {'11', '8'}, '10': {'11', '3'},
                 '11': {'7', '5'}, '8': {'7', '3'}}
        expected = [{'3', '5', '7'}, {'8', '11'}, {'2', '9', '10'}]
        assert list(toposort(graph)) == expected

    def test_objects(self):
        o2, o3, o5, o7, o8, o9, o10, o11 = (object() for _ in range(8))
        graph = {o2: {o11}, o9: {o11, o8}, o10: {o11, o3},
                 o11: {o7, o5}, o8: {o7, o3, o8}}
        expected = [{o3, o5, o7}, {o8, o11}, {o2, o9, o10}]
        assert list(toposort(graph)) == expected

    def test_cycle(self):
        # A direct two-element cycle.
        with pytest.raises(CyclicDependency):
            list(toposort({1: {2}, 2: {1}}))
        # An indirect cycle through three nodes.
        with pytest.raises(CyclicDependency):
            list(toposort({1: {2}, 2: {3}, 3: {1}}))

    def test_input_not_modified(self):
        data = {2: {11},
                9: {11, 8},
                10: {11, 3},
                11: {7, 5},
                8: {7, 3, 8},  # includes something self-referential
                }
        snapshot = data.copy()
        list(toposort(data))
        assert data == snapshot

    def test_input_not_modified_when_cycle_error(self):
        data = {1: {2},
                2: {1},
                3: {4},
                }
        snapshot = data.copy()
        with pytest.raises(CyclicDependency):
            list(toposort(data))
        assert data == snapshot
class TestCaseAll(object):
    """Unit tests for toposort_flatten()."""

    def test_sort_flatten(self):
        """Flattened output preserves level order, sorted or unsorted."""
        data = {
            2: {11},
            9: {11, 8},
            10: {11, 3},
            11: {7, 5},
            8: {7, 3, 8},  # includes something self-referential
        }
        levels = [{3, 5, 7}, {8, 11}, {2, 9, 10}]
        assert list(toposort(data)) == levels
        # Sorted flattening: each level is emitted in ascending order.
        flattened = [item for level in levels for item in sorted(level)]
        assert toposort_flatten(data) == flattened
        # Unsorted flattening: regroup the flat output into level-sized
        # chunks and compare as sets.
        actual = toposort_flatten(data, False)
        regrouped = [set(actual[0:3]), set(actual[3:5]), set(actual[5:8])]
        assert regrouped == levels
| gpl-3.0 |
ananthonline/grpc | src/python/grpcio/grpc/framework/crust/implementations.py | 5 | 14691 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Entry points into the Crust layer of RPC Framework."""
import six
from grpc.framework.common import cardinality
from grpc.framework.common import style
from grpc.framework.crust import _calls
from grpc.framework.crust import _service
from grpc.framework.interfaces.base import base
from grpc.framework.interfaces.face import face
class _BaseServicer(base.Servicer):
  """A base.Servicer dispatching to adapted per-method implementations."""

  def __init__(self, adapted_methods, adapted_multi_method):
    self._adapted_methods = adapted_methods
    self._adapted_multi_method = adapted_multi_method

  def service(self, group, method, context, output_operator):
    """Services an RPC; see base.Servicer.service for the contract."""
    adapted_method = self._adapted_methods.get((group, method))
    if adapted_method is not None:
      return adapted_method(output_operator, context)
    # No per-method implementation: fall back to the multi-method one,
    # translating its "unknown method" error into the base-layer error.
    if self._adapted_multi_method is None:
      raise base.NoSuchMethodError(None, None)
    try:
      return self._adapted_multi_method(
          group, method, output_operator, context)
    except face.NoSuchMethodError:
      raise base.NoSuchMethodError(None, None)
class _UnaryUnaryMultiCallable(face.UnaryUnaryMultiCallable):
  """Affords invoking one unary-request/unary-response RPC method."""

  def __init__(self, end, group, method, pool):
    self._end, self._group, self._method = end, group, method
    self._pool = pool

  def __call__(self, request, timeout, metadata=None, with_call=False,
               protocol_options=None):
    """Blocking invocation; see face.UnaryUnaryMultiCallable.__call__."""
    return _calls.blocking_unary_unary(
        self._end, self._group, self._method, timeout, with_call,
        protocol_options, metadata, request)

  def future(self, request, timeout, metadata=None, protocol_options=None):
    """Future-returning invocation."""
    return _calls.future_unary_unary(
        self._end, self._group, self._method, timeout, protocol_options,
        metadata, request)

  def event(self, request, receiver, abortion_callback, timeout,
            metadata=None, protocol_options=None):
    """Event-driven invocation delivering responses to a receiver."""
    return _calls.event_unary_unary(
        self._end, self._group, self._method, timeout, protocol_options,
        metadata, request, receiver, abortion_callback, self._pool)
class _UnaryStreamMultiCallable(face.UnaryStreamMultiCallable):
  """Affords invoking one unary-request/streaming-response RPC method."""

  def __init__(self, end, group, method, pool):
    self._end, self._group, self._method = end, group, method
    self._pool = pool

  def __call__(self, request, timeout, metadata=None, protocol_options=None):
    """Inline (iterator-returning) invocation."""
    return _calls.inline_unary_stream(
        self._end, self._group, self._method, timeout, protocol_options,
        metadata, request)

  def event(self, request, receiver, abortion_callback, timeout,
            metadata=None, protocol_options=None):
    """Event-driven invocation delivering responses to a receiver."""
    return _calls.event_unary_stream(
        self._end, self._group, self._method, timeout, protocol_options,
        metadata, request, receiver, abortion_callback, self._pool)
class _StreamUnaryMultiCallable(face.StreamUnaryMultiCallable):
  """Affords invoking one streaming-request/unary-response RPC method."""

  def __init__(self, end, group, method, pool):
    self._end, self._group, self._method = end, group, method
    self._pool = pool

  def __call__(self, request_iterator, timeout, metadata=None,
               with_call=False, protocol_options=None):
    """Blocking invocation consuming a request iterator."""
    return _calls.blocking_stream_unary(
        self._end, self._group, self._method, timeout, with_call,
        protocol_options, metadata, request_iterator, self._pool)

  def future(self, request_iterator, timeout, metadata=None,
             protocol_options=None):
    """Future-returning invocation consuming a request iterator."""
    return _calls.future_stream_unary(
        self._end, self._group, self._method, timeout, protocol_options,
        metadata, request_iterator, self._pool)

  def event(self, receiver, abortion_callback, timeout, metadata=None,
            protocol_options=None):
    """Event-driven invocation; requests are supplied through the result."""
    return _calls.event_stream_unary(
        self._end, self._group, self._method, timeout, protocol_options,
        metadata, receiver, abortion_callback, self._pool)
class _StreamStreamMultiCallable(face.StreamStreamMultiCallable):
  """Affords invoking one streaming-request/streaming-response RPC method."""

  def __init__(self, end, group, method, pool):
    self._end, self._group, self._method = end, group, method
    self._pool = pool

  def __call__(self, request_iterator, timeout, metadata=None,
               protocol_options=None):
    """Inline (iterator-returning) invocation consuming an iterator."""
    return _calls.inline_stream_stream(
        self._end, self._group, self._method, timeout, protocol_options,
        metadata, request_iterator, self._pool)

  def event(self, receiver, abortion_callback, timeout, metadata=None,
            protocol_options=None):
    """Event-driven invocation; requests are supplied through the result."""
    return _calls.event_stream_stream(
        self._end, self._group, self._method, timeout, protocol_options,
        metadata, receiver, abortion_callback, self._pool)
class _GenericStub(face.GenericStub):
  """A face.GenericStub implementation.

  Every method is a thin pass-through to the corresponding _calls helper;
  note that _calls takes (timeout, [with_call,] protocol_options, metadata,
  payload) in that order, which differs from the face-level signatures.
  """

  def __init__(self, end, pool):
    self._end = end
    self._pool = pool

  def blocking_unary_unary(
      self, group, method, request, timeout, metadata=None,
      with_call=None, protocol_options=None):
    """See face.GenericStub.blocking_unary_unary for specification."""
    return _calls.blocking_unary_unary(
        self._end, group, method, timeout, with_call, protocol_options,
        metadata, request)

  def future_unary_unary(
      self, group, method, request, timeout, metadata=None,
      protocol_options=None):
    """See face.GenericStub.future_unary_unary for specification."""
    return _calls.future_unary_unary(
        self._end, group, method, timeout, protocol_options, metadata, request)

  def inline_unary_stream(
      self, group, method, request, timeout, metadata=None,
      protocol_options=None):
    """See face.GenericStub.inline_unary_stream for specification."""
    return _calls.inline_unary_stream(
        self._end, group, method, timeout, protocol_options, metadata, request)

  def blocking_stream_unary(
      self, group, method, request_iterator, timeout, metadata=None,
      with_call=None, protocol_options=None):
    """See face.GenericStub.blocking_stream_unary for specification."""
    return _calls.blocking_stream_unary(
        self._end, group, method, timeout, with_call, protocol_options,
        metadata, request_iterator, self._pool)

  def future_stream_unary(
      self, group, method, request_iterator, timeout, metadata=None,
      protocol_options=None):
    """See face.GenericStub.future_stream_unary for specification."""
    return _calls.future_stream_unary(
        self._end, group, method, timeout, protocol_options, metadata,
        request_iterator, self._pool)

  def inline_stream_stream(
      self, group, method, request_iterator, timeout, metadata=None,
      protocol_options=None):
    """See face.GenericStub.inline_stream_stream for specification."""
    return _calls.inline_stream_stream(
        self._end, group, method, timeout, protocol_options, metadata,
        request_iterator, self._pool)

  def event_unary_unary(
      self, group, method, request, receiver, abortion_callback, timeout,
      metadata=None, protocol_options=None):
    """See face.GenericStub.event_unary_unary for specification."""
    return _calls.event_unary_unary(
        self._end, group, method, timeout, protocol_options, metadata, request,
        receiver, abortion_callback, self._pool)

  def event_unary_stream(
      self, group, method, request, receiver, abortion_callback, timeout,
      metadata=None, protocol_options=None):
    """See face.GenericStub.event_unary_stream for specification."""
    return _calls.event_unary_stream(
        self._end, group, method, timeout, protocol_options, metadata, request,
        receiver, abortion_callback, self._pool)

  def event_stream_unary(
      self, group, method, receiver, abortion_callback, timeout,
      metadata=None, protocol_options=None):
    """See face.GenericStub.event_stream_unary for specification."""
    return _calls.event_stream_unary(
        self._end, group, method, timeout, protocol_options, metadata, receiver,
        abortion_callback, self._pool)

  def event_stream_stream(
      self, group, method, receiver, abortion_callback, timeout,
      metadata=None, protocol_options=None):
    """See face.GenericStub.event_stream_stream for specification."""
    return _calls.event_stream_stream(
        self._end, group, method, timeout, protocol_options, metadata, receiver,
        abortion_callback, self._pool)

  def unary_unary(self, group, method):
    """Returns a multi-callable bound to the named unary-unary method."""
    return _UnaryUnaryMultiCallable(self._end, group, method, self._pool)

  def unary_stream(self, group, method):
    """Returns a multi-callable bound to the named unary-stream method."""
    return _UnaryStreamMultiCallable(self._end, group, method, self._pool)

  def stream_unary(self, group, method):
    """Returns a multi-callable bound to the named stream-unary method."""
    return _StreamUnaryMultiCallable(self._end, group, method, self._pool)

  def stream_stream(self, group, method):
    """Returns a multi-callable bound to the named stream-stream method."""
    return _StreamStreamMultiCallable(self._end, group, method, self._pool)
class _DynamicStub(face.DynamicStub):
  """A face.DynamicStub implementation."""

  def __init__(self, end, group, cardinalities, pool):
    self._end = end
    self._group = group
    self._cardinalities = cardinalities
    self._pool = pool

  def __getattr__(self, attr):
    """Resolves an attribute name to a multi-callable for that method."""
    # Map the method's cardinality onto the multi-callable class that
    # affords invoking it.
    multi_callable_classes = {
        cardinality.Cardinality.UNARY_UNARY: _UnaryUnaryMultiCallable,
        cardinality.Cardinality.UNARY_STREAM: _UnaryStreamMultiCallable,
        cardinality.Cardinality.STREAM_UNARY: _StreamUnaryMultiCallable,
        cardinality.Cardinality.STREAM_STREAM: _StreamStreamMultiCallable,
    }
    method_cardinality = self._cardinalities.get(attr)
    multi_callable_class = multi_callable_classes.get(method_cardinality)
    if multi_callable_class is None:
      raise AttributeError('_DynamicStub object has no attribute "%s"!' % attr)
    return multi_callable_class(self._end, self._group, attr, self._pool)
def _adapt_method_implementations(method_implementations, pool):
  """Adapts face-level method implementations for crust-layer servicing.

  Implementations whose (style, cardinality) pair is not recognized are
  silently skipped, matching the original if/elif cascade.
  """
  # (service style, cardinality) -> (adapter, implementation attribute).
  adapters = {
      (style.Service.INLINE, cardinality.Cardinality.UNARY_UNARY): (
          _service.adapt_inline_unary_unary, 'unary_unary_inline'),
      (style.Service.INLINE, cardinality.Cardinality.UNARY_STREAM): (
          _service.adapt_inline_unary_stream, 'unary_stream_inline'),
      (style.Service.INLINE, cardinality.Cardinality.STREAM_UNARY): (
          _service.adapt_inline_stream_unary, 'stream_unary_inline'),
      (style.Service.INLINE, cardinality.Cardinality.STREAM_STREAM): (
          _service.adapt_inline_stream_stream, 'stream_stream_inline'),
      (style.Service.EVENT, cardinality.Cardinality.UNARY_UNARY): (
          _service.adapt_event_unary_unary, 'unary_unary_event'),
      (style.Service.EVENT, cardinality.Cardinality.UNARY_STREAM): (
          _service.adapt_event_unary_stream, 'unary_stream_event'),
      (style.Service.EVENT, cardinality.Cardinality.STREAM_UNARY): (
          _service.adapt_event_stream_unary, 'stream_unary_event'),
      (style.Service.EVENT, cardinality.Cardinality.STREAM_STREAM): (
          _service.adapt_event_stream_stream, 'stream_stream_event'),
  }
  adapted_implementations = {}
  for name, method_implementation in six.iteritems(method_implementations):
    key = (method_implementation.style, method_implementation.cardinality)
    if key in adapters:
      adapt, attribute = adapters[key]
      adapted_implementations[name] = adapt(
          getattr(method_implementation, attribute), pool)
  return adapted_implementations
def servicer(method_implementations, multi_method_implementation, pool):
  """Creates a base.Servicer.

  Any passed face.MultiMethodImplementation is only consulted for RPC
  methods that have no entry in method_implementations.

  Args:
    method_implementations: A dictionary from RPC method name to
      face.MethodImplementation object to be used to service the named
      RPC method.
    multi_method_implementation: An face.MultiMethodImplementation to be
      used to service any RPCs not serviced by the
      face.MethodImplementations given in the method_implementations
      dictionary, or None.
    pool: A thread pool.

  Returns:
    A base.Servicer that services RPCs via the given implementations.
  """
  adapted_implementations = _adapt_method_implementations(
      method_implementations, pool)
  adapted_multi_method_implementation = (
      None if multi_method_implementation is None
      else _service.adapt_multi_method(multi_method_implementation, pool))
  return _BaseServicer(
      adapted_implementations, adapted_multi_method_implementation)
def generic_stub(end, pool):
  """Creates an face.GenericStub.

  Args:
    end: A base.End.
    pool: A futures.ThreadPoolExecutor.

  Returns:
    A face.GenericStub that performs RPCs via the given base.End.
  """
  stub = _GenericStub(end, pool)
  return stub
def dynamic_stub(end, group, cardinalities, pool):
  """Creates an face.DynamicStub.

  Args:
    end: A base.End.
    group: The group identifier for all RPCs to be made with the created
      face.DynamicStub.
    cardinalities: A dict from method identifier to cardinality.Cardinality
      value identifying the cardinality of every RPC method to be supported
      by the created face.DynamicStub.
    pool: A futures.ThreadPoolExecutor.

  Returns:
    A face.DynamicStub that performs RPCs via the given base.End.
  """
  stub = _DynamicStub(end, group, cardinalities, pool)
  return stub
| bsd-3-clause |
mavenlin/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/nn_test.py | 158 | 2854 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import nn
from tensorflow.contrib.labeled_tensor.python.ops import test_util
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
class NNTests(test_util.Base):
  """Checks that labeled-tensor nn wrappers match their tf.nn counterparts."""

  def setUp(self):
    super(NNTests, self).setUp()
    self.axes = ['x']
    self.original_lt = core.LabeledTensor([0.0, 0.5, 1.0], self.axes)
    self.other_lt = 1 - self.original_lt

  def _check_op(self, op_name, golden_tensor, actual_lt):
    # The wrapped op must name its output after the op and produce the
    # same values as the raw tf op with the original axes reattached.
    golden_lt = core.LabeledTensor(golden_tensor, self.axes)
    self.assertIn(op_name, actual_lt.name)
    self.assertLabeledTensorsEqual(golden_lt, actual_lt)

  def test_unary_ops(self):
    ops = [
        ('relu', nn_ops.relu, nn.relu),
        ('relu6', nn_ops.relu6, nn.relu6),
        ('crelu', nn_ops.crelu, nn.crelu),
        ('elu', nn_ops.elu, nn.elu),
        ('softplus', nn_ops.softplus, nn.softplus),
        ('l2_loss', nn_ops.l2_loss, nn.l2_loss),
        ('softmax', nn_ops.softmax, nn.softmax),
        ('log_softmax', nn_ops.log_softmax, nn.log_softmax),
    ]
    for op_name, tf_op, lt_op in ops:
      self._check_op(op_name, tf_op(self.original_lt.tensor),
                     lt_op(self.original_lt))

  def test_binary_ops(self):
    ops = [
        ('sigmoid_cross_entropy_with_logits',
         nn_impl.sigmoid_cross_entropy_with_logits,
         nn.sigmoid_cross_entropy_with_logits),
        ('softmax_cross_entropy_with_logits',
         nn_ops.softmax_cross_entropy_with_logits,
         nn.softmax_cross_entropy_with_logits),
        ('sparse_softmax_cross_entropy_with_logits',
         nn_ops.sparse_softmax_cross_entropy_with_logits,
         nn.sparse_softmax_cross_entropy_with_logits),
    ]
    for op_name, tf_op, lt_op in ops:
      self._check_op(
          op_name, tf_op(self.original_lt.tensor, self.other_lt.tensor),
          lt_op(self.original_lt, self.other_lt))
| apache-2.0 |
mahak/neutron | neutron/tests/unit/db/test_securitygroups_db.py | 2 | 33677 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from unittest import mock
from neutron_lib.callbacks import events
from neutron_lib.callbacks import exceptions
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import context
from neutron_lib.objects import exceptions as obj_exc
import sqlalchemy
import testtools
from neutron.db import securitygroups_db
from neutron.extensions import securitygroup
from neutron import quota
from neutron.services.revisions import revision_plugin
from neutron.tests.unit import testlib_api
# Minimal security-group request body used as input to create/update calls.
# NOTE: several tests mutate this dict in place (e.g. adding 'stateful').
FAKE_SECGROUP = {
    'security_group': {
        "tenant_id": 'fake',
        'description': 'fake',
        'name': 'fake'
    }
}

# Minimal TCP ingress rule body; tests fill in 'security_group_id' with the
# id of a freshly created group before use.
FAKE_SECGROUP_RULE = {
    'security_group_rule': {
        "tenant_id": 'fake',
        'description': 'fake',
        'name': 'fake',
        'port_range_min': '21',
        'protocol': 'tcp',
        'port_range_max': '23',
        'remote_ip_prefix': '10.0.0.1',
        'ethertype': 'IPv4',
        'remote_group_id': None,
        'remote_address_group_id': None,
        'security_group_id': 'None',
        'direction': 'ingress'
    }
}

# Core plugin loaded by setup_coreplugin() for these tests.
DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
def fake_callback(resource, event, *args, **kwargs):
    """Registry callback that always fails, raising KeyError('bar')."""
    raise KeyError('bar')
class SecurityGroupDbMixinImpl(securitygroups_db.SecurityGroupDbMixin):
    # Trivial concrete subclass so the mixin can be instantiated in tests.
    pass
class SecurityGroupDbMixinTestCase(testlib_api.SqlTestCase):
    def setUp(self):
        """Set up an admin context, the mixin, and quota/extension mocks."""
        super(SecurityGroupDbMixinTestCase, self).setUp()
        self.setup_coreplugin(core_plugin=DB_PLUGIN_KLASS)
        self.ctx = context.get_admin_context()
        self.mixin = SecurityGroupDbMixinImpl()
        # Quota reservations are irrelevant to these tests; stub them out.
        make_res = mock.patch.object(quota.QuotaEngine, 'make_reservation')
        self.mock_quota_make_res = make_res.start()
        commit_res = mock.patch.object(quota.QuotaEngine, 'commit_reservation')
        self.mock_quota_commit_res = commit_res.start()
        # Report every API extension as supported.
        is_ext_supported = mock.patch(
            'neutron_lib.api.extensions.is_extension_supported')
        self.is_ext_supported = is_ext_supported.start()
        self.is_ext_supported.return_value = True
    def test_create_security_group_conflict(self):
        """A failing callback during create surfaces SecurityGroupConflict."""
        with mock.patch.object(registry, "publish") as mock_publish:
            mock_publish.side_effect = exceptions.CallbackFailure(Exception())
            secgroup = {'security_group': mock.ANY}
            with testtools.ExpectedException(
                    securitygroup.SecurityGroupConflict):
                self.mixin.create_security_group(self.ctx, secgroup)
    def test_delete_security_group_in_use(self):
        """A failing callback during delete surfaces SecurityGroupInUse."""
        with mock.patch.object(self.mixin,
                               '_get_port_security_group_bindings'),\
                mock.patch.object(self.mixin, '_get_security_group'),\
                mock.patch.object(registry, "notify") as mock_notify:
            mock_notify.side_effect = exceptions.CallbackFailure(Exception())
            with testtools.ExpectedException(securitygroup.SecurityGroupInUse):
                self.mixin.delete_security_group(self.ctx, mock.ANY)
    def test_update_security_group_statefulness_binded_conflict(self):
        """Toggling statefulness with a failing callback raises InUse."""
        # NOTE: mutates the module-level FAKE_SECGROUP in place.
        FAKE_SECGROUP['security_group']['stateful'] = mock.ANY
        sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP)
        # Flip statefulness relative to what was created.
        FAKE_SECGROUP['security_group']['stateful'] = not sg_dict['stateful']
        with mock.patch.object(self.mixin,
                               '_get_port_security_group_bindings'), \
                mock.patch.object(registry, "notify") as mock_notify:
            mock_notify.side_effect = exceptions.CallbackFailure(Exception())
            with testtools.ExpectedException(securitygroup.SecurityGroupInUse):
                self.mixin.update_security_group(self.ctx, sg_dict['id'],
                                                 FAKE_SECGROUP)
    def test_update_security_group_conflict(self):
        """A failing callback during update surfaces SecurityGroupConflict."""
        with mock.patch.object(registry, "publish") as mock_publish:
            mock_publish.side_effect = exceptions.CallbackFailure(Exception())
            secgroup = {'security_group': FAKE_SECGROUP}
            with testtools.ExpectedException(
                    securitygroup.SecurityGroupConflict):
                self.mixin.update_security_group(self.ctx, 'foo_id', secgroup)
    def test_create_security_group_rule_conflict(self):
        """A failing callback during rule create raises SecurityGroupConflict."""
        with mock.patch.object(self.mixin, '_validate_security_group_rule'),\
                mock.patch.object(self.mixin,
                                  '_check_for_duplicate_rules'),\
                mock.patch.object(registry, "publish") as mock_publish:
            mock_publish.side_effect = exceptions.CallbackFailure(Exception())
            with testtools.ExpectedException(
                    securitygroup.SecurityGroupConflict):
                self.mixin.create_security_group_rule(
                    self.ctx, FAKE_SECGROUP_RULE)
    def test__check_for_duplicate_rules_does_not_drop_protocol(self):
        """Duplicate checking must not strip a None 'protocol' key."""
        with mock.patch.object(self.mixin, 'get_security_group',
                               return_value=None):
            context = mock.Mock()
            rule_dict = {
                'security_group_rule': {'protocol': None,
                                        'tenant_id': 'fake',
                                        'security_group_id': 'fake',
                                        'direction': 'fake'}
            }
            self.mixin._check_for_duplicate_rules(context, 'fake', [rule_dict])
            # The key must survive the duplicate comparison.
            self.assertIn('protocol', rule_dict['security_group_rule'])
def test__check_for_duplicate_rules_ignores_rule_id(self):
rules = [{'security_group_rule': {'protocol': 'tcp', 'id': 'fake1'}},
{'security_group_rule': {'protocol': 'tcp', 'id': 'fake2'}}]
# NOTE(arosen): the name of this exception is a little misleading
# in this case as this test, tests that the id fields are dropped
# while being compared. This is in the case if a plugin specifies
# the rule ids themselves.
with mock.patch.object(self.mixin, 'get_security_group',
return_value=None):
self.assertRaises(securitygroup.DuplicateSecurityGroupRuleInPost,
self.mixin._check_for_duplicate_rules,
context, 'fake', rules)
    def test_check_for_duplicate_diff_rules_remote_ip_prefix_ipv4(self):
        """A 0.0.0.0/0 prefix duplicates an existing any-address IPv4 rule."""
        fake_secgroup = copy.deepcopy(FAKE_SECGROUP)
        # Existing rule with remote_ip_prefix unset (matches any address).
        fake_secgroup['security_group_rules'] = \
            [{'id': 'fake', 'tenant_id': 'fake', 'ethertype': 'IPv4',
              'direction': 'ingress', 'security_group_id': 'fake',
              'remote_ip_prefix': None}]
        with mock.patch.object(self.mixin, 'get_security_group',
                               return_value=fake_secgroup):
            context = mock.Mock()
            rule_dict = {
                'security_group_rule': {'id': 'fake2',
                                        'tenant_id': 'fake',
                                        'security_group_id': 'fake',
                                        'ethertype': 'IPv4',
                                        'direction': 'ingress',
                                        'remote_ip_prefix': '0.0.0.0/0'}
            }
            self.assertRaises(securitygroup.SecurityGroupRuleExists,
                              self.mixin._check_for_duplicate_rules,
                              context, 'fake', [rule_dict])
    def test_check_for_duplicate_diff_rules_remote_ip_prefix_ipv6(self):
        """A ::/0 prefix duplicates an existing any-address IPv6 rule."""
        fake_secgroup = copy.deepcopy(FAKE_SECGROUP)
        # Existing rule with remote_ip_prefix unset (matches any address).
        fake_secgroup['security_group_rules'] = \
            [{'id': 'fake', 'tenant_id': 'fake', 'ethertype': 'IPv6',
              'direction': 'ingress', 'security_group_id': 'fake',
              'remote_ip_prefix': None}]
        with mock.patch.object(self.mixin, 'get_security_group',
                               return_value=fake_secgroup):
            context = mock.Mock()
            rule_dict = {
                'security_group_rule': {'id': 'fake2',
                                        'tenant_id': 'fake',
                                        'security_group_id': 'fake',
                                        'ethertype': 'IPv6',
                                        'direction': 'ingress',
                                        'remote_ip_prefix': '::/0'}
            }
            self.assertRaises(securitygroup.SecurityGroupRuleExists,
                              self.mixin._check_for_duplicate_rules,
                              context, 'fake', [rule_dict])
    def test_delete_security_group_rule_in_use(self):
        """A failing callback during rule delete raises SecurityGroupRuleInUse."""
        with mock.patch.object(registry, "publish") as mock_publish:
            mock_publish.side_effect = exceptions.CallbackFailure(Exception())
            with testtools.ExpectedException(
                    securitygroup.SecurityGroupRuleInUse):
                self.mixin.delete_security_group_rule(self.ctx, mock.ANY)
def test_delete_security_group_rule_raise_error_on_not_found(self):
with testtools.ExpectedException(
securitygroup.SecurityGroupRuleNotFound):
self.mixin.delete_security_group_rule(self.ctx, 'foo_rule')
def test_validate_ethertype_and_protocol(self):
fake_ipv4_rules = [{'protocol': constants.PROTO_NAME_IPV6_ICMP,
'ethertype': constants.IPv4},
{'protocol': constants.PROTO_NAME_IPV6_ICMP_LEGACY,
'ethertype': constants.IPv4},
{'protocol': constants.PROTO_NAME_IPV6_ENCAP,
'ethertype': constants.IPv4},
{'protocol': constants.PROTO_NAME_IPV6_ROUTE,
'ethertype': constants.IPv4},
{'protocol': constants.PROTO_NAME_IPV6_FRAG,
'ethertype': constants.IPv4},
{'protocol': constants.PROTO_NAME_IPV6_NONXT,
'ethertype': constants.IPv4},
{'protocol': constants.PROTO_NAME_IPV6_OPTS,
'ethertype': constants.IPv4},
{'protocol': str(constants.PROTO_NUM_IPV6_ICMP),
'ethertype': constants.IPv4},
{'protocol': str(constants.PROTO_NUM_IPV6_ENCAP),
'ethertype': constants.IPv4},
{'protocol': str(constants.PROTO_NUM_IPV6_ROUTE),
'ethertype': constants.IPv4},
{'protocol': str(constants.PROTO_NUM_IPV6_FRAG),
'ethertype': constants.IPv4},
{'protocol': str(constants.PROTO_NUM_IPV6_NONXT),
'ethertype': constants.IPv4},
{'protocol': str(constants.PROTO_NUM_IPV6_OPTS),
'ethertype': constants.IPv4}]
# test wrong protocols
for rule in fake_ipv4_rules:
with testtools.ExpectedException(
securitygroup.SecurityGroupEthertypeConflictWithProtocol):
self.mixin._validate_ethertype_and_protocol(rule)
    def test_security_group_precommit_create_event_fail(self):
        """A raising PRECOMMIT_CREATE subscriber rolls back the transaction."""
        registry.subscribe(fake_callback, resources.SECURITY_GROUP,
                           events.PRECOMMIT_CREATE)
        with mock.patch.object(sqlalchemy.orm.session.SessionTransaction,
                               'rollback') as mock_rollback:
            self.assertRaises(securitygroup.SecurityGroupConflict,
                              self.mixin.create_security_group,
                              self.ctx, FAKE_SECGROUP)
            self.assertTrue(mock_rollback.called)
    def test_security_group_precommit_update_event_fail(self):
        """A raising PRECOMMIT_UPDATE subscriber rolls back the transaction."""
        registry.subscribe(fake_callback, resources.SECURITY_GROUP,
                           events.PRECOMMIT_UPDATE)
        sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP)
        with mock.patch.object(sqlalchemy.orm.session.SessionTransaction,
                               'rollback') as mock_rollback:
            self.assertRaises(securitygroup.SecurityGroupConflict,
                              self.mixin.update_security_group,
                              self.ctx, sg_dict['id'], FAKE_SECGROUP)
            self.assertTrue(mock_rollback.called)
    def test_security_group_precommit_delete_event_fail(self):
        """A raising PRECOMMIT_DELETE subscriber rolls back the transaction."""
        registry.subscribe(fake_callback, resources.SECURITY_GROUP,
                           events.PRECOMMIT_DELETE)
        sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP)
        with mock.patch.object(sqlalchemy.orm.session.SessionTransaction,
                               'rollback') as mock_rollback:
            self.assertRaises(securitygroup.SecurityGroupInUse,
                              self.mixin.delete_security_group,
                              self.ctx, sg_dict['id'])
            self.assertTrue(mock_rollback.called)
    def _test_security_group_precommit_create_event(self,
                                                    with_revisions=False):
        """Verify the registry.publish() sequence when creating a group.

        Creating the first security group for a project also creates the
        project's default group, so two full create event sequences are
        expected: the default group's, then the requested group's.
        """
        # Desired state seen by before_create for the default group.
        DEFAULT_SECGROUP = {
            'tenant_id': FAKE_SECGROUP['security_group']['tenant_id'],
            'name': 'default',
            'description': 'Default security group',
        }
        # Full dict expected in the default group's precommit/after payloads.
        DEFAULT_SECGROUP_DICT = {
            'id': mock.ANY,
            'tenant_id': FAKE_SECGROUP['security_group']['tenant_id'],
            'project_id': FAKE_SECGROUP['security_group']['tenant_id'],
            'name': 'default',
            'description': 'Default security group',
            'stateful': mock.ANY,
            'standard_attr_id': mock.ANY,
            'security_group_rules': [
                # Four rules for egress/ingress and ipv4/ipv6
                mock.ANY, mock.ANY, mock.ANY, mock.ANY,
            ],
        }
        if with_revisions:
            DEFAULT_SECGROUP_DICT.update({
                'revision_number': mock.ANY,
            })
        with mock.patch.object(registry, 'publish') as publish:
            sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP)
            publish.assert_has_calls([
                mock.call(resources.SECURITY_GROUP, 'before_create', mock.ANY,
                          payload=mock.ANY),
                mock.call(resources.SECURITY_GROUP, 'before_create', mock.ANY,
                          payload=mock.ANY),
                mock.call(resources.SECURITY_GROUP, 'precommit_create',
                          mock.ANY, payload=mock.ANY),
                mock.call(resources.SECURITY_GROUP, 'after_create', mock.ANY,
                          payload=mock.ANY),
                mock.call(resources.SECURITY_GROUP, 'precommit_create',
                          mock.ANY, payload=mock.ANY),
                mock.call(resources.SECURITY_GROUP, 'after_create', mock.ANY,
                          payload=mock.ANY)])
            # mock_calls entries are (name, args, kwargs); index [2] picks
            # the kwargs, where the payload is passed.
            # Call 0: before_create for the requested group.
            payload = publish.mock_calls[0][2]['payload']
            self.assertDictEqual(payload.desired_state,
                                 FAKE_SECGROUP['security_group'])
            # Call 1: before_create for the project's default group.
            payload = publish.mock_calls[1][2]['payload']
            self.assertDictEqual(payload.desired_state,
                                 DEFAULT_SECGROUP)
            # Calls 2-3: precommit/after for the default group.
            payload = publish.mock_calls[2][2]['payload']
            self.assertDictEqual(payload.latest_state,
                                 DEFAULT_SECGROUP_DICT)
            self.assertTrue(payload.metadata['is_default'])
            payload = publish.mock_calls[3][2]['payload']
            self.assertDictEqual(payload.latest_state,
                                 DEFAULT_SECGROUP_DICT)
            self.assertTrue(payload.metadata['is_default'])
            # Calls 4-5: precommit/after for the requested group.
            payload = publish.mock_calls[4][2]['payload']
            self.assertDictEqual(payload.latest_state, sg_dict)
            self.assertFalse(payload.metadata['is_default'])
            payload = publish.mock_calls[5][2]['payload']
            self.assertDictEqual(payload.latest_state, sg_dict)
            self.assertFalse(payload.metadata['is_default'])
            # Ensure that the result of create is same as get.
            # Especially we want to check the revision number here.
            sg_dict_got = self.mixin.get_security_group(
                self.ctx, sg_dict['id'])
            self.assertEqual(sg_dict, sg_dict_got)
def test_security_group_precommit_create_event_with_revisions(self):
revision = revision_plugin.RevisionPlugin()
self._test_security_group_precommit_create_event(True)
del revision # appease pep8
def test_security_group_precommit_create_event(self):
self._test_security_group_precommit_create_event()
    def test_security_group_precommit_update_event(self):
        """Updating a group publishes PRECOMMIT_UPDATE with both states."""
        # NOTE: mutates the module-level FAKE_SECGROUP in place.
        FAKE_SECGROUP['security_group']['stateful'] = mock.ANY
        original_sg_dict = self.mixin.create_security_group(self.ctx,
                                                            FAKE_SECGROUP)
        sg_id = original_sg_dict['id']
        with mock.patch.object(self.mixin,
                               '_get_port_security_group_bindings'), \
                mock.patch.object(registry, "publish") as mock_publish:
            fake_secgroup = copy.deepcopy(FAKE_SECGROUP)
            fake_secgroup['security_group']['name'] = 'updated_fake'
            fake_secgroup['security_group']['stateful'] = mock.ANY
            sg_dict = self.mixin.update_security_group(
                self.ctx, sg_id, fake_secgroup)
            mock_publish.assert_has_calls(
                [mock.call(resources.SECURITY_GROUP, events.PRECOMMIT_UPDATE,
                           mock.ANY, payload=mock.ANY)])
            # The payload carries the pre-update state, the id, and the
            # post-update state.
            payload = mock_publish.call_args[1]['payload']
            self.assertEqual(original_sg_dict, payload.states[0])
            self.assertEqual(sg_id, payload.resource_id)
            self.assertEqual(sg_dict, payload.latest_state)
    def test_security_group_precommit_and_after_delete_event(self):
        """Deleting a group publishes precommit_delete then after_delete."""
        sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP)
        with mock.patch.object(registry, "publish") as mock_publish:
            self.mixin.delete_security_group(self.ctx, sg_dict['id'])
            sg_dict['security_group_rules'] = mock.ANY
            mock_publish.assert_has_calls(
                [mock.call('security_group', 'precommit_delete',
                           mock.ANY, payload=mock.ANY),
                 mock.call('security_group', 'after_delete',
                           mock.ANY,
                           payload=mock.ANY)])
            # mock_calls entries are (name, args, kwargs); index [2] picks
            # the kwargs. Calls 1 and 2 are precommit_delete/after_delete.
            payload = mock_publish.mock_calls[1][2]['payload']
            self.assertEqual(mock.ANY, payload.context)
            self.assertEqual(sg_dict, payload.latest_state)
            self.assertEqual(sg_dict['id'], payload.resource_id)
            self.assertEqual([mock.ANY, mock.ANY],
                             payload.metadata.get('security_group_rule_ids'))
            payload = mock_publish.mock_calls[2][2]['payload']
            self.assertEqual(mock.ANY, payload.context)
            self.assertEqual(sg_dict, payload.latest_state)
            self.assertEqual(sg_dict['id'], payload.resource_id)
            self.assertEqual([mock.ANY, mock.ANY],
                             payload.metadata.get('security_group_rule_ids'))
def test_security_group_rule_precommit_create_event_fail(self):
    """A failing PRECOMMIT_CREATE callback aborts rule creation.

    Subscribes fake_callback (presumably raising — defined elsewhere in
    this file) and verifies the create raises SecurityGroupConflict and
    rolls the DB transaction back.
    """
    registry.subscribe(fake_callback, resources.SECURITY_GROUP_RULE,
                       events.PRECOMMIT_CREATE)
    sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP)
    # NOTE(review): mutates the shared module-level fixture in place
    fake_rule = FAKE_SECGROUP_RULE
    fake_rule['security_group_rule']['security_group_id'] = sg_dict['id']
    with mock.patch.object(sqlalchemy.orm.session.SessionTransaction,
                           'rollback') as mock_rollback,\
            mock.patch.object(self.mixin, '_get_security_group'):
        self.assertRaises(securitygroup.SecurityGroupConflict,
                          self.mixin.create_security_group_rule,
                          self.ctx, fake_rule)
        self.assertTrue(mock_rollback.called)
def test_security_group_rule_precommit_delete_event_fail(self):
    """A failing PRECOMMIT_DELETE callback aborts rule deletion.

    Subscribes fake_callback (presumably raising — defined elsewhere in
    this file) and verifies the delete raises SecurityGroupRuleInUse and
    rolls the DB transaction back.
    """
    registry.subscribe(fake_callback, resources.SECURITY_GROUP_RULE,
                       events.PRECOMMIT_DELETE)
    sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP)
    # NOTE(review): mutates the shared module-level fixture in place
    fake_rule = FAKE_SECGROUP_RULE
    fake_rule['security_group_rule']['security_group_id'] = sg_dict['id']
    with mock.patch.object(sqlalchemy.orm.session.SessionTransaction,
                           'rollback') as mock_rollback,\
            mock.patch.object(self.mixin, '_get_security_group'):
        sg_rule_dict = self.mixin.create_security_group_rule(self.ctx,
                                                             fake_rule)
        self.assertRaises(securitygroup.SecurityGroupRuleInUse,
                          self.mixin.delete_security_group_rule, self.ctx,
                          sg_rule_dict['id'])
        self.assertTrue(mock_rollback.called)
def test_security_group_rule_precommit_create_event(self):
    """Creating a rule publishes PRECOMMIT_CREATE with the new rule."""
    sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP)
    # NOTE(review): mutates the shared module-level fixture in place
    fake_rule = FAKE_SECGROUP_RULE
    fake_rule['security_group_rule']['security_group_id'] = sg_dict['id']
    with mock.patch.object(registry, "publish") as mock_publish, \
            mock.patch.object(self.mixin, '_get_security_group'):
        sg_rule = self.mixin.create_security_group_rule(self.ctx,
                                                        fake_rule)
        mock_publish.assert_has_calls([mock.call(
            'security_group_rule',
            'precommit_create', mock.ANY, payload=mock.ANY)])
        # mock_calls[1]: the precommit_create publication
        payload = mock_publish.mock_calls[1][2]['payload']
        self.assertEqual(sg_rule['id'], payload.resource_id)
        self.assertEqual(sg_rule, payload.latest_state)
def test_sg_rule_before_precommit_and_after_delete_event(self):
    """Deleting a rule publishes BEFORE/PRECOMMIT/AFTER_DELETE in order.

    Each payload must carry the rule's id; the before/precommit payloads
    also carry the rule dict as latest_state.
    """
    sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP)
    # NOTE(review): mutates the shared module-level fixture in place
    fake_rule = FAKE_SECGROUP_RULE
    fake_rule['security_group_rule']['security_group_id'] = sg_dict['id']
    with mock.patch.object(registry, "publish") as mock_publish, \
            mock.patch.object(self.mixin, '_get_security_group'):
        sg_rule_dict = self.mixin.create_security_group_rule(self.ctx,
                                                             fake_rule)
        self.mixin.delete_security_group_rule(self.ctx,
                                              sg_rule_dict['id'])
        mock_publish.assert_has_calls([mock.call('security_group_rule',
                                                 'before_delete',
                                                 mock.ANY,
                                                 payload=mock.ANY)])
        mock_publish.assert_has_calls([mock.call('security_group_rule',
                                                 'precommit_delete',
                                                 mock.ANY,
                                                 payload=mock.ANY)])
        mock_publish.assert_has_calls([mock.call('security_group_rule',
                                                 'after_delete',
                                                 mock.ANY,
                                                 payload=mock.ANY)])
        # mock_calls[1]: before_delete publication
        payload = mock_publish.mock_calls[1][2]['payload']
        self.assertEqual(mock.ANY, payload.context)
        self.assertEqual(sg_rule_dict, payload.latest_state)
        self.assertEqual(sg_rule_dict['id'], payload.resource_id)
        # mock_calls[2]: precommit_delete publication
        payload = mock_publish.mock_calls[2][2]['payload']
        self.assertEqual(mock.ANY, payload.context)
        self.assertEqual(sg_rule_dict, payload.latest_state)
        self.assertEqual(sg_rule_dict['id'], payload.resource_id)
        # mock_calls[3]: after_delete publication
        payload = mock_publish.mock_calls[3][2]['payload']
        self.assertEqual(mock.ANY, payload.context)
        self.assertEqual(sg_rule_dict['id'], payload.resource_id)
def test_get_ip_proto_name_and_num(self):
    """_get_ip_proto_name_and_num maps protocols to [name, number] pairs."""
    cases = [
        (constants.PROTO_NAME_UDP,
         [constants.PROTO_NAME_UDP, str(constants.PROTO_NUM_UDP)]),
        (str(constants.PROTO_NUM_TCP),
         [constants.PROTO_NAME_TCP, str(constants.PROTO_NUM_TCP)]),
        # unknown protocols are echoed back unchanged
        ('blah', ['blah', 'blah']),
        ('111', ['111', '111']),
    ]
    for protocol, expected in cases:
        self.assertEqual(expected,
                         self.mixin._get_ip_proto_name_and_num(protocol))
def test__validate_port_range_for_icmp_exception(self):
    """Invalid ICMP type/code values raise for every ICMP protocol name."""
    bad_ranges = [(1, 256, securitygroup.SecurityGroupInvalidIcmpValue),
                  (None, 6, securitygroup.SecurityGroupMissingIcmpType),
                  (300, 1, securitygroup.SecurityGroupInvalidIcmpValue)]
    icmp_names = (constants.PROTO_NAME_ICMP,
                  constants.PROTO_NAME_IPV6_ICMP,
                  constants.PROTO_NAME_IPV6_ICMP_LEGACY)
    for protocol in icmp_names:
        for pmin, pmax, exc in bad_ranges:
            rule = {'port_range_min': pmin,
                    'port_range_max': pmax,
                    'protocol': protocol}
            self.assertRaises(exc, self.mixin._validate_port_range, rule)
def test__validate_port_range_exception(self):
    """Each malformed port-range rule raises its specific exception."""
    cases = [
        (securitygroup.SecurityGroupInvalidPortValue,
         {'port_range_min': 0, 'port_range_max': None,
          'protocol': constants.PROTO_NAME_TCP}),
        (securitygroup.SecurityGroupInvalidPortRange,
         {'port_range_min': 1, 'port_range_max': None,
          'protocol': constants.PROTO_NAME_SCTP}),
        (securitygroup.SecurityGroupInvalidPortRange,
         {'port_range_min': 1000, 'port_range_max': 1,
          'protocol': constants.PROTO_NAME_UDPLITE}),
        (securitygroup.SecurityGroupInvalidProtocolForPort,
         {'port_range_min': 100, 'port_range_max': 200,
          'protocol': '111'}),
        (securitygroup.SecurityGroupInvalidProtocolForPort,
         {'port_range_min': 100, 'port_range_max': None,
          'protocol': constants.PROTO_NAME_VRRP}),
        (securitygroup.SecurityGroupInvalidProtocolForPort,
         {'port_range_min': None, 'port_range_max': 200,
          'protocol': constants.PROTO_NAME_VRRP}),
    ]
    for exc, rule in cases:
        self.assertRaises(exc, self.mixin._validate_port_range, rule)
def _create_environment(self):
    """Set up user/admin contexts and a user-owned SG for sharing tests."""
    self.sg = copy.deepcopy(FAKE_SECGROUP)
    self.user_ctx = context.Context(user_id='user1', tenant_id='tenant_1',
                                    is_admin=False, overwrite=False)
    self.admin_ctx = context.Context(user_id='user2', tenant_id='tenant_2',
                                     is_admin=True, overwrite=False)
    self.sg_user = self.mixin.create_security_group(
        self.user_ctx, {'security_group': {'name': 'name',
                                           'tenant_id': 'tenant_1',
                                           'description': 'fake'}})
def test_get_security_group_rules(self):
    """Rules an admin adds to a user's SG are visible to the user."""
    self._create_environment()
    rules_before = self.mixin.get_security_group_rules(self.user_ctx)
    rule = copy.deepcopy(FAKE_SECGROUP_RULE)
    rule['security_group_rule']['security_group_id'] = self.sg_user['id']
    rule['security_group_rule']['tenant_id'] = 'tenant_2'
    self.mixin.create_security_group_rule(self.admin_ctx, rule)
    rules_after = self.mixin.get_security_group_rules(self.user_ctx)
    self.assertEqual(len(rules_before) + 1, len(rules_after))
    # the only new rule must be the admin-owned one
    for rule in (rule for rule in rules_after if rule not in rules_before):
        self.assertEqual('tenant_2', rule['tenant_id'])
def test_get_security_group_rules_filters_passed(self):
    """security_group_id filter hides rules added to a different SG."""
    self._create_environment()
    filters = {'security_group_id': self.sg_user['id']}
    rules_before = self.mixin.get_security_group_rules(self.user_ctx,
                                                       filters=filters)
    default_sg = self.mixin.get_security_groups(
        self.user_ctx, filters={'name': 'default'})[0]
    # add a rule to the *default* SG; it must not appear in the
    # filtered listing of sg_user
    rule = copy.deepcopy(FAKE_SECGROUP_RULE)
    rule['security_group_rule']['security_group_id'] = default_sg['id']
    rule['security_group_rule']['tenant_id'] = 'tenant_1'
    self.mixin.create_security_group_rule(self.user_ctx, rule)
    rules_after = self.mixin.get_security_group_rules(self.user_ctx,
                                                      filters=filters)
    self.assertEqual(rules_before, rules_after)
def test_get_security_group_rules_admin_context(self):
    """An admin context sees rules created by a plain user."""
    self._create_environment()
    rules_before = self.mixin.get_security_group_rules(self.ctx)
    rule = copy.deepcopy(FAKE_SECGROUP_RULE)
    rule['security_group_rule']['security_group_id'] = self.sg_user['id']
    rule['security_group_rule']['tenant_id'] = 'tenant_1'
    self.mixin.create_security_group_rule(self.user_ctx, rule)
    rules_after = self.mixin.get_security_group_rules(self.ctx)
    self.assertEqual(len(rules_before) + 1, len(rules_after))
    # the only new rule must be the user's, on the user's SG
    for rule in (rule for rule in rules_after if rule not in rules_before):
        self.assertEqual('tenant_1', rule['tenant_id'])
        self.assertEqual(self.sg_user['id'], rule['security_group_id'])
def test__ensure_default_security_group(self):
    """When no default SG exists, one is created for the tenant."""
    with mock.patch.object(
            self.mixin, '_get_default_sg_id') as get_default_sg_id,\
            mock.patch.object(
                self.mixin, 'create_security_group') as create_sg:
        get_default_sg_id.return_value = None
        self.mixin._ensure_default_security_group(self.ctx, 'tenant_1')
        create_sg.assert_called_once_with(
            self.ctx,
            {'security_group': {
                'name': 'default',
                'tenant_id': 'tenant_1',
                'description': securitygroups_db.DEFAULT_SG_DESCRIPTION}},
            default_sg=True)
        get_default_sg_id.assert_called_once_with(self.ctx, 'tenant_1')
def test__ensure_default_security_group_already_exists(self):
    """No new SG is created when the default SG already exists."""
    with mock.patch.object(
            self.mixin, '_get_default_sg_id') as get_default_sg_id,\
            mock.patch.object(
                self.mixin, 'create_security_group') as create_sg:
        get_default_sg_id.return_value = 'default_sg_id'
        self.mixin._ensure_default_security_group(self.ctx, 'tenant_1')
        create_sg.assert_not_called()
        get_default_sg_id.assert_called_once_with(self.ctx, 'tenant_1')
def test__ensure_default_security_group_created_in_parallel(self):
    """A duplicate-entry race during creation is tolerated and retried.

    create_security_group raises NeutronDbObjectDuplicateEntry (another
    worker won the race); _ensure_default_security_group must swallow it
    and re-read the default SG id.
    """
    with mock.patch.object(
            self.mixin, '_get_default_sg_id') as get_default_sg_id,\
            mock.patch.object(
                self.mixin, 'create_security_group') as create_sg:
        # first lookup misses, second (after the race) finds the SG
        get_default_sg_id.side_effect = [None, 'default_sg_id']
        create_sg.side_effect = obj_exc.NeutronDbObjectDuplicateEntry(
            mock.Mock(), mock.Mock())
        self.mixin._ensure_default_security_group(self.ctx, 'tenant_1')
        create_sg.assert_called_once_with(
            self.ctx,
            {'security_group': {
                'name': 'default',
                'tenant_id': 'tenant_1',
                'description': securitygroups_db.DEFAULT_SG_DESCRIPTION}},
            default_sg=True)
        get_default_sg_id.assert_has_calls([
            mock.call(self.ctx, 'tenant_1'),
            mock.call(self.ctx, 'tenant_1')])
def test__ensure_default_security_group_when_disabled(self):
    """Nothing happens when the security-group extension is disabled."""
    with mock.patch.object(
            self.mixin, '_get_default_sg_id') as get_default_sg_id,\
            mock.patch.object(
                self.mixin, 'create_security_group') as create_sg:
        self.is_ext_supported.return_value = False
        self.mixin._ensure_default_security_group(self.ctx, 'tenant_1')
        create_sg.assert_not_called()
        get_default_sg_id.assert_not_called()
"""Convert Wavefront OBJ / MTL files into Three.js (JSON model version, to be used with web worker based ascii / binary loader)
-------------------------
How to use this converter
-------------------------
python convert_obj_three.py -i infile.obj -o outfile.js [-m "morphfiles*.obj"] [-c "morphcolors*.obj"] [-a center|centerxz|top|bottom|none] [-s smooth|flat] [-t ascii|binary] [-d invert|normal] [-b] [-e]
Notes:
- flags
-i infile.obj input OBJ file
-o outfile.js output JS file
-m "morphfiles*.obj" morph OBJ files (can use wildcards, enclosed in quotes multiple patterns separate by space)
-c "morphcolors*.obj" morph colors OBJ files (can use wildcards, enclosed in quotes multiple patterns separate by space)
-a center|centerxz|top|bottom|none model alignment
-s smooth|flat smooth = export vertex normals, flat = no normals (face normals computed in loader)
-t ascii|binary export ascii or binary format (ascii has more features, binary just supports vertices, faces, normals, uvs and materials)
-d invert|normal invert transparency
-b bake material colors into face colors
-e export edges
-x 10.0 scale and truncate
- by default:
use smooth shading (if there were vertex normals in the original model)
will be in ASCII format
original model is assumed to use non-inverted transparency / dissolve (0.0 fully transparent, 1.0 fully opaque)
no face colors baking
no edges export
- binary conversion will create two files:
outfile.js (materials)
outfile.bin (binary buffers)
--------------------------------------------------
How to use generated JS file in your HTML document
--------------------------------------------------
<script type="text/javascript" src="Three.js"></script>
...
<script type="text/javascript">
...
// load ascii model
var jsonLoader = new THREE.JSONLoader();
jsonLoader.load( { model: "Model_ascii.js", callback: function( geometry ) { createScene( geometry) } } );
// load binary model
var binLoader = new THREE.BinaryLoader();
binLoader.load( { model: "Model_bin.js", callback: function( geometry ) { createScene( geometry) } } );
function createScene( geometry ) {
var mesh = new THREE.Mesh( geometry, new THREE.MeshFaceMaterial() );
}
...
</script>
-------------------------------------
Parsers based on formats descriptions
-------------------------------------
http://en.wikipedia.org/wiki/Obj
http://en.wikipedia.org/wiki/Material_Template_Library
-------------------
Current limitations
-------------------
- for the moment, only diffuse color and texture are used
(will need to extend shaders / renderers / materials in Three)
- texture coordinates can be wrong in canvas renderer
(there is crude normalization, but it doesn't
work for all cases)
- smoothing can be turned on/off only for the whole mesh
----------------------------------------------
How to get proper OBJ + MTL files with Blender
----------------------------------------------
0. Remove default cube (press DEL and ENTER)
1. Import / create model
2. Select all meshes (Select -> Select All by Type -> Mesh)
3. Export to OBJ (File -> Export -> Wavefront .obj) [*]
- enable following options in exporter
Material Groups
Rotate X90
Apply Modifiers
High Quality Normals
Copy Images
Selection Only
Objects as OBJ Objects
UVs
Normals
Materials
Edges
- select empty folder
- give your exported file name with "obj" extension
- click on "Export OBJ" button
4. Your model is now all files in this folder (OBJ, MTL, number of images)
- this converter assumes all files staying in the same folder,
(OBJ / MTL files use relative paths)
- for WebGL, textures must be power of 2 sized
[*] If OBJ export fails (Blender 2.54 beta), patch your Blender installation
following instructions here:
http://www.blendernation.com/2010/09/12/blender-2-54-beta-released/
------
Author
------
AlteredQualia http://alteredqualia.com
"""
import fileinput
import operator
import random
import os.path
import getopt
import sys
import struct
import math
import glob
# #####################################################
# Configuration
# #####################################################
# Defaults below are overridden by the command-line flags documented in
# the module docstring.
ALIGN = "none"           # center centerxz bottom top none
SHADING = "smooth"       # smooth flat
TYPE = "ascii"           # ascii binary
TRANSPARENCY = "normal"  # normal invert

TRUNCATE = False         # truncate vertex coords to ints (used with SCALE)
SCALE = 1.0              # multiplier applied to vertices before truncation

BAKE_COLORS = False      # bake material colors into face colors
EXPORT_EDGES = False     # export unique edge list

# default colors for debugging (each material gets one distinct color):
# white, red, green, blue, yellow, cyan, magenta
COLORS = [0xeeeeee, 0xee0000, 0x00ee00, 0x0000ee, 0xeeee00, 0x00eeee, 0xee00ee]
# #####################################################
# Templates
# #####################################################
TEMPLATE_FILE_ASCII = u"""\
// Converted from: %(fname)s
// vertices: %(nvertex)d
// faces: %(nface)d
// normals: %(nnormal)d
// colors: %(ncolor)d
// uvs: %(nuv)d
// materials: %(nmaterial)d
// edges: %(nedge)d
//
// Generated with OBJ -> Three.js converter
// http://github.com/alteredq/three.js/blob/master/utils/exporters/convert_obj_three.py
var model = {
"version" : 2,
"scale" : %(scale)f,
"materials": [%(materials)s],
"vertices": [%(vertices)s],
"morphTargets": [%(morphTargets)s],
"morphColors": [%(morphColors)s],
"normals": [%(normals)s],
"colors": [%(colors)s],
"uvs": [[%(uvs)s]],
"faces": [%(faces)s],
"edges" : [%(edges)s]
};
postMessage( model );
close();
"""
TEMPLATE_FILE_BIN = u"""\
// Converted from: %(fname)s
// vertices: %(nvertex)d
// faces: %(nface)d
// materials: %(nmaterial)d
//
// Generated with OBJ -> Three.js converter
// http://github.com/alteredq/three.js/blob/master/utils/exporters/convert_obj_three.py
var model = {
"version" : 1,
"materials": [%(materials)s],
"buffers": "%(buffers)s"
};
postMessage( model );
close();
"""
# printf-style templates for the individual JSON fragments
TEMPLATE_VERTEX = "%f,%f,%f"           # full-precision vertex
TEMPLATE_VERTEX_TRUNCATE = "%d,%d,%d"  # scaled, integer-truncated vertex
TEMPLATE_N = "%.5g,%.5g,%.5g"          # normal vector
TEMPLATE_UV = "%.5g,%.5g"              # texture coordinate
TEMPLATE_COLOR = "%.3g,%.3g,%.3g"      # color as float triplet
TEMPLATE_COLOR_DEC = "%d"              # color as single decimal integer
TEMPLATE_EDGE = "%d,%d"                # edge as vertex index pair

TEMPLATE_MORPH_VERTICES = '\t{ "name": "%s", "vertices": [%s] }'
TEMPLATE_MORPH_COLORS = '\t{ "name": "%s", "colors": [%s] }'
# #####################################################
# Utils
# #####################################################
def file_exists(filename):
    """Return True if the file exists and is readable.

    Safer than a bare existence test because of links and permission
    quirks on Unix filesystems.
    """
    try:
        with open(filename, 'r'):
            pass
        return True
    except IOError:
        return False
def get_name(fname):
    """Create model name based on filename ("path/fname.js" -> "fname")."""
    base = os.path.basename(fname)
    name, _ext = os.path.splitext(base)
    return name
def bbox(vertices):
    """Compute the axis-aligned bounding box of a vertex array.

    Returns {'x': [min, max], 'y': [min, max], 'z': [min, max]};
    all zeros for an empty input.
    """
    if not vertices:
        return {'x': [0, 0], 'y': [0, 0], 'z': [0, 0]}
    xs = [v[0] for v in vertices]
    ys = [v[1] for v in vertices]
    zs = [v[2] for v in vertices]
    return {'x': [min(xs), max(xs)],
            'y': [min(ys), max(ys)],
            'z': [min(zs), max(zs)]}
def translate(vertices, t):
    """Translate an array of [x, y, z] vertices in place by vector t.

    Fix: the original indexed with `xrange`, which is Python-2-only and
    raises NameError under Python 3; iterating the rows directly is
    equivalent and version-independent.
    """
    for v in vertices:
        v[0] += t[0]
        v[1] += t[1]
        v[2] += t[2]
def center(vertices):
    """Center the model in place (midpoint of its bounding box -> origin)."""
    bb = bbox(vertices)
    mid = [bb[axis][0] + (bb[axis][1] - bb[axis][0]) / 2.0
           for axis in ('x', 'y', 'z')]
    translate(vertices, [-mid[0], -mid[1], -mid[2]])
def top(vertices):
    """Align the model's top with the floor (Y) and center it in X/Z."""
    bb = bbox(vertices)
    cx = bb['x'][0] + (bb['x'][1] - bb['x'][0]) / 2.0
    cz = bb['z'][0] + (bb['z'][1] - bb['z'][0]) / 2.0
    cy = bb['y'][1]  # highest point goes to y == 0
    translate(vertices, [-cx, -cy, -cz])
def bottom(vertices):
    """Align the model's bottom with the floor (Y) and center it in X/Z."""
    bb = bbox(vertices)
    cx = bb['x'][0] + (bb['x'][1] - bb['x'][0]) / 2.0
    cz = bb['z'][0] + (bb['z'][1] - bb['z'][0]) / 2.0
    cy = bb['y'][0]  # lowest point goes to y == 0
    translate(vertices, [-cx, -cy, -cz])
def centerxz(vertices):
    """Center the model around X and Z only; Y is left untouched."""
    bb = bbox(vertices)
    cx = bb['x'][0] + (bb['x'][1] - bb['x'][0]) / 2.0
    cz = bb['z'][0] + (bb['z'][1] - bb['z'][0]) / 2.0
    translate(vertices, [-cx, 0, -cz])
def normalize(v):
    """Normalize a 3d vector in place; zero-length vectors are left alone."""
    length = math.sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2])
    if not length:
        return
    v[0] /= length
    v[1] /= length
    v[2] /= length
def veckey3(v):
    """Quantize a 3d vector to 6 decimals, yielding a hashable dict key."""
    return tuple(round(component, 6) for component in v[:3])
# #####################################################
# MTL parser
# #####################################################
def texture_relative_path(fullpath):
    """Strip directories from a texture path, keeping only the file name."""
    return os.path.basename(fullpath)
def parse_mtl(fname):
    """Parse an MTL material library file.

    :param fname: path to the .mtl file
    :returns: dict mapping material name -> dict of properties
        (colorDiffuse, colorAmbient, colorSpecular, specularCoef,
        transparency, opticalDensity, map* texture names, illumination)

    Fix: property lines appearing before any ``newmtl`` used to raise
    NameError (``identifier`` unbound); such malformed lines are now
    skipped.
    """
    materials = {}
    identifier = None  # name of the material currently being populated

    for line in fileinput.input(fname):
        chunks = line.split()
        if len(chunks) > 0:

            # Material start
            # newmtl identifier
            if chunks[0] == "newmtl" and len(chunks) == 2:
                identifier = chunks[1]
                if not identifier in materials:
                    materials[identifier] = {}

            # Property line before any newmtl: malformed MTL, skip it
            if identifier is None:
                continue

            # Diffuse color
            # Kd 1.000 1.000 1.000
            if chunks[0] == "Kd" and len(chunks) == 4:
                materials[identifier]["colorDiffuse"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]

            # Ambient color
            # Ka 1.000 1.000 1.000
            if chunks[0] == "Ka" and len(chunks) == 4:
                materials[identifier]["colorAmbient"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]

            # Specular color
            # Ks 1.000 1.000 1.000
            if chunks[0] == "Ks" and len(chunks) == 4:
                materials[identifier]["colorSpecular"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]

            # Specular coefficient
            # Ns 154.000
            if chunks[0] == "Ns" and len(chunks) == 2:
                materials[identifier]["specularCoef"] = float(chunks[1])

            # Transparency
            # Tr 0.9 or d 0.9
            if (chunks[0] == "Tr" or chunks[0] == "d") and len(chunks) == 2:
                # global TRANSPARENCY flag selects inverted dissolve
                if TRANSPARENCY == "invert":
                    materials[identifier]["transparency"] = 1.0 - float(chunks[1])
                else:
                    materials[identifier]["transparency"] = float(chunks[1])

            # Optical density
            # Ni 1.0
            if chunks[0] == "Ni" and len(chunks) == 2:
                materials[identifier]["opticalDensity"] = float(chunks[1])

            # Diffuse texture
            # map_Kd texture_diffuse.jpg
            if chunks[0] == "map_Kd" and len(chunks) == 2:
                materials[identifier]["mapDiffuse"] = texture_relative_path(chunks[1])

            # Ambient texture
            # map_Ka texture_ambient.jpg
            if chunks[0] == "map_Ka" and len(chunks) == 2:
                materials[identifier]["mapAmbient"] = texture_relative_path(chunks[1])

            # Specular texture
            # map_Ks texture_specular.jpg
            if chunks[0] == "map_Ks" and len(chunks) == 2:
                materials[identifier]["mapSpecular"] = texture_relative_path(chunks[1])

            # Alpha texture
            # map_d texture_alpha.png
            if chunks[0] == "map_d" and len(chunks) == 2:
                materials[identifier]["mapAlpha"] = texture_relative_path(chunks[1])

            # Bump texture
            # map_bump texture_bump.jpg or bump texture_bump.jpg
            if (chunks[0] == "map_bump" or chunks[0] == "bump") and len(chunks) == 2:
                materials[identifier]["mapBump"] = texture_relative_path(chunks[1])

            # Illumination mode (0-10, see MTL spec)
            # illum 2
            if chunks[0] == "illum" and len(chunks) == 2:
                materials[identifier]["illumination"] = int(chunks[1])

    return materials
# #####################################################
# OBJ parser
# #####################################################
def parse_vertex(text):
    """Parse one face-vertex chunk from an OBJ "f" record.

    Accepted formats: "v", "v/t", "v/t/n", "v//n". Missing texture or
    normal indices come back as 0.
    """
    chunks = text.split("/")
    v = int(chunks[0])
    t = int(chunks[1]) if len(chunks) > 1 and chunks[1] else 0
    n = int(chunks[2]) if len(chunks) > 2 and chunks[2] else 0
    return {'v': v, 't': t, 'n': n}
def parse_obj(fname):
    """Parse an OBJ geometry file.

    :param fname: path to the .obj file
    :returns: tuple (faces, vertices, uvs, normals, materials, mtllib):
        faces      list of dicts with 1-based vertex/uv/normal index lists
                   plus the material/group/object/smooth state in effect,
        vertices   list of [x, y, z] floats,
        uvs        list of [u, v, w] floats (w defaults to 0),
        normals    list of [x, y, z] floats (possibly non-unit),
        materials  material name -> integer index, in order of first use,
        mtllib     referenced MTL library file name ("" if none).
    """
    vertices = []
    normals = []
    uvs = []

    faces = []

    materials = {}
    mcounter = 0   # next material index to assign
    mcurrent = 0   # material index applied to newly parsed faces

    mtllib = ""

    # current face state
    group = 0
    object = 0     # NOTE: shadows the builtin `object`; kept for compat
    smooth = 0

    for line in fileinput.input(fname):
        chunks = line.split()
        if len(chunks) > 0:

            # Vertices as (x,y,z) coordinates
            # v 0.123 0.234 0.345
            if chunks[0] == "v" and len(chunks) == 4:
                x = float(chunks[1])
                y = float(chunks[2])
                z = float(chunks[3])
                vertices.append([x,y,z])

            # Normals in (x,y,z) form; normals might not be unit
            # vn 0.707 0.000 0.707
            if chunks[0] == "vn" and len(chunks) == 4:
                x = float(chunks[1])
                y = float(chunks[2])
                z = float(chunks[3])
                normals.append([x,y,z])

            # Texture coordinates in (u,v[,w]) coordinates, w is optional
            # vt 0.500 -1.352 [0.234]
            if chunks[0] == "vt" and len(chunks) >= 3:
                u = float(chunks[1])
                v = float(chunks[2])
                w = 0
                if len(chunks)>3:
                    w = float(chunks[3])
                uvs.append([u,v,w])

            # Face
            if chunks[0] == "f" and len(chunks) >= 4:
                vertex_index = []
                uv_index = []
                normal_index = []

                for v in chunks[1:]:
                    vertex = parse_vertex(v)
                    if vertex['v']:
                        vertex_index.append(vertex['v'])
                    if vertex['t']:
                        uv_index.append(vertex['t'])
                    if vertex['n']:
                        normal_index.append(vertex['n'])

                faces.append({
                    'vertex':vertex_index,
                    'uv':uv_index,
                    'normal':normal_index,

                    'material':mcurrent,
                    'group':group,
                    'object':object,
                    'smooth':smooth,
                    })

            # Group
            if chunks[0] == "g" and len(chunks) == 2:
                group = chunks[1]

            # Object
            if chunks[0] == "o" and len(chunks) == 2:
                object = chunks[1]

            # Materials definition
            if chunks[0] == "mtllib" and len(chunks) == 2:
                mtllib = chunks[1]

            # Material: usemtl switches the current material index,
            # allocating a fresh index on first use of a name
            if chunks[0] == "usemtl" and len(chunks) == 2:
                material = chunks[1]
                if not material in materials:
                    mcurrent = mcounter
                    materials[material] = mcounter
                    mcounter += 1
                else:
                    mcurrent = materials[material]

            # Smooth shading
            if chunks[0] == "s" and len(chunks) == 2:
                smooth = chunks[1]

    return faces, vertices, uvs, normals, materials, mtllib
# #####################################################
# Generator - faces
# #####################################################
def setBit(value, position, on):
    """Return value with the bit at `position` set (on=True) or cleared."""
    mask = 1 << position
    return (value | mask) if on else (value & ~mask)
def generate_face(f, fc):
    """Serialize one parsed face into the comma-separated Three.js format.

    Emits a bitmask face type followed by vertex, material, uv, normal
    and color indices in the order THREE.JSONLoader expects. Reads the
    module globals SHADING and BAKE_COLORS.

    Fix: index loops used ``xrange``, which is Python-2-only and raises
    NameError under Python 3; ``range`` behaves identically here.
    """
    isTriangle = ( len(f['vertex']) == 3 )

    if isTriangle:
        nVertices = 3
    else:
        nVertices = 4

    hasMaterial = True # for the moment OBJs without materials get default material

    hasFaceUvs = False # not supported in OBJ
    hasFaceVertexUvs = ( len(f['uv']) >= nVertices )

    hasFaceNormals = False # don't export any face normals (as they are computed in engine)
    hasFaceVertexNormals = ( len(f["normal"]) >= nVertices and SHADING == "smooth" )

    hasFaceColors = BAKE_COLORS
    hasFaceVertexColors = False # not supported in OBJ

    faceType = 0
    faceType = setBit(faceType, 0, not isTriangle)
    faceType = setBit(faceType, 1, hasMaterial)
    faceType = setBit(faceType, 2, hasFaceUvs)
    faceType = setBit(faceType, 3, hasFaceVertexUvs)
    faceType = setBit(faceType, 4, hasFaceNormals)
    faceType = setBit(faceType, 5, hasFaceVertexNormals)
    faceType = setBit(faceType, 6, hasFaceColors)
    faceType = setBit(faceType, 7, hasFaceVertexColors)

    faceData = []

    # order is important, must match order in JSONLoader:
    # face type, vertex indices, material index, face uvs index,
    # face vertex uvs indices, face normal index, face vertex normals
    # indices, face color index, face vertex colors indices

    faceData.append(faceType)

    # must clamp in case on polygons bigger than quads
    for i in range(nVertices):
        index = f['vertex'][i] - 1   # OBJ indices are 1-based
        faceData.append(index)

    faceData.append( f['material'] )

    if hasFaceVertexUvs:
        for i in range(nVertices):
            index = f['uv'][i] - 1
            faceData.append(index)

    if hasFaceVertexNormals:
        for i in range(nVertices):
            index = f['normal'][i] - 1
            faceData.append(index)

    if hasFaceColors:
        index = fc['material']
        faceData.append(index)

    return ",".join( map(str, faceData) )
# #####################################################
# Generator - chunks
# #####################################################
def hexcolor(c):
    """Pack an [r, g, b] triplet of floats in [0, 1] into one 0xRRGGBB int."""
    r = int(c[0] * 255)
    g = int(c[1] * 255)
    b = int(c[2] * 255)
    return (r << 16) + (g << 8) + b
def generate_vertex(v, option_vertices_truncate, scale):
    """Format one vertex; optionally scale and truncate to integers."""
    if option_vertices_truncate:
        return TEMPLATE_VERTEX_TRUNCATE % (scale * v[0], scale * v[1], scale * v[2])
    return TEMPLATE_VERTEX % (v[0], v[1], v[2])
def generate_normal(n):
    """Format a normal vector using the shared template."""
    x, y, z = n[0], n[1], n[2]
    return TEMPLATE_N % (x, y, z)
def generate_uv(uv):
    """Format a texture coordinate, flipping V for Three.js conventions."""
    flipped_v = 1.0 - uv[1]
    return TEMPLATE_UV % (uv[0], flipped_v)
def generate_color_rgb(c):
    """Format a color as a comma-separated float triplet."""
    r, g, b = c[0], c[1], c[2]
    return TEMPLATE_COLOR % (r, g, b)
def generate_color_decimal(c):
    """Format a color as a single decimal integer (packed 0xRRGGBB)."""
    packed = hexcolor(c)
    return TEMPLATE_COLOR_DEC % packed
def generate_edge(e):
    """Format an edge as a comma-separated vertex index pair."""
    first, second = e[0], e[1]
    return TEMPLATE_EDGE % (first, second)
# #####################################################
# Morphs
# #####################################################
def generate_morph_vertex(name, vertices):
    """Render one morph target (name + vertex list) as a JSON fragment."""
    parts = [generate_vertex(v, TRUNCATE, SCALE) for v in vertices]
    return TEMPLATE_MORPH_VERTICES % (name, ",".join(parts))
def generate_morph_color(name, colors):
    """Render one morph color map (name + color list) as a JSON fragment."""
    parts = [generate_color_rgb(c) for c in colors]
    return TEMPLATE_MORPH_COLORS % (name, ",".join(parts))
def extract_material_colors(materials, mtlfilename, basename):
    """Extract diffuse colors from MTL materials, ordered by material index."""
    if not materials:
        materials = { 'default': 0 }

    mtl = create_materials(materials, mtlfilename, basename)

    indexed = []
    for name in mtl:
        if name in materials:
            # missing colorDiffuse falls back to red
            color = mtl[name].get("colorDiffuse", [1, 0, 0])
            indexed.append([materials[name], color])
    indexed.sort()
    return [color for _index, color in indexed]
def extract_face_colors(faces, material_colors):
    """Map each face to the diffuse color of its material."""
    return [material_colors[face['material']] for face in faces]
def generate_morph_targets(morphfiles, n_vertices, infile):
    """Parse morph-target OBJ files matching the given glob patterns.

    Morphs whose vertex count differs from the base model are skipped;
    accepted morphs get the global ALIGN transform applied. Returns the
    formatted "morphTargets" JSON fragment ("" when none matched).

    Fix: Python-2 ``print`` statements are a SyntaxError under Python 3;
    replaced with single-argument ``print(...)`` calls, which behave the
    same on both versions.
    """
    skipOriginalMorph = False
    norminfile = os.path.normpath(infile)

    morphVertexData = []

    for mfilepattern in morphfiles.split():

        matches = glob.glob(mfilepattern)
        matches.sort()

        for path in matches:

            normpath = os.path.normpath(path)
            if normpath != norminfile or not skipOriginalMorph:

                name = os.path.basename(normpath)

                morphFaces, morphVertices, morphUvs, morphNormals, morphMaterials, morphMtllib = parse_obj(normpath)

                n_morph_vertices = len(morphVertices)

                if n_vertices != n_morph_vertices:
                    print("WARNING: skipping morph [%s] with different number of vertices [%d] than the original model [%d]" % (name, n_morph_vertices, n_vertices))
                else:
                    # apply the same alignment as the base model
                    if ALIGN == "center":
                        center(morphVertices)
                    elif ALIGN == "centerxz":
                        centerxz(morphVertices)
                    elif ALIGN == "bottom":
                        bottom(morphVertices)
                    elif ALIGN == "top":
                        top(morphVertices)

                    morphVertexData.append((get_name(name), morphVertices))
                    print("adding [%s] with %d vertices" % (name, n_morph_vertices))

    morphTargets = ""
    if len(morphVertexData):
        morphTargets = "\n%s\n\t" % ",\n".join(generate_morph_vertex(name, vertices) for name, vertices in morphVertexData)

    return morphTargets
def generate_morph_colors(colorfiles, n_vertices, n_faces):
    """Parse morph color-map OBJ files matching the given glob patterns.

    Maps that disagree with the base model in vertex or face count are
    skipped. Returns (morphColors JSON fragment, faces of the first
    accepted map, its material colors) — the first map is also used for
    baking face colors.

    Fix: Python-2 ``print`` statements are a SyntaxError under Python 3;
    replaced with single-argument ``print(...)`` calls, which behave the
    same on both versions.
    """
    morphColorData = []
    colorFaces = []
    materialColors = []

    for mfilepattern in colorfiles.split():

        matches = glob.glob(mfilepattern)
        matches.sort()

        for path in matches:
            normpath = os.path.normpath(path)
            name = os.path.basename(normpath)

            morphFaces, morphVertices, morphUvs, morphNormals, morphMaterials, morphMtllib = parse_obj(normpath)

            n_morph_vertices = len(morphVertices)
            n_morph_faces = len(morphFaces)

            if n_vertices != n_morph_vertices:
                print("WARNING: skipping morph color map [%s] with different number of vertices [%d] than the original model [%d]" % (name, n_morph_vertices, n_vertices))
            elif n_faces != n_morph_faces:
                print("WARNING: skipping morph color map [%s] with different number of faces [%d] than the original model [%d]" % (name, n_morph_faces, n_faces))
            else:
                morphMaterialColors = extract_material_colors(morphMaterials, morphMtllib, normpath)
                morphFaceColors = extract_face_colors(morphFaces, morphMaterialColors)
                morphColorData.append((get_name(name), morphFaceColors))

                # take first color map for baking into face colors
                if len(colorFaces) == 0:
                    colorFaces = morphFaces
                    materialColors = morphMaterialColors

                print("adding [%s] with %d face colors" % (name, len(morphFaceColors)))

    morphColors = ""
    if len(morphColorData):
        morphColors = "\n%s\n\t" % ",\n".join(generate_morph_color(name, colors) for name, colors in morphColorData)

    return morphColors, colorFaces, materialColors
# #####################################################
# Edges
# #####################################################
def edge_hash(a, b):
    """Order-independent string key for the edge between vertex ids a and b."""
    lo, hi = (a, b) if a <= b else (b, a)
    return "%d_%d" % (lo, hi)
def add_unique_edge(a, b, edge_set, edges):
    """Record the edge a-b once, keyed by unique vertex ids.

    `a` and `b` are [unique_vertex_id, original_index] pairs (see
    compute_edges); the stored edge uses the original indices.
    """
    key = edge_hash(a[0], b[0])
    if key in edge_set:
        return
    lo = min(a[1], b[1])
    hi = max(a[1], b[1])
    edges.append([lo, hi])
    edge_set.add(key)
def compute_edges(faces, vertices):
    """Build a sorted list of unique [a, b] edges from tri/quad faces.

    Vertices with identical coordinates (rounded to 6 decimals via
    veckey3) are merged first, so duplicated seam vertices don't yield
    duplicate edges. Each edge is [min_index, max_index] over the
    original vertex indices.
    """
    edges = []

    # compute unique vertices: rounded coords -> [unique_id, original index]
    unique_vertices = {}
    vertex_count = 0
    for i, v in enumerate(vertices):
        key = veckey3(v)
        if key not in unique_vertices:
            unique_vertices[key] = [vertex_count, i]
            vertex_count += 1

    # find edges between unique vertices
    edge_set = set()
    for f in faces:
        vertex_indices = f["vertex"]
        unique_indices = []
        for vi in vertex_indices:
            # OBJ face indices are 1-based
            v = vertices[vi - 1]
            key = veckey3(v)
            unique_indices.append(unique_vertices[key])

        if len(unique_indices) == 3:
            a = unique_indices[0]
            b = unique_indices[1]
            c = unique_indices[2]
            add_unique_edge(a, b, edge_set, edges)
            add_unique_edge(b, c, edge_set, edges)
            add_unique_edge(a, c, edge_set, edges)
        elif len(unique_indices) == 4:
            a = unique_indices[0]
            b = unique_indices[1]
            c = unique_indices[2]
            d = unique_indices[3]
            # this should be inside edge of quad, should it go in?
            # add_unique_edge(b, d, edge_set, edges)
            add_unique_edge(a, b, edge_set, edges)
            add_unique_edge(a, d, edge_set, edges)
            add_unique_edge(b, c, edge_set, edges)
            add_unique_edge(c, d, edge_set, edges)

    edges.sort()
    return edges
# #####################################################
# Materials
# #####################################################
def generate_color(i):
    """Generate hex color corresponding to integer.

    Colors have a well defined ordering: the first len(COLORS) are
    hardcoded, the rest are random (seed the random number generator
    with a deterministic value beforehand for reproducible output).
    """
    if i >= len(COLORS):
        # past the hardcoded palette: random 24-bit color
        return int(0xffffff * random.random())
    return COLORS[i]
def value2string(v):
    """Serialize a material property value for the generated JS.

    Booleans map to JS literals true/false; strings are quoted unless
    they already encode a hex color ("0x..."); anything else falls
    through to str().
    """
    if type(v) == bool:
        return str(v).lower()
    if type(v) == str and not v.startswith("0x"):
        return '"%s"' % v
    return str(v)
def generate_materials(mtl, materials):
    """Generate JS array of materials objects.

    JS material objects are basically prettified one-to-one mappings
    of MTL properties in JSON format.  Entries are emitted in order of
    their material index (order of first appearance in the OBJ file).
    Note: mutates the property dicts in *mtl* by adding Dbg* keys.
    """
    entries = []
    for name in mtl:
        if name not in materials:
            continue
        idx = materials[name]
        props = mtl[name]
        # debug information; idx is the identifier used in face definitions
        props['DbgName'] = name
        props['DbgIndex'] = idx
        props['DbgColor'] = generate_color(idx)
        if BAKE_COLORS:
            props['vertexColors'] = "face"
        body = ",\n".join('\t"%s" : %s' % (k, value2string(v))
                          for k, v in sorted(props.items()))
        entries.append((idx, "\t{\n%s\n\t}" % body))
    entries.sort()
    return ",\n\n".join(s for _, s in entries)
def generate_mtl(materials):
    """Generate dummy materials (used when there is no MTL file)."""
    return dict(
        (name, {
            'DbgName': name,
            'DbgIndex': idx,
            'DbgColor': generate_color(idx),
        })
        for name, idx in materials.items()
    )
def generate_materials_string(materials, mtlfilename, basename):
    """Generate final materials string.

    Falls back to a single 'default' material when the OBJ declared none.
    """
    source = materials or {'default': 0}
    return generate_materials(create_materials(source, mtlfilename, basename), source)
def create_materials(materials, mtlfilename, basename):
    """Parse MTL file and create mapping between its materials and OBJ materials.
    Eventual edge cases are handled here (missing materials, missing MTL file).

    materials   -- dict of OBJ material name -> index
    mtlfilename -- MTL filename as referenced by the OBJ (may be empty)
    basename    -- OBJ path; the MTL is resolved relative to its directory

    Returns dict of material name -> property dict.
    """
    random.seed(42) # to get well defined color order for debug colors
    # default materials with debug colors for when
    # there is no specified MTL / MTL loading failed,
    # or if there were no materials / null materials
    mtl = generate_mtl(materials)
    if mtlfilename:
        # create full pathname for MTL (included from OBJ)
        path = os.path.dirname(basename)
        fname = os.path.join(path, mtlfilename)
        if file_exists(fname):
            # override default materials with real ones from MTL
            # (where they exist, otherwise keep defaults)
            mtl.update(parse_mtl(fname))
        else:
            # missing MTL is non-fatal: keep the generated defaults
            print "Couldn't find [%s]" % fname
    return mtl
# #####################################################
# Faces
# #####################################################
def is_triangle_flat(f):
    """True for a 3-vertex face with no smooth-shaded normals and no UVs."""
    if len(f['vertex']) != 3:
        return False
    uses_smooth_normals = f["normal"] and SHADING == "smooth"
    return not uses_smooth_normals and not f['uv']
def is_triangle_flat_uv(f):
    """True for a 3-vertex face with no smooth-shaded normals and 3 UVs."""
    if len(f['vertex']) != 3:
        return False
    uses_smooth_normals = f["normal"] and SHADING == "smooth"
    return not uses_smooth_normals and len(f['uv']) == 3
def is_triangle_smooth(f):
    """Truthy for a 3-vertex smooth-shaded face with normals and no UVs.

    Mirrors the original short-circuit exactly: a falsy f["normal"]
    (e.g. []) is returned as-is rather than coerced to bool.
    """
    if len(f['vertex']) != 3:
        return False
    return f["normal"] and SHADING == "smooth" and not f['uv']
def is_triangle_smooth_uv(f):
    """Truthy for a 3-vertex smooth-shaded face with normals and 3 UVs.

    Mirrors the original short-circuit exactly: a falsy f["normal"]
    (e.g. []) is returned as-is rather than coerced to bool.
    """
    if len(f['vertex']) != 3:
        return False
    return f["normal"] and SHADING == "smooth" and len(f['uv']) == 3
def is_quad_flat(f):
    """True for a 4-vertex face with no smooth-shaded normals and no UVs."""
    if len(f['vertex']) != 4:
        return False
    uses_smooth_normals = f["normal"] and SHADING == "smooth"
    return not uses_smooth_normals and not f['uv']
def is_quad_flat_uv(f):
    """True for a 4-vertex face with no smooth-shaded normals and 4 UVs."""
    if len(f['vertex']) != 4:
        return False
    uses_smooth_normals = f["normal"] and SHADING == "smooth"
    return not uses_smooth_normals and len(f['uv']) == 4
def is_quad_smooth(f):
    """Truthy for a 4-vertex smooth-shaded face with normals and no UVs.

    Mirrors the original short-circuit exactly: a falsy f["normal"]
    (e.g. []) is returned as-is rather than coerced to bool.
    """
    if len(f['vertex']) != 4:
        return False
    return f["normal"] and SHADING == "smooth" and not f['uv']
def is_quad_smooth_uv(f):
    """Truthy for a 4-vertex smooth-shaded face with normals and 4 UVs.

    Mirrors the original short-circuit exactly: a falsy f["normal"]
    (e.g. []) is returned as-is rather than coerced to bool.
    """
    if len(f['vertex']) != 4:
        return False
    return f["normal"] and SHADING == "smooth" and len(f['uv']) == 4
def sort_faces(faces):
    """Bucket faces into the primitive categories used by the exporter.

    Returns a dict mapping category name -> list of faces.  Each face
    lands in the first matching category (same precedence as the
    original elif chain); faces matching no category are dropped.
    """
    classifiers = (
        ('triangles_flat', is_triangle_flat),
        ('triangles_flat_uv', is_triangle_flat_uv),
        ('triangles_smooth', is_triangle_smooth),
        ('triangles_smooth_uv', is_triangle_smooth_uv),
        ('quads_flat', is_quad_flat),
        ('quads_flat_uv', is_quad_flat_uv),
        ('quads_smooth', is_quad_smooth),
        ('quads_smooth_uv', is_quad_smooth_uv),
    )
    data = dict((name, []) for name, _ in classifiers)
    for f in faces:
        for name, predicate in classifiers:
            if predicate(f):
                data[name].append(f)
                break
    return data
# #####################################################
# API - ASCII converter
# #####################################################
def convert_ascii(infile, morphfiles, colorfiles, outfile):
    """Convert infile.obj to outfile.js
    Here is where everything happens. If you need to automate conversions,
    just import this file as Python module and call this method.

    infile     -- path of the source OBJ file
    morphfiles -- pattern of OBJ files holding morph target vertices
    colorfiles -- pattern of OBJ files holding morph vertex colors
    outfile    -- path of the generated ASCII JS model
    """
    if not file_exists(infile):
        print "Couldn't find [%s]" % infile
        return
    # parse OBJ / MTL files
    faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
    n_vertices = len(vertices)
    n_faces = len(faces)
    # align model
    if ALIGN == "center":
        center(vertices)
    elif ALIGN == "centerxz":
        centerxz(vertices)
    elif ALIGN == "bottom":
        bottom(vertices)
    elif ALIGN == "top":
        top(vertices)
    # generate normals string (only meaningful for smooth shading)
    nnormal = 0
    normals_string = ""
    if SHADING == "smooth":
        normals_string = ",".join(generate_normal(n) for n in normals)
        nnormal = len(normals)
    # extract morph vertices
    morphTargets = generate_morph_targets(morphfiles, n_vertices, infile)
    # extract morph colors
    morphColors, colorFaces, materialColors = generate_morph_colors(colorfiles, n_vertices, n_faces)
    # generate colors string; if the morph color maps did not cover every
    # face, fall back to the real faces and per-material colors
    ncolor = 0
    colors_string = ""
    if len(colorFaces) < len(faces):
        colorFaces = faces
        materialColors = extract_material_colors(materials, mtllib, infile)
    if BAKE_COLORS:
        colors_string = ",".join(generate_color_decimal(c) for c in materialColors)
        ncolor = len(materialColors)
    # generate edges string
    nedge = 0
    edges_string = ""
    if EXPORT_EDGES:
        edges = compute_edges(faces, vertices)
        nedge = len(edges)
        edges_string = ",".join(generate_edge(e) for e in edges)
    # generate ascii model string
    text = TEMPLATE_FILE_ASCII % {
    "name" : get_name(outfile),
    "fname" : infile,
    "nvertex" : len(vertices),
    "nface" : len(faces),
    "nuv" : len(uvs),
    "nnormal" : nnormal,
    "ncolor" : ncolor,
    "nmaterial" : len(materials),
    "nedge" : nedge,
    "materials" : generate_materials_string(materials, mtllib, infile),
    "normals" : normals_string,
    "colors" : colors_string,
    "uvs" : ",".join(generate_uv(uv) for uv in uvs),
    "vertices" : ",".join(generate_vertex(v, TRUNCATE, SCALE) for v in vertices),
    "morphTargets" : morphTargets,
    "morphColors" : morphColors,
    "faces" : ",".join(generate_face(f, fc) for f, fc in zip(faces, colorFaces)),
    "edges" : edges_string,
    "scale" : SCALE
    }
    out = open(outfile, "w")
    out.write(text)
    out.close()
    print "%d vertices, %d faces, %d materials" % (len(vertices), len(faces), len(materials))
# #############################################################################
# API - Binary converter
# #############################################################################
def convert_binary(infile, outfile):
    """Convert infile.obj to outfile.js + outfile.bin

    Writes a small JS descriptor to *outfile* and the packed geometry
    to a sibling ".bin" file next to it.  All multi-byte values are
    little-endian (see the '<' struct format prefixes below).
    """
    if not file_exists(infile):
        print "Couldn't find [%s]" % infile
        return
    binfile = get_name(outfile) + ".bin"
    faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
    # align model
    if ALIGN == "center":
        center(vertices)
    elif ALIGN == "centerxz":
        centerxz(vertices)
    elif ALIGN == "bottom":
        bottom(vertices)
    elif ALIGN == "top":
        top(vertices)
    sfaces = sort_faces(faces)
    # ###################
    # generate JS file
    # ###################
    text = TEMPLATE_FILE_BIN % {
    "name" : get_name(outfile),
    "materials" : generate_materials_string(materials, mtllib, infile),
    "buffers" : binfile,
    "fname" : infile,
    "nvertex" : len(vertices),
    "nface" : len(faces),
    "nmaterial" : len(materials)
    }
    out = open(outfile, "w")
    out.write(text)
    out.close()
    # ###################
    # generate BIN file
    # ###################
    # normals are only written for smooth shading
    if SHADING == "smooth":
        nnormals = len(normals)
    else:
        nnormals = 0
    buffer = []  # NOTE: shadows the builtin 'buffer' (harmless locally)
    # header
    # ------
    header_bytes = struct.calcsize('<8s')
    header_bytes += struct.calcsize('<BBBBBBBB')
    header_bytes += struct.calcsize('<IIIIIIIIIII')
    # signature
    signature = struct.pack('<8s', 'Three.js')
    # metadata (all data is little-endian)
    vertex_coordinate_bytes = 4
    normal_coordinate_bytes = 1
    uv_coordinate_bytes = 4
    vertex_index_bytes = 4
    normal_index_bytes = 4
    uv_index_bytes = 4
    material_index_bytes = 2
    # header_bytes unsigned char 1
    # vertex_coordinate_bytes unsigned char 1
    # normal_coordinate_bytes unsigned char 1
    # uv_coordinate_bytes unsigned char 1
    # vertex_index_bytes unsigned char 1
    # normal_index_bytes unsigned char 1
    # uv_index_bytes unsigned char 1
    # material_index_bytes unsigned char 1
    bdata = struct.pack('<BBBBBBBB', header_bytes,
                        vertex_coordinate_bytes,
                        normal_coordinate_bytes,
                        uv_coordinate_bytes,
                        vertex_index_bytes,
                        normal_index_bytes,
                        uv_index_bytes,
                        material_index_bytes)
    # nvertices unsigned int 4
    # nnormals unsigned int 4
    # nuvs unsigned int 4
    # ntri_flat unsigned int 4
    # ntri_smooth unsigned int 4
    # ntri_flat_uv unsigned int 4
    # ntri_smooth_uv unsigned int 4
    # nquad_flat unsigned int 4
    # nquad_smooth unsigned int 4
    # nquad_flat_uv unsigned int 4
    # nquad_smooth_uv unsigned int 4
    ndata = struct.pack('<IIIIIIIIIII', len(vertices),
                        nnormals,
                        len(uvs),
                        len(sfaces['triangles_flat']),
                        len(sfaces['triangles_smooth']),
                        len(sfaces['triangles_flat_uv']),
                        len(sfaces['triangles_smooth_uv']),
                        len(sfaces['quads_flat']),
                        len(sfaces['quads_smooth']),
                        len(sfaces['quads_flat_uv']),
                        len(sfaces['quads_smooth_uv']))
    buffer.append(signature)
    buffer.append(bdata)
    buffer.append(ndata)
    # 1. vertices
    # ------------
    # x float 4
    # y float 4
    # z float 4
    for v in vertices:
        data = struct.pack('<fff', v[0], v[1], v[2])
        buffer.append(data)
    # 2. normals
    # ---------------
    # x signed char 1
    # y signed char 1
    # z signed char 1
    # components are quantized to signed bytes (unit vector * 127, rounded)
    if SHADING == "smooth":
        for n in normals:
            normalize(n)
            data = struct.pack('<bbb', math.floor(n[0]*127+0.5),
                               math.floor(n[1]*127+0.5),
                               math.floor(n[2]*127+0.5))
            buffer.append(data)
    # 3. uvs
    # -----------
    # u float 4
    # v float 4
    # v is flipped (1 - v) to match the target texture coordinate convention
    for uv in uvs:
        data = struct.pack('<ff', uv[0], 1.0-uv[1])
        buffer.append(data)
    # all face indices below are converted from OBJ 1-based to 0-based
    # 4. flat triangles
    # ------------------
    # a unsigned int 4
    # b unsigned int 4
    # c unsigned int 4
    # m unsigned short 2
    for f in sfaces['triangles_flat']:
        vi = f['vertex']
        data = struct.pack('<IIIH',
                           vi[0]-1, vi[1]-1, vi[2]-1,
                           f['material'])
        buffer.append(data)
    # 5. smooth triangles
    # -------------------
    # a unsigned int 4
    # b unsigned int 4
    # c unsigned int 4
    # m unsigned short 2
    # na unsigned int 4
    # nb unsigned int 4
    # nc unsigned int 4
    for f in sfaces['triangles_smooth']:
        vi = f['vertex']
        ni = f['normal']
        data = struct.pack('<IIIHIII',
                           vi[0]-1, vi[1]-1, vi[2]-1,
                           f['material'],
                           ni[0]-1, ni[1]-1, ni[2]-1)
        buffer.append(data)
    # 6. flat triangles uv
    # --------------------
    # a unsigned int 4
    # b unsigned int 4
    # c unsigned int 4
    # m unsigned short 2
    # ua unsigned int 4
    # ub unsigned int 4
    # uc unsigned int 4
    for f in sfaces['triangles_flat_uv']:
        vi = f['vertex']
        ui = f['uv']
        data = struct.pack('<IIIHIII',
                           vi[0]-1, vi[1]-1, vi[2]-1,
                           f['material'],
                           ui[0]-1, ui[1]-1, ui[2]-1)
        buffer.append(data)
    # 7. smooth triangles uv
    # ----------------------
    # a unsigned int 4
    # b unsigned int 4
    # c unsigned int 4
    # m unsigned short 2
    # na unsigned int 4
    # nb unsigned int 4
    # nc unsigned int 4
    # ua unsigned int 4
    # ub unsigned int 4
    # uc unsigned int 4
    for f in sfaces['triangles_smooth_uv']:
        vi = f['vertex']
        ni = f['normal']
        ui = f['uv']
        data = struct.pack('<IIIHIIIIII',
                           vi[0]-1, vi[1]-1, vi[2]-1,
                           f['material'],
                           ni[0]-1, ni[1]-1, ni[2]-1,
                           ui[0]-1, ui[1]-1, ui[2]-1)
        buffer.append(data)
    # 8. flat quads
    # ------------------
    # a unsigned int 4
    # b unsigned int 4
    # c unsigned int 4
    # d unsigned int 4
    # m unsigned short 2
    for f in sfaces['quads_flat']:
        vi = f['vertex']
        data = struct.pack('<IIIIH',
                           vi[0]-1, vi[1]-1, vi[2]-1, vi[3]-1,
                           f['material'])
        buffer.append(data)
    # 9. smooth quads
    # -------------------
    # a unsigned int 4
    # b unsigned int 4
    # c unsigned int 4
    # d unsigned int 4
    # m unsigned short 2
    # na unsigned int 4
    # nb unsigned int 4
    # nc unsigned int 4
    # nd unsigned int 4
    for f in sfaces['quads_smooth']:
        vi = f['vertex']
        ni = f['normal']
        data = struct.pack('<IIIIHIIII',
                           vi[0]-1, vi[1]-1, vi[2]-1, vi[3]-1,
                           f['material'],
                           ni[0]-1, ni[1]-1, ni[2]-1, ni[3]-1)
        buffer.append(data)
    # 10. flat quads uv
    # ------------------
    # a unsigned int 4
    # b unsigned int 4
    # c unsigned int 4
    # d unsigned int 4
    # m unsigned short 2
    # ua unsigned int 4
    # ub unsigned int 4
    # uc unsigned int 4
    # ud unsigned int 4
    for f in sfaces['quads_flat_uv']:
        vi = f['vertex']
        ui = f['uv']
        data = struct.pack('<IIIIHIIII',
                           vi[0]-1, vi[1]-1, vi[2]-1, vi[3]-1,
                           f['material'],
                           ui[0]-1, ui[1]-1, ui[2]-1, ui[3]-1)
        buffer.append(data)
    # 11. smooth quads uv
    # -------------------
    # a unsigned int 4
    # b unsigned int 4
    # c unsigned int 4
    # d unsigned int 4
    # m unsigned short 2
    # na unsigned int 4
    # nb unsigned int 4
    # nc unsigned int 4
    # nd unsigned int 4
    # ua unsigned int 4
    # ub unsigned int 4
    # uc unsigned int 4
    # ud unsigned int 4
    for f in sfaces['quads_smooth_uv']:
        vi = f['vertex']
        ni = f['normal']
        ui = f['uv']
        data = struct.pack('<IIIIHIIIIIIII',
                           vi[0]-1, vi[1]-1, vi[2]-1, vi[3]-1,
                           f['material'],
                           ni[0]-1, ni[1]-1, ni[2]-1, ni[3]-1,
                           ui[0]-1, ui[1]-1, ui[2]-1, ui[3]-1)
        buffer.append(data)
    # write the BIN file next to the JS file
    path = os.path.dirname(outfile)
    fname = os.path.join(path, binfile)
    out = open(fname, "wb")
    out.write("".join(buffer))
    out.close()
# #############################################################################
# Helpers
# #############################################################################
def usage():
    """Print the command-line usage summary for this converter script."""
    print "Usage: %s -i filename.obj -o filename.js [-m morphfiles*.obj] [-c morphcolors*.obj] [-a center|top|bottom] [-s flat|smooth] [-t binary|ascii] [-d invert|normal]" % os.path.basename(sys.argv[0])
# #####################################################
# Main
# #####################################################
if __name__ == "__main__":
    # get parameters from the command line
    # NOTE(review): 'b' appears twice in the short-options string -- as a
    # flag inside "hbe" and again as "b:" with an argument; the flag form
    # matches the handler below, so the "b:" looks vestigial -- verify.
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hbei:m:c:b:o:a:s:t:d:x:", ["help", "bakecolors", "edges", "input=", "morphs=", "colors=", "output=", "align=", "shading=", "type=", "dissolve=", "truncatescale="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    infile = outfile = ""
    morphfiles = ""
    colorfiles = ""
    # option handlers below overwrite module-level defaults (ALIGN,
    # SHADING, TYPE, ...) defined earlier in the file; invalid values
    # silently keep the default
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("-i", "--input"):
            infile = a
        elif o in ("-m", "--morphs"):
            morphfiles = a
        elif o in ("-c", "--colors"):
            colorfiles = a
        elif o in ("-o", "--output"):
            outfile = a
        elif o in ("-a", "--align"):
            if a in ("top", "bottom", "center", "centerxz", "none"):
                ALIGN = a
        elif o in ("-s", "--shading"):
            if a in ("flat", "smooth"):
                SHADING = a
        elif o in ("-t", "--type"):
            if a in ("binary", "ascii"):
                TYPE = a
        elif o in ("-d", "--dissolve"):
            if a in ("normal", "invert"):
                TRANSPARENCY = a
        elif o in ("-b", "--bakecolors"):
            BAKE_COLORS = True
        elif o in ("-e", "--edges"):
            EXPORT_EDGES = True
        elif o in ("-x", "--truncatescale"):
            # truncating vertex precision implies scaling by the given factor
            TRUNCATE = True
            SCALE = float(a)
    # input and output paths are mandatory
    if infile == "" or outfile == "":
        usage()
        sys.exit(2)
    print "Converting [%s] into [%s] ..." % (infile, outfile)
    if morphfiles:
        print "Morphs [%s]" % morphfiles
    if colorfiles:
        print "Colors [%s]" % colorfiles
    # dispatch on the requested output type
    if TYPE == "ascii":
        convert_ascii(infile, morphfiles, colorfiles, outfile)
    elif TYPE == "binary":
        convert_binary(infile, outfile)
| gpl-3.0 |
overtherain/scriptfile | software/googleAppEngine/lib/django_1_3/django/contrib/gis/geos/prototypes/misc.py | 334 | 1438 | """
This module is for the miscellaneous GEOS routines, particularly the
ones that return the area, distance, and length.
"""
from ctypes import c_int, c_double, POINTER
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOS_PREPARE
from django.contrib.gis.geos.prototypes.errcheck import check_dbl, check_string
from django.contrib.gis.geos.prototypes.geom import geos_char_p
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
__all__ = ['geos_area', 'geos_distance', 'geos_length']
### ctypes generator function ###
def dbl_from_geom(func, num_geom=1):
    """
    Configure *func* as a GEOS routine taking ``num_geom`` geometry
    pointers plus a double that is passed in by reference as the last
    argument, returning an integer status code.
    """
    func.argtypes = [GEOM_PTR] * num_geom + [POINTER(c_double)]
    func.restype = c_int  # status code returned
    func.errcheck = check_dbl
    return func
### ctypes prototypes ###
# Area, distance, and length prototypes.
# Each takes GEOM_PTR argument(s) plus a double out-parameter (see
# dbl_from_geom above); GEOSDistance operates on two geometries.
geos_area = dbl_from_geom(GEOSFunc('GEOSArea'))
geos_distance = dbl_from_geom(GEOSFunc('GEOSDistance'), num_geom=2)
geos_length = dbl_from_geom(GEOSFunc('GEOSLength'))
# Validity reason; only in GEOS 3.1+
if GEOS_PREPARE:
    # restype is a C string (geos_char_p); presumably the textual reason
    # a geometry is invalid -- see GEOS docs for GEOSisValidReason
    geos_isvalidreason = GEOSFunc('GEOSisValidReason')
    geos_isvalidreason.argtypes = [GEOM_PTR]
    geos_isvalidreason.restype = geos_char_p
    geos_isvalidreason.errcheck = check_string
    # exported only when the underlying GEOS build provides it
    __all__.append('geos_isvalidreason')
| mit |
ryfeus/lambda-packs | Selenium_PhantomJS/source/botocore/vendored/requests/packages/chardet/jisfreq.py | 3131 | 47315 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP
# They are sorted in order.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ration = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
# Threshold used by the JIS distribution analyser (25% of the ideal
# distribution ratio derived in the header comments above).
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
# Char to FreqOrder table ,
JIS_TABLE_SIZE = 4368  # NOTE(review): presumably the entry count of JISCharToFreqOrder below -- table truncated here, verify
JISCharToFreqOrder = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
#Everything below is of no interest for detection purpose
2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384
6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400
6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416
6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432
6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448
4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464
4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480
3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496
3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512
4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528
3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544
6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560
4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576
6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592
6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608
6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624
6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640
6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656
6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672
3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688
3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704
6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720
2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736
4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752
4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768
4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784
6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800
3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816
4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832
4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848
6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880
6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896
3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912
2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928
4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944
2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960
6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976
4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992
6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008
6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024
6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040
4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056
6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072
2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088
6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104
4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120
6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136
4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152
4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168
6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184
6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200
6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216
3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232
1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248
3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264
3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280
4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296
6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312
3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328
6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344
3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360
3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376
2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392
6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408
6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424
3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440
6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456
3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472
6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488
6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520
4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536
6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552
4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568
3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584
3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600
6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616
6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632
4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648
6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664
6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680
6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696
6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712
6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728
6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744
4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760
4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776
3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792
6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808
4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824
2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840
6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856
6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872
4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888
2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904
4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920
2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936
4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952
4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968
4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984
6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000
3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016
6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032
3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048
6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064
2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080
3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096
7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112
2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128
3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160
3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176
3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192
7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208
7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224
7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240
7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256
7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272
4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288
3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304
3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320
4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336
3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352
3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368
7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384
4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400
7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416
7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432
7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448
7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464
7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480
4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496
4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512
7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528
3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544
4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560
7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576
7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592
4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608
3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624
3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640
7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656
4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672
4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688
4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704
4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720
4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736
4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752
7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768
7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800
7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816
7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832
2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848
3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864
7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880
7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896
3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912
4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928
3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944
3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960
2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976
7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992
7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008
4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024
3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040
3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056
7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072
7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088
7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104
4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120
7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136
2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152
3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168
4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184
7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200
4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216
4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232
7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248
7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264
5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280
7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296
7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312
7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328
7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344
7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360
5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376
5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392
7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408
3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440
7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456
3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472
7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488
7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504
1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520
3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536
4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552
2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568
3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584
2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600
5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616
4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632
4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648
5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664
7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680
7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696
7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712
7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728
3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744
7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760
3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776
7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792
4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808
7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824
7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840
7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856
7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872
7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888
7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904
7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920
7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272
# flake8: noqa
| mit |
AndroidOpenDevelopment/android_external_chromium_org | third_party/closure_linter/closure_linter/common/tokens_test.py | 126 | 3044 | #!/usr/bin/env python
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest as googletest
from closure_linter.common import tokens
def _CreateDummyToken():
return tokens.Token('foo', None, 1, 1)
def _CreateDummyTokens(count):
dummy_tokens = []
for _ in xrange(count):
dummy_tokens.append(_CreateDummyToken())
return dummy_tokens
def _SetTokensAsNeighbors(neighbor_tokens):
for i in xrange(len(neighbor_tokens)):
prev_index = i - 1
next_index = i + 1
if prev_index >= 0:
neighbor_tokens[i].previous = neighbor_tokens[prev_index]
if next_index < len(neighbor_tokens):
neighbor_tokens[i].next = neighbor_tokens[next_index]
class TokensTest(googletest.TestCase):
def testIsFirstInLine(self):
# First token in file (has no previous).
self.assertTrue(_CreateDummyToken().IsFirstInLine())
a, b = _CreateDummyTokens(2)
_SetTokensAsNeighbors([a, b])
# Tokens on same line
a.line_number = 30
b.line_number = 30
self.assertFalse(b.IsFirstInLine())
# Tokens on different lines
b.line_number = 31
self.assertTrue(b.IsFirstInLine())
def testIsLastInLine(self):
# Last token in file (has no next).
self.assertTrue(_CreateDummyToken().IsLastInLine())
a, b = _CreateDummyTokens(2)
_SetTokensAsNeighbors([a, b])
# Tokens on same line
a.line_number = 30
b.line_number = 30
self.assertFalse(a.IsLastInLine())
b.line_number = 31
self.assertTrue(a.IsLastInLine())
def testIsType(self):
a = tokens.Token('foo', 'fakeType1', 1, 1)
self.assertTrue(a.IsType('fakeType1'))
self.assertFalse(a.IsType('fakeType2'))
def testIsAnyType(self):
a = tokens.Token('foo', 'fakeType1', 1, 1)
self.assertTrue(a.IsAnyType(['fakeType1', 'fakeType2']))
self.assertFalse(a.IsAnyType(['fakeType3', 'fakeType4']))
def testRepr(self):
a = tokens.Token('foo', 'fakeType1', 1, 1)
self.assertEquals('<Token: fakeType1, "foo", None, 1, None>', str(a))
def testIter(self):
dummy_tokens = _CreateDummyTokens(5)
_SetTokensAsNeighbors(dummy_tokens)
a, b, c, d, e = dummy_tokens
i = iter(a)
self.assertListEqual([a, b, c, d, e], list(i))
def testReverseIter(self):
dummy_tokens = _CreateDummyTokens(5)
_SetTokensAsNeighbors(dummy_tokens)
a, b, c, d, e = dummy_tokens
ri = reversed(e)
self.assertListEqual([e, d, c, b, a], list(ri))
if __name__ == '__main__':
googletest.main()
| bsd-3-clause |
hassanabidpk/django | tests/m2m_signals/tests.py | 271 | 15982 | """
Testing signals emitted on changing m2m relations.
"""
from django.db import models
from django.test import TestCase
from .models import Car, Part, Person, SportsCar
class ManyToManySignalsTest(TestCase):
def m2m_changed_signal_receiver(self, signal, sender, **kwargs):
message = {
'instance': kwargs['instance'],
'action': kwargs['action'],
'reverse': kwargs['reverse'],
'model': kwargs['model'],
}
if kwargs['pk_set']:
message['objects'] = list(
kwargs['model'].objects.filter(pk__in=kwargs['pk_set'])
)
self.m2m_changed_messages.append(message)
def setUp(self):
self.m2m_changed_messages = []
self.vw = Car.objects.create(name='VW')
self.bmw = Car.objects.create(name='BMW')
self.toyota = Car.objects.create(name='Toyota')
self.wheelset = Part.objects.create(name='Wheelset')
self.doors = Part.objects.create(name='Doors')
self.engine = Part.objects.create(name='Engine')
self.airbag = Part.objects.create(name='Airbag')
self.sunroof = Part.objects.create(name='Sunroof')
self.alice = Person.objects.create(name='Alice')
self.bob = Person.objects.create(name='Bob')
self.chuck = Person.objects.create(name='Chuck')
self.daisy = Person.objects.create(name='Daisy')
def tearDown(self):
# disconnect all signal handlers
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Car.default_parts.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Car.optional_parts.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Person.fans.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Person.friends.through
)
def _initialize_signal_car(self, add_default_parts_before_set_signal=False):
""" Install a listener on the two m2m relations. """
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Car.optional_parts.through
)
if add_default_parts_before_set_signal:
# adding a default part to our car - no signal listener installed
self.vw.default_parts.add(self.sunroof)
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Car.default_parts.through
)
def test_m2m_relations_add_remove_clear(self):
expected_messages = []
self._initialize_signal_car(add_default_parts_before_set_signal=True)
self.vw.default_parts.add(self.wheelset, self.doors, self.engine)
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# give the BMW and Toyota some doors as well
self.doors.car_set.add(self.bmw, self.toyota)
expected_messages.append({
'instance': self.doors,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
expected_messages.append({
'instance': self.doors,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_remove_relation(self):
self._initialize_signal_car()
# remove the engine from the self.vw and the airbag (which is not set
# but is returned)
self.vw.default_parts.remove(self.engine, self.airbag)
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.vw,
'action': 'pre_remove',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.engine],
}, {
'instance': self.vw,
'action': 'post_remove',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.engine],
}
])
def test_m2m_relations_signals_give_the_self_vw_some_optional_parts(self):
expected_messages = []
self._initialize_signal_car()
# give the self.vw some optional parts (second relation to same model)
self.vw.optional_parts.add(self.airbag, self.sunroof)
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.sunroof],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.sunroof],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# add airbag to all the cars (even though the self.vw already has one)
self.airbag.cars_optional.add(self.vw, self.bmw, self.toyota)
expected_messages.append({
'instance': self.airbag,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
expected_messages.append({
'instance': self.airbag,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_reverse_relation_with_custom_related_name(self):
self._initialize_signal_car()
# remove airbag from the self.vw (reverse relation with custom
# related_name)
self.airbag.cars_optional.remove(self.vw)
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.airbag,
'action': 'pre_remove',
'reverse': True,
'model': Car,
'objects': [self.vw],
}, {
'instance': self.airbag,
'action': 'post_remove',
'reverse': True,
'model': Car,
'objects': [self.vw],
}
])
def test_m2m_relations_signals_clear_all_parts_of_the_self_vw(self):
self._initialize_signal_car()
# clear all parts of the self.vw
self.vw.default_parts.clear()
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.vw,
'action': 'pre_clear',
'reverse': False,
'model': Part,
}, {
'instance': self.vw,
'action': 'post_clear',
'reverse': False,
'model': Part,
}
])
def test_m2m_relations_signals_all_the_doors_off_of_cars(self):
self._initialize_signal_car()
# take all the doors off of cars
self.doors.car_set.clear()
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.doors,
'action': 'pre_clear',
'reverse': True,
'model': Car,
}, {
'instance': self.doors,
'action': 'post_clear',
'reverse': True,
'model': Car,
}
])
def test_m2m_relations_signals_reverse_relation(self):
self._initialize_signal_car()
# take all the airbags off of cars (clear reverse relation with custom
# related_name)
self.airbag.cars_optional.clear()
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.airbag,
'action': 'pre_clear',
'reverse': True,
'model': Car,
}, {
'instance': self.airbag,
'action': 'post_clear',
'reverse': True,
'model': Car,
}
])
def test_m2m_relations_signals_alternative_ways(self):
expected_messages = []
self._initialize_signal_car()
# alternative ways of setting relation:
self.vw.default_parts.create(name='Windows')
p6 = Part.objects.get(name='Windows')
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [p6],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [p6],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# direct assignment clears the set first, then adds
self.vw.default_parts = [self.wheelset, self.doors, self.engine]
expected_messages.append({
'instance': self.vw,
'action': 'pre_remove',
'reverse': False,
'model': Part,
'objects': [p6],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_remove',
'reverse': False,
'model': Part,
'objects': [p6],
})
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_clearing_removing(self):
expected_messages = []
self._initialize_signal_car(add_default_parts_before_set_signal=True)
# set by clearing.
self.vw.default_parts.set([self.wheelset, self.doors, self.engine], clear=True)
expected_messages.append({
'instance': self.vw,
'action': 'pre_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': self.vw,
'action': 'post_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# set by only removing what's necessary.
self.vw.default_parts.set([self.wheelset, self.doors], clear=False)
expected_messages.append({
'instance': self.vw,
'action': 'pre_remove',
'reverse': False,
'model': Part,
'objects': [self.engine],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_remove',
'reverse': False,
'model': Part,
'objects': [self.engine],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_when_inheritance(self):
expected_messages = []
self._initialize_signal_car(add_default_parts_before_set_signal=True)
# Check that signals still work when model inheritance is involved
c4 = SportsCar.objects.create(name='Bugatti', price='1000000')
c4b = Car.objects.get(name='Bugatti')
c4.default_parts = [self.doors]
expected_messages.append({
'instance': c4,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors],
})
expected_messages.append({
'instance': c4,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
self.engine.car_set.add(c4)
expected_messages.append({
'instance': self.engine,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [c4b],
})
expected_messages.append({
'instance': self.engine,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [c4b],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def _initialize_signal_person(self):
# Install a listener on the two m2m relations.
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Person.fans.through
)
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Person.friends.through
)
def test_m2m_relations_with_self_add_friends(self):
self._initialize_signal_person()
self.alice.friends = [self.bob, self.chuck]
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.alice,
'action': 'pre_add',
'reverse': False,
'model': Person,
'objects': [self.bob, self.chuck],
}, {
'instance': self.alice,
'action': 'post_add',
'reverse': False,
'model': Person,
'objects': [self.bob, self.chuck],
}
])
def test_m2m_relations_with_self_add_fan(self):
self._initialize_signal_person()
self.alice.fans = [self.daisy]
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.alice,
'action': 'pre_add',
'reverse': False,
'model': Person,
'objects': [self.daisy],
}, {
'instance': self.alice,
'action': 'post_add',
'reverse': False,
'model': Person,
'objects': [self.daisy],
}
])
def test_m2m_relations_with_self_add_idols(self):
self._initialize_signal_person()
self.chuck.idols = [self.alice, self.bob]
self.assertEqual(self.m2m_changed_messages, [
{
'instance': self.chuck,
'action': 'pre_add',
'reverse': True,
'model': Person,
'objects': [self.alice, self.bob],
}, {
'instance': self.chuck,
'action': 'post_add',
'reverse': True,
'model': Person,
'objects': [self.alice, self.bob],
}
])
| bsd-3-clause |
yaqiyang/autorest | Samples/azure-storage/Azure.Python/storagemanagementclient/models/storage_account_create_parameters.py | 4 | 1048 | # coding=utf-8
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StorageAccountCreateParameters(Model):
"""The parameters to provide for the account.
:param location: Resource location
:type location: str
:param tags: Resource tags
:type tags: dict
:param properties:
:type properties: :class:`StorageAccountPropertiesCreateParameters
<petstore.models.StorageAccountPropertiesCreateParameters>`
"""
_validation = {
'location': {'required': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'StorageAccountPropertiesCreateParameters'},
}
def __init__(self, location, tags=None, properties=None):
self.location = location
self.tags = tags
self.properties = properties
| mit |
flwh/KK_mt6589_iq451 | prebuilts/python/darwin-x86/2.7.5/lib/python2.7/test/test_int_literal.py | 138 | 9128 | """Test correct treatment of hex/oct constants.
This is complex because of changes due to PEP 237.
"""
import unittest
from test import test_support
class TestHexOctBin(unittest.TestCase):
def test_hex_baseline(self):
# A few upper/lowercase tests
self.assertEqual(0x0, 0X0)
self.assertEqual(0x1, 0X1)
self.assertEqual(0x123456789abcdef, 0X123456789abcdef)
# Baseline tests
self.assertEqual(0x0, 0)
self.assertEqual(0x10, 16)
self.assertEqual(0x7fffffff, 2147483647)
self.assertEqual(0x7fffffffffffffff, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x0), 0)
self.assertEqual(-(0x10), -16)
self.assertEqual(-(0x7fffffff), -2147483647)
self.assertEqual(-(0x7fffffffffffffff), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0x0, 0)
self.assertEqual(-0x10, -16)
self.assertEqual(-0x7fffffff, -2147483647)
self.assertEqual(-0x7fffffffffffffff, -9223372036854775807)
def test_hex_unsigned(self):
# Positive constants
self.assertEqual(0x80000000, 2147483648L)
self.assertEqual(0xffffffff, 4294967295L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x80000000), -2147483648L)
self.assertEqual(-(0xffffffff), -4294967295L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0x80000000, -2147483648L)
self.assertEqual(-0xffffffff, -4294967295L)
# Positive constants
self.assertEqual(0x8000000000000000, 9223372036854775808L)
self.assertEqual(0xffffffffffffffff, 18446744073709551615L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x8000000000000000), -9223372036854775808L)
self.assertEqual(-(0xffffffffffffffff), -18446744073709551615L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0x8000000000000000, -9223372036854775808L)
self.assertEqual(-0xffffffffffffffff, -18446744073709551615L)
def test_oct_baseline(self):
# Baseline tests
self.assertEqual(00, 0)
self.assertEqual(020, 16)
self.assertEqual(017777777777, 2147483647)
self.assertEqual(0777777777777777777777, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(00), 0)
self.assertEqual(-(020), -16)
self.assertEqual(-(017777777777), -2147483647)
self.assertEqual(-(0777777777777777777777), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-00, 0)
self.assertEqual(-020, -16)
self.assertEqual(-017777777777, -2147483647)
self.assertEqual(-0777777777777777777777, -9223372036854775807)
def test_oct_baseline_new(self):
# A few upper/lowercase tests
self.assertEqual(0o0, 0O0)
self.assertEqual(0o1, 0O1)
self.assertEqual(0o1234567, 0O1234567)
# Baseline tests
self.assertEqual(0o0, 0)
self.assertEqual(0o20, 16)
self.assertEqual(0o17777777777, 2147483647)
self.assertEqual(0o777777777777777777777, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o0), 0)
self.assertEqual(-(0o20), -16)
self.assertEqual(-(0o17777777777), -2147483647)
self.assertEqual(-(0o777777777777777777777), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0o0, 0)
self.assertEqual(-0o20, -16)
self.assertEqual(-0o17777777777, -2147483647)
self.assertEqual(-0o777777777777777777777, -9223372036854775807)
def test_oct_unsigned(self):
# Positive constants
self.assertEqual(020000000000, 2147483648L)
self.assertEqual(037777777777, 4294967295L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(020000000000), -2147483648L)
self.assertEqual(-(037777777777), -4294967295L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-020000000000, -2147483648L)
self.assertEqual(-037777777777, -4294967295L)
# Positive constants
self.assertEqual(01000000000000000000000, 9223372036854775808L)
self.assertEqual(01777777777777777777777, 18446744073709551615L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(01000000000000000000000), -9223372036854775808L)
self.assertEqual(-(01777777777777777777777), -18446744073709551615L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-01000000000000000000000, -9223372036854775808L)
self.assertEqual(-01777777777777777777777, -18446744073709551615L)
def test_oct_unsigned_new(self):
# Positive constants
self.assertEqual(0o20000000000, 2147483648L)
self.assertEqual(0o37777777777, 4294967295L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o20000000000), -2147483648L)
self.assertEqual(-(0o37777777777), -4294967295L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0o20000000000, -2147483648L)
self.assertEqual(-0o37777777777, -4294967295L)
# Positive constants
self.assertEqual(0o1000000000000000000000, 9223372036854775808L)
self.assertEqual(0o1777777777777777777777, 18446744073709551615L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o1000000000000000000000), -9223372036854775808L)
self.assertEqual(-(0o1777777777777777777777), -18446744073709551615L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0o1000000000000000000000, -9223372036854775808L)
self.assertEqual(-0o1777777777777777777777, -18446744073709551615L)
def test_bin_baseline(self):
# A few upper/lowercase tests
self.assertEqual(0b0, 0B0)
self.assertEqual(0b1, 0B1)
self.assertEqual(0b10101010101, 0B10101010101)
# Baseline tests
self.assertEqual(0b0, 0)
self.assertEqual(0b10000, 16)
self.assertEqual(0b1111111111111111111111111111111, 2147483647)
self.assertEqual(0b111111111111111111111111111111111111111111111111111111111111111, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0b0), 0)
self.assertEqual(-(0b10000), -16)
self.assertEqual(-(0b1111111111111111111111111111111), -2147483647)
self.assertEqual(-(0b111111111111111111111111111111111111111111111111111111111111111), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0b0, 0)
self.assertEqual(-0b10000, -16)
self.assertEqual(-0b1111111111111111111111111111111, -2147483647)
self.assertEqual(-0b111111111111111111111111111111111111111111111111111111111111111, -9223372036854775807)
def test_bin_unsigned(self):
# Positive constants
self.assertEqual(0b10000000000000000000000000000000, 2147483648L)
self.assertEqual(0b11111111111111111111111111111111, 4294967295L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0b10000000000000000000000000000000), -2147483648L)
self.assertEqual(-(0b11111111111111111111111111111111), -4294967295L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0b10000000000000000000000000000000, -2147483648L)
self.assertEqual(-0b11111111111111111111111111111111, -4294967295L)
# Positive constants
self.assertEqual(0b1000000000000000000000000000000000000000000000000000000000000000, 9223372036854775808L)
self.assertEqual(0b1111111111111111111111111111111111111111111111111111111111111111, 18446744073709551615L)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0b1000000000000000000000000000000000000000000000000000000000000000), -9223372036854775808L)
self.assertEqual(-(0b1111111111111111111111111111111111111111111111111111111111111111), -18446744073709551615L)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0b1000000000000000000000000000000000000000000000000000000000000000, -9223372036854775808L)
self.assertEqual(-0b1111111111111111111111111111111111111111111111111111111111111111, -18446744073709551615L)
def test_main():
test_support.run_unittest(TestHexOctBin)
if __name__ == "__main__":
test_main()
| gpl-2.0 |
MphasisWyde/eWamSublimeAdaptor | POC/v0_4_POC_with_generic_cmd_and_swagger/third-party/jsonschema/_validators.py | 22 | 11790 | import re
from jsonschema import _utils
from jsonschema.exceptions import FormatError, ValidationError
from jsonschema.compat import iteritems
def patternProperties(validator, patternProperties, instance, schema):
if not validator.is_type(instance, "object"):
return
for pattern, subschema in iteritems(patternProperties):
for k, v in iteritems(instance):
if re.search(pattern, k):
for error in validator.descend(
v, subschema, path=k, schema_path=pattern,
):
yield error
def additionalProperties(validator, aP, instance, schema):
if not validator.is_type(instance, "object"):
return
extras = set(_utils.find_additional_properties(instance, schema))
if validator.is_type(aP, "object"):
for extra in extras:
for error in validator.descend(instance[extra], aP, path=extra):
yield error
elif not aP and extras:
error = "Additional properties are not allowed (%s %s unexpected)"
yield ValidationError(error % _utils.extras_msg(extras))
def items(validator, items, instance, schema):
if not validator.is_type(instance, "array"):
return
if validator.is_type(items, "object"):
for index, item in enumerate(instance):
for error in validator.descend(item, items, path=index):
yield error
else:
for (index, item), subschema in zip(enumerate(instance), items):
for error in validator.descend(
item, subschema, path=index, schema_path=index,
):
yield error
def additionalItems(validator, aI, instance, schema):
if (
not validator.is_type(instance, "array") or
validator.is_type(schema.get("items", {}), "object")
):
return
len_items = len(schema.get("items", []))
if validator.is_type(aI, "object"):
for index, item in enumerate(instance[len_items:], start=len_items):
for error in validator.descend(item, aI, path=index):
yield error
elif not aI and len(instance) > len(schema.get("items", [])):
error = "Additional items are not allowed (%s %s unexpected)"
yield ValidationError(
error %
_utils.extras_msg(instance[len(schema.get("items", [])):])
)
def minimum(validator, minimum, instance, schema):
if not validator.is_type(instance, "number"):
return
if schema.get("exclusiveMinimum", False):
failed = instance <= minimum
cmp = "less than or equal to"
else:
failed = instance < minimum
cmp = "less than"
if failed:
yield ValidationError(
"%r is %s the minimum of %r" % (instance, cmp, minimum)
)
def maximum(validator, maximum, instance, schema):
if not validator.is_type(instance, "number"):
return
if schema.get("exclusiveMaximum", False):
failed = instance >= maximum
cmp = "greater than or equal to"
else:
failed = instance > maximum
cmp = "greater than"
if failed:
yield ValidationError(
"%r is %s the maximum of %r" % (instance, cmp, maximum)
)
def multipleOf(validator, dB, instance, schema):
if not validator.is_type(instance, "number"):
return
if isinstance(dB, float):
quotient = instance / dB
failed = int(quotient) != quotient
else:
failed = instance % dB
if failed:
yield ValidationError("%r is not a multiple of %r" % (instance, dB))
def minItems(validator, mI, instance, schema):
if validator.is_type(instance, "array") and len(instance) < mI:
yield ValidationError("%r is too short" % (instance,))
def maxItems(validator, mI, instance, schema):
if validator.is_type(instance, "array") and len(instance) > mI:
yield ValidationError("%r is too long" % (instance,))
def uniqueItems(validator, uI, instance, schema):
if (
uI and
validator.is_type(instance, "array") and
not _utils.uniq(instance)
):
yield ValidationError("%r has non-unique elements" % instance)
def pattern(validator, patrn, instance, schema):
if (
validator.is_type(instance, "string") and
not re.search(patrn, instance)
):
yield ValidationError("%r does not match %r" % (instance, patrn))
def format(validator, format, instance, schema):
if validator.format_checker is not None:
try:
validator.format_checker.check(instance, format)
except FormatError as error:
yield ValidationError(error.message, cause=error.cause)
def minLength(validator, mL, instance, schema):
if validator.is_type(instance, "string") and len(instance) < mL:
yield ValidationError("%r is too short" % (instance,))
def maxLength(validator, mL, instance, schema):
if validator.is_type(instance, "string") and len(instance) > mL:
yield ValidationError("%r is too long" % (instance,))
def dependencies(validator, dependencies, instance, schema):
if not validator.is_type(instance, "object"):
return
for property, dependency in iteritems(dependencies):
if property not in instance:
continue
if validator.is_type(dependency, "object"):
for error in validator.descend(
instance, dependency, schema_path=property,
):
yield error
else:
dependencies = _utils.ensure_list(dependency)
for dependency in dependencies:
if dependency not in instance:
yield ValidationError(
"%r is a dependency of %r" % (dependency, property)
)
def enum(validator, enums, instance, schema):
if instance not in enums:
yield ValidationError("%r is not one of %r" % (instance, enums))
def ref(validator, ref, instance, schema):
resolve = getattr(validator.resolver, "resolve", None)
if resolve is None:
with validator.resolver.resolving(ref) as resolved:
for error in validator.descend(instance, resolved):
yield error
else:
scope, resolved = validator.resolver.resolve(ref)
validator.resolver.push_scope(scope)
try:
for error in validator.descend(instance, resolved):
yield error
finally:
validator.resolver.pop_scope()
def type_draft3(validator, types, instance, schema):
types = _utils.ensure_list(types)
all_errors = []
for index, type in enumerate(types):
if type == "any":
return
if validator.is_type(type, "object"):
errors = list(validator.descend(instance, type, schema_path=index))
if not errors:
return
all_errors.extend(errors)
else:
if validator.is_type(instance, type):
return
else:
yield ValidationError(
_utils.types_msg(instance, types), context=all_errors,
)
def properties_draft3(validator, properties, instance, schema):
if not validator.is_type(instance, "object"):
return
for property, subschema in iteritems(properties):
if property in instance:
for error in validator.descend(
instance[property],
subschema,
path=property,
schema_path=property,
):
yield error
elif subschema.get("required", False):
error = ValidationError("%r is a required property" % property)
error._set(
validator="required",
validator_value=subschema["required"],
instance=instance,
schema=schema,
)
error.path.appendleft(property)
error.schema_path.extend([property, "required"])
yield error
def disallow_draft3(validator, disallow, instance, schema):
for disallowed in _utils.ensure_list(disallow):
if validator.is_valid(instance, {"type" : [disallowed]}):
yield ValidationError(
"%r is disallowed for %r" % (disallowed, instance)
)
def extends_draft3(validator, extends, instance, schema):
if validator.is_type(extends, "object"):
for error in validator.descend(instance, extends):
yield error
return
for index, subschema in enumerate(extends):
for error in validator.descend(instance, subschema, schema_path=index):
yield error
def type_draft4(validator, types, instance, schema):
types = _utils.ensure_list(types)
if not any(validator.is_type(instance, type) for type in types):
yield ValidationError(_utils.types_msg(instance, types))
def properties_draft4(validator, properties, instance, schema):
if not validator.is_type(instance, "object"):
return
for property, subschema in iteritems(properties):
if property in instance:
for error in validator.descend(
instance[property],
subschema,
path=property,
schema_path=property,
):
yield error
def required_draft4(validator, required, instance, schema):
if not validator.is_type(instance, "object"):
return
for property in required:
if property not in instance:
yield ValidationError("%r is a required property" % property)
def minProperties_draft4(validator, mP, instance, schema):
if validator.is_type(instance, "object") and len(instance) < mP:
yield ValidationError(
"%r does not have enough properties" % (instance,)
)
def maxProperties_draft4(validator, mP, instance, schema):
if not validator.is_type(instance, "object"):
return
if validator.is_type(instance, "object") and len(instance) > mP:
yield ValidationError("%r has too many properties" % (instance,))
def allOf_draft4(validator, allOf, instance, schema):
for index, subschema in enumerate(allOf):
for error in validator.descend(instance, subschema, schema_path=index):
yield error
def oneOf_draft4(validator, oneOf, instance, schema):
subschemas = enumerate(oneOf)
all_errors = []
for index, subschema in subschemas:
errs = list(validator.descend(instance, subschema, schema_path=index))
if not errs:
first_valid = subschema
break
all_errors.extend(errs)
else:
yield ValidationError(
"%r is not valid under any of the given schemas" % (instance,),
context=all_errors,
)
more_valid = [s for i, s in subschemas if validator.is_valid(instance, s)]
if more_valid:
more_valid.append(first_valid)
reprs = ", ".join(repr(schema) for schema in more_valid)
yield ValidationError(
"%r is valid under each of %s" % (instance, reprs)
)
def anyOf_draft4(validator, anyOf, instance, schema):
    """Draft 4 "anyOf": at least one subschema must validate."""
    collected = []
    matched = False
    for position, subschema in enumerate(anyOf):
        suberrors = list(validator.descend(instance, subschema, schema_path=position))
        if not suberrors:
            matched = True
            break
        collected.extend(suberrors)
    if not matched:
        yield ValidationError(
            "%r is not valid under any of the given schemas" % (instance,),
            context=collected,
        )
def not_draft4(validator, not_schema, instance, schema):
    """Draft 4 "not": error iff the instance *does* validate against not_schema."""
    forbidden = validator.is_valid(instance, not_schema)
    if forbidden:
        yield ValidationError(
            "%r is not allowed for %r" % (not_schema, instance)
        )
| mit |
dodi/google-python-exercises | basic/solution/wordcount.py | 211 | 3529 | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
#### LAB(begin solution)
def word_count_dict(filename):
  """Return a dict mapping each lower-cased word in `filename` to its count.

  Utility shared by print_words() and print_top().  Words are split on
  whitespace via str.split().
  """
  word_count = {}  # Map each word to its count
  # 'with' guarantees the file is closed even if reading raises
  # (the original only closed it on the success path).
  with open(filename, 'r') as input_file:
    for line in input_file:
      for word in line.split():
        word = word.lower()
        # dict.get collapses the first-time / already-seen special cases.
        word_count[word] = word_count.get(word, 0) + 1
  return word_count
def print_words(filename):
"""Prints one per line '<word> <count>' sorted by word for the given file."""
word_count = word_count_dict(filename)
words = sorted(word_count.keys())
for word in words:
print word, word_count[word]
def get_count(word_count_tuple):
  """Sort key: return the count element of a (word, count) tuple."""
  count = word_count_tuple[1]
  return count
def print_top(filename):
"""Prints the top count listing for the given file."""
word_count = word_count_dict(filename)
# Each item is a (word, count) tuple.
# Sort them so the big counts are first using key=get_count() to extract count.
items = sorted(word_count.items(), key=get_count, reverse=True)
# Print the first 20
for item in items[:20]:
print item[0], item[1]
##### LAB(end solution)
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
if len(sys.argv) != 3:
print 'usage: ./wordcount.py {--count | --topcount} file'
sys.exit(1)
option = sys.argv[1]
filename = sys.argv[2]
if option == '--count':
print_words(filename)
elif option == '--topcount':
print_top(filename)
else:
print 'unknown option: ' + option
sys.exit(1)
if __name__ == '__main__':
main()
| apache-2.0 |
Preston4tw/elearning | coursera/algo-pt1/week2/programming_question_2.py | 1 | 4681 | #!/usr/bin/env python3
"""
The file QuickSort.txt contains all of the integers between 1 and 10,000
(inclusive, with no repeats) in unsorted order. The integer in the ith row of
the file gives you the ith entry of an input array.
Your task is to compute the total number of comparisons used to sort the given
input file by QuickSort. As you know, the number of comparisons depends on which
elements are chosen as pivots, so we'll ask you to explore three different
pivoting rules. You should not count comparisons one-by-one. Rather, when there
is a recursive call on a subarray of length m, you should simply add m - 1
to your running total of comparisons. (This is because the pivot element is
compared to each of the other m - 1 elements in the subarray in this
recursive call.)
WARNING: The Partition subroutine can be implemented in several different ways,
and different implementations can give you differing numbers of comparisons.
For this problem, you should implement the Partition subroutine exactly as it is
described in the video lectures (otherwise you might get the wrong answer).
DIRECTIONS FOR THIS PROBLEM:
For the first part of the programming assignment, you should always use the
first element of the array as the pivot element.
HOW TO GIVE US YOUR ANSWER:
Type the numeric answer in the space provided. So if your answer is
1198233847, then just type 1198233847 in the space provided without any space /
commas / other punctuation marks. You have 5 attempts to get the correct
answer. (We do not require you to submit your code, so feel free to use the
programming language of your choice, just type the numeric answer in the
following space.)
"""
import itertools
import math
"""
Choose pivot functions should return indexes because to get the correct answers
for the programming exercise, as a pre-processing step we will swap the first
element of the array with the pivot
"""
def choose_pivot(array):
# Return the first element of the array
return 0
# Return the last element of the array
#return len(array) - 1
length = len(array)
i1 = 0
i2 = math.ceil(length / 2) - 1
i3 = length - 1
v1 = array[i1]
v2 = array[i2]
v3 = array[i3]
if v1 < v2 < v3:
return i2
if v1 < v3 < v2:
return i3
if v2 < v1 < v3:
return i1
if v2 < v3 < v1:
return i3
if v3 < v1 < v2:
return i1
if v3 < v2 < v1:
return i2
def quicksort(array):
    """Quicksort `array`, counting comparisons in the module global `comparisons`.

    Per the assignment's counting rule, every call on a subarray of
    length m >= 2 adds m - 1 to `comparisons`.  Partitioning mutates
    `array` in place, but the fully sorted result is returned as a new
    list.  The caller must initialize the global `comparisons` before
    the first call (see main()).
    """
    global comparisons
    # Base cases
    if not array:
        return list()
    if len(array) == 1:
        return array
    if len(array) == 2:
        # Length-2 subarray: one comparison, swap if out of order.
        comparisons += len(array) - 1
        if array[0] < array[1]:
            return array
        else:
            return [array[1],array[0]]
    comparisons += len(array) - 1
    # Partition index starts between array[0] and array[1]
    # The subarray syntax for arrays in python can be thought of as 'up to but
    # not including' when used as [:n], ex.
    # a = [1,2,3,4,5]; a[:2] == [1,2]
    # When picking elements to the left and right of the partition index:
    # left = partition_index - 1
    # right = partition_index
    partition_index = 1
    # choose_pivot returns the array index of the chosen pivot value
    # ex. array = [1,2,3,4,5], if 3 is the chosen pivot, pivot_index will be 2,
    # as a[2] == 3
    pivot_index = choose_pivot(array)
    # The coursera course assumes as a pre-processing step that the pivot is
    # moved to the first element of the array. Swap in place with whatever is
    # there
    array[0], array[pivot_index] = array[pivot_index], array[0]
    pivot = array[0]
    # As the array is being modified as its being traversed, I don't think
    # enumerate(array) or for value in array can be used. This may need some
    # testing later to better understand pythons implicit use of values vs
    # references.
    for index in range(1,len(array)):
        value = array[index]
        if pivot < value:
            continue
        if pivot > value:
            # Smaller than the pivot: swap into the "< pivot" region and
            # advance the partition boundary.
            array[index], array[partition_index] = array[partition_index], array[index]
            partition_index += 1
    # Move the pivot into place
    array[0], array[partition_index-1] = array[partition_index-1], array[0]
    # Recurse on the two sides of the pivot (pivot itself excluded).
    first_half = quicksort(array[:partition_index-1])
    second_half = quicksort(array[partition_index:])
    return list(itertools.chain(first_half, [pivot], second_half))
def main():
    """Read QuickSort.txt (one integer per line), quicksort it, and print
    the total comparison count accumulated by quicksort()."""
    global comparisons
    comparisons = 0
    # 'with' closes the input file (the original leaked the handle), and
    # the unused `sorted_array` binding is gone.
    with open("QuickSort.txt") as f:
        array = [int(line) for line in f]
    quicksort(array)
    print(comparisons)

if __name__ == "__main__":
    main()
| mit |
drbild/boto | boto/pyami/installers/ubuntu/ebs.py | 115 | 9938 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
Automated installer to attach, format and mount an EBS volume.
This installer assumes that you want the volume formatted as
an XFS file system. To drive this installer, you need the
following section in the boto config passed to the new instance.
You also need to install dateutil by listing python-dateutil
in the list of packages to be installed in the Pyami section
of your boto config file.
If there is already a device mounted at the specified mount point,
the installer assumes that it is the ephemeral drive and unmounts
it, remounts it as /tmp and chmods it to 777.
Config file section::
[EBS]
volume_id = <the id of the EBS volume, should look like vol-xxxxxxxx>
logical_volume_name = <the name of the logical volume that contains
a reference to the physical volume to be mounted. If this parameter
is supplied, it overrides the volume_id setting.>
device = <the linux device the EBS volume should be mounted on>
mount_point = <directory to mount device, defaults to /ebs>
"""
import boto
from boto.manage.volume import Volume
from boto.exception import EC2ResponseError
import os, time
from boto.pyami.installers.ubuntu.installer import Installer
from string import Template
# Python 2 script template installed as /usr/local/bin/ebs_backup: freezes
# the XFS filesystem, snapshots the EBS volume, and always unfreezes in the
# `finally` clause.  The ${...} placeholders are filled in by
# string.Template in EBSInstaller.create_backup_script().
#
# BUGFIX: the original template carried a second, unreachable duplicate
# `except Exception, e:` clause and logged "Snapshot created" from inside
# the *failure* handler; both have been removed.
BackupScriptTemplate = """#!/usr/bin/env python
# Backup EBS volume
import boto
from boto.pyami.scriptbase import ScriptBase
import traceback

class Backup(ScriptBase):

    def main(self):
        try:
            ec2 = boto.connect_ec2()
            self.run("/usr/sbin/xfs_freeze -f ${mount_point}", exit_on_error = True)
            snapshot = ec2.create_snapshot('${volume_id}')
            boto.log.info("Snapshot created: %s " % snapshot)
        except Exception, e:
            self.notify(subject="${instance_id} Backup Failed", body=traceback.format_exc())
        finally:
            self.run("/usr/sbin/xfs_freeze -u ${mount_point}")

if __name__ == "__main__":
    b = Backup()
    b.main()
"""
BackupCleanupScript= """#!/usr/bin/env python
import boto
from boto.manage.volume import Volume
# Cleans Backups of EBS volumes
for v in Volume.all():
v.trim_snapshots(True)
"""
# Script installed as /usr/local/bin/ebs_backup_cleanup when the
# 'use_tag_based_snapshot_cleanup' option is set in the [EBS] config
# section: relies on snapshots being tagged with their volume's name.
TagBasedBackupCleanupScript= """#!/usr/bin/env python
import boto
# Cleans Backups of EBS volumes
ec2 = boto.connect_ec2()
ec2.trim_snapshots()
"""
class EBSInstaller(Installer):
    """
    Attach, format (XFS) and mount an EBS volume, then install cron-driven
    snapshot backup/cleanup scripts.  Configuration comes from the [EBS]
    section of the boto config (see the module docstring).
    """

    def __init__(self, config_file=None):
        super(EBSInstaller, self).__init__(config_file)
        # All settings come from the boto config passed to the instance.
        self.instance_id = boto.config.get('Instance', 'instance-id')
        self.device = boto.config.get('EBS', 'device', '/dev/sdp')
        self.volume_id = boto.config.get('EBS', 'volume_id')
        self.logical_volume_name = boto.config.get('EBS', 'logical_volume_name')
        self.mount_point = boto.config.get('EBS', 'mount_point', '/ebs')

    def attach(self):
        """Attach the configured EBS volume to this instance and block until
        the Linux device node appears."""
        ec2 = boto.connect_ec2()
        if self.logical_volume_name:
            # if a logical volume was specified, override the specified volume_id
            # (if there was one) with the current AWS volume for the logical volume:
            logical_volume = next(Volume.find(name=self.logical_volume_name))
            self.volume_id = logical_volume._volume_id
        volume = ec2.get_all_volumes([self.volume_id])[0]
        # wait for the volume to be available. The volume may still be being created
        # from a snapshot.
        while volume.update() != 'available':
            boto.log.info('Volume %s not yet available. Current status = %s.' % (volume.id, volume.status))
            time.sleep(5)
        # Fails fast if the instance id is wrong.
        instance = ec2.get_only_instances([self.instance_id])[0]
        attempt_attach = True
        while attempt_attach:
            try:
                ec2.attach_volume(self.volume_id, self.instance_id, self.device)
                attempt_attach = False
            except EC2ResponseError as e:
                if e.error_code == 'IncorrectState':
                    # 'IncorrectState' means ec2 has not yet realized the
                    # instance is running: delay a bit, then try again.
                    # Any other error is fatal.  (BUGFIX: the original
                    # tested `!=` here, inverting its own comment -- it
                    # retried forever on genuine errors and raised on the
                    # one transient condition.)
                    boto.log.info('Attempt to attach the EBS volume %s to this instance (%s) returned %s. Trying again in a bit.' % (self.volume_id, self.instance_id, e.errors))
                    time.sleep(2)
                else:
                    raise e
        boto.log.info('Attached volume %s to instance %s as device %s' % (self.volume_id, self.instance_id, self.device))
        # now wait for the volume device to appear
        while not os.path.exists(self.device):
            boto.log.info('%s still does not exist, waiting 2 seconds' % self.device)
            time.sleep(2)

    def make_fs(self):
        """Create an XFS filesystem on the device unless fsck finds one."""
        boto.log.info('make_fs...')
        has_fs = self.run('fsck %s' % self.device)
        if has_fs != 0:
            self.run('mkfs -t xfs %s' % self.device)

    def create_backup_script(self):
        """Render BackupScriptTemplate and install it as /usr/local/bin/ebs_backup."""
        t = Template(BackupScriptTemplate)
        s = t.substitute(volume_id=self.volume_id, instance_id=self.instance_id,
                         mount_point=self.mount_point)
        # 'with' guarantees the handle is closed even if the write fails.
        with open('/usr/local/bin/ebs_backup', 'w') as fp:
            fp.write(s)
        self.run('chmod +x /usr/local/bin/ebs_backup')

    def create_backup_cleanup_script(self, use_tag_based_cleanup=False):
        """Install /usr/local/bin/ebs_backup_cleanup, choosing the tag-based
        or manage-module snapshot trimming variant."""
        with open('/usr/local/bin/ebs_backup_cleanup', 'w') as fp:
            if use_tag_based_cleanup:
                fp.write(TagBasedBackupCleanupScript)
            else:
                fp.write(BackupCleanupScript)
        self.run('chmod +x /usr/local/bin/ebs_backup_cleanup')

    def handle_mount_point(self):
        """Create the mount point if needed, evict any ephemeral drive already
        mounted there (remounting it as /tmp), and mount the EBS device."""
        boto.log.info('handle_mount_point')
        if not os.path.isdir(self.mount_point):
            boto.log.info('making directory')
            # mount directory doesn't exist so create it
            self.run("mkdir %s" % self.mount_point)
        else:
            boto.log.info('directory exists already')
            self.run('mount -l')
            lines = self.last_command.output.split('\n')
            for line in lines:
                t = line.split()
                if t and t[2] == self.mount_point:
                    # something is already mounted at the mount point
                    # unmount that and mount it as /tmp
                    if t[0] != self.device:
                        self.run('umount %s' % self.mount_point)
                        self.run('mount %s /tmp' % t[0])
                        break
            self.run('chmod 777 /tmp')
        # Mount up our new EBS volume onto mount_point
        self.run("mount %s %s" % (self.device, self.mount_point))
        # Grow the filesystem in case the volume is larger than the snapshot
        # it was created from.
        self.run('xfs_growfs %s' % self.mount_point)

    def update_fstab(self):
        """Append the volume to /etc/fstab so the mount survives reboots."""
        with open("/etc/fstab", "a") as f:
            f.write('%s\t%s\txfs\tdefaults 0 0\n' % (self.device, self.mount_point))

    def install(self):
        """Full setup: attach, format, mount, then install cron jobs."""
        # First, find and attach the volume
        self.attach()
        # Install the xfs tools
        self.run('apt-get -y install xfsprogs xfsdump')
        # Check to see if the filesystem was created or not
        self.make_fs()
        # create the /ebs directory for mounting
        self.handle_mount_point()
        # create the backup script
        self.create_backup_script()
        # Set up the backup script
        minute = boto.config.get('EBS', 'backup_cron_minute', '0')
        hour = boto.config.get('EBS', 'backup_cron_hour', '4,16')
        self.add_cron("ebs_backup", "/usr/local/bin/ebs_backup", minute=minute, hour=hour)
        # Set up the backup cleanup script
        minute = boto.config.get('EBS', 'backup_cleanup_cron_minute')
        hour = boto.config.get('EBS', 'backup_cleanup_cron_hour')
        if (minute is not None) and (hour is not None):
            # Snapshot clean up can either be done via the manage module, or via the new tag based
            # snapshot code, if the snapshots have been tagged with the name of the associated
            # volume. Check for the presence of the new configuration flag, and use the appropriate
            # cleanup method / script:
            use_tag_based_cleanup = boto.config.has_option('EBS', 'use_tag_based_snapshot_cleanup')
            self.create_backup_cleanup_script(use_tag_based_cleanup)
            self.add_cron("ebs_backup_cleanup", "/usr/local/bin/ebs_backup_cleanup", minute=minute, hour=hour)
        # Set up the fstab
        self.update_fstab()

    def main(self):
        """Entry point: install only if the device is not already attached."""
        if not os.path.exists(self.device):
            self.install()
        else:
            boto.log.info("Device %s is already attached, skipping EBS Installer" % self.device)
| mit |
ramitalat/odoo | addons/account_payment/account_payment.py | 89 | 19099 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import time
from openerp.osv import fields, osv
_logger = logging.getLogger(__name__)
class payment_mode(osv.osv):
    """A way of paying suppliers: a bank account plus the bank/cash journal
    used to record the payments (old-API OpenERP model)."""
    _name= 'payment.mode'
    _description= 'Payment Mode'
    _columns = {
        'name': fields.char('Name', required=True, help='Mode of Payment'),
        'bank_id': fields.many2one('res.partner.bank', "Bank account",
            required=True,help='Bank Account for the Payment Mode'),
        'journal': fields.many2one('account.journal', 'Journal', required=True,
            domain=[('type', 'in', ('bank','cash'))], help='Bank or Cash Journal for the Payment Mode'),
        'company_id': fields.many2one('res.company', 'Company',required=True),
        'partner_id':fields.related('company_id','partner_id',type='many2one',relation='res.partner',string='Partner',store=True,),
    }
    _defaults = {
        # Default to the current user's company.
        'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id
    }

    def suitable_bank_types(self, cr, uid, payment_code=None, context=None):
        """Return the codes of the bank type that are suitable
        for the given payment type code.

        NOTE(review): despite its name, `payment_code` is used as a
        payment.mode database id in the query below -- confirm with callers.
        """
        if not payment_code:
            return []
        cr.execute(""" SELECT pb.state
            FROM res_partner_bank pb
            JOIN payment_mode pm ON (pm.bank_id = pb.id)
            WHERE pm.id = %s """, [payment_code])
        return [x[0] for x in cr.fetchall()]

    def onchange_company_id (self, cr, uid, ids, company_id=False, context=None):
        """Onchange: keep partner_id in sync with the selected company's partner."""
        result = {}
        if company_id:
            partner_id = self.pool.get('res.company').browse(cr, uid, company_id, context=context).partner_id.id
            result['partner_id'] = partner_id
        return {'value': result}
class payment_order(osv.osv):
    """A batch of supplier payments executed through a payment mode.

    Lifecycle (driven by a workflow): draft -> open (confirmed) -> done,
    with cancel as an escape hatch.
    """
    _name = 'payment.order'
    _description = 'Payment Order'
    _rec_name = 'reference'
    _order = 'id desc'

    #dead code
    def get_wizard(self, type):
        """Legacy hook; always logs a warning and returns None."""
        _logger.warning("No wizard found for the payment type '%s'.", type)
        return None

    def _total(self, cursor, user, ids, name, args, context=None):
        """Function field: sum of the amounts of the order's payment lines."""
        if not ids:
            return {}
        res = {}
        for order in self.browse(cursor, user, ids, context=context):
            if order.line_ids:
                res[order.id] = reduce(lambda x, y: x + y.amount, order.line_ids, 0.0)
            else:
                res[order.id] = 0.0
        return res

    _columns = {
        'date_scheduled': fields.date('Scheduled Date', states={'done':[('readonly', True)]}, help='Select a date if you have chosen Preferred Date to be fixed.'),
        'reference': fields.char('Reference', required=1, states={'done': [('readonly', True)]}, copy=False),
        'mode': fields.many2one('payment.mode', 'Payment Mode', select=True, required=1, states={'done': [('readonly', True)]}, help='Select the Payment Mode to be applied.'),
        'state': fields.selection([
            ('draft', 'Draft'),
            ('cancel', 'Cancelled'),
            ('open', 'Confirmed'),
            ('done', 'Done')], 'Status', select=True, copy=False,
            help='When an order is placed the status is \'Draft\'.\n Once the bank is confirmed the status is set to \'Confirmed\'.\n Then the order is paid the status is \'Done\'.'),
        'line_ids': fields.one2many('payment.line', 'order_id', 'Payment lines', states={'done': [('readonly', True)]}),
        'total': fields.function(_total, string="Total", type='float'),
        'user_id': fields.many2one('res.users', 'Responsible', required=True, states={'done': [('readonly', True)]}),
        'date_prefered': fields.selection([
            ('now', 'Directly'),
            ('due', 'Due date'),
            ('fixed', 'Fixed date')
            ], "Preferred Date", change_default=True, required=True, states={'done': [('readonly', True)]}, help="Choose an option for the Payment Order:'Fixed' stands for a date specified by you.'Directly' stands for the direct execution.'Due date' stands for the scheduled date of execution."),
        'date_created': fields.date('Creation Date', readonly=True),
        'date_done': fields.date('Execution Date', readonly=True),
        'company_id': fields.related('mode', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
    }
    _defaults = {
        'user_id': lambda self,cr,uid,context: uid,
        'state': 'draft',
        'date_prefered': 'due',
        'date_created': lambda *a: time.strftime('%Y-%m-%d'),
        'reference': lambda self,cr,uid,context: self.pool.get('ir.sequence').get(cr, uid, 'payment.order'),
    }

    def set_to_draft(self, cr, uid, ids, *args):
        """Reset cancelled/done orders to draft and restart their workflow."""
        self.write(cr, uid, ids, {'state': 'draft'})
        self.create_workflow(cr, uid, ids)
        return True

    def action_open(self, cr, uid, ids, *args):
        """Workflow action: assign a sequence reference to orders lacking one."""
        ir_seq_obj = self.pool.get('ir.sequence')
        for order in self.read(cr, uid, ids, ['reference']):
            if not order['reference']:
                reference = ir_seq_obj.get(cr, uid, 'payment.order')
                self.write(cr, uid, order['id'], {'reference':reference})
        return True

    def set_done(self, cr, uid, ids, *args):
        """Stamp the execution date and push the workflow to 'done'."""
        self.write(cr, uid, ids, {'date_done': time.strftime('%Y-%m-%d')})
        self.signal_workflow(cr, uid, ids, 'done')
        return True

    def write(self, cr, uid, ids, vals, context=None):
        """Override: propagate the preferred-date policy to the payment lines.

        'fixed' pushes date_scheduled to every line, 'due' copies each
        line's move-line maturity date, and 'now' clears the line dates
        (meaning immediate execution).
        """
        if context is None:
            context = {}
        payment_line_obj = self.pool.get('payment.line')
        payment_line_ids = []
        if (vals.get('date_prefered', False) == 'fixed' and not vals.get('date_scheduled', False)) or vals.get('date_scheduled', False):
            for order in self.browse(cr, uid, ids, context=context):
                for line in order.line_ids:
                    payment_line_ids.append(line.id)
            payment_line_obj.write(cr, uid, payment_line_ids, {'date': vals.get('date_scheduled', False)}, context=context)
        elif vals.get('date_prefered', False) == 'due':
            vals.update({'date_scheduled': False})
            for order in self.browse(cr, uid, ids, context=context):
                for line in order.line_ids:
                    payment_line_obj.write(cr, uid, [line.id], {'date': line.ml_maturity_date}, context=context)
        elif vals.get('date_prefered', False) == 'now':
            vals.update({'date_scheduled': False})
            for order in self.browse(cr, uid, ids, context=context):
                for line in order.line_ids:
                    payment_line_ids.append(line.id)
            payment_line_obj.write(cr, uid, payment_line_ids, {'date': False}, context=context)
        return super(payment_order, self).write(cr, uid, ids, vals, context=context)
class payment_line(osv.osv):
    """A single payment inside a payment.order, usually linked to a payable
    account.move.line (old-API OpenERP model)."""
    _name = 'payment.line'
    _description = 'Payment Line'

    def translate(self, orig):
        """Map payment-line field names onto account.move.line column names."""
        return {
            "due_date": "date_maturity",
            "reference": "ref"}.get(orig, orig)

    def _info_owner(self, cr, uid, ids, name=None, args=None, context=None):
        """Function field: formatted address of the paying bank account's partner."""
        result = {}
        for line in self.browse(cr, uid, ids, context=context):
            owner = line.order_id.mode.bank_id.partner_id
            result[line.id] = self._get_info_partner(cr, uid, owner, context=context)
        return result

    def _get_info_partner(self,cr, uid, partner_record, context=None):
        """Return a printable multi-line address block for `partner_record`,
        or False when no partner is given."""
        if not partner_record:
            return False
        st = partner_record.street or ''
        st1 = partner_record.street2 or ''
        zip = partner_record.zip or ''
        city = partner_record.city or ''
        zip_city = zip + ' ' + city
        cntry = partner_record.country_id and partner_record.country_id.name or ''
        return partner_record.name + "\n" + st + " " + st1 + "\n" + zip_city + "\n" +cntry

    def _info_partner(self, cr, uid, ids, name=None, args=None, context=None):
        """Function field: formatted address of each line's partner.

        Lines without a partner keep the default False value.
        """
        result = {}
        for line in self.browse(cr, uid, ids, context=context):
            result[line.id] = False
            if not line.partner_id:
                # BUGFIX: this was `break`, which aborted the computation for
                # every remaining line as soon as one line had no partner,
                # leaving ids missing from the function-field result dict.
                continue
            result[line.id] = self._get_info_partner(cr, uid, line.partner_id, context=context)
        return result

    #dead code
    def select_by_name(self, cr, uid, ids, name, args, context=None):
        """Legacy helper: fetch a move-line column for each payment line."""
        if not ids: return {}
        partner_obj = self.pool.get('res.partner')
        cr.execute("""SELECT pl.id, ml.%s
            FROM account_move_line ml
                INNER JOIN payment_line pl
                ON (ml.id = pl.move_line_id)
            WHERE pl.id IN %%s"""% self.translate(name),
                   (tuple(ids),))
        res = dict(cr.fetchall())
        if name == 'partner_id':
            partner_name = {}
            for p_id, p_name in partner_obj.name_get(cr, uid,
                filter(lambda x:x and x != 0,res.values()), context=context):
                partner_name[p_id] = p_name
            for id in ids:
                if id in res and partner_name:
                    res[id] = (res[id],partner_name[res[id]])
                else:
                    res[id] = (False,False)
        else:
            for id in ids:
                res.setdefault(id, (False, ""))
        return res

    def _amount(self, cursor, user, ids, name, args, context=None):
        """Function field: line amount converted into the company currency,
        at the order's execution date (or today if not executed yet)."""
        if not ids:
            return {}
        currency_obj = self.pool.get('res.currency')
        if context is None:
            context = {}
        res = {}
        for line in self.browse(cursor, user, ids, context=context):
            ctx = context.copy()
            ctx['date'] = line.order_id.date_done or time.strftime('%Y-%m-%d')
            res[line.id] = currency_obj.compute(cursor, user, line.currency.id,
                    line.company_currency.id,
                    line.amount_currency, context=ctx)
        return res

    def _get_currency(self, cr, uid, context=None):
        """Default: the user's company currency, or the base-rate currency."""
        user_obj = self.pool.get('res.users')
        currency_obj = self.pool.get('res.currency')
        user = user_obj.browse(cr, uid, uid, context=context)
        if user.company_id:
            return user.company_id.currency_id.id
        else:
            return currency_obj.search(cr, uid, [('rate', '=', 1.0)])[0]

    def _get_date(self, cr, uid, context=None):
        """Default payment date, derived from the parent order's policy."""
        if context is None:
            context = {}
        payment_order_obj = self.pool.get('payment.order')
        date = False
        if context.get('order_id') and context['order_id']:
            order = payment_order_obj.browse(cr, uid, context['order_id'], context=context)
            if order.date_prefered == 'fixed':
                date = order.date_scheduled
            else:
                date = time.strftime('%Y-%m-%d')
        return date

    def _get_ml_inv_ref(self, cr, uid, ids, *a):
        """Function field: invoice id behind the linked move line, if any."""
        res = {}
        for id in self.browse(cr, uid, ids):
            res[id.id] = False
            if id.move_line_id:
                if id.move_line_id.invoice:
                    res[id.id] = id.move_line_id.invoice.id
        return res

    def _get_ml_maturity_date(self, cr, uid, ids, *a):
        """Function field: maturity date of the linked move line."""
        res = {}
        for id in self.browse(cr, uid, ids):
            if id.move_line_id:
                res[id.id] = id.move_line_id.date_maturity
            else:
                res[id.id] = False
        return res

    def _get_ml_created_date(self, cr, uid, ids, *a):
        """Function field: creation date of the linked move line."""
        res = {}
        for id in self.browse(cr, uid, ids):
            if id.move_line_id:
                res[id.id] = id.move_line_id.date_created
            else:
                res[id.id] = False
        return res

    _columns = {
        'name': fields.char('Your Reference', required=True),
        'communication': fields.char('Communication', required=True, help="Used as the message between ordering customer and current company. Depicts 'What do you want to say to the recipient about this order ?'"),
        'communication2': fields.char('Communication 2', help='The successor message of Communication.'),
        'move_line_id': fields.many2one('account.move.line', 'Entry line', domain=[('reconcile_id', '=', False), ('account_id.type', '=', 'payable')], help='This Entry Line will be referred for the information of the ordering customer.'),
        'amount_currency': fields.float('Amount in Partner Currency', digits=(16, 2),
            required=True, help='Payment amount in the partner currency'),
        'currency': fields.many2one('res.currency','Partner Currency', required=True),
        'company_currency': fields.many2one('res.currency', 'Company Currency', readonly=True),
        'bank_id': fields.many2one('res.partner.bank', 'Destination Bank Account'),
        'order_id': fields.many2one('payment.order', 'Order', required=True,
            ondelete='cascade', select=True),
        'partner_id': fields.many2one('res.partner', string="Partner", required=True, help='The Ordering Customer'),
        'amount': fields.function(_amount, string='Amount in Company Currency',
            type='float',
            help='Payment amount in the company currency'),
        'ml_date_created': fields.function(_get_ml_created_date, string="Effective Date",
            type='date', help="Invoice Effective Date"),
        'ml_maturity_date': fields.function(_get_ml_maturity_date, type='date', string='Due Date'),
        'ml_inv_ref': fields.function(_get_ml_inv_ref, type='many2one', relation='account.invoice', string='Invoice Ref.'),
        'info_owner': fields.function(_info_owner, string="Owner Account", type="text", help='Address of the Main Partner'),
        'info_partner': fields.function(_info_partner, string="Destination Account", type="text", help='Address of the Ordering Customer.'),
        'date': fields.date('Payment Date', help="If no payment date is specified, the bank will treat this payment line directly"),
        'create_date': fields.datetime('Created', readonly=True),
        'state': fields.selection([('normal','Free'), ('structured','Structured')], 'Communication Type', required=True),
        'bank_statement_line_id': fields.many2one('account.bank.statement.line', 'Bank statement line'),
        'company_id': fields.related('order_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
    }
    _defaults = {
        'name': lambda obj, cursor, user, context: obj.pool.get('ir.sequence'
            ).get(cursor, user, 'payment.line'),
        'state': 'normal',
        'currency': _get_currency,
        'company_currency': _get_currency,
        'date': _get_date,
    }
    _sql_constraints = [
        ('name_uniq', 'UNIQUE(name)', 'The payment line name must be unique!'),
    ]

    def onchange_move_line(self, cr, uid, ids, move_line_id, payment_type, date_prefered, date_scheduled, currency=False, company_currency=False, context=None):
        """Onchange: pre-fill the payment line from the selected move line
        (amounts, partner, currency, bank, communication and date)."""
        data = {}
        move_line_obj = self.pool.get('account.move.line')
        data['amount_currency'] = data['communication'] = data['partner_id'] = data['bank_id'] = data['amount'] = False
        if move_line_id:
            line = move_line_obj.browse(cr, uid, move_line_id, context=context)
            data['amount_currency'] = line.amount_residual_currency
            res = self.onchange_amount(cr, uid, ids, data['amount_currency'], currency,
                                       company_currency, context)
            if res:
                data['amount'] = res['value']['amount']
            data['partner_id'] = line.partner_id.id
            temp = line.currency_id and line.currency_id.id or False
            if not temp:
                if line.invoice:
                    data['currency'] = line.invoice.currency_id.id
            else:
                data['currency'] = temp
            # calling onchange of partner and updating data dictionary
            temp_dict = self.onchange_partner(cr, uid, ids, line.partner_id.id, payment_type)
            data.update(temp_dict['value'])
            data['communication'] = line.ref
        if date_prefered == 'now':
            #no payment date => immediate payment
            data['date'] = False
        elif date_prefered == 'due':
            data['date'] = line.date_maturity
        elif date_prefered == 'fixed':
            data['date'] = date_scheduled
        return {'value': data}

    def onchange_amount(self, cr, uid, ids, amount, currency, cmpny_currency, context=None):
        """Onchange: recompute the company-currency amount from the
        partner-currency amount."""
        if (not amount) or (not cmpny_currency):
            return {'value': {'amount': False}}
        res = {}
        currency_obj = self.pool.get('res.currency')
        company_amount = currency_obj.compute(cr, uid, currency, cmpny_currency, amount)
        res['amount'] = company_amount
        return {'value': res}

    def onchange_partner(self, cr, uid, ids, partner_id, payment_type, context=None):
        """Onchange: refresh the destination address and pick the first of the
        partner's bank accounts whose type suits the payment mode."""
        data = {}
        partner_obj = self.pool.get('res.partner')
        payment_mode_obj = self.pool.get('payment.mode')
        data['info_partner'] = data['bank_id'] = False
        if partner_id:
            part_obj = partner_obj.browse(cr, uid, partner_id, context=context)
            partner = part_obj.name or ''
            data['info_partner'] = self._get_info_partner(cr, uid, part_obj, context=context)
            if part_obj.bank_ids and payment_type:
                bank_type = payment_mode_obj.suitable_bank_types(cr, uid, payment_type, context=context)
                for bank in part_obj.bank_ids:
                    if bank.state in bank_type:
                        data['bank_id'] = bank.id
                        break
        return {'value': data}

    def fields_get(self, cr, uid, fields=None, context=None):
        """Override: make 'communication2' read-only while the communication
        type is 'structured'."""
        res = super(payment_line, self).fields_get(cr, uid, fields, context)
        if 'communication2' in res:
            res['communication2'].setdefault('states', {})
            res['communication2']['states']['structured'] = [('readonly', True)]
            res['communication2']['states']['normal'] = [('readonly', False)]
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sumanthha/fundafriend | django/utils/unittest/result.py | 570 | 6105 | """Test result object"""
import sys
import traceback
import unittest
from StringIO import StringIO
from django.utils.unittest import util
from django.utils.unittest.compatibility import wraps
__unittest = True
def failfast(method):
@wraps(method)
def inner(self, *args, **kw):
if getattr(self, 'failfast', False):
self.stop()
return method(self, *args, **kw)
return inner
# Templates used to append captured stdout/stderr to mirrored output and
# to formatted tracebacks when output buffering is enabled.
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(unittest.TestResult):
    """Holder for test result information.

    Test results are automatically managed by the TestCase and TestSuite
    classes, and do not need to be explicitly manipulated by writers of tests.

    Each instance holds the total number of tests run, and collections of
    failures and errors that occurred among those test runs. The collections
    contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
    formatted traceback of the error that occurred.
    """
    # Used by the test runner to track class/module level setUp state.
    _previousTestClass = None
    _moduleSetUpFailed = False
    def __init__(self):
        # NOTE: deliberately does not call unittest.TestResult.__init__;
        # every attribute the base class relies on is (re)initialised here.
        self.failfast = False
        self.failures = []
        self.errors = []
        self.testsRun = 0
        self.skipped = []
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.shouldStop = False
        # Output-buffering state: when self.buffer is True, stdout/stderr
        # are swapped for StringIO buffers around each test and mirrored
        # back to the real streams only if the test errored or failed.
        self.buffer = False
        self._stdout_buffer = None
        self._stderr_buffer = None
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr
        self._mirrorOutput = False
    def startTest(self, test):
        "Called when the given test is about to be run"
        self.testsRun += 1
        self._mirrorOutput = False
        if self.buffer:
            # Lazily create the capture buffers and redirect the streams.
            if self._stderr_buffer is None:
                self._stderr_buffer = StringIO()
                self._stdout_buffer = StringIO()
            sys.stdout = self._stdout_buffer
            sys.stderr = self._stderr_buffer
    def startTestRun(self):
        """Called once before any tests are executed.
        See startTest for a method called before each test.
        """
    def stopTest(self, test):
        """Called when the given test has been run"""
        if self.buffer:
            # Mirror captured output to the real streams only when an
            # error/failure marked it for display (_mirrorOutput).
            if self._mirrorOutput:
                output = sys.stdout.getvalue()
                error = sys.stderr.getvalue()
                if output:
                    if not output.endswith('\n'):
                        output += '\n'
                    self._original_stdout.write(STDOUT_LINE % output)
                if error:
                    if not error.endswith('\n'):
                        error += '\n'
                    self._original_stderr.write(STDERR_LINE % error)
            # Restore the real streams and reset (reuse) the buffers.
            sys.stdout = self._original_stdout
            sys.stderr = self._original_stderr
            self._stdout_buffer.seek(0)
            self._stdout_buffer.truncate()
            self._stderr_buffer.seek(0)
            self._stderr_buffer.truncate()
        self._mirrorOutput = False
    def stopTestRun(self):
        """Called once after all tests are executed.
        See stopTest for a method called after each test.
        """
    @failfast
    def addError(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().
        """
        self.errors.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True
    @failfast
    def addFailure(self, test, err):
        """Called when a failure has occurred. 'err' is a tuple of values as
        returned by sys.exc_info()."""
        self.failures.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True
    def addSuccess(self, test):
        "Called when a test has completed successfully"
        pass
    def addSkip(self, test, reason):
        """Called when a test is skipped."""
        self.skipped.append((test, reason))
    def addExpectedFailure(self, test, err):
        """Called when an expected failure/error occured."""
        self.expectedFailures.append(
            (test, self._exc_info_to_string(err, test)))
    @failfast
    def addUnexpectedSuccess(self, test):
        """Called when a test was expected to fail, but succeed."""
        self.unexpectedSuccesses.append(test)
    def wasSuccessful(self):
        "Tells whether or not this result was a success"
        return (len(self.failures) + len(self.errors) == 0)
    def stop(self):
        "Indicates that the tests should be aborted"
        self.shouldStop = True
    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string."""
        exctype, value, tb = err
        # Skip test runner traceback levels (frames whose globals carry the
        # __unittest marker).
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next
        if exctype is test.failureException:
            # Skip assert*() traceback levels
            length = self._count_relevant_tb_levels(tb)
            msgLines = traceback.format_exception(exctype, value, tb, length)
        else:
            msgLines = traceback.format_exception(exctype, value, tb)
        if self.buffer:
            # Append any buffered output to the formatted traceback so it is
            # visible in the failure report.
            output = sys.stdout.getvalue()
            error = sys.stderr.getvalue()
            if output:
                if not output.endswith('\n'):
                    output += '\n'
                msgLines.append(STDOUT_LINE % output)
            if error:
                if not error.endswith('\n'):
                    error += '\n'
                msgLines.append(STDERR_LINE % error)
        return ''.join(msgLines)
    def _is_relevant_tb_level(self, tb):
        # Frames belonging to the unittest machinery define __unittest = True.
        return '__unittest' in tb.tb_frame.f_globals
    def _count_relevant_tb_levels(self, tb):
        length = 0
        while tb and not self._is_relevant_tb_level(tb):
            length += 1
            tb = tb.tb_next
        return length
    def __repr__(self):
        return "<%s run=%i errors=%i failures=%i>" % \
               (util.strclass(self.__class__), self.testsRun, len(self.errors),
                len(self.failures))
| bsd-3-clause |
sotdjin/glibglab | venv/lib/python2.7/site-packages/flask_restful/utils/cors.py | 42 | 2084 | from datetime import timedelta
from flask import make_response, request, current_app
from functools import update_wrapper
def crossdomain(origin=None, methods=None, headers=None, expose_headers=None,
                max_age=21600, attach_to_all=True,
                automatic_options=True, credentials=False):
    """Decorator that adds CORS (Access-Control-*) headers to a Flask view.

    Based on http://flask.pocoo.org/snippets/56/

    :param origin: allowed origin(s); a string, an iterable of strings, or
        None to emit no Access-Control-Allow-Origin header.
    :param methods: iterable of allowed HTTP methods (defaults to the
        methods of the automatic OPTIONS response).
    :param headers: allowed request headers.
    :param expose_headers: headers exposed to the browser.
    :param max_age: preflight cache lifetime, seconds or a timedelta.
    :param attach_to_all: attach headers to every response, not only OPTIONS.
    :param automatic_options: serve OPTIONS via Flask's default response.
    :param credentials: emit Access-Control-Allow-Credentials: true.
    """
    if methods is not None:
        methods = ', '.join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, str):
        headers = ', '.join(x.upper() for x in headers)
    if expose_headers is not None and not isinstance(expose_headers, str):
        expose_headers = ', '.join(x.upper() for x in expose_headers)
    # Bug fix: the default origin=None previously crashed here with
    # "TypeError: can only join an iterable"; now only non-string iterables
    # are joined, and a None origin simply disables the Allow-Origin header.
    if origin is not None and not isinstance(origin, str):
        origin = ', '.join(origin)
    if isinstance(max_age, timedelta):
        max_age = max_age.total_seconds()
    def get_methods():
        if methods is not None:
            return methods
        options_resp = current_app.make_default_options_response()
        return options_resp.headers['allow']
    def decorator(f):
        def wrapped_function(*args, **kwargs):
            if automatic_options and request.method == 'OPTIONS':
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            if not attach_to_all and request.method != 'OPTIONS':
                return resp
            h = resp.headers
            if origin is not None:
                h['Access-Control-Allow-Origin'] = origin
            h['Access-Control-Allow-Methods'] = get_methods()
            h['Access-Control-Max-Age'] = str(max_age)
            if credentials:
                h['Access-Control-Allow-Credentials'] = 'true'
            if headers is not None:
                h['Access-Control-Allow-Headers'] = headers
            if expose_headers is not None:
                h['Access-Control-Expose-Headers'] = expose_headers
            return resp
        # Prevent Flask from auto-answering OPTIONS so the wrapper runs.
        f.provide_automatic_options = False
        return update_wrapper(wrapped_function, f)
    return decorator
| mit |
cfelton/minnesota | mn/cores/sdram/_sdram_sdr.py | 1 | 2470 |
from myhdl import *
from ...system import Clock
from _intf import SDRAM
def m_sdram(clock, reset, ibus, extram, refresh=True):
    """ SDRAM controller

    This module is an SDRAM controller to interface and control
    SDRAM modules. This module contains a state-machine that
    controls the different SDRAM modes including refresh etc.

    This module provides the translation from a flat memory-mapped
    bus (e.g. Wishbone, Avalon, AXI, etc.) to the SDRAM interface.
    Also intended to support memory-mapped and streaming (FIFO
    mode). In streaming / FIFO mode the external memory acts as
    one large FIFO. The SDRAM controller type is determine by
    the internal interface (ibus) passed.

    This SDRAM controller is a port of the Xess SDRAM controller
    for the Xula boards.
    https://github.com/xesscorp/XuLA/blob/master/FPGA/XuLA_lib/SdramCntl.vhd

    :param clock: system clock driving the sequential logic.
    :param reset: reset signal for the always_seq block.
    :param ibus: internal memory-mapped bus interface.
    :param extram: external SDRAM interface object (timing attributes
        cyc_init/cyc_ras/cyc_wr and the sd_* pins are read from it).
    :param refresh: enable automatic refresh (currently unused -- the
        state machine port is unfinished).
    """
    States = enum('INITWAIT', 'INITPCHG', 'INITSETMODE', 'INITRFSH',
                  'RW', 'ACTIVATE', 'REFRESHROW', 'SELFREFRESH')
    Commands = enum('nop', 'active', 'read', 'write',
                    'pchg', 'mode', 'rfsh',
                    encoding='binary')

    # Command bit patterns indexed in Commands declaration order.
    cmdlut = (intbv('011100')[5:],
              intbv('001100')[5:],
              intbv('010100')[5:],
              intbv('010000')[5:],
              intbv('001000')[5:],
              intbv('000000')[5:],
              intbv('000100')[5:])

    sdram = extram
    sdram.cmd = Signal(intbv(0)[5:])
    timer = Signal(intbv(0, min=0, max=sdram.cyc_init))
    ras_timer = Signal(intbv(0, min=0, max=sdram.cyc_ras))
    wr_timer = Signal(intbv(0, min=0, max=sdram.cyc_wr))

    state = Signal(States.INITWAIT)

    @always_seq(clock.posedge, reset=reset)
    def rtl_sdram_controller():

        # this is one big state-machine but ...
        if state == States.INITWAIT:
            if sdram.lock:
                timer.next = sdram.cyc_init
                # Bug fix: was `States.initpchg`, which raises AttributeError
                # (myhdl enum members keep the exact case they were declared with).
                state.next = States.INITPCHG
            else:
                sdram.sd_cke.next = False
                sdram.status.next = 1

        elif state == States.INITPCHG:
            # Bug fix: was `Commands.PCHG` (AttributeError); the Commands enum
            # members are lower-case.
            sdram.cmd.next = Commands.pchg
            # NOTE(review): CMDBITS and Commands.ALL_BANKS are defined nowhere
            # in this file -- this is an unfinished part of the VHDL port and
            # will raise NameError/AttributeError if this state is reached.
            sdram.addr[CMDBITS] = Commands.ALL_BANKS
            timer.next = sdram.cycles

    return rtl_sdram_controller
# default portmap
# Example signal bundle used when elaborating m_sdram stand-alone.
clock = Clock(0, frequency=100e6)
m_sdram.portmap = {
    'clock': clock,
    # NOTE: `async` became a reserved keyword in Python 3.7; this keyword
    # argument only works on the Python 2 / early-myhdl versions this file
    # targets (newer myhdl spells it `isasync`).
    'reset': ResetSignal(0, active=0, async=False),
    'ibus': None,
    # NOTE(review): key 'extmem' does not match the m_sdram parameter name
    # 'extram' -- confirm which spelling the elaboration code expects.
    'extmem': SDRAM(clock)
}
| gpl-3.0 |
LePastis/pyload | module/plugins/hoster/PornhubCom.py | 13 | 2476 | # -*- coding: utf-8 -*-
import re
from module.plugins.internal.Hoster import Hoster
class PornhubCom(Hoster):
    # Site-scraping download hoster: resolves an flv URL from a pornhub
    # video page via the site's Flash-remoting gateway (Python 2 era code).
    __name__ = "PornhubCom"
    __type__ = "hoster"
    __version__ = "0.52"
    __status__ = "testing"

    __pattern__ = r'http://(?:www\.)?pornhub\.com/view_video\.php\?viewkey=\w+'

    __description__ = """Pornhub.com hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("jeix", "jeix@hasnomail.de")]

    def process(self, pyfile):
        """Entry point: fetch the page, verify the video exists, then
        resolve the file name and flv URL and start the download."""
        self.download_html()
        if not self.file_exists():
            self.offline()

        pyfile.name = self.get_file_name()
        self.download(self.get_file_url())

    def download_html(self):
        """Fetch the video page and cache its HTML on ``self.html``."""
        url = self.pyfile.url
        self.html = self.load(url)

    def get_file_url(self):
        """
        Returns the absolute downloadable filepath
        """
        if not self.html:
            self.download_html()

        url = "http://www.pornhub.com//gateway.php"
        video_id = self.pyfile.url.split('=')[-1]
        #: Thanks to jD team for this one v
        # Hand-built AMF (Flash remoting) request body: the escape bytes are
        # the AMF envelope, followed by the length-prefixed video id.
        post_data = "\x00\x03\x00\x00\x00\x01\x00\x0c\x70\x6c\x61\x79\x65\x72\x43\x6f\x6e\x66\x69\x67\x00\x02\x2f\x31\x00\x00\x00\x44\x0a\x00\x00\x00\x03\x02\x00"
        post_data += chr(len(video_id))
        post_data += video_id
        post_data += "\x02\x00\x02\x2d\x31\x02\x00\x20"
        # presumably a site/player API key baked into the protocol -- TODO confirm
        post_data += "add299463d4410c6d1b1c418868225f7"

        content = self.load(url, post=str(post_data))

        # Replace non-printable AMF framing bytes with '#' so the regex
        # below can scan the payload as plain text (the '##' in the pattern
        # matches these substituted framing bytes).
        new_content = ""
        for x in content:
            if ord(x) < 32 or ord(x) > 176:
                new_content += '#'
            else:
                new_content += x

        content = new_content

        return re.search(r'flv_url.*(http.*?)##post_roll', content).group(1)

    def get_file_name(self):
        """Derive the file name from the page <title>, falling back to the
        page's <h1> headings, and append an ``.flv`` extension."""
        if not self.html:
            self.download_html()

        m = re.search(r'<title.+?>([^<]+) - ', self.html)
        if m:
            name = m.group(1)
        else:
            matches = re.findall('<h1>(.*?)</h1>', self.html)
            # The first <h1> may be the site banner; prefer the second.
            if len(matches) > 1:
                name = matches[1]
            else:
                name = matches[0]

        return name + '.flv'

    def file_exists(self):
        """
        Returns True or False
        """
        if not self.html:
            self.download_html()

        if re.search(r'This video is no longer in our database or is in conversion', self.html):
            return False
        else:
            return True
| gpl-3.0 |
adobecs5/urp2015 | lib/python3.4/site-packages/setuptools/tests/environment.py | 151 | 1611 | import os
import sys
import unicodedata
from subprocess import Popen as _Popen, PIPE as _PIPE
def _which_dirs(cmd):
result = set()
for path in os.environ.get('PATH', '').split(os.pathsep):
filename = os.path.join(path, cmd)
if os.access(filename, os.X_OK):
result.add(path)
return result
def run_setup_py(cmd, pypath=None, path=None,
                 data_stream=0, env=None):
    """
    Execution command for tests, separate from those used by the
    code directly to prevent accidental behavior issues
    """
    if env is None:
        # Start from a copy of the current environment.
        env = dict(os.environ)

    # override the python path if needed
    if pypath is not None:
        env["PYTHONPATH"] = pypath

    # override the execution path if needed
    if path is not None:
        env["PATH"] = path
    if not env.get("PATH", ""):
        # Fall back to wherever tar/gzip live so archive commands still work.
        env["PATH"] = _which_dirs("tar").union(_which_dirs("gzip"))
        env["PATH"] = os.pathsep.join(env["PATH"])

    full_cmd = [sys.executable, "setup.py"] + list(cmd)

    # http://bugs.python.org/issue8557
    use_shell = sys.platform == 'win32'

    try:
        process = _Popen(
            full_cmd, stdout=_PIPE, stderr=_PIPE, shell=use_shell, env=env,
        )
        data = process.communicate()[data_stream]
    except OSError:
        return 1, ''

    # decode the console bytes if needed, normalizing to NFC
    if hasattr(data, "decode"):
        # use the default encoding
        data = data.decode()
        data = unicodedata.normalize('NFC', data)

    # communicate() already waited on the process, so returncode is set
    return process.returncode, data
| apache-2.0 |
ayepezv/GAD_ERP | addons/website_customer/controllers/main.py | 1 | 4619 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import werkzeug.urls
from odoo import http
from odoo.addons.website.models.website import unslug
from odoo.tools.translate import _
from odoo.http import request
class WebsiteCustomer(http.Controller):
    # Public "our references" pages: a paged customer index filterable by
    # country and tag, plus a per-partner detail page.
    _references_per_page = 20

    @http.route([
        '/customers',
        '/customers/page/<int:page>',
        '/customers/country/<int:country_id>',
        '/customers/country/<country_name>-<int:country_id>',
        '/customers/country/<int:country_id>/page/<int:page>',
        '/customers/country/<country_name>-<int:country_id>/page/<int:page>',
        '/customers/tag/<tag_id>',
        '/customers/tag/<tag_id>/page/<int:page>',
        '/customers/tag/<tag_id>/country/<int:country_id>',
        '/customers/tag/<tag_id>/country/<country_name>-<int:country_id>',
        '/customers/tag/<tag_id>/country/<int:country_id>/page/<int:page>',
        '/customers/tag/<tag_id>/country/<country_name>-<int:country_id>/page/<int:page>',
    ], type='http', auth="public", website=True)
    def customers(self, country_id=0, page=0, country_name='', tag_id=0, **post):
        """Render the paged customer index, filtered by country, tag and
        free-text search (``post['search']`` matches name or description)."""
        Country = request.env['res.country']
        Tag = request.env['res.partner.tag']
        Partner = request.env['res.partner']
        partner_name = post.get('search', '')

        # Only published partners with an assigned (grade) partner are shown.
        domain = [('website_published', '=', True), ('assigned_partner_id', '!=', False)]
        if partner_name:
            domain += [
                '|',
                ('name', 'ilike', post.get("search")),
                ('website_description', 'ilike', post.get("search"))
            ]
        if tag_id:
            # tag_id arrives as a slug string; unslug yields (name, id).
            tag_id = unslug(tag_id)[1] or 0
            domain += [('tag_ids', 'in', tag_id)]

        # group by country, based on customers found with the search(domain)
        countries = Partner.sudo().read_group(domain, ["id", "country_id"], groupby="country_id", orderby="country_id")
        country_count = Partner.sudo().search_count(domain)

        if country_id:
            domain += [('country_id', '=', country_id)]
            # Ensure the selected country appears in the sidebar even when
            # it has no matching customers for the current search.
            if not any(x['country_id'][0] == country_id for x in countries if x['country_id']):
                country = Country.browse(country_id).read(['name'])
                if country:
                    countries.append({
                        'country_id_count': 0,
                        'country_id': (country_id, country['name'])
                    })
                    countries.sort(key=lambda d: d['country_id'] and d['country_id'][1])

        curr_country = Country.browse(country_id)
        # "All Countries" pseudo-entry carrying the unfiltered total.
        countries.insert(0, {
            'country_id_count': country_count,
            'country_id': (0, _("All Countries"))
        })

        # search customers to display
        partner_count = Partner.sudo().search_count(domain)

        # pager
        url = '/customers'
        if country_id:
            url += '/country/%s' % country_id
        pager = request.website.pager(
            url=url, total=partner_count, page=page, step=self._references_per_page,
            scope=7, url_args=post
        )

        partners = Partner.sudo().search(domain, offset=pager['offset'], limit=self._references_per_page)
        google_map_partner_ids = ','.join(map(str, partners.ids))

        # Only tags that are published AND used by the partners on this page.
        tags = Tag.search([('website_published', '=', True), ('partner_ids', 'in', partners.ids)], order='classname, name ASC')
        tag = tag_id and Tag.browse(tag_id) or False

        values = {
            'countries': countries,
            'current_country_id': country_id or 0,
            'current_country': curr_country if country_id else False,
            'partners': partners,
            'google_map_partner_ids': google_map_partner_ids,
            'pager': pager,
            'post': post,
            'search_path': "?%s" % werkzeug.url_encode(post),
            'tag': tag,
            'tags': tags,
        }
        return request.render("website_customer.index", values)

    # Do not use semantic controller due to SUPERUSER_ID
    @http.route(['/customers/<partner_id>'], type='http', auth="public", website=True)
    def partners_detail(self, partner_id, **post):
        """Render a single published partner's detail page; fall back to the
        index when the slug does not resolve to a published partner."""
        _, partner_id = unslug(partner_id)
        if partner_id:
            partner = request.env['res.partner'].sudo().browse(partner_id)
            if partner.exists() and partner.website_published:
                values = {}
                values['main_object'] = values['partner'] = partner
                return request.render("website_customer.details", values)
        return self.customers(**post)
| gpl-3.0 |
peraktong/Cannon-Experiment | AnniesLasso_2/cannon.py | 1 | 38188 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A pedestrian version of The Cannon.
"""
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = ["CannonModel"]
import logging
from numpy.linalg import inv
import numpy as np
import scipy.optimize as op
import os
from scipy.ndimage import gaussian_filter
# by Jason
# try to use gnumpy
#import gnumpy as gpu
import math
import pickle
def log10(x):
    """Return the base-10 logarithm of ``x`` (thin wrapper over math.log10)."""
    return math.log10(x)
# sinc interpolation
# sinc interpolation
def sinc_interp(x, s, u):
    """Whittaker-Shannon (sinc) interpolation.

    Interpolates the signal ``x``, sampled at the uniformly spaced instants
    ``s``, onto the new instants ``u`` ("u" for "upsampled").

    Ported from Matlab:
    http://phaseportrait.blogspot.com/2008/06/sinc-interpolation-in-matlab.html

    :param x: sample values; must have the same length as ``s``.
    :param s: uniformly spaced sample instants (numpy array).
    :param u: output instants.
    :returns: interpolated values at ``u``.
    :raises ValueError: if ``x`` and ``s`` differ in length.
    """
    if len(x) != len(s):
        # Bug fix: this used to only print a warning and then continue,
        # producing a garbage (mis-shaped) result; it is now a hard error.
        raise ValueError("len(x) should be equal to len(s)")
    # Find the period (assumes s is uniformly spaced).
    T = s[1] - s[0]

    # sincM[i, j] = u[j] - s[i]; np.sinc is the normalized sinc function.
    sincM = np.tile(u, (len(s), 1)) - np.tile(s[:, np.newaxis], (1, len(u)))
    y = np.dot(x, np.sinc(sincM / T))
    return y
from . import (model, utils)
logger = logging.getLogger(__name__)
class CannonModel(model.BaseCannonModel):
"""
A generalised Cannon model for the estimation of arbitrary stellar labels.
:param labelled_set:
A set of labelled objects. The most common input form is a table with
columns as labels, and stars/objects as rows.
:type labelled_set:
:class:`~astropy.table.Table` or a numpy structured array
:param normalized_flux:
An array of normalized fluxes for stars in the labelled set, given as
shape `(num_stars, num_pixels)`. The `num_stars` should match the number
of rows in `labelled_set`.
:type normalized_flux:
:class:`np.ndarray`
:param normalized_ivar:
An array of inverse variances on the normalized fluxes for stars in the
labelled set. The shape of the `normalized_ivar` array should match that
of `normalized_flux`.
:type normalized_ivar:
:class:`np.ndarray`
:param dispersion: [optional]
The dispersion values corresponding to the given pixels. If provided,
this should have length `num_pixels`.
:type dispersion:
:class:`np.array`
:param threads: [optional]
Specify the number of parallel threads to use. If `threads > 1`, the
training and prediction phases will be automagically parallelised.
:type threads:
int
:param pool: [optional]
Specify an optional multiprocessing pool to map jobs onto.
This argument is only used if specified and if `threads > 1`.
:type pool:
bool
"""
    def __init__(self, *args, **kwargs):
        """Initialize via BaseCannonModel; see the class docstring for the
        accepted arguments (labelled_set, normalized_flux, normalized_ivar, ...)."""
        super(CannonModel, self).__init__(*args, **kwargs)
    @model.requires_model_description
    def train(self, fixed_scatter=True, **kwargs):
        """
        Train the model based on the labelled set.

        :param fixed_scatter: [optional]
            Keep the intrinsic pixel variance (s2) fixed during training.
            Only True is currently supported (see the assert below).

        On success this sets ``self.theta`` (per-pixel spectral coefficients)
        and ``self.s2`` (per-pixel intrinsic variance).
        """
        # Experimental/asthetic keywords:
        # use_neighbouring_pixels, progressbar
        assert fixed_scatter, "Are you refactoring?"
        if self.s2 is None:
            logger.warn("Fixing and assuming s2 = 0")
            self.s2 = 0
        if fixed_scatter and self.s2 is None:
            raise ValueError("intrinsic pixel variance (s2) must be set "
                "before training if fixed_scatter is set to True")
        # We default use_neighbouring_pixels to None so that we can default it
        # to True later if we want, but we can warn the user if they explicitly
        # set it to True and we intend to ignore it.
        use_neighbouring_pixels = kwargs.pop("use_neighbouring_pixels", None)
        if self.theta is None:
            if use_neighbouring_pixels is None:
                use_neighbouring_pixels = True
            initial_theta = [None] * self.dispersion.size
        else:
            # Since theta is already set, we will ignore neighbouring pixels.
            if use_neighbouring_pixels is True:
                use_neighbouring_pixels = False
                logger.warn("Ignoring neighbouring pixels because theta is "
                    "already provided.")
            initial_theta = self.theta.copy()
        # Initialize the scatter.
        initial_s2 = self.s2 if fixed_scatter \
            else 0.01**2 * np.ones_like(self.dispersion)
        # Prepare the method and arguments.
        fitting_function = kwargs.pop("function", _fit_pixel)
        kwds = {
            "fixed_scatter": fixed_scatter,
            "design_matrix": self.design_matrix,
            "op_kwargs": kwargs.pop("op_kwargs", {}),
            "op_bfgs_kwargs": kwargs.pop("op_bfgs_kwargs", {})
        }
        N_stars, N_pixels = self.normalized_flux.shape
        logger.info("Training {0}-label {1} with {2} stars and {3} pixels/star"\
            .format(len(self.vectorizer.label_names), type(self).__name__,
                N_stars, N_pixels))
        # Arguments:
        # initial_theta, initial_s2, flux, ivar, design_matrix_mask,
        # [additional_args],
        # design_matrix, **kwargs
        args = [initial_theta, initial_s2, self.normalized_flux.T,
            self.normalized_ivar.T, self.censored_vectorizer_terms]
        args.extend(kwargs.get("additional_args", []))
        # Write the design matrix to a temporary file.
        temporary_filenames = []
        """
        Not clear whether this is still needed because this issue was
        complicated by some Legacy Python issues. But I'm not ready to remove
        this comment because I have forgotten about this issue twice before in
        the past and it ruined my day.
        temporary_filename = utils._pack_value(self.design_matrix)
        kwds["design_matrix"] = temporary_filename
        temporary_filenames.append(temporary_filename)
        """
        # N_items only drives the progressbar length (0 disables it).
        N_items = N_pixels if kwargs.get("progressbar", True) else 0
        # Wrap the function so we can parallelize it out.
        mapper = map if self.pool is None else self.pool.map
        try:
            f = utils.wrapper(fitting_function, None, kwds, N_items)
            if self.pool is None and use_neighbouring_pixels:
                # Serial path: warm-start each pixel from its neighbour's theta.
                output = []
                last_theta = []
                for j, row in enumerate(zip(*args)):
                    if j > 0:
                        # Update with determined theta from neighbour pixel.
                        row = list(row)
                        row[0] = last_theta
                        row = tuple(row)
                    output.append(f(row))
                    last_theta = output[-1][0][:self.design_matrix.shape[1]]
            else:
                output = mapper(f, [row for row in zip(*args)])
        except KeyboardInterrupt:
            logger.warn("Keyboard interrupted training step!")
            # Clean up any temporary files in case we are debugging.
            for filename in temporary_filenames:
                if os.path.exists(filename): os.remove(filename)
            # re-raise a suppressed exception?
            raise
        # Clean up any temporary files.
        for filename in temporary_filenames:
            if os.path.exists(filename): os.remove(filename)
        # Unpack the results: each row is (theta..., s2) for one pixel.
        results, metadata = zip(*output)
        results = np.array(results)
        self.theta, self.s2 = (results[:, :-1], results[:, -1])
        return None
@model.requires_training_wheels
def predict(self, labels, **kwargs):
"""
Predict spectra from the trained model, given the labels.
:param labels:
The label values to predict model spectra of. The length and order
should match what is required of the vectorizer
(`CannonModel.vectorizer.label_names`).
"""
return np.dot(self.theta, self.vectorizer(labels).T).T
@model.requires_training_wheels
# By Jason Cao
def fitting_spectrum_parameters(self,normalized_flux,normalized_ivar,inf_flux):
nor = normalized_flux
inf = inf_flux
ivar = normalized_ivar
n_pixel = nor[0, :].size
n_star = inf[:, 0].size
one = np.ones(n_star)
# new method for building matrix
x_data = np.c_[one,inf]
x_data = x_data[:,0:n_pixel]
y_data =inf
z_data = np.c_[inf,one]
z_data = z_data[:,1:n_pixel+1]
# fit
# It's not good. let's do it one star each time.
left = np.zeros((3,3))
right = np.zeros(3)
for p in range(0, n_star):
x_data_p = x_data[p, :]
y_data_p = y_data[p, :]
z_data_p = z_data[p, :]
nor_p = nor[p, :]
ivar_p = ivar[p, :]
# construct
ivar_r = ivar_p.ravel()
ni = len(ivar_r)
print("calculating parameters",p,"{:.2f}%".format(p/n_star*100))
c = np.zeros((ni, ni))
for i in range(0, ni):
c[i, i] = ivar_r[i]
y = nor_p.ravel()
a = np.c_[np.c_[x_data_p.ravel(), y_data_p.ravel()], z_data_p.ravel()]
left += np.dot(np.dot(a.T, c), a)
right += np.dot(np.dot(a.T,c), y)
parameters = np.dot(inv(left), right)
opt_flux = parameters[0]*x_data+parameters[1]*y_data+parameters[2]*z_data
print("finish fitting")
# build theta:
zero = np.ones(n_pixel)
theta_x = np.c_[zero,self.theta]
theta_x = x_data[:,0:n_pixel]
theta_y =inf
theta_z = np.c_[self.theta,zero]
theta_z = z_data[:,1:n_pixel+1]
theta_opt = parameters[0]*theta_x+parameters[1]*theta_y+parameters[2]*theta_z
return opt_flux,theta_opt,parameters
    # fitting_spectrum_parameters_single returns the per-star shift
    # parameters (a, b, c). It also propagates each star's 3x3 parameter
    # covariance into a single radial-velocity uncertainty, stored as a
    # length-n_star array on self.uncertainty.
def fitting_spectrum_parameters_single(self,normalized_flux,normalized_ivar,inf_flux):
nor = normalized_flux
inf = inf_flux
ivar = normalized_ivar
n_pixel = nor[0, :].size
n_star = inf[:, 0].size
one = np.ones(n_star)
# new method for building matrix
z_data = np.c_[one,inf]
z_data = z_data[:,0:n_pixel]
y_data =inf
x_data = np.c_[inf,one]
x_data = x_data[:,1:n_pixel+1]
self.x_data =x_data
self.y_data =y_data
self.z_data =z_data
# fit
# It's not good. let's do it one star each time.
left = np.zeros((3,3))
right = np.zeros(3)
un = np.zeros((3,3))
parameters=np.array([0,1,0])
opt_flux = np.ones(n_pixel)
for p in range(0, n_star):
x_data_p = x_data[p, :]
y_data_p = y_data[p, :]
z_data_p = z_data[p, :]
nor_p = nor[p, :]
ivar_p = ivar[p, :]
# construct
ivar_r = ivar_p.ravel()
ni = len(ivar_r)
print("calculating parameters",p,"{:.2f}%".format(p/n_star*100))
c = np.zeros((ni, ni))
for i in range(0, ni):
c[i, i] = ivar_r[i]
y = nor_p.ravel()
a = np.c_[np.c_[x_data_p.ravel(), y_data_p.ravel()], z_data_p.ravel()]
left = np.dot(np.dot(a.T, c), a)
right = np.dot(np.dot(a.T,c), y)
un_p = inv(left)
parameters_p =np.dot(inv(left), right)
opt_flux = np.vstack((opt_flux,parameters_p[0]*x_data_p+parameters_p[1]*y_data_p+parameters_p[2]*z_data_p))
parameters = np.vstack((parameters,parameters_p))
print(parameters_p)
un = np.dstack((un,un_p))
print("finish fitting")
# reshape
parameters = parameters[1:(n_star+1),:]
opt_flux = opt_flux[1:(n_star + 1), :]
# new method for calculating velocity uncertainty:
gamma = un[:,:,1:(n_star + 1)]
# calculate for each star
N_star = len(parameters[:,0])
RV_un = []
for i in range(0,N_star):
a = parameters[i,0]
b = parameters[i,1]
c = parameters[i,2]
J = np.array([-(2.*c+b)/(a+b+c)**2., (a-c)/(a+b+c)**2. , (2.*a+b)/(a+b+c)**2.])
gamma_i = gamma[:,:,i]
RV_un.append(4144.68*(np.dot(np.dot(J,gamma_i),J.T))**0.5)
RV_un = np.array(RV_un)
self.uncertainty = RV_un
self.opt_flux = opt_flux
# the shape of the uncertainty is 3*3*N
print(parameters.shape,n_star,opt_flux.shape,RV_un.shape)
return opt_flux,parameters
def fitting_spectrum_parameters_single_5(self,normalized_flux,normalized_ivar,inf_flux):
nor = normalized_flux
inf = inf_flux
ivar = normalized_ivar
n_pixel = nor[0, :].size
n_star = inf[:, 0].size
one = np.ones(n_star)
# new method for building matrix
l_1 = np.c_[one,inf]
l_1 = l_1[:,0:n_pixel]
l_2 = np.c_[one,l_1]
l_2 = l_2[:, 0:n_pixel]
m_0 =inf
r_1 = np.c_[inf,one]
r_1 = r_1[:,1:n_pixel+1]
r_2 = np.c_[r_1,one]
r_2 = r_2[:, 1:n_pixel + 1]
# fit
# It's not good. let's do it one star each time.
un = np.zeros((5,5))
parameters=np.array([0,0,0,0,0])
opt_flux = np.ones(n_pixel)
for p in range(0, n_star):
l_2_p =l_2[p,:]
l_1_p = l_1[p, :]
m_0_p = m_0[p, :]
r_1_p = r_1[p, :]
r_2_p = r_2[p, :]
nor_p = nor[p, :]
ivar_p = ivar[p, :]
# construct
ivar_r = ivar_p.ravel()
ni = len(ivar_r)
print("calculating parameters_5",p,"{:.2f}%".format(p/n_star*100))
c = np.zeros((ni, ni))
for i in range(0, ni):
c[i, i] = ivar_r[i]
y = nor_p.ravel()
a = np.c_[np.c_[l_2_p.ravel(), l_1_p.ravel()], m_0_p.ravel()]
a = np.c_[a,r_1_p.ravel()]
a = np.c_[a,r_2_p.ravel()]
left = np.dot(np.dot(a.T, c), a)
right = np.dot(np.dot(a.T,c), y)
un_p = inv(left)
parameters_p =np.dot(inv(left), right)
opt_flux = np.vstack((opt_flux,parameters_p[0]*l_2_p+parameters_p[1]*l_1_p+parameters_p[2]*m_0_p
+parameters_p[3]*r_1_p+parameters_p[4]*r_2_p))
parameters = np.vstack((parameters,np.dot(inv(left), right)))
un = np.dstack((un,un_p))
print("finish fitting 5 parameters")
# reshape
parameters = parameters[1:(n_star+1),:]
opt_flux = opt_flux[1:(n_star + 1), :]
un = un[:,:,1:(n_star + 1)]
self.uncertainty = un
self.opt_flux = opt_flux
# the shape of the uncertainty is 3*3*N
print(parameters.shape,n_star,opt_flux.shape,un.shape)
return opt_flux,parameters
    ##
    # Half-pixel (sinc-interpolated) variant of the three-parameter fit.
    # NOTE: despite the original plan, this does NOT use gnumpy/CUDA -- the
    # gnumpy import above is commented out and everything runs on numpy.
def fitting_spectrum_para_a0b0c0(self,wl,nor,ivar,inf):
# use sinc interpolation
# move half pixel
wl_log = list(map(log10, wl))
wl_log = np.array(wl_log)
n_pixel = nor[0, :].size
n_star = inf[:, 0].size
one = np.ones(n_star)
# new method for building matrix
# move half pixel
# sinc interpolation
x_data = []
y_data = []
z_data = []
y_data =inf
ones = np.ones(len(wl))
# x moves left and z moves right
wl_log_x = wl_log + (-0.5) * (6 * 10 ** (-6)) * ones
wl_log_z = wl_log + (0.5) * (6 * 10 ** (-6)) * ones
for i in range(0,n_star):
print("Doing sinc interpolation on star %d of %d"%(i+1,n_star))
x_data_i = sinc_interp(inf[i,:],wl_log_x,wl_log)
z_data_i = sinc_interp(inf[i,:],wl_log_z,wl_log)
x_data.append(x_data_i)
z_data.append(z_data_i)
x_data = np.array(x_data)
z_data = np.array(z_data)
self.x_data = x_data
self.y_data = y_data
self.z_data = z_data
# fit
# It's not good. let's do it one star each time.
left = np.zeros((3, 3))
right = np.zeros(3)
un = np.zeros((3, 3))
parameters = np.array([0, 1, 0])
opt_flux = np.ones(n_pixel)
for p in range(0, n_star):
x_data_p = x_data[p, :]
y_data_p = y_data[p, :]
z_data_p = z_data[p, :]
nor_p = nor[p, :]
ivar_p = ivar[p, :]
# construct
ivar_r = ivar_p.ravel()
ni = len(ivar_r)
print("calculating parameters", p, "{:.2f}%".format(p / n_star * 100))
c = np.zeros((ni, ni))
for i in range(0, ni):
c[i, i] = ivar_r[i]
y = nor_p.ravel()
a = np.c_[np.c_[x_data_p.ravel(), y_data_p.ravel()], z_data_p.ravel()]
left = np.dot(np.dot(a.T, c), a)
right = np.dot(np.dot(a.T, c), y)
un_p = inv(left)
parameters_p = np.dot(inv(left), right)
opt_flux = np.vstack(
(opt_flux, parameters_p[0] * x_data_p + parameters_p[1] * y_data_p + parameters_p[2] * z_data_p))
parameters = np.vstack((parameters, np.dot(inv(left), right)))
un = np.dstack((un, un_p))
print("finish fitting")
# reshape
parameters = parameters[1:(n_star + 1), :]
opt_flux = opt_flux[1:(n_star + 1), :]
un = un[:, :, 1:(n_star + 1)]
self.uncertainty = un
self.opt_flux = opt_flux
# the shape of the uncertainty is 3*3*N
print(parameters.shape, n_star, opt_flux.shape, un.shape)
return opt_flux, parameters
# Return delta_chi_squared, which should be bigger than 0
def delta_chi_squared(self,normalzied_flux,normalized_ivar,inf_flux):
opt_flux = self.opt_flux
N_star = len(inf_flux[:,0])
delta_chi = []
chi = []
for p in range(0, N_star):
ivar_r = normalized_ivar[p, :]
ni = len(ivar_r)
c = np.zeros((ni, ni))
print("Calculating delta-chi-squared",p,"{:.2f}%".format(p/N_star*100))
for i in range(0, ni):
c[i, i] = ivar_r[i]
# correct chi-squared
a_old = np.dot(np.dot(normalzied_flux[p, :] - inf_flux[p, :], c), (normalzied_flux[p, :] - inf_flux[p, :]).T)
a_opt = np.dot(np.dot(normalzied_flux[p, :] - opt_flux[p, :], c), (normalzied_flux[p, :] - opt_flux[p, :]).T)
delta_p = a_old-a_opt
chi.append(a_old)
delta_chi.append(delta_p)
delta_chi = np.array(delta_chi)
chi = np.array(chi)
self.chi_squared = chi
return delta_chi
def chi_squared(self,normalized_flux,normalized_ivar,inf_flux):
N_star = len(inf_flux[:,0])
chi = []
for p in range(0, N_star):
ivar_r = normalized_ivar[p, :]
ni = len(ivar_r)
c = np.zeros((ni, ni))
print("Calculating delta-chi-squared",p,"{:.2f}%".format(p/N_star*100))
for i in range(0, ni):
c[i, i] = ivar_r[i]
# correct chi-squared
a_old = np.dot(np.dot(normalized_flux[p, :] - inf_flux[p, :], c), (normalized_flux[p, :] - inf_flux[p, :]).T)
chi.append(a_old)
chi = np.array(chi)
return chi
    def fit(self, normalized_flux, normalized_ivar, initial_labels=None,
        model_lsf=False, model_redshift=False, full_output=False, **kwargs):
        """
        Solve the labels for the given normalized fluxes and inverse variances.
        :param normalized_flux:
            A `(N_star, N_pixels)` shape of normalized fluxes that are on the
            same dispersion scale as the trained data.
        :param normalized_ivar:
            The inverse variances of the normalized flux values. This should
            have the same shape as `normalized_flux`.
        :param initial_labels: [optional]
            The initial points to optimize from. If not given, only one
            initialization will be made from the fiducial label point.
        :param model_lsf: [optional]
            Optionally convolve the spectral model with a Gaussian broadening
            kernel of unknown width when fitting the data.
        :param model_redshift: [optional]
            Optionally redshift the spectral model when fitting the data.
        :returns:
            The labels. If `full_output` is set to True, then a three-length
            tuple of `(labels, covariance_matrix, metadata)` will be returned.
        """
        # Ensure 2-D inputs so a single spectrum can be passed as a 1-D array.
        normalized_flux = np.atleast_2d(normalized_flux)
        normalized_ivar = np.atleast_2d(normalized_ivar)
        N_spectra = normalized_flux.shape[0]
        if initial_labels is None:
            initial_labels = self.vectorizer.fiducials
        initial_labels = np.atleast_2d(initial_labels)
        # Prepare the wrapper function and data.
        message = None if not kwargs.pop("progressbar", True) \
            else "Fitting {0} spectra".format(N_spectra)
        # Bind the per-spectrum fitting routine with the trained model state;
        # each call receives one (flux, ivar) pair from the zipped args.
        f = utils.wrapper(_fit_spectrum,
            (self.dispersion, initial_labels, self.vectorizer, self.theta,
                self.s2, model_lsf, model_redshift),
            kwargs, N_spectra, message=message)
        args = (normalized_flux, normalized_ivar)
        # Fan out over spectra, optionally through a multiprocessing pool.
        mapper = map if self.pool is None else self.pool.map
        labels, cov, metadata = zip(*mapper(f, zip(*args)))
        labels, cov = (np.array(labels), np.array(cov))
        return (labels, cov, metadata) if full_output else labels
    def fit_opt(self, normalized_flux, normalized_ivar, initial_labels=None,
        model_lsf=False, model_redshift=False, full_output=False, **kwargs):
        """
        Solve the labels for the given normalized fluxes and inverse variances.
        Unlike `fit`, this variant first re-optimizes the theta coefficients
        against the data before fitting labels.
        :param normalized_flux:
            A `(N_star, N_pixels)` shape of normalized fluxes that are on the
            same dispersion scale as the trained data.
        :param normalized_ivar:
            The inverse variances of the normalized flux values. This should
            have the same shape as `normalized_flux`.
        :param initial_labels: [optional]
            The initial points to optimize from. If not given, only one
            initialization will be made from the fiducial label point.
        :param model_lsf: [optional]
            Optionally convolve the spectral model with a Gaussian broadening
            kernel of unknown width when fitting the data.
        :param model_redshift: [optional]
            Optionally redshift the spectral model when fitting the data.
        :returns:
            The labels. If `full_output` is set to True, then a three-length
            tuple of `(labels, covariance_matrix, metadata)` will be returned.
        """
        normalized_flux = np.atleast_2d(normalized_flux)
        normalized_ivar = np.atleast_2d(normalized_ivar)
        N_spectra = normalized_flux.shape[0]
        if initial_labels is None:
            initial_labels = self.vectorizer.fiducials
        initial_labels = np.atleast_2d(initial_labels)
        # Prepare the wrapper function and data.
        message = None if not kwargs.pop("progressbar", True) \
            else "Fitting {0} spectra".format(N_spectra)
        # add something
        # Build model fluxes from the labelled-set fit, then re-derive an
        # optimized theta against these observations.
        inferred_labels = self.fit_labelled_set()
        inf = np.dot(self.theta, self.vectorizer(inferred_labels).T).T
        # NOTE(review): three values are unpacked here, but the version of
        # fitting_spectrum_parameters visible earlier in this file returns
        # two (opt_flux, parameters) — confirm which implementation applies.
        opt_flux,theta_opt,parameters = self.fitting_spectrum_parameters(normalized_flux,normalized_ivar,inf)
        # Same per-spectrum fitting as `fit`, but with the optimized theta.
        f = utils.wrapper(_fit_spectrum,
            (self.dispersion, initial_labels, self.vectorizer, theta_opt,
            self.s2, model_lsf, model_redshift),
            kwargs, N_spectra, message=message)
        args = (normalized_flux, normalized_ivar)
        mapper = map if self.pool is None else self.pool.map
        print("OPT")
        labels, cov, metadata = zip(*mapper(f, zip(*args)))
        labels, cov = (np.array(labels), np.array(cov))
        return (labels, cov, metadata) if full_output else labels
    @model.requires_training_wheels
    def _set_s2_by_hogg_heuristic(self):
        """
        Set the pixel scatter by Hogg's heuristic.
        For each pixel, choose the scatter s that makes the mean adjusted
        chi-squared of the training residuals equal to one, then store s^2
        on ``self.s2``.
        See https://github.com/andycasey/AnniesLasso_2/issues/31 for more details.
        """
        model_flux = self.predict(self.labels_array)
        residuals_squared = (model_flux - self.normalized_flux)**2
        def objective_function(s, residuals_squared, ivar):
            # Deflate the ivar by the trial scatter and measure how far the
            # mean chi-squared is from 1 (squared so the optimum is a minimum).
            adjusted_ivar = ivar/(1. + ivar * s**2)
            chi_sq = residuals_squared * adjusted_ivar
            return (np.mean(chi_sq) - 1.0)**2
        s = []
        # One scalar optimization per pixel across all training stars.
        for j in range(self.dispersion.size):
            s.append(op.fmin(objective_function, 0,
                args=(residuals_squared[:, j], self.normalized_ivar[:, j]),
                disp=False))
        self.s2 = np.array(s)**2
        return True
def _estimate_label_vector(theta, s2, normalized_flux, normalized_ivar,
**kwargs):
"""
Perform a matrix inversion to estimate the values of the label vector given
some normalized fluxes and associated inverse variances.
:param theta:
The theta coefficients obtained from the training phase.
:param s2:
The intrinsic pixel variance.
:param normalized_flux:
The normalized flux values. These should be on the same dispersion scale
as the labelled data set.
:param normalized_ivar:
The inverse variance of the normalized flux values. This should have the
same shape as `normalized_flux`.
"""
inv_var = normalized_ivar/(1. + normalized_ivar * s2)
A = np.dot(theta.T, inv_var[:, None] * theta)
B = np.dot(theta.T, inv_var * normalized_flux)
return np.linalg.solve(A, B)
def _fit_spectrum(normalized_flux, normalized_ivar, dispersion, initial_labels,
    vectorizer, theta, s2, model_lsf=False, model_redshift=False, **kwargs):
    """
    Fit a single spectrum by least-squared fitting.
    :param normalized_flux:
        The normalized flux values.
    :param normalized_ivar:
        The inverse variance array for the normalized fluxes.
    :param dispersion:
        The dispersion (e.g., wavelength) points for the normalized fluxes.
    :param initial_labels:
        The point(s) to initialize optimization from.
    :param vectorizer:
        The vectorizer to use when fitting the data.
    :param theta:
        The theta coefficients (spectral derivatives) of the trained model.
    :param s2:
        The pixel scatter (s^2) array for each pixel.
    :param model_lsf: [optional]
        Convolve the spectral model with a Gaussian kernel at fitting time.
    :param model_redshift: [optional]
        Allow for a residual redshift in the spectral model at fitting time.
    :returns:
        A three-length tuple of `(labels, covariance_matrix, metadata)`; the
        labels are NaN and the covariance is None on failure.
    """
    # Deflate the ivar by the intrinsic pixel scatter and work with sigma.
    adjusted_ivar = normalized_ivar/(1. + normalized_ivar * s2)
    adjusted_sigma = np.sqrt(1.0/adjusted_ivar)
    # Exclude non-finite points (e.g., points with zero inverse variance
    # or non-finite flux values, but the latter shouldn't exist anyway).
    use = np.isfinite(adjusted_sigma * normalized_flux)
    N_labels = vectorizer.scales.size
    if not np.any(use):
        logger.warn("No information in spectrum!")
        return (np.nan * np.ones(N_labels), None, {
            "fail_message": "Pixels contained no information"})
    normalized_flux = normalized_flux[use]
    adjusted_sigma = adjusted_sigma[use]
    # Velocity window (km/s) allowed for the optional redshift parameter.
    max_abs_velocity = abs(kwargs.get("max_abs_velocity", 10))
    # Check the vectorizer whether it has a derivative built in.
    if kwargs.get("Dfun", False):
        try:
            vectorizer.get_label_vector_derivative(vectorizer.fiducials)
        except NotImplementedError:
            Dfun = None
            logger.debug("No label vector derivative available!")
        except:
            logger.exception("Exception raised when trying to calculate the "
                "label vector derivative at the fiducial values:")
            raise
        else:
            # Use the label vector derivative.
            """
            # Presumably because of the way leastsq works, the adjusted_inv_sigma
            # does not enter here, otherwise we get incorrect results.
            Dfun = lambda xdata, l: \
                np.dot(theta, vectorizer.get_label_vector_derivative(*l)).T[use]
            """
            # Analytic derivatives are deliberately disabled: the correct
            # weighting for leastsq has not been worked out yet, so this
            # branch raises (the def below is intentionally unreachable).
            raise NotImplementedError("requires a thinko")
            def Dfun(labels, xdata, ydata, f, adjusted_inv_sigma):
                return np.dot(theta,
                    vectorizer.get_label_vector_derivative(labels)).T[:, use]
    else:
        Dfun = None
    mean_pixel_scale = 1.0/np.diff(dispersion).mean() # px/Angstrom
    # Model function for curve_fit: labels (+ optional redshift/LSF kernel
    # parameters appended) -> predicted flux at the usable pixels.
    def f(xdata, *parameters):
        y = np.dot(theta, vectorizer(parameters[:N_labels]).T)[:, 0]
        # Convolve?
        if model_lsf:
            # This will always be the last parameter.
            y = gaussian_filter(y, abs(parameters[-1]) * mean_pixel_scale)
        # Redshift?
        if model_redshift:
            index = -2 if model_lsf else -1
            v = parameters[index]
            if np.abs(v) >= max_abs_velocity:
                logger.debug("Returning NaNs because outside of max velocity")
                return np.nan * np.ones(sum(use))
            y = np.interp(dispersion,
                dispersion * (1 + v/299792.458), y,
                left=np.nan, right=np.nan)
        return y[use]
    kwds = {
        "f": f,
        "xdata": None,
        "ydata": normalized_flux,
        "sigma": adjusted_sigma,
        "absolute_sigma": True,
        # These get passed through to leastsq:
        "Dfun": Dfun,
        "col_deriv": True,
        "ftol": 7./3 - 4./3 - 1, # Machine precision.
        "xtol": 7./3 - 4./3 - 1, # Machine precision.
        "gtol": 0.0,
        "maxfev": 100000, # MAGIC
        "epsfcn": None,
        "factor": 0.1, # Smallest step size available for gradient approximation
        "diag": 1.0/vectorizer.scales
    }
    # Only update the keywords with things that op.curve_fit/op.leastsq expects.
    for key in set(kwargs).intersection(kwds):
        if key == "Dfun": continue
        kwds[key] = kwargs[key]
    results = []
    # One optimization per starting point; keep the best by chi-squared.
    for p0 in np.atleast_2d(initial_labels):
        kwds["p0"] = list(p0)
        if model_redshift:
            kwds["p0"] += [0]
        if model_lsf:
            kwds["p0"] += [5] # MAGIC
        try:
            op_labels, cov = op.curve_fit(**kwds)
        except RuntimeError:
            logger.exception("Exception in fitting from {}".format(p0))
            continue
        fvec = f(None, *op_labels)
        meta = {
            "p0": kwds["p0"],
            "fvec": fvec,
            "chi_sq": np.sum((fvec - normalized_flux)**2 / adjusted_sigma**2),
        }
        results.append((op_labels, cov, meta))
    if len(results) == 0:
        logger.warn("No results found!")
        return (np.nan * np.ones(N_labels), None, {"fail_message": "No results found"})
    best_result_index = np.nanargmin([m["chi_sq"] for (o, c, m) in results])
    op_labels, cov, meta = results[best_result_index]
    if np.allclose(op_labels, meta["p0"]):
        logger.warn("Discarding optimized result because it is the same as the "
            "initial value!")
        # We are in dire straits. We should not trust the result.
        op_labels *= np.nan
    if not np.any(np.isfinite(cov)):
        logger.warn("Non-finite covariance matrix returned!")
    # Defaults for LSF/redshift parameters
    meta.update(kernel=0, redshift=0)
    # Peel the extra parameters (appended in kernel-last order) off the tail.
    for key, effect in zip(("kernel", "redshift"), (model_lsf, model_redshift)):
        if effect:
            meta[key] = op_labels[-1]
            op_labels = op_labels[:-1]
    # Save additional information.
    meta.update({
        "kernel": abs(meta["kernel"]),
        "label_names": vectorizer.label_names,
        "best_result_index": best_result_index,
        "method": "curve_fit",
        "derivatives_used": Dfun is not None,
        "snr": np.nanmedian(normalized_flux * np.sqrt(normalized_ivar[use])),
        "r_chi_sq": meta["chi_sq"]/(use.sum() - len(vectorizer.fiducials) - 1),
        "model_flux": np.dot(theta, vectorizer(op_labels).T).flatten(),
    })
    for key in ("ftol", "xtol", "gtol", "maxfev", "factor", "epsfcn"):
        meta[key] = kwds[key]
    return (op_labels, cov, meta)
def _fit_pixel(initial_theta, initial_s2, normalized_flux, normalized_ivar,
    design_matrix, fixed_scatter, **kwargs):
    """
    Return the optimal model coefficients and pixel scatter given the normalized
    flux, the normalized inverse variance, and the design matrix.
    :param initial_theta:
        The initial model coefficients to optimize from.
    :param initial_s2:
        The initial pixel scatter (s^2) terms to optimize from (if fixed_scatter
        is False).
    :param normalized_flux:
        The normalized flux values for a given pixel, from all stars.
    :param normalized_ivar:
        The inverse variance of the normalized flux values for a given pixel,
        from all stars.
    :param design_matrix:
        The design matrix for the spectral model.
    :param fixed_scatter:
        Keep the pixel scatter term fixed.
    :returns:
        The optimised label vector coefficients and scatter for this pixel, even
        if it was supplied by the user.
    """
    design_matrix = utils._unpack_value(design_matrix)
    # Derive the scatter (s) from the supplied s^2 term. (The previous code
    # contained a leftover debugging `raise a` and referenced an undefined
    # `scatter`/`p0_scatter`, which made this function unusable.)
    scatter = abs(initial_s2) ** 0.5
    # This initial theta will also be returned if we have no valid fluxes.
    # NOTE(review): the caller-supplied initial_theta is unconditionally
    # replaced here, as in the original code — confirm this is intended.
    initial_theta = np.hstack([1, np.zeros(design_matrix.shape[1] - 1)])
    if np.all(normalized_ivar == 0):
        return np.hstack([initial_theta, scatter if fixed_scatter else 0])
    # Optimize the parameters.
    kwds = {
        "maxiter": np.inf,
        "maxfun": np.inf,
        "disp": False,
        "full_output": True
    }
    kwds.update(kwargs.get("op_kwargs", {}))
    args = (normalized_flux, normalized_ivar, design_matrix)
    logger.debug("Optimizer kwds: {}".format(kwds))
    if fixed_scatter:
        # Scatter fixed: optimize theta only; scatter is passed through as
        # the first extra argument, matching _model_pixel's signature.
        p0 = initial_theta
        func = _model_pixel
        args = tuple([scatter] + list(args))
    else:
        # Scatter free: pack it as the last element of the parameter vector,
        # matching _model_pixel_fixed_scatter's unpacking.
        p0 = np.hstack([initial_theta, scatter])
        func = _model_pixel_fixed_scatter
    op_params, fopt, direc, n_iter, n_funcs, warnflag = op.fmin_powell(
        func, p0, args=args, **kwds)
    if warnflag > 0:
        logger.warning("Warning: {}".format([
            "Maximum number of function evaluations made during optimisation.",
            "Maximum number of iterations made during optimisation."
        ][warnflag - 1]))
    # Always return theta plus the (possibly fixed) scatter term.
    return np.hstack([op_params, scatter]) if fixed_scatter else op_params
def _model_pixel(theta, scatter, normalized_flux, normalized_ivar,
    design_matrix, **kwargs):
    # Objective for a single pixel with a *given* scatter: deflate the
    # inverse variance by the squared scatter, then return the chi-squared
    # of the linear model at these theta coefficients.
    inv_var = normalized_ivar/(1. + normalized_ivar * scatter**2)
    return model._chi_sq(theta, design_matrix, normalized_flux, inv_var)
def _model_pixel_fixed_scatter(parameters, normalized_flux, normalized_ivar,
    design_matrix, **kwargs):
    # NOTE(review): despite the name, this variant treats scatter as a *free*
    # parameter packed into `parameters` (last element) and delegates to
    # _model_pixel — verify call sites pair it with the right p0/args.
    theta, scatter = parameters[:-1], parameters[-1]
    return _model_pixel(
        theta, scatter, normalized_flux, normalized_ivar, design_matrix)
def _fit_pixel_with_fixed_scatter(scatter, normalized_flux, normalized_ivar,
    design_matrix, **kwargs):
    """
    Fit the normalized flux for a single pixel (across many stars) given some
    pixel variance term, and return the best-fit theta coefficients.
    :param scatter:
        The additional scatter to adopt in the pixel.
    :param normalized_flux:
        The normalized flux values for a single pixel across many stars.
    :param normalized_ivar:
        The inverse variance of the normalized flux values for a single pixel
        across many stars.
    :param design_matrix:
        The design matrix for the model.
    :returns:
        The chi-squared value Q, or `(Q, theta)` when the private
        `__return_theta` keyword is set.
    """
    theta, ATCiAinv, inv_var = _fit_theta(normalized_flux, normalized_ivar,
        scatter**2, design_matrix)
    return_theta = kwargs.get("__return_theta", False)
    if ATCiAinv is None:
        # Singular design matrix: no usable fit, report zero chi-squared.
        return 0.0 if not return_theta else (0.0, theta)
    # We take inv_var back from _fit_theta because it is the same quantity we
    # need to calculate, and it saves us one operation.
    Q = model._chi_sq(theta, design_matrix, normalized_flux, inv_var)
    return (Q, theta) if return_theta else Q
def _fit_theta(normalized_flux, normalized_ivar, s2, design_matrix):
"""
Fit theta coefficients to a set of normalized fluxes for a single pixel.
:param normalized_flux:
The normalized fluxes for a single pixel (across many stars).
:param normalized_ivar:
The inverse variance of the normalized flux values for a single pixel
across many stars.
:param scatter:
The additional scatter to adopt in the pixel.
:param design_matrix:
The model design matrix.
:returns:
The label vector coefficients for the pixel, the inverse variance matrix
and the total inverse variance.
"""
ivar = normalized_ivar/(1. + normalized_ivar * s2)
CiA = design_matrix * np.tile(ivar, (design_matrix.shape[1], 1)).T
try:
ATCiAinv = np.linalg.inv(np.dot(design_matrix.T, CiA))
except np.linalg.linalg.LinAlgError:
#if logger.getEffectiveLevel() == logging.DEBUG: raise
return (np.hstack([1, [0] * (design_matrix.shape[1] - 1)]), None, ivar)
ATY = np.dot(design_matrix.T, normalized_flux * ivar)
theta = np.dot(ATCiAinv, ATY)
return (theta, ATCiAinv, ivar)
| mit |
Allow2CEO/browser-ios | brave/node_modules/ad-block/node_modules/hashset-cpp/scripts/lib/config.py | 9 | 1627 | #!/usr/bin/env python
import errno
import os
import platform
import sys
# Mirror override for CI; falls back to the canonical S3 bucket.
BASE_URL = os.getenv('LIBCHROMIUMCONTENT_MIRROR') or \
  'https://s3.amazonaws.com/github-janky-artifacts/libchromiumcontent'
# Pinned libchromiumcontent revision to download.
LIBCHROMIUMCONTENT_COMMIT = 'cfbe8ec7e14af4cabd1474386f54e197db1f7ac1'
# Normalize sys.platform to one of win32/darwin/linux.
PLATFORM = {
  'cygwin': 'win32',
  'darwin': 'darwin',
  'linux': 'linux',    # Python 3 reports 'linux'; without this key the
  'linux2': 'linux',   # lookup raised KeyError on modern interpreters.
  'win32': 'win32',
}[sys.platform]
# Module-wide verbosity flag, toggled by enable_verbose_mode().
verbose_mode = False
def get_platform_key():
  """Return 'mas' for Mac App Store builds, otherwise the host platform."""
  # dict.has_key()/environ.has_key() was removed in Python 3; the `in`
  # membership test works identically on Python 2 and 3.
  if 'MAS_BUILD' in os.environ:
    return 'mas'
  else:
    return PLATFORM
def get_target_arch():
  """Return the target CPU architecture recorded by a previous download.

  Reads libchromiumcontent's .target_arch marker file if present; otherwise
  falls back to 'ia32' on Windows and 'x64' everywhere else.
  """
  try:
    # Joining __file__ with '..' components is normalized below, so the path
    # resolves relative to this script's directory.
    target_arch_path = os.path.join(__file__, '..', '..', '..', 'vendor',
                                    'brightray', 'vendor', 'download',
                                    'libchromiumcontent', '.target_arch')
    with open(os.path.normpath(target_arch_path)) as f:
      return f.read().strip()
  except IOError as e:
    # A missing marker file is expected; re-raise any other I/O failure.
    if e.errno != errno.ENOENT:
      raise
  if PLATFORM == 'win32':
    return 'ia32'
  else:
    return 'x64'
def get_chromedriver_version():
  """Return the chromedriver release tag pinned for this checkout."""
  pinned = 'v2.15'
  return pinned
def s3_config():
  """Read the S3 upload credentials from the environment.

  Returns a (bucket, access_key, secret_key) tuple; asserts that every
  value is non-empty.
  """
  names = ('ATOM_SHELL_S3_BUCKET',
           'ATOM_SHELL_S3_ACCESS_KEY',
           'ATOM_SHELL_S3_SECRET_KEY')
  config = tuple(os.environ.get(name, '') for name in names)
  message = ('Error: Please set the $ATOM_SHELL_S3_BUCKET, '
             '$ATOM_SHELL_S3_ACCESS_KEY, and '
             '$ATOM_SHELL_S3_SECRET_KEY environment variables')
  assert all(len(c) for c in config), message
  return config
def enable_verbose_mode():
  """Turn on verbose output for the rest of this process (module-wide flag)."""
  # print() call form works on both Python 2 and 3; the bare `print ...`
  # statement used previously is a syntax error on Python 3.
  print('Running in verbose mode')
  global verbose_mode
  verbose_mode = True
def is_verbose_mode():
  """Return True once enable_verbose_mode() has been called this process."""
  return verbose_mode
| mpl-2.0 |
xfournet/intellij-community | python/lib/Lib/site-packages/django/contrib/messages/storage/user_messages.py | 308 | 2303 | """
Storages used to assist in the deprecation of contrib.auth User messages.
"""
from django.contrib.messages import constants
from django.contrib.messages.storage.base import BaseStorage, Message
from django.contrib.auth.models import User
from django.contrib.messages.storage.fallback import FallbackStorage
class UserMessagesStorage(BaseStorage):
    """
    Retrieves messages from the User, using the legacy user.message_set API.
    This storage is "read-only" insofar as it can only retrieve and delete
    messages, not store them.
    """
    session_key = '_messages'
    def _get_messages_queryset(self):
        """
        Returns the QuerySet containing all user messages (or ``None`` if
        request.user is not a contrib.auth User).
        """
        user = getattr(self.request, 'user', None)
        if isinstance(user, User):
            # Implicitly returns None for anonymous/non-User request.user.
            return user._message_set.all()
    def add(self, *args, **kwargs):
        # Creating messages is deliberately unsupported in this legacy store.
        raise NotImplementedError('This message storage is read-only.')
    def _get(self, *args, **kwargs):
        """
        Retrieves a list of messages assigned to the User. This backend never
        stores anything, so all_retrieved is assumed to be False.
        """
        queryset = self._get_messages_queryset()
        if queryset is None:
            # This is a read-only and optional storage, so to ensure other
            # storages will also be read if used with FallbackStorage an empty
            # list is returned rather than None.
            return [], False
        messages = []
        for user_message in queryset:
            # Legacy user messages carry no level; surface them all as INFO.
            messages.append(Message(constants.INFO, user_message.message))
        return messages, False
    def _store(self, messages, *args, **kwargs):
        """
        Removes any messages assigned to the User and returns the list of
        messages (since no messages are stored in this read-only storage).
        """
        queryset = self._get_messages_queryset()
        if queryset is not None:
            queryset.delete()
        return messages
class LegacyFallbackStorage(FallbackStorage):
    """
    Works like ``FallbackStorage`` but also handles retrieving (and clearing)
    contrib.auth User messages.
    """
    # Consult the legacy user-message store first, then the standard chain.
    storage_classes = (UserMessagesStorage,) + FallbackStorage.storage_classes
DavidNorman/tensorflow | tensorflow/python/data/experimental/kernel_tests/serialization/rebatch_dataset_serialization_test.py | 9 | 1819 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the _RebatchDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.platform import test
class RebatchDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase,
    parameterized.TestCase):
  # Serialization round-trip coverage for the experimental _RebatchDataset.
  @combinations.generate(test_base.default_test_combinations())
  def testCore(self):
    def build_dataset(num_elements, batch_size):
      # Batch 4x the target size, then rebatch across 4 replicas so each
      # replica sees batches of `batch_size`.
      return distribute._RebatchDataset(
          dataset_ops.Dataset.range(num_elements).batch(
              4 * batch_size, drop_remainder=True),
          num_replicas=4)
    # 200 elements at batch 10 across 4 replicas -> 20 expected outputs.
    self.run_core_tests(lambda: build_dataset(200, 10), 20)
if __name__ == "__main__":
test.main()
| apache-2.0 |
destinmoulton/squabble | pythonclient/venv/lib/python2.7/site-packages/Crypto/Hash/MD2.py | 124 | 2734 | # -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""MD2 cryptographic hash algorithm.
MD2 is specified in RFC1319_ and it produces the 128 bit digest of a message.
>>> from Crypto.Hash import MD2
>>>
>>> h = MD2.new()
>>> h.update(b'Hello')
>>> print h.hexdigest()
MD2 stand for Message Digest version 2, and it was invented by Rivest in 1989.
This algorithm is both slow and insecure. Do not use it for new designs.
.. _RFC1319: http://tools.ietf.org/html/rfc1319
"""
_revision__ = "$Id$"
__all__ = ['new', 'digest_size', 'MD2Hash' ]
from Crypto.Util.py3compat import *
from Crypto.Hash.hashalgo import HashAlgo
import Crypto.Hash._MD2 as _MD2
hashFactory = _MD2
class MD2Hash(HashAlgo):
    """Class that implements an MD2 hash
    MD2 produces a 128-bit (16-byte) digest. It is slow and cryptographically
    broken; retained only for legacy interoperability.
    :undocumented: block_size
    """
    #: ASN.1 Object identifier (OID)::
    #:
    #: id-md2 OBJECT IDENTIFIER ::= {
    #: iso(1) member-body(2) us(840) rsadsi(113549)
    #: digestAlgorithm(2) 2
    #: }
    #:
    #: This value uniquely identifies the MD2 algorithm.
    oid = b('\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x02')
    # Digest and internal block sizes, both 16 bytes for MD2.
    digest_size = 16
    block_size = 16
    def __init__(self, data=None):
        HashAlgo.__init__(self, hashFactory, data)
    def new(self, data=None):
        # Returns a *fresh* hash object; it does not copy this one's state.
        return MD2Hash(data)
def new(data=None):
    """Return a fresh instance of the hash object.
    :Parameters:
    data : byte string
    The very first chunk of the message to hash.
    It is equivalent to an early call to `MD2Hash.update()`.
    Optional.
    :Return: An `MD2Hash` object
    """
    # Equivalent to MD2Hash(data); routed through .new() for API symmetry
    # with the other Crypto.Hash module-level constructors.
    return MD2Hash().new(data)
#: The size of the resulting hash in bytes.
digest_size = MD2Hash.digest_size
#: The internal block size of the hash algorithm in bytes.
block_size = MD2Hash.block_size
| mit |
MITRECND/multiscanner | docs/conf.py | 2 | 5216 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# MultiScanner documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 22 13:35:06 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'MultiScanner'
copyright = '2017, MITRE'
author = 'MITRE'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
# html_sidebars = {
# '**': [
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# ]
# }
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'MultiScannerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MultiScanner.tex', 'MultiScanner Documentation',
'MITRE', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'multiscanner', 'MultiScanner Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MultiScanner', 'MultiScanner Documentation',
author, 'MultiScanner', 'One line description of project.',
'Miscellaneous'),
]
def setup(app):
    # Sphinx extension hook: layer our CSS overrides on top of the RTD theme.
    # NOTE(review): add_stylesheet was renamed add_css_file in Sphinx 1.8 and
    # removed in Sphinx 4.0 — fine for the Sphinx version this project pins.
    app.add_stylesheet('theme_overrides.css')
| mpl-2.0 |
hsfzxjy/wisecitymbc | site_packages/rest_framework/tests/test_urlpatterns.py | 2 | 2972 | from __future__ import unicode_literals
from collections import namedtuple
from django.core import urlresolvers
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from rest_framework.compat import patterns, url, include
from rest_framework.urlpatterns import format_suffix_patterns
# A container class for test paths for the test case
URLTestPath = namedtuple('URLTestPath', ['path', 'args', 'kwargs'])
def dummy_view(request, *args, **kwargs):
    """No-op view used purely as a URL-pattern target in these tests."""
    return None
class FormatSuffixTests(TestCase):
    """
    Tests `format_suffix_patterns` against different URLPatterns to ensure the URLs still resolve properly, including any captured parameters.
    """
    def _resolve_urlpatterns(self, urlpatterns, test_paths):
        # Apply format_suffix_patterns, then resolve each test path and check
        # that the captured args/kwargs survive the pattern rewrite.
        factory = APIRequestFactory()
        try:
            urlpatterns = format_suffix_patterns(urlpatterns)
        except Exception:
            self.fail("Failed to apply `format_suffix_patterns` on the supplied urlpatterns")
        resolver = urlresolvers.RegexURLResolver(r'^/', urlpatterns)
        for test_path in test_paths:
            request = factory.get(test_path.path)
            try:
                callback, callback_args, callback_kwargs = resolver.resolve(request.path_info)
            except Exception:
                self.fail("Failed to resolve URL: %s" % request.path_info)
            self.assertEqual(callback_args, test_path.args)
            self.assertEqual(callback_kwargs, test_path.kwargs)
    def test_format_suffix(self):
        # A bare pattern: '.api'/'.asdf' suffixes are captured as 'format'.
        urlpatterns = patterns(
            '',
            url(r'^test$', dummy_view),
        )
        test_paths = [
            URLTestPath('/test', (), {}),
            URLTestPath('/test.api', (), {'format': 'api'}),
            URLTestPath('/test.asdf', (), {'format': 'asdf'}),
        ]
        self._resolve_urlpatterns(urlpatterns, test_paths)
    def test_default_args(self):
        # Default kwargs passed to the view must survive alongside 'format'.
        urlpatterns = patterns(
            '',
            url(r'^test$', dummy_view, {'foo': 'bar'}),
        )
        test_paths = [
            URLTestPath('/test', (), {'foo': 'bar', }),
            URLTestPath('/test.api', (), {'foo': 'bar', 'format': 'api'}),
            URLTestPath('/test.asdf', (), {'foo': 'bar', 'format': 'asdf'}),
        ]
        self._resolve_urlpatterns(urlpatterns, test_paths)
    def test_included_urls(self):
        # Suffixes must also work for patterns pulled in via include().
        nested_patterns = patterns(
            '',
            url(r'^path$', dummy_view)
        )
        urlpatterns = patterns(
            '',
            url(r'^test/', include(nested_patterns), {'foo': 'bar'}),
        )
        test_paths = [
            URLTestPath('/test/path', (), {'foo': 'bar', }),
            URLTestPath('/test/path.api', (), {'foo': 'bar', 'format': 'api'}),
            URLTestPath('/test/path.asdf', (), {'foo': 'bar', 'format': 'asdf'}),
        ]
        self._resolve_urlpatterns(urlpatterns, test_paths)
| gpl-2.0 |
HankCHTsai/eye4cash | model/slim/datasets/download_and_convert_mnist.py | 34 | 7389 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts MNIST data to TFRecords of TF-Example protos.
This module downloads the MNIST data, uncompresses it, reads the files
that make up the MNIST data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is comprised of a set of TF-Example
protocol buffers, each of which contain a single image and label.
The script should take about a minute to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import sys
import numpy as np
from six.moves import urllib
import tensorflow as tf
from datasets import dataset_utils
# The URLs where the MNIST data can be downloaded.
_DATA_URL = 'http://yann.lecun.com/exdb/mnist/'
_TRAIN_DATA_FILENAME = 'train-images-idx3-ubyte.gz'
_TRAIN_LABELS_FILENAME = 'train-labels-idx1-ubyte.gz'
_TEST_DATA_FILENAME = 't10k-images-idx3-ubyte.gz'
_TEST_LABELS_FILENAME = 't10k-labels-idx1-ubyte.gz'

# MNIST images are 28x28 pixels, single (grayscale) channel.
_IMAGE_SIZE = 28
_NUM_CHANNELS = 1

# The names of the classes, indexed by digit value (0-9).
# Bug fix: index 6 previously read 'size' instead of 'six', which wrote an
# incorrect entry into the generated labels file.
_CLASS_NAMES = [
    'zero',
    'one',
    'two',
    'three',
    'four',
    'five',
    'six',
    'seven',
    'eight',
    'nine',
]
def _extract_images(filename, num_images):
  """Read a gzipped MNIST image file into a numpy array.

  Args:
    filename: The path to an MNIST images file.
    num_images: The number of images in the file.

  Returns:
    A numpy array of shape [number_of_images, height, width, channels].
  """
  print('Extracting images from: ', filename)
  num_bytes = _IMAGE_SIZE * _IMAGE_SIZE * num_images * _NUM_CHANNELS
  with gzip.open(filename) as stream:
    stream.read(16)  # Skip the 16-byte IDX header (magic number + dims).
    raw = stream.read(num_bytes)
  pixels = np.frombuffer(raw, dtype=np.uint8)
  return pixels.reshape(num_images, _IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
def _extract_labels(filename, num_labels):
  """Read a gzipped MNIST label file into a vector of int64 label IDs.

  Args:
    filename: The path to an MNIST labels file.
    num_labels: The number of labels in the file.

  Returns:
    A numpy array of shape [number_of_labels]
  """
  print('Extracting labels from: ', filename)
  with gzip.open(filename) as stream:
    stream.read(8)  # Skip the 8-byte IDX header (magic number + item count).
    raw = stream.read(1 * num_labels)
  return np.frombuffer(raw, dtype=np.uint8).astype(np.int64)
def _add_to_tfrecord(data_filename, labels_filename, num_images,
                     tfrecord_writer):
  """Loads data from the binary MNIST files and writes files to a TFRecord.

  Args:
    data_filename: The filename of the MNIST images.
    labels_filename: The filename of the MNIST labels.
    num_images: The number of images in the dataset.
    tfrecord_writer: The TFRecord writer to use for writing.
  """
  images = _extract_images(data_filename, num_images)
  labels = _extract_labels(labels_filename, num_images)

  shape = (_IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
  with tf.Graph().as_default():
    # Build the PNG-encoding op once; every image is fed through the same
    # placeholder so the graph is not rebuilt per image.
    image = tf.placeholder(dtype=tf.uint8, shape=shape)
    encoded_png = tf.image.encode_png(image)

    with tf.Session('') as sess:
      for j in range(num_images):
        # '\r' rewrites the same console line with progress.
        sys.stdout.write('\r>> Converting image %d/%d' % (j + 1, num_images))
        sys.stdout.flush()

        png_string = sess.run(encoded_png, feed_dict={image: images[j]})

        example = dataset_utils.image_to_tfexample(
            png_string, 'png'.encode(), _IMAGE_SIZE, _IMAGE_SIZE, labels[j])
        tfrecord_writer.write(example.SerializeToString())
def _get_output_filename(dataset_dir, split_name):
  """Build the TFRecord output path for one dataset split.

  Args:
    dataset_dir: The directory where the temporary files are stored.
    split_name: The name of the train/test split.

  Returns:
    An absolute file path.
  """
  return '{0}/mnist_{1}.tfrecord'.format(dataset_dir, split_name)
def _download_dataset(dataset_dir):
  """Downloads MNIST locally.

  Fetches each of the four gzipped archive files unless it already exists
  in ``dataset_dir``, reporting download progress on the console.

  Args:
    dataset_dir: The directory where the temporary files are stored.
  """
  for filename in [_TRAIN_DATA_FILENAME,
                   _TRAIN_LABELS_FILENAME,
                   _TEST_DATA_FILENAME,
                   _TEST_LABELS_FILENAME]:
    filepath = os.path.join(dataset_dir, filename)

    if not os.path.exists(filepath):
      print('Downloading file %s...' % filename)

      def _progress(count, block_size, total_size):
        # Reporthook for urlretrieve: rewrites the same console line with
        # the current download percentage.
        sys.stdout.write('\r>> Downloading %.1f%%' % (
            float(count * block_size) / float(total_size) * 100.0))
        sys.stdout.flush()

      filepath, _ = urllib.request.urlretrieve(_DATA_URL + filename,
                                               filepath,
                                               _progress)
      print()
      # Report the downloaded size via tf.gfile for filesystem abstraction.
      with tf.gfile.GFile(filepath) as f:
        size = f.size()
      print('Successfully downloaded', filename, size, 'bytes.')
def _clean_up_temporary_files(dataset_dir):
  """Delete the downloaded MNIST archives once conversion is finished.

  Args:
    dataset_dir: The directory where the temporary files are stored.
  """
  temporary_files = (_TRAIN_DATA_FILENAME,
                     _TRAIN_LABELS_FILENAME,
                     _TEST_DATA_FILENAME,
                     _TEST_LABELS_FILENAME)
  for temp_name in temporary_files:
    tf.gfile.Remove(os.path.join(dataset_dir, temp_name))
def run(dataset_dir):
  """Runs the download and conversion operation.

  Downloads the MNIST archives (if needed), converts the train and test
  splits to TFRecord files, writes the labels file, then removes the
  downloaded archives.  Idempotent: exits early if both TFRecords exist.

  Args:
    dataset_dir: The dataset directory where the dataset is stored.
  """
  if not tf.gfile.Exists(dataset_dir):
    tf.gfile.MakeDirs(dataset_dir)

  training_filename = _get_output_filename(dataset_dir, 'train')
  testing_filename = _get_output_filename(dataset_dir, 'test')

  if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):
    print('Dataset files already exist. Exiting without re-creating them.')
    return

  _download_dataset(dataset_dir)

  # First, process the training data:
  # 60000/10000 below are the fixed sizes of the canonical MNIST splits.
  with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
    data_filename = os.path.join(dataset_dir, _TRAIN_DATA_FILENAME)
    labels_filename = os.path.join(dataset_dir, _TRAIN_LABELS_FILENAME)
    _add_to_tfrecord(data_filename, labels_filename, 60000, tfrecord_writer)

  # Next, process the testing data:
  with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
    data_filename = os.path.join(dataset_dir, _TEST_DATA_FILENAME)
    labels_filename = os.path.join(dataset_dir, _TEST_LABELS_FILENAME)
    _add_to_tfrecord(data_filename, labels_filename, 10000, tfrecord_writer)

  # Finally, write the labels file:
  labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
  dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

  _clean_up_temporary_files(dataset_dir)
  print('\nFinished converting the MNIST dataset!')
| apache-2.0 |
zwb800/home-assistant | tests/components/automation/test_event.py | 16 | 4001 | """
tests.components.automation.test_event
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests event automation.
"""
import unittest
import homeassistant.core as ha
import homeassistant.components.automation as automation
class TestAutomationEvent(unittest.TestCase):
    """Exercise the event trigger platform of the automation component."""

    def setUp(self):  # pylint: disable=invalid-name
        """Start a Home Assistant instance that records test service calls."""
        self.hass = ha.HomeAssistant()
        self.calls = []

        def record_call(service):
            self.calls.append(service)

        self.hass.services.register('test', 'automation', record_call)

    def tearDown(self):  # pylint: disable=invalid-name
        """Stop down stuff we started."""
        self.hass.stop()

    def _setup_automation(self, config):
        """Set up the automation component with ``config`` and assert success."""
        self.assertTrue(automation.setup(self.hass, {
            automation.DOMAIN: config,
        }))

    def _fire_and_wait(self, event_data=None):
        """Fire the test event (optionally with data) and drain pending work."""
        if event_data is None:
            self.hass.bus.fire('test_event')
        else:
            self.hass.bus.fire('test_event', event_data)
        self.hass.pool.block_till_done()

    def test_old_config_if_fires_on_event(self):
        """Deprecated flat config: action runs when the event fires."""
        self._setup_automation({
            'platform': 'event',
            'event_type': 'test_event',
            'execute_service': 'test.automation',
        })
        self._fire_and_wait()
        self.assertEqual(1, len(self.calls))

    def test_old_config_if_fires_on_event_with_data(self):
        """Deprecated flat config: action runs when event data matches."""
        self._setup_automation({
            'platform': 'event',
            'event_type': 'test_event',
            'event_data': {'some_attr': 'some_value'},
            'execute_service': 'test.automation',
        })
        self._fire_and_wait({'some_attr': 'some_value'})
        self.assertEqual(1, len(self.calls))

    def test_old_config_if_not_fires_if_event_data_not_matches(self):
        """Deprecated flat config: no action when event data differs."""
        self._setup_automation({
            'platform': 'event',
            'event_type': 'test_event',
            'event_data': {'some_attr': 'some_value'},
            'execute_service': 'test.automation',
        })
        self._fire_and_wait({'some_attr': 'some_other_value'})
        self.assertEqual(0, len(self.calls))

    def test_if_fires_on_event(self):
        """Trigger/action config: action runs when the event fires."""
        self._setup_automation({
            'trigger': {
                'platform': 'event',
                'event_type': 'test_event',
            },
            'action': {
                'service': 'test.automation',
            },
        })
        self._fire_and_wait()
        self.assertEqual(1, len(self.calls))

    def test_if_fires_on_event_with_data(self):
        """Trigger/action config: extra event keys do not block a match."""
        self._setup_automation({
            'trigger': {
                'platform': 'event',
                'event_type': 'test_event',
                'event_data': {'some_attr': 'some_value'},
            },
            'action': {
                'service': 'test.automation',
            },
        })
        self._fire_and_wait({'some_attr': 'some_value',
                             'another': 'value'})
        self.assertEqual(1, len(self.calls))

    def test_if_not_fires_if_event_data_not_matches(self):
        """Trigger/action config: no action when event data differs."""
        self._setup_automation({
            'trigger': {
                'platform': 'event',
                'event_type': 'test_event',
                'event_data': {'some_attr': 'some_value'},
            },
            'action': {
                'service': 'test.automation',
            },
        })
        self._fire_and_wait({'some_attr': 'some_other_value'})
        self.assertEqual(0, len(self.calls))
| mit |
pdellaert/ansible | lib/ansible/modules/net_tools/omapi_host.py | 40 | 11682 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# copyright: (c) 2016, Loic Blot <loic.blot@unix-experience.fr>
# Sponsored by Infopro Digital. http://www.infopro-digital.com/
# Sponsored by E.T.A.I. http://www.etai.fr/
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard metadata consumed by Ansible tooling: module maturity ('preview')
# and which channel supports it ('community').
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: omapi_host
short_description: Setup OMAPI hosts.
description: Manage OMAPI hosts into compatible DHCPd servers
version_added: "2.3"
requirements:
- pypureomapi
author:
- Loic Blot (@nerzhul)
options:
state:
description:
- Create or remove OMAPI host.
type: str
required: true
choices: [ absent, present ]
hostname:
description:
- Sets the host lease hostname (mandatory if state=present).
type: str
aliases: [ name ]
host:
description:
- Sets OMAPI server host to interact with.
type: str
default: localhost
port:
description:
- Sets the OMAPI server port to interact with.
type: int
default: 7911
key_name:
description:
- Sets the TSIG key name for authenticating against OMAPI server.
type: str
required: true
key:
description:
- Sets the TSIG key content for authenticating against OMAPI server.
type: str
required: true
macaddr:
description:
- Sets the lease host MAC address.
type: str
required: true
ip:
description:
- Sets the lease host IP address.
type: str
statements:
description:
- Attach a list of OMAPI DHCP statements with host lease (without ending semicolon).
type: list
default: []
ddns:
description:
- Enable dynamic DNS updates for this host.
type: bool
default: no
'''
EXAMPLES = r'''
- name: Add a host using OMAPI
omapi_host:
key_name: defomapi
key: +bFQtBCta6j2vWkjPkNFtgA==
host: 10.98.4.55
macaddr: 44:dd:ab:dd:11:44
name: server01
ip: 192.168.88.99
ddns: yes
statements:
- filename "pxelinux.0"
- next-server 1.1.1.1
state: present
- name: Remove a host using OMAPI
omapi_host:
key_name: defomapi
key: +bFQtBCta6j2vWkjPkNFtgA==
host: 10.1.1.1
macaddr: 00:66:ab:dd:11:44
state: absent
'''
RETURN = r'''
lease:
description: dictionary containing host information
returned: success
type: complex
contains:
ip-address:
description: IP address, if there is.
returned: success
type: str
sample: '192.168.1.5'
hardware-address:
description: MAC address
returned: success
type: str
sample: '00:11:22:33:44:55'
hardware-type:
description: hardware type, generally '1'
returned: success
type: int
sample: 1
name:
description: hostname
returned: success
type: str
sample: 'mydesktop'
'''
import binascii
import socket
import struct
import traceback
PUREOMAPI_IMP_ERR = None
try:
from pypureomapi import Omapi, OmapiMessage, OmapiError, OmapiErrorNotFound
from pypureomapi import pack_ip, unpack_ip, pack_mac, unpack_mac
from pypureomapi import OMAPI_OP_STATUS, OMAPI_OP_UPDATE
pureomapi_found = True
except ImportError:
PUREOMAPI_IMP_ERR = traceback.format_exc()
pureomapi_found = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_bytes, to_native
class OmapiHostManager:
    """Thin wrapper around a pypureomapi connection for managing DHCP host leases."""

    def __init__(self, module):
        """Store the Ansible module and open the OMAPI connection immediately."""
        self.module = module
        self.omapi = None
        self.connect()

    def connect(self):
        """Open the authenticated OMAPI connection; fail the module on any error."""
        try:
            self.omapi = Omapi(self.module.params['host'], self.module.params['port'], self.module.params['key_name'],
                               self.module.params['key'])
        except binascii.Error:
            self.module.fail_json(msg="Unable to open OMAPI connection. 'key' is not a valid base64 key.")
        except OmapiError as e:
            self.module.fail_json(msg="Unable to open OMAPI connection. Ensure 'host', 'port', 'key' and 'key_name' "
                                      "are valid. Exception was: %s" % to_native(e))
        except socket.error as e:
            self.module.fail_json(msg="Unable to connect to OMAPI server: %s" % to_native(e))

    def get_host(self, macaddr):
        """Query the server for a host object by MAC address.

        Returns the raw OmapiMessage response, or None when the server did not
        answer with an update (i.e. the host does not exist).
        """
        msg = OmapiMessage.open(to_bytes("host", errors='surrogate_or_strict'))
        msg.obj.append((to_bytes("hardware-address", errors='surrogate_or_strict'), pack_mac(macaddr)))
        # hardware-type 1 == Ethernet.
        msg.obj.append((to_bytes("hardware-type", errors='surrogate_or_strict'), struct.pack("!I", 1)))
        response = self.omapi.query_server(msg)
        if response.opcode != OMAPI_OP_UPDATE:
            return None
        return response

    @staticmethod
    def unpack_facts(obj):
        """Convert raw OMAPI object attribute pairs into readable values."""
        result = dict(obj)
        # NOTE(review): keys are tested as str here while get_host appends
        # bytes keys -- verify the str/bytes handling on Python 3.
        if 'hardware-address' in result:
            result['hardware-address'] = unpack_mac(result['hardware-address'])
        if 'ip-address' in result:
            result['ip-address'] = unpack_ip(result['ip-address'])
        if 'hardware-type' in result:
            # NOTE(review): struct.unpack returns a 1-tuple, so this stores a
            # tuple rather than an int -- confirm consumers expect that.
            result['hardware-type'] = struct.unpack("!I", result['hardware-type'])
        return result

    def setup_host(self):
        """Create the host lease if absent, otherwise update its IP address.

        Always terminates the module run via exit_json/fail_json.
        """
        if self.module.params['hostname'] is None or len(self.module.params['hostname']) == 0:
            self.module.fail_json(msg="name attribute could not be empty when adding or modifying host.")

        msg = None
        host_response = self.get_host(self.module.params['macaddr'])
        # If host was not found using macaddr, add create message
        if host_response is None:
            msg = OmapiMessage.open(to_bytes('host', errors='surrogate_or_strict'))
            # create + exclusive: fail if an object with these keys already exists.
            msg.message.append(('create', struct.pack('!I', 1)))
            msg.message.append(('exclusive', struct.pack('!I', 1)))
            msg.obj.append(('hardware-address', pack_mac(self.module.params['macaddr'])))
            msg.obj.append(('hardware-type', struct.pack('!I', 1)))
            # NOTE(review): 'name' and 'statements' are appended as str while
            # other attributes use to_bytes -- confirm pypureomapi encodes these.
            msg.obj.append(('name', self.module.params['hostname']))
            if self.module.params['ip'] is not None:
                msg.obj.append((to_bytes("ip-address", errors='surrogate_or_strict'), pack_ip(self.module.params['ip'])))

            # Build the dhcpd statements blob: optional ddns-hostname plus any
            # user-supplied statements, each terminated with '; '.
            stmt_join = ""
            if self.module.params['ddns']:
                stmt_join += 'ddns-hostname "{0}"; '.format(self.module.params['hostname'])

            try:
                if len(self.module.params['statements']) > 0:
                    stmt_join += "; ".join(self.module.params['statements'])
                    stmt_join += "; "
            except TypeError as e:
                self.module.fail_json(msg="Invalid statements found: %s" % to_native(e))

            if len(stmt_join) > 0:
                msg.obj.append(('statements', stmt_join))

            try:
                response = self.omapi.query_server(msg)
                if response.opcode != OMAPI_OP_UPDATE:
                    self.module.fail_json(msg="Failed to add host, ensure authentication and host parameters "
                                              "are valid.")
                self.module.exit_json(changed=True, lease=self.unpack_facts(response.obj))
            except OmapiError as e:
                self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
        # Forge update message
        else:
            response_obj = self.unpack_facts(host_response.obj)
            fields_to_update = {}

            # Only the IP address can be updated in place.
            if to_bytes('ip-address', errors='surrogate_or_strict') not in response_obj or \
                    unpack_ip(response_obj[to_bytes('ip-address', errors='surrogate_or_strict')]) != self.module.params['ip']:
                fields_to_update['ip-address'] = pack_ip(self.module.params['ip'])

            # Name cannot be changed
            if 'name' not in response_obj or response_obj['name'] != self.module.params['hostname']:
                self.module.fail_json(msg="Changing hostname is not supported. Old was %s, new is %s. "
                                          "Please delete host and add new." %
                                      (response_obj['name'], self.module.params['hostname']))

            """
            # It seems statements are not returned by OMAPI, then we cannot modify them at this moment.
            if 'statements' not in response_obj and len(self.module.params['statements']) > 0 or \
                response_obj['statements'] != self.module.params['statements']:
                with open('/tmp/omapi', 'w') as fb:
                    for (k,v) in iteritems(response_obj):
                        fb.writelines('statements: %s %s\n' % (k, v))
            """
            if len(fields_to_update) == 0:
                # Nothing differs from the current lease: report no change.
                self.module.exit_json(changed=False, lease=response_obj)
            else:
                msg = OmapiMessage.update(host_response.handle)
                msg.update_object(fields_to_update)

                try:
                    response = self.omapi.query_server(msg)
                    if response.opcode != OMAPI_OP_STATUS:
                        self.module.fail_json(msg="Failed to modify host, ensure authentication and host parameters "
                                                  "are valid.")
                    self.module.exit_json(changed=True)
                except OmapiError as e:
                    self.module.fail_json(msg="OMAPI error: %s" % to_native(e))

    def remove_host(self):
        """Delete the host lease by MAC address; always exits the module."""
        try:
            self.omapi.del_host(self.module.params['macaddr'])
            self.module.exit_json(changed=True)
        except OmapiErrorNotFound:
            # Host already absent: success with no change reported.
            self.module.exit_json()
        except OmapiError as e:
            self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
def main():
    """Entry point: build the module spec, validate credentials and dispatch."""
    argument_spec = dict(
        state=dict(type='str', required=True, choices=['absent', 'present']),
        host=dict(type='str', default="localhost"),
        port=dict(type='int', default=7911),
        key_name=dict(type='str', required=True),
        key=dict(type='str', required=True, no_log=True),
        macaddr=dict(type='str', required=True),
        hostname=dict(type='str', aliases=['name']),
        ip=dict(type='str'),
        ddns=dict(type='bool', default=False),
        statements=dict(type='list', default=[]),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False,
    )

    if not pureomapi_found:
        module.fail_json(msg=missing_required_lib('pypureomapi'), exception=PUREOMAPI_IMP_ERR)

    # Both TSIG credentials are mandatory and must not be blank strings.
    for credential in ('key', 'key_name'):
        if module.params[credential] is None or len(module.params[credential]) == 0:
            module.fail_json(msg="'%s' parameter cannot be empty." % credential)

    host_manager = OmapiHostManager(module)
    try:
        if module.params['state'] == 'present':
            host_manager.setup_host()
        elif module.params['state'] == 'absent':
            host_manager.remove_host()
    except ValueError as e:
        module.fail_json(msg="OMAPI input value error: %s" % to_native(e))


if __name__ == '__main__':
    main()
| gpl-3.0 |
WillieMaddox/scipy | scipy/sparse/csc.py | 58 | 6330 | """Compressed Sparse Column matrix format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['csc_matrix', 'isspmatrix_csc']
import numpy as np
from scipy._lib.six import xrange
from ._sparsetools import csc_tocsr
from . import _sparsetools
from .sputils import upcast, isintlike, IndexMixin, get_index_dtype
from .compressed import _cs_matrix
class csc_matrix(_cs_matrix, IndexMixin):
    """
    Compressed Sparse Column matrix

    This can be instantiated in several ways:

        csc_matrix(D)
            with a dense matrix or rank-2 ndarray D

        csc_matrix(S)
            with another sparse matrix S (equivalent to S.tocsc())

        csc_matrix((M, N), [dtype])
            to construct an empty matrix with shape (M, N)
            dtype is optional, defaulting to dtype='d'.

        csc_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
            where ``data``, ``row_ind`` and ``col_ind`` satisfy the
            relationship ``a[row_ind[k], col_ind[k]] = data[k]``.

        csc_matrix((data, indices, indptr), [shape=(M, N)])
            is the standard CSC representation where the row indices for
            column i are stored in ``indices[indptr[i]:indptr[i+1]]``
            and their corresponding values are stored in
            ``data[indptr[i]:indptr[i+1]]``. If the shape parameter is
            not supplied, the matrix dimensions are inferred from
            the index arrays.

    Attributes
    ----------
    dtype : dtype
        Data type of the matrix
    shape : 2-tuple
        Shape of the matrix
    ndim : int
        Number of dimensions (this is always 2)
    nnz
        Number of nonzero elements
    data
        Data array of the matrix
    indices
        CSC format index array
    indptr
        CSC format index pointer array
    has_sorted_indices
        Whether indices are sorted

    Notes
    -----

    Sparse matrices can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    Advantages of the CSC format
        - efficient arithmetic operations CSC + CSC, CSC * CSC, etc.
        - efficient column slicing
        - fast matrix vector products (CSR, BSR may be faster)

    Disadvantages of the CSC format
      - slow row slicing operations (consider CSR)
      - changes to the sparsity structure are expensive (consider LIL or DOK)

    Examples
    --------

    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> csc_matrix((3, 4), dtype=np.int8).toarray()
    array([[0, 0, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 0]], dtype=int8)

    >>> row = np.array([0, 2, 2, 0, 1, 2])
    >>> col = np.array([0, 0, 1, 2, 2, 2])
    >>> data = np.array([1, 2, 3, 4, 5, 6])
    >>> csc_matrix((data, (row, col)), shape=(3, 3)).toarray()
    array([[1, 0, 4],
           [0, 0, 5],
           [2, 3, 6]])

    >>> indptr = np.array([0, 2, 3, 6])
    >>> indices = np.array([0, 2, 2, 0, 1, 2])
    >>> data = np.array([1, 2, 3, 4, 5, 6])
    >>> csc_matrix((data, indices, indptr), shape=(3, 3)).toarray()
    array([[1, 0, 4],
           [0, 0, 5],
           [2, 3, 6]])

    """

    def transpose(self, copy=False):
        """Return the transpose as a CSR matrix.

        The CSC arrays reinterpret directly as CSR of the transposed shape,
        so no data conversion is needed (data is shared unless copy=True).
        """
        from .csr import csr_matrix
        M,N = self.shape
        return csr_matrix((self.data,self.indices,self.indptr),(N,M),copy=copy)

    def __iter__(self):
        # Row-wise iteration: convert once to CSR (fast row slicing) and
        # yield each row as a 1 x n sparse matrix.
        csr = self.tocsr()
        for r in xrange(self.shape[0]):
            yield csr[r,:]

    def tocsc(self, copy=False):
        """Return self in CSC format: a copy when requested, else self."""
        if copy:
            return self.copy()
        else:
            return self

    def tocsr(self):
        """Convert this matrix to Compressed Sparse Row format."""
        M,N = self.shape
        # Pick an index dtype wide enough for both nnz and the column count.
        idx_dtype = get_index_dtype((self.indptr, self.indices),
                                    maxval=max(self.nnz, N))
        indptr = np.empty(M + 1, dtype=idx_dtype)
        indices = np.empty(self.nnz, dtype=idx_dtype)
        data = np.empty(self.nnz, dtype=upcast(self.dtype))

        # Delegate the actual conversion to the compiled sparsetools routine.
        csc_tocsr(M, N,
                  self.indptr.astype(idx_dtype),
                  self.indices.astype(idx_dtype),
                  self.data,
                  indptr,
                  indices,
                  data)

        from .csr import csr_matrix
        A = csr_matrix((data, indices, indptr), shape=self.shape)
        # csc_tocsr emits sorted row indices per column, hence sorted CSR.
        A.has_sorted_indices = True
        return A

    def __getitem__(self, key):
        # Use CSR to implement fancy indexing: index the transpose (a CSR
        # view of the same data) with swapped row/col.
        row, col = self._unpack_index(key)
        # Things that return submatrices. row or col is a int or slice.
        if (isinstance(row, slice) or isinstance(col, slice) or
                isintlike(row) or isintlike(col)):
            return self.T[col, row].T
        # Things that return a sequence of values.
        else:
            return self.T[col, row]

    def nonzero(self):
        # CSC can't use _cs_matrix's .nonzero method because it
        # returns the indices sorted for self transposed.

        # Get row and col indices, from _cs_matrix.tocoo
        major_dim, minor_dim = self._swap(self.shape)
        minor_indices = self.indices
        major_indices = np.empty(len(minor_indices), dtype=self.indptr.dtype)
        _sparsetools.expandptr(major_dim, self.indptr, major_indices)
        row, col = self._swap((major_indices, minor_indices))

        # Sort them to be in C-style order
        ind = np.lexsort((col, row))
        row = row[ind]
        col = col[ind]

        return row, col

    nonzero.__doc__ = _cs_matrix.nonzero.__doc__

    def getrow(self, i):
        """Returns a copy of row i of the matrix, as a (1 x n)
        CSR matrix (row vector).
        """
        # we convert to CSR to maintain compatibility with old impl.
        # in spmatrix.getrow()
        return self._get_submatrix(i, slice(None)).tocsr()

    def getcol(self, i):
        """Returns a copy of column i of the matrix, as a (m x 1)
        CSC matrix (column vector).
        """
        return self._get_submatrix(slice(None), i)

    # these functions are used by the parent class (_cs_matrix)
    # to remove redundancy between csc_matrix and csr_matrix
    def _swap(self,x):
        """swap the members of x if this is a column-oriented matrix
        """
        return (x[1],x[0])
def isspmatrix_csc(x):
    """Is x of csc_matrix type?

    Parameters
    ----------
    x
        object to check for being a csc matrix

    Returns
    -------
    bool
        True if x is a csc matrix, False otherwise
    """
    return isinstance(x, csc_matrix)
| bsd-3-clause |
amir-qayyum-khan/edx-platform | common/lib/xmodule/xmodule/modulestore/split_mongo/split_draft.py | 15 | 32679 | """
Module for the dual-branch fall-back Draft->Published Versioning ModuleStore
"""
from xmodule.modulestore.split_mongo.split import SplitMongoModuleStore, EXCLUDE_ALL
from xmodule.exceptions import InvalidVersionError
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.exceptions import InsufficientSpecificationError, ItemNotFoundError
from xmodule.modulestore.draft_and_published import (
ModuleStoreDraftAndPublished, DIRECT_ONLY_CATEGORIES, UnsupportedRevisionError
)
from opaque_keys.edx.locator import CourseLocator, LibraryLocator, LibraryUsageLocator
from xmodule.modulestore.split_mongo import BlockKey
from contracts import contract
class DraftVersioningModuleStore(SplitMongoModuleStore, ModuleStoreDraftAndPublished):
"""
A subclass of Split that supports a dual-branch fall-back versioning framework
with a Draft branch that falls back to a Published branch.
"""
    def create_course(self, org, course, run, user_id, skip_auto_publish=False, **kwargs):
        """
        Creates and returns the course.

        Args:
            org (str): the organization that owns the course
            course (str): the name of the course
            run (str): the name of the run
            user_id: id of the user creating the course
            skip_auto_publish (bool): if True, do not auto-publish the new
                course root onto the published branch
            kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation

        Returns: a CourseDescriptor
        """
        master_branch = kwargs.pop('master_branch', ModuleStoreEnum.BranchName.draft)
        with self.bulk_operations(CourseLocator(org, course, run), ignore_case=True):
            item = super(DraftVersioningModuleStore, self).create_course(
                org, course, run, user_id, master_branch=master_branch, **kwargs
            )
            if master_branch == ModuleStoreEnum.BranchName.draft and not skip_auto_publish:
                # any other value is hopefully only cloning or doing something which doesn't want this value add
                self._auto_publish_no_children(item.location, item.location.category, user_id, **kwargs)

                # create any other necessary things as a side effect: ensure they populate the draft branch
                # and rely on auto publish to populate the published branch: split's create course doesn't
                # call super b/c it needs the auto publish above to have happened before any of the create_items
                # in this; so, this manually calls the grandparent and above methods.
                with self.branch_setting(ModuleStoreEnum.Branch.draft_preferred, item.id):
                    # NOTE: DO NOT CHANGE THE SUPER. See comment above
                    super(SplitMongoModuleStore, self).create_course(
                        org, course, run, user_id, runtime=item.runtime, **kwargs
                    )

            return item
def get_course(self, course_id, depth=0, **kwargs):
course_id = self._map_revision_to_branch(course_id)
return super(DraftVersioningModuleStore, self).get_course(course_id, depth=depth, **kwargs)
def get_library(self, library_id, depth=0, head_validation=True, **kwargs):
if not head_validation and library_id.version_guid:
return SplitMongoModuleStore.get_library(
self, library_id, depth=depth, head_validation=head_validation, **kwargs
)
library_id = self._map_revision_to_branch(library_id)
return super(DraftVersioningModuleStore, self).get_library(library_id, depth=depth, **kwargs)
def clone_course(self, source_course_id, dest_course_id, user_id, fields=None, revision=None, **kwargs):
"""
See :py:meth: xmodule.modulestore.split_mongo.split.SplitMongoModuleStore.clone_course
"""
dest_course_id = self._map_revision_to_branch(dest_course_id, revision=revision)
return super(DraftVersioningModuleStore, self).clone_course(
source_course_id, dest_course_id, user_id, fields=fields, **kwargs
)
def get_course_summaries(self, **kwargs):
"""
Returns course summaries on the Draft or Published branch depending on the branch setting.
"""
branch_setting = self.get_branch_setting()
if branch_setting == ModuleStoreEnum.Branch.draft_preferred:
return super(DraftVersioningModuleStore, self).get_course_summaries(
ModuleStoreEnum.BranchName.draft, **kwargs
)
elif branch_setting == ModuleStoreEnum.Branch.published_only:
return super(DraftVersioningModuleStore, self).get_course_summaries(
ModuleStoreEnum.BranchName.published, **kwargs
)
else:
raise InsufficientSpecificationError()
def get_courses(self, **kwargs):
"""
Returns all the courses on the Draft or Published branch depending on the branch setting.
"""
branch_setting = self.get_branch_setting()
if branch_setting == ModuleStoreEnum.Branch.draft_preferred:
return super(DraftVersioningModuleStore, self).get_courses(ModuleStoreEnum.BranchName.draft, **kwargs)
elif branch_setting == ModuleStoreEnum.Branch.published_only:
return super(DraftVersioningModuleStore, self).get_courses(ModuleStoreEnum.BranchName.published, **kwargs)
else:
raise InsufficientSpecificationError()
def _auto_publish_no_children(self, location, category, user_id, **kwargs):
"""
Publishes item if the category is DIRECT_ONLY. This assumes another method has checked that
location points to the head of the branch and ignores the version. If you call this in any
other context, you may blow away another user's changes.
NOTE: only publishes the item at location: no children get published.
"""
if location.branch == ModuleStoreEnum.BranchName.draft and category in DIRECT_ONLY_CATEGORIES:
# version_agnostic b/c of above assumption in docstring
self.publish(location.version_agnostic(), user_id, blacklist=EXCLUDE_ALL, **kwargs)
    def copy_from_template(self, source_keys, dest_key, user_id, **kwargs):
        """
        See :py:meth `SplitMongoModuleStore.copy_from_template`

        Additionally auto-publishes any copied direct-only blocks (and their
        direct-only descendants) when the destination is the draft branch.
        """
        source_keys = [self._map_revision_to_branch(key) for key in source_keys]
        dest_key = self._map_revision_to_branch(dest_key)
        # NOTE(review): kwargs.get leaves 'head_validation' inside kwargs and
        # yields None when absent -- confirm the parent treats None like its
        # declared default.
        head_validation = kwargs.get('head_validation')
        new_keys = super(DraftVersioningModuleStore, self).copy_from_template(
            source_keys, dest_key, user_id, head_validation
        )
        if dest_key.branch == ModuleStoreEnum.BranchName.draft:
            # Check if any of new_keys or their descendants need to be auto-published.
            # We don't use _auto_publish_no_children since children may need to be published.
            with self.bulk_operations(dest_key.course_key):
                keys_to_check = list(new_keys)
                while keys_to_check:
                    usage_key = keys_to_check.pop()
                    if usage_key.category in DIRECT_ONLY_CATEGORIES:
                        self.publish(usage_key.version_agnostic(), user_id, blacklist=EXCLUDE_ALL, **kwargs)
                        children = getattr(self.get_item(usage_key, **kwargs), "children", [])
                        # e.g. if usage_key is a chapter, it may have an auto-publish sequential child
                        keys_to_check.extend(children)
        return new_keys
    def update_item(self, descriptor, user_id, allow_not_found=False, force=False, asides=None, **kwargs):
        """
        Update the descriptor on the branch implied by the current branch setting.

        Direct-only categories are auto-published after the update. Signals are
        emitted only for changes visible on the published branch.
        """
        old_descriptor_locn = descriptor.location
        # Temporarily rewrite the location onto the active branch for the update.
        descriptor.location = self._map_revision_to_branch(old_descriptor_locn)
        emit_signals = descriptor.location.branch == ModuleStoreEnum.BranchName.published \
            or descriptor.location.block_type in DIRECT_ONLY_CATEGORIES
        with self.bulk_operations(descriptor.location.course_key, emit_signals=emit_signals):
            item = super(DraftVersioningModuleStore, self).update_item(
                descriptor,
                user_id,
                allow_not_found=allow_not_found,
                force=force,
                asides=asides,
                **kwargs
            )
            self._auto_publish_no_children(item.location, item.location.category, user_id, **kwargs)
            # Restore the caller's original (branch-less) location before returning.
            descriptor.location = old_descriptor_locn
            return item
    def create_item(self, user_id, course_key, block_type, block_id=None, # pylint: disable=too-many-statements
                    definition_locator=None, fields=None, asides=None, force=False, skip_auto_publish=False, **kwargs):
        """
        See :py:meth `ModuleStoreDraftAndPublished.create_item`

        ``skip_auto_publish`` suppresses the automatic publish of direct-only
        categories that normally follows creation.
        """
        course_key = self._map_revision_to_branch(course_key)
        # Signals fire for published-branch changes and for direct-only blocks
        # (which are implicitly published below).
        emit_signals = course_key.branch == ModuleStoreEnum.BranchName.published \
            or block_type in DIRECT_ONLY_CATEGORIES
        with self.bulk_operations(course_key, emit_signals=emit_signals):
            item = super(DraftVersioningModuleStore, self).create_item(
                user_id, course_key, block_type, block_id=block_id,
                definition_locator=definition_locator, fields=fields, asides=asides,
                force=force, **kwargs
            )
            if not skip_auto_publish:
                self._auto_publish_no_children(item.location, item.location.category, user_id, **kwargs)
            return item
    def create_child(
            self, user_id, parent_usage_key, block_type, block_id=None,
            fields=None, asides=None, **kwargs
    ):
        """
        Create a block under ``parent_usage_key`` on the branch implied by the
        current branch setting.

        If the new child is a direct-only category, both the child and its
        parent are auto-published.
        """
        parent_usage_key = self._map_revision_to_branch(parent_usage_key)
        with self.bulk_operations(parent_usage_key.course_key):
            item = super(DraftVersioningModuleStore, self).create_child(
                user_id, parent_usage_key, block_type, block_id=block_id,
                fields=fields, asides=asides, **kwargs
            )
            # Publish both the child and the parent, if the child is a direct-only category
            self._auto_publish_no_children(item.location, item.location.category, user_id, **kwargs)
            self._auto_publish_no_children(parent_usage_key, item.location.category, user_id, **kwargs)
            return item
def delete_item(self, location, user_id, revision=None, skip_auto_publish=False, **kwargs):
    """
    Delete the given item from persistence. kwargs allow modulestore specific parameters.

    Args:
        location: UsageKey of the item to be deleted
        user_id: id of the user deleting the item
        revision:
            None - deletes the item and its subtree, and updates the parents per description above
            ModuleStoreEnum.RevisionOption.published_only - removes only Published versions
            ModuleStoreEnum.RevisionOption.all - removes both Draft and Published parents
                currently only provided by contentstore.views.item.orphan_handler
            Otherwise, raises a ValueError.
        skip_auto_publish: when True, do not re-publish a direct-only parent
            after deleting a draft child.
    """
    allowed_revisions = [
        None,
        ModuleStoreEnum.RevisionOption.published_only,
        ModuleStoreEnum.RevisionOption.all
    ]
    if revision not in allowed_revisions:
        raise UnsupportedRevisionError(allowed_revisions)
    autopublish_parent = False
    with self.bulk_operations(location.course_key):
        if isinstance(location, LibraryUsageLocator):
            branches_to_delete = [ModuleStoreEnum.BranchName.library]  # Libraries don't yet have draft/publish support
        elif location.category in DIRECT_ONLY_CATEGORIES:
            # Direct-only blocks exist identically on both branches.
            branches_to_delete = [ModuleStoreEnum.BranchName.published, ModuleStoreEnum.BranchName.draft]
        elif revision == ModuleStoreEnum.RevisionOption.all:
            branches_to_delete = [ModuleStoreEnum.BranchName.published, ModuleStoreEnum.BranchName.draft]
        else:
            if revision == ModuleStoreEnum.RevisionOption.published_only:
                branches_to_delete = [ModuleStoreEnum.BranchName.published]
            elif revision is None:
                branches_to_delete = [ModuleStoreEnum.BranchName.draft]
                # Deleting a draft child changes the parent's child list; if
                # the parent is direct-only it must be re-published (children
                # excluded) so both branches stay in sync.
                parent_loc = self.get_parent_location(location.for_branch(ModuleStoreEnum.BranchName.draft))
                autopublish_parent = (
                    not skip_auto_publish and
                    parent_loc is not None and
                    parent_loc.block_type in DIRECT_ONLY_CATEGORIES
                )
        self._flag_publish_event(location.course_key)
        for branch in branches_to_delete:
            branched_location = location.for_branch(branch)
            super(DraftVersioningModuleStore, self).delete_item(branched_location, user_id)
        if autopublish_parent:
            self.publish(parent_loc.version_agnostic(), user_id, blacklist=EXCLUDE_ALL, **kwargs)
def _map_revision_to_branch(self, key, revision=None):
    """
    Maps RevisionOptions to BranchNames, inserting them into the key.

    With ``revision=None`` the branch already present on the key wins;
    otherwise the current branch setting decides between draft/published.
    Library keys always map to the single library branch.
    """
    if isinstance(key, (LibraryLocator, LibraryUsageLocator)):
        # Libraries don't yet have draft/publish support:
        draft_branch = ModuleStoreEnum.BranchName.library
        published_branch = ModuleStoreEnum.BranchName.library
    else:
        draft_branch = ModuleStoreEnum.BranchName.draft
        published_branch = ModuleStoreEnum.BranchName.published
    if revision == ModuleStoreEnum.RevisionOption.published_only:
        return key.for_branch(published_branch)
    elif revision == ModuleStoreEnum.RevisionOption.draft_only:
        return key.for_branch(draft_branch)
    elif revision is None:
        if key.branch is not None:
            # Key is already branch-qualified; leave it untouched.
            return key
        elif self.get_branch_setting(key) == ModuleStoreEnum.Branch.draft_preferred:
            return key.for_branch(draft_branch)
        else:
            return key.for_branch(published_branch)
    else:
        raise UnsupportedRevisionError()
def has_item(self, usage_key, revision=None):
    """Return True when an item exists at ``usage_key`` for the given revision."""
    branch_key = self._map_revision_to_branch(usage_key, revision=revision)
    return super(DraftVersioningModuleStore, self).has_item(branch_key)
def get_item(self, usage_key, depth=0, revision=None, **kwargs):
    """Fetch the item at ``usage_key``, resolved onto the branch implied by ``revision``."""
    branch_key = self._map_revision_to_branch(usage_key, revision=revision)
    return super(DraftVersioningModuleStore, self).get_item(branch_key, depth=depth, **kwargs)
def get_items(self, course_locator, revision=None, **kwargs):
    """
    Return the XModuleDescriptor instances matching within the course at
    ``course_locator``, resolved onto the branch implied by ``revision``.
    """
    branch_locator = self._map_revision_to_branch(course_locator, revision=revision)
    return super(DraftVersioningModuleStore, self).get_items(branch_locator, **kwargs)
def get_parent_location(self, location, revision=None, **kwargs):
    """
    Return the given location's parent location in this course, or None.

    revision:
        None - uses the branch setting for the revision
        ModuleStoreEnum.RevisionOption.published_only
            - return only the PUBLISHED parent if it exists, else None
        ModuleStoreEnum.RevisionOption.draft_preferred
            - return the DRAFT or PUBLISHED parent, preferring DRAFT, else None
    """
    # draft_preferred degrades to draft_only for the purposes of key mapping.
    if revision == ModuleStoreEnum.RevisionOption.draft_preferred:
        revision = ModuleStoreEnum.RevisionOption.draft_only
    branch_location = self._map_revision_to_branch(location, revision=revision)
    return super(DraftVersioningModuleStore, self).get_parent_location(branch_location, **kwargs)
def get_block_original_usage(self, usage_key):
    """
    If a block was inherited into another structure using copy_from_template,
    return the original block usage locator from which the copy was inherited.
    """
    branch_key = self._map_revision_to_branch(usage_key)
    return super(DraftVersioningModuleStore, self).get_block_original_usage(branch_key)
def get_orphans(self, course_key, **kwargs):
    """Return the orphaned blocks of the course on the active branch."""
    branch_key = self._map_revision_to_branch(course_key)
    return super(DraftVersioningModuleStore, self).get_orphans(branch_key, **kwargs)
def fix_not_found(self, course_key, user_id):
    """
    Fix any children pointing at non-existent blocks, on both the
    published and draft branches of the course.
    """
    revisions = (
        ModuleStoreEnum.RevisionOption.published_only,
        ModuleStoreEnum.RevisionOption.draft_only,
    )
    for revision in revisions:
        branch_key = self._map_revision_to_branch(course_key, revision)
        super(DraftVersioningModuleStore, self).fix_not_found(branch_key, user_id)
def has_changes(self, xblock):
    """
    Checks if the given block has unpublished changes.

    :param xblock: the block to check
    :return: True if the draft and published versions differ anywhere in
        the block's subtree (or if either copy is missing)
    """
    def get_course(branch_name):
        # Head structure of the block's course on the requested branch.
        return self._lookup_course(xblock.location.course_key.for_branch(branch_name)).structure

    def get_block(course_structure, block_key):
        return self._get_block_from_structure(course_structure, block_key)

    draft_course = get_course(ModuleStoreEnum.BranchName.draft)
    published_course = get_course(ModuleStoreEnum.BranchName.published)

    def has_changes_subtree(block_key):
        # Recursive depth-first comparison of the draft vs published subtree.
        draft_block = get_block(draft_course, block_key)
        if draft_block is None:  # temporary fix for bad pointers TNL-1141
            return True
        published_block = get_block(published_course, block_key)
        if published_block is None:
            return True
        # check if the draft has changed since the published was created
        if self._get_version(draft_block) != self._get_version(published_block):
            return True
        # check the children in the draft
        if 'children' in draft_block.fields:
            return any(
                [has_changes_subtree(child_block_id) for child_block_id in draft_block.fields['children']]
            )
        return False

    return has_changes_subtree(BlockKey.from_usage_key(xblock.location))
def publish(self, location, user_id, blacklist=None, **kwargs):
    """
    Publishes the subtree under location from the draft branch to the published branch.

    :param blacklist: descendants to exclude from the copy (e.g. EXCLUDE_ALL
        to publish just this node).
    Returns the newly published item.
    """
    super(DraftVersioningModuleStore, self).copy(
        user_id,
        # Directly using the replace function rather than the for_branch function
        # because for_branch obliterates the version_guid and will lead to missed version conflicts.
        # TODO Instead, the for_branch implementation should be fixed in the Opaque Keys library.
        location.course_key.replace(branch=ModuleStoreEnum.BranchName.draft),
        # We clear out the version_guid here because the location here is from the draft branch, and that
        # won't have the same version guid
        location.course_key.replace(branch=ModuleStoreEnum.BranchName.published, version_guid=None),
        [location],
        blacklist=blacklist
    )
    # Record that a publish happened so bulk-operation listeners can emit signals.
    self._flag_publish_event(location.course_key)
    return self.get_item(location.for_branch(ModuleStoreEnum.BranchName.published), **kwargs)
def unpublish(self, location, user_id, **kwargs):
    """
    Delete the published version of the item and return the (still present)
    draft item. Direct-only categories cannot be unpublished.
    """
    if location.block_type in DIRECT_ONLY_CATEGORIES:
        raise InvalidVersionError(location)
    with self.bulk_operations(location.course_key):
        self.delete_item(location, user_id, revision=ModuleStoreEnum.RevisionOption.published_only)
        draft_location = location.for_branch(ModuleStoreEnum.BranchName.draft)
        return self.get_item(draft_location, **kwargs)
def revert_to_published(self, location, user_id):
    """
    Reverts an item to its last published version (recursively traversing all of its descendants).
    If no published version exists, a VersionConflictError is thrown.
    If a published version exists but there is no draft version of this item or any of its descendants, this
    method is a no-op.

    :raises InvalidVersionError: if no published version exists for the location specified
    """
    if location.category in DIRECT_ONLY_CATEGORIES:
        # Direct-only blocks are always in sync with published; nothing to revert.
        return
    draft_course_key = location.course_key.for_branch(ModuleStoreEnum.BranchName.draft)
    with self.bulk_operations(draft_course_key):
        # get head version of Published branch
        published_course_structure = self._lookup_course(
            location.course_key.for_branch(ModuleStoreEnum.BranchName.published)
        ).structure
        published_block = self._get_block_from_structure(
            published_course_structure,
            BlockKey.from_usage_key(location)
        )
        if published_block is None:
            raise InvalidVersionError(location)
        # create a new versioned draft structure
        draft_course_structure = self._lookup_course(draft_course_key).structure
        new_structure = self.version_structure(draft_course_key, draft_course_structure, user_id)
        # remove the block and its descendants from the new structure
        self._remove_subtree(BlockKey.from_usage_key(location), new_structure['blocks'])

        # copy over the block and its descendants from the published branch
        def copy_from_published(root_block_id):
            """
            copies root_block_id and its descendants from published_course_structure to new_structure
            """
            self._update_block_in_structure(
                new_structure,
                root_block_id,
                self._get_block_from_structure(published_course_structure, root_block_id)
            )
            block = self._get_block_from_structure(new_structure, root_block_id)
            for child_block_id in block.fields.get('children', []):
                copy_from_published(child_block_id)

        copy_from_published(BlockKey.from_usage_key(location))
        # update course structure and index so the draft head points at the reverted structure
        self.update_structure(draft_course_key, new_structure)
        index_entry = self._get_index_if_valid(draft_course_key)
        if index_entry is not None:
            self._update_head(draft_course_key, index_entry, ModuleStoreEnum.BranchName.draft, new_structure['_id'])
def force_publish_course(self, course_locator, user_id, commit=False):
    """
    Helper method to forcefully publish a course,
    making the published branch point to the same structure as the draft branch.

    :param commit: when False this is a dry run; the current versions dict is
        returned without modifying the index.
    :return: the course index's ``versions`` dict (updated when committed),
        or None if the course index does not exist.
    """
    versions = None
    index_entry = self.get_course_index(course_locator)
    if index_entry is not None:
        versions = index_entry['versions']
        if commit:
            # update published branch version only if publish and draft point to different versions
            if versions['published-branch'] != versions['draft-branch']:
                self._update_head(
                    course_locator,
                    index_entry,
                    'published-branch',
                    index_entry['versions']['draft-branch']
                )
                self._flag_publish_event(course_locator)
                # Re-read so the returned versions reflect the update.
                return self.get_course_index(course_locator)['versions']
    return versions
def get_course_history_info(self, course_locator):
    """
    See :py:meth `xmodule.modulestore.split_mongo.split.SplitMongoModuleStore.get_course_history_info`
    """
    branch_locator = self._map_revision_to_branch(course_locator)
    return super(DraftVersioningModuleStore, self).get_course_history_info(branch_locator)
def get_course_successors(self, course_locator, version_history_depth=1):
    """
    See :py:meth `xmodule.modulestore.split_mongo.split.SplitMongoModuleStore.get_course_successors`
    """
    branch_locator = self._map_revision_to_branch(course_locator)
    return super(DraftVersioningModuleStore, self).get_course_successors(
        branch_locator, version_history_depth=version_history_depth
    )
def get_block_generations(self, block_locator):
    """
    See :py:meth `xmodule.modulestore.split_mongo.split.SplitMongoModuleStore.get_block_generations`
    """
    branch_locator = self._map_revision_to_branch(block_locator)
    return super(DraftVersioningModuleStore, self).get_block_generations(branch_locator)
def has_published_version(self, xblock):
    """
    Return whether this xblock has a published version (whether it's up to date or not).
    """
    published_head = self._get_head(xblock, ModuleStoreEnum.BranchName.published)
    return published_head is not None
def convert_to_draft(self, location, user_id):
    """
    No-op for Split: a draft version of the data always remains, so there
    is nothing to convert.

    :param location: the location of the source (its revision must be None)
    """
    return None
def _get_head(self, xblock, branch):
    """Return the block at the head of ``branch``, or None when that branch has no course container."""
    branch_course_key = xblock.location.course_key.for_branch(branch)
    try:
        structure = self._lookup_course(branch_course_key).structure
    except ItemNotFoundError:
        # There is no published version xblock container, e.g. Library
        return None
    return self._get_block_from_structure(structure, BlockKey.from_usage_key(xblock.location))
def _get_version(self, block):
"""
Return the version of the given database representation of a block.
"""
source_version = block.edit_info.source_version
return source_version if source_version is not None else block.edit_info.update_version
def import_xblock(self, user_id, course_key, block_type, block_id, fields=None, runtime=None, **kwargs):
    """
    Split-based modulestores need to import published blocks to both branches.

    When importing course content under a published-only branch setting, the
    block is first imported onto the draft branch (via a recursive call) and
    then published, which links the two branches' version histories.
    """
    with self.bulk_operations(course_key):
        # hardcode course root block id
        if block_type == 'course':
            block_id = self.DEFAULT_ROOT_COURSE_BLOCK_ID
        elif block_type == 'library':
            block_id = self.DEFAULT_ROOT_LIBRARY_BLOCK_ID
        new_usage_key = course_key.make_usage_key(block_type, block_id)
        # Both the course and library import process calls import_xblock().
        # If importing a course -and- the branch setting is published_only,
        # then the non-draft course blocks are being imported.
        is_course = isinstance(course_key, CourseLocator)
        if is_course and self.get_branch_setting() == ModuleStoreEnum.Branch.published_only:
            # Override any existing drafts (PLAT-297, PLAT-299). This import/publish step removes
            # any local changes during the course import.
            draft_course = course_key.for_branch(ModuleStoreEnum.BranchName.draft)
            with self.branch_setting(ModuleStoreEnum.Branch.draft_preferred, draft_course):
                # Importing the block and publishing the block links the draft & published blocks' version history.
                draft_block = self.import_xblock(user_id, draft_course, block_type, block_id, fields,
                                                 runtime, **kwargs)
                return self.publish(draft_block.location.version_agnostic(), user_id, blacklist=EXCLUDE_ALL, **kwargs)
        # do the import
        partitioned_fields = self.partition_fields_by_scope(block_type, fields)
        course_key = self._map_revision_to_branch(course_key)  # cast to branch_setting
        # _update_item_from_fields returns None when nothing changed; fall back
        # to fetching the (already up-to-date) item in that case.
        return self._update_item_from_fields(
            user_id, course_key, BlockKey(block_type, block_id), partitioned_fields, None,
            allow_not_found=True, force=True, **kwargs
        ) or self.get_item(new_usage_key)
def compute_published_info_internal(self, xblock):
    """
    Look up the published branch head for the block and, if one exists,
    cache who published it and when on the xblock itself.
    """
    published_head = self._get_head(xblock, ModuleStoreEnum.BranchName.published)
    if published_head is None:
        return
    # pylint: disable=protected-access
    xblock._published_by = published_head.edit_info.edited_by
    xblock._published_on = published_head.edit_info.edited_on
@contract(asset_key='AssetKey')
def find_asset_metadata(self, asset_key, **kwargs):
    """Look up asset metadata with the key mapped onto the active branch."""
    branch_key = self._map_revision_to_branch(asset_key)
    return super(DraftVersioningModuleStore, self).find_asset_metadata(branch_key, **kwargs)
def get_all_asset_metadata(self, course_key, asset_type, start=0, maxresults=-1, sort=None, **kwargs):
    """List asset metadata for the course on the active branch."""
    branch_key = self._map_revision_to_branch(course_key)
    return super(DraftVersioningModuleStore, self).get_all_asset_metadata(
        branch_key, asset_type, start, maxresults, sort, **kwargs
    )
def _update_course_assets(self, user_id, asset_key, update_function):
    """
    Apply ``update_function`` to the asset on the published branch, then on
    the draft branch.

    If the published-branch update raises, the draft branch is left
    untouched and the exception propagates.
    """
    for revision in (ModuleStoreEnum.RevisionOption.published_only,
                     ModuleStoreEnum.RevisionOption.draft_only):
        super(DraftVersioningModuleStore, self)._update_course_assets(
            user_id, self._map_revision_to_branch(asset_key, revision), update_function
        )
def save_asset_metadata_list(self, asset_metadata_list, user_id, import_only=False):
    """
    Save each asset's metadata to both the published and draft branches.

    The asset keys are temporarily rewritten onto each branch for the save,
    then restored to their original (branch-agnostic) values afterwards.
    """
    # Remember each asset's original key so it can be restored after saving.
    original_keys = [asset_md.asset_id for asset_md in asset_metadata_list]
    # Save with keys mapped onto the published branch.
    for asset_md in asset_metadata_list:
        asset_md.asset_id = self._map_revision_to_branch(
            asset_md.asset_id, ModuleStoreEnum.RevisionOption.published_only
        )
    super(DraftVersioningModuleStore, self).save_asset_metadata_list(asset_metadata_list, user_id, import_only)
    # Save with keys mapped onto the draft branch.
    for asset_md in asset_metadata_list:
        asset_md.asset_id = self._map_revision_to_branch(
            asset_md.asset_id, ModuleStoreEnum.RevisionOption.draft_only
        )
    super(DraftVersioningModuleStore, self).save_asset_metadata_list(asset_metadata_list, user_id, import_only)
    # Restore every asset's original key. (The previous code looped over the
    # saved keys but reassigned only the leftover `asset_md` loop variable,
    # leaving all but the last asset with a branch-qualified key.)
    for asset_md, original_key in zip(asset_metadata_list, original_keys):
        asset_md.asset_id = original_key
def _find_course_asset(self, asset_key):
    """Split specific lookup of a single course asset on the active branch."""
    branch_key = self._map_revision_to_branch(asset_key)
    return super(DraftVersioningModuleStore, self)._find_course_asset(branch_key)
def _find_course_assets(self, course_key):
    """
    Split specific lookup of the course's assets on the active branch.
    """
    branch_key = self._map_revision_to_branch(course_key)
    return super(DraftVersioningModuleStore, self)._find_course_assets(branch_key)
def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):
    """
    Copy all asset metadata from source to destination course, on both the
    published and draft branches.
    """
    for revision in (ModuleStoreEnum.RevisionOption.published_only,
                     ModuleStoreEnum.RevisionOption.draft_only):
        super(DraftVersioningModuleStore, self).copy_all_asset_metadata(
            self._map_revision_to_branch(source_course_key, revision),
            self._map_revision_to_branch(dest_course_key, revision),
            user_id,
        )
| agpl-3.0 |
odyaka341/pyglet | contrib/scene2d/tests/scene2d/RECT_FLAT_MOUSE.py | 29 | 1999 | #!/usr/bin/env python
'''Testing mouse interaction
The cell the mouse is hovering over should highlight in red.
Clicking in a cell should highliht that cell green. Clicking again will
clear the highlighting.
Clicking on the ball sprite should highlight it and not underlying cells.
You may press the arrow keys to scroll the focus around the map (this
will move the map eventually)
Press escape or close the window to finish the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import unittest
from render_base import RenderBase
from scene2d import Tile, Sprite
from pyglet.event import event
from scene2d.event import for_cells, for_sprites
from scene2d.drawable import TintEffect
from scene2d.debug import gen_rect_map
class RectFlatMouseTest(RenderBase):
    """Interactive check of hover/click highlighting on a flat rect map."""

    def test_main(self):
        # 10x10 map of empty 32x32 cells in a 256x256 window.
        self.init_window(256, 256)
        self.set_map(gen_rect_map([[{}]*10]*10, 32, 32))
        self.w.push_handlers(self.view)
        self.view.allow_oob = False

        @event(self.view)
        @for_cells()
        def on_mouse_enter(cells):
            # Tint hovered cells red; remember the effect so it can be removed.
            for cell in cells:
                e = TintEffect((1, .5, .5, 1))
                cell.properties['hover'] = e
                cell.add_effect(e)

        @event(self.view)
        @for_cells()
        def on_mouse_leave(cells):
            for cell in cells:
                cell.remove_effect(cell.properties['hover'])

        @event(self.view)
        @for_cells()
        @for_sprites()
        def on_mouse_press(objs, x, y, button, modifiers):
            # Toggle a green tint on the clicked cell or sprite.
            for obj in objs:
                if 'clicked' in obj.properties:
                    obj.remove_effect(obj.properties['clicked'])
                    del obj.properties['clicked']
                else:
                    e = TintEffect((.5, 1, .5, 1))
                    obj.properties['clicked'] = e
                    obj.add_effect(e)
            return

        self.show_focus()
        self.run_test()
# Allow the test to be run directly as a script.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
jhroot/elife-bot | tests/activity/test_activity_apply_version_number.py | 2 | 13707 | import unittest
from ddt import ddt, data, unpack
import settings_mock
from activity.activity_ApplyVersionNumber import activity_ApplyVersionNumber
from mock import mock, patch
import test_activity_data as test_data
from classes_mock import FakeSession
import shutil
import helpers
# Fixture: S3 key names for article 15224 as deposited (no version suffix
# on the file names yet), covering figures, PDFs, XML, and media files.
example_key_names = [u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig1-figsupp1.tif',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig1-figsupp2.tif',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig1.tif',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig2-figsupp1.tif',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig2-figsupp2.tif',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig2.tif',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig3-figsupp1.tif',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig3.tif',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig4-figsupp1.tif',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig4.tif',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig5-figsupp1.tif',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig5.tif',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-figures.pdf',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-resp-fig1.tif',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-resp-fig2.tif',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-resp-fig3.tif',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224.pdf',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224.xml',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-media1-code1.wrl',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-media.mp4',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-media1.mov',
                     u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-resp-media1.avi']

# Fixture: expected old-name -> new-name mapping when applying version 1.
# Note that video/audio media files are intentionally left unversioned.
example_file_name_map = {u'elife-15224-fig2-figsupp1.tif': u'elife-15224-fig2-figsupp1-v1.tif',
                         u'elife-15224-fig3.tif': u'elife-15224-fig3-v1.tif',
                         u'elife-15224-fig4.tif': u'elife-15224-fig4-v1.tif',
                         u'elife-15224.xml': u'elife-15224-v1.xml',
                         u'elife-15224-resp-fig2.tif': u'elife-15224-resp-fig2-v1.tif',
                         u'elife-15224-fig4-figsupp1.tif': u'elife-15224-fig4-figsupp1-v1.tif',
                         u'elife-15224-resp-fig3.tif': u'elife-15224-resp-fig3-v1.tif',
                         u'elife-15224-figures.pdf': u'elife-15224-figures-v1.pdf',
                         u'elife-15224-resp-fig1.tif': u'elife-15224-resp-fig1-v1.tif',
                         u'elife-15224-fig5-figsupp1.tif': u'elife-15224-fig5-figsupp1-v1.tif',
                         u'elife-15224.pdf': u'elife-15224-v1.pdf',
                         u'elife-15224-fig1-figsupp2.tif': u'elife-15224-fig1-figsupp2-v1.tif',
                         u'elife-15224-fig1-figsupp1.tif': u'elife-15224-fig1-figsupp1-v1.tif',
                         u'elife-15224-fig3-figsupp1.tif': u'elife-15224-fig3-figsupp1-v1.tif',
                         u'elife-15224-fig1.tif': u'elife-15224-fig1-v1.tif',
                         u'elife-15224-fig2.tif': u'elife-15224-fig2-v1.tif',
                         u'elife-15224-fig2-figsupp2.tif': u'elife-15224-fig2-figsupp2-v1.tif',
                         u'elife-15224-fig5.tif': u'elife-15224-fig5-v1.tif',
                         u'elife-15224-media1-code1.wrl': u'elife-15224-media1-code1-v1.wrl',
                         u'elife-15224-media.mp4': u'elife-15224-media.mp4',
                         u'elife-15224-media1.mov': u'elife-15224-media1.mov',
                         u'elife-15224-resp-media1.avi': u'elife-15224-resp-media1.avi'}

# Fixture: S3 key names that already carry a -v1 suffix (re-versioning case).
example_key_names_with_version = [u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig1-figsupp1-v1.tif',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig1-figsupp2-v1.tif',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig1-v1.tif',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig2-figsupp1-v1.tif',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig2-figsupp2-v1.tif',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig2-v1.tif',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig3-figsupp1-v1.tif',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig3-v1.tif',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig4-figsupp1-v1.tif',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig4-v1.tif',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig5-figsupp1-v1.tif',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-fig5-v1.tif',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-figures-v1.pdf',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-resp-fig1-v1.tif',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-resp-fig2-v1.tif',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-resp-fig3-v1.tif',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-v1.pdf',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-v1.xml',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-media1-code1-v1.wrl',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-media.mp4',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-media1.mov',
                                  u'15224.1/fec8dcd1-76df-4921-93de-4bf8b8ab70eb/elife-15224-resp-media1.avi']

# Fixture: expected mapping when bumping already-versioned names from v1 to v2.
example_file_name_map_with_version = {u'elife-15224-fig1-figsupp1-v1.tif': u'elife-15224-fig1-figsupp1-v2.tif',
                                      u'elife-15224-fig2-figsupp1-v1.tif': u'elife-15224-fig2-figsupp1-v2.tif',
                                      u'elife-15224-fig3-v1.tif': u'elife-15224-fig3-v2.tif',
                                      u'elife-15224-fig4-v1.tif': u'elife-15224-fig4-v2.tif',
                                      u'elife-15224-resp-fig2-v1.tif': u'elife-15224-resp-fig2-v2.tif',
                                      u'elife-15224-fig4-figsupp1-v1.tif': u'elife-15224-fig4-figsupp1-v2.tif',
                                      u'elife-15224-resp-fig3-v1.tif': u'elife-15224-resp-fig3-v2.tif',
                                      u'elife-15224-figures-v1.pdf': u'elife-15224-figures-v2.pdf',
                                      u'elife-15224-resp-fig1-v1.tif': u'elife-15224-resp-fig1-v2.tif',
                                      u'elife-15224-fig5-figsupp1-v1.tif': u'elife-15224-fig5-figsupp1-v2.tif',
                                      u'elife-15224-v1.pdf': u'elife-15224-v2.pdf',
                                      u'elife-15224-fig1-figsupp2-v1.tif': u'elife-15224-fig1-figsupp2-v2.tif',
                                      u'elife-15224-fig3-figsupp1-v1.tif': u'elife-15224-fig3-figsupp1-v2.tif',
                                      u'elife-15224-fig1-v1.tif': u'elife-15224-fig1-v2.tif',
                                      u'elife-15224-fig2-v1.tif': u'elife-15224-fig2-v2.tif',
                                      u'elife-15224-fig2-figsupp2-v1.tif': u'elife-15224-fig2-figsupp2-v2.tif',
                                      u'elife-15224-fig5-v1.tif': u'elife-15224-fig5-v2.tif',
                                      u'elife-15224-v1.xml': u'elife-15224-v2.xml',
                                      u'elife-15224-media1-code1-v1.wrl': u'elife-15224-media1-code1-v2.wrl',
                                      u'elife-15224-media.mp4': u'elife-15224-media.mp4',
                                      u'elife-15224-media1.mov': u'elife-15224-media1.mov',
                                      u'elife-15224-resp-media1.avi': u'elife-15224-resp-media1.avi'}
@ddt
class MyTestCase(unittest.TestCase):
    """Unit tests for activity_ApplyVersionNumber."""

    def setUp(self):
        # Build the activity with mocked settings and no workflow context.
        self.applyversionnumber = activity_ApplyVersionNumber(settings_mock, None, None, None, None)
        self.test_dest_folder = 'tests/files_dest_ApplyVersionNumber'
        helpers.create_folder(self.test_dest_folder)

    def tearDown(self):
        helpers.delete_folder(self.test_dest_folder, True)

    # Mock decorators apply bottom-up: mock_session patches Session,
    # fake_emit_monitor_event patches emit_monitor_event.
    @patch.object(activity_ApplyVersionNumber, 'emit_monitor_event')
    @patch('activity.activity_ApplyVersionNumber.Session')
    @data(test_data.session_example)
    def test_do_activity_no_version_error(self, session_example, mock_session, fake_emit_monitor_event):
        #given
        # Copy so deleting 'version' does not mutate the shared fixture.
        session_example = session_example.copy()
        del session_example['version']
        mock_session.return_value = FakeSession(session_example)
        data = test_data.ApplyVersionNumber_data_no_renaming
        #when
        result = self.applyversionnumber.do_activity(data)
        #then
        fake_emit_monitor_event.assert_called_with(settings_mock, session_example['article_id'], None, data['run'],
                                                   self.applyversionnumber.pretty_name, "error",
                                                   "Error in applying version number to files for " +
                                                   session_example['article_id'] +
                                                   " message: No version available")
        self.assertEqual(result, self.applyversionnumber.ACTIVITY_PERMANENT_FAILURE)

    def test_find_xml_filename_in_map(self):
        new_name = self.applyversionnumber.find_xml_filename_in_map(example_file_name_map)
        self.assertEqual(new_name, u'elife-15224-v1.xml')

    @unpack
    @data({'key_names': example_key_names, 'version': '1', 'expected': example_file_name_map},
          {'key_names': example_key_names_with_version, 'version': '2', 'expected': example_file_name_map_with_version})
    def test_build_file_name_map(self, key_names, version, expected):
        result = self.applyversionnumber.build_file_name_map(key_names, version)
        self.assertDictEqual(result, expected)

    @unpack
    @data({'file': u'elife-15224.xml', 'version': '1', 'expected': u'elife-15224-v1.xml'},
          {'file': u'elife-15224-v1.xml', 'version': '1', 'expected': u'elife-15224-v1.xml'},
          {'file': u'elife-15224-v1.xml', 'version': '2', 'expected': u'elife-15224-v2.xml'})
    def test_new_filename(self, file, version, expected):
        result = self.applyversionnumber.new_filename(file, version)
        self.assertEqual(result, expected)

    @unpack
    @data({'file': u'elife-15224-v1.xml', 'expected': u'elife-15224-v1-rewritten.xml'},
          {'file': u'simple-jats-doctype-1.1d3.xml', 'expected': u'simple-jats-doctype-1.1d3.xml'},
          {'file': u'simple-jats-doctype-1.1.xml', 'expected': u'simple-jats-doctype-1.1.xml'})
    def test_rewrite_xml_file(self, file, expected):
        # Patch here in order to use ddt data
        patcher = patch('activity.activity_ApplyVersionNumber.path.join')
        mock_path_join = patcher.start()
        #given
        shutil.copy(u'tests/files_source/ApplyVersionNumber/'+ file, u'tests/files_dest_ApplyVersionNumber/'+ file)
        mock_path_join.return_value = u'tests/files_dest_ApplyVersionNumber/'+ file
        #when
        self.applyversionnumber.rewrite_xml_file(file, example_file_name_map)
        #then
        with open(u'tests/files_dest_ApplyVersionNumber/'+ file, 'r') as result_file:
            result_file_content = result_file.read()
        with open(u'tests/files_source/ApplyVersionNumber/' + expected, 'r') as expected_file:
            expected_file_content = expected_file.read()
        self.assertEqual(result_file_content, expected_file_content)
        patcher.stop()

    @patch('activity.activity_ApplyVersionNumber.path.join')
    def test_rewrite_xml_file_no_changes(self, mock_path_join):
        #given
        # Start from the already-rewritten file: a second rewrite must be a no-op.
        shutil.copy(u'tests/files_source/ApplyVersionNumber/elife-15224-v1-rewritten.xml', u'tests/files_dest_ApplyVersionNumber/elife-15224-v1.xml')
        mock_path_join.return_value = u'tests/files_dest_ApplyVersionNumber/elife-15224-v1.xml'
        #when
        self.applyversionnumber.rewrite_xml_file(u'elife-15224-v1.xml', example_file_name_map)
        #then
        with open(u'tests/files_dest_ApplyVersionNumber/elife-15224-v1.xml', 'r') as result_file:
            result_file_content = result_file.read()
        with open(u'tests/files_source/ApplyVersionNumber/elife-15224-v1-rewritten.xml', 'r') as expected_file:
            expected_file_content = expected_file.read()
        self.assertEqual(result_file_content, expected_file_content)
# Allow the test module to be run directly as a script.
if __name__ == '__main__':
    unittest.main()
| mit |
StuartLittlefair/astropy | .pyinstaller/run_astropy_tests.py | 8 | 3186 | import os
import sys
import pytest
import shutil
import astropy # noqa
ROOT = os.path.join(os.path.dirname(__file__), '../')

# Make sure we don't allow any arguments to be passed - some tests call
# sys.executable which becomes this script when producing a pyinstaller
# bundle, but we should just error in this case since this is not the
# regular Python interpreter.
if len(sys.argv) > 1:
    print("Extra arguments passed, exiting early")
    sys.exit(1)

# Mirror astropy's 'tests' sub-packages into a standalone 'astropy_tests'
# package so pytest can collect them outside the frozen astropy package.
# NOTE(review): root.replace() substitutes *every* occurrence of
# 'astropy' in the path, so this assumes the checkout path itself does
# not contain 'astropy' before the package directory -- TODO confirm.
for root, dirnames, files in os.walk(os.path.join(ROOT, 'astropy')):

    # Copy over the astropy 'tests' directories and their contents
    for dirname in dirnames:
        final_dir = os.path.relpath(
            os.path.join(root.replace('astropy', 'astropy_tests'), dirname), ROOT)
        # We only copy over 'tests' directories, but not astropy/tests (only
        # astropy/tests/tests) since that is not just a directory with tests.
        if dirname == 'tests' and not root.endswith('astropy'):
            shutil.copytree(os.path.join(root, dirname), final_dir, dirs_exist_ok=True)
        else:
            # Create empty __init__.py files so that 'astropy_tests' still
            # behaves like a single package, otherwise pytest gets confused
            # by the different conftest.py files.
            # (Fix: the original computed init_filename, never used it, and
            # rebuilt the identical path two more times.)
            init_filename = os.path.join(final_dir, '__init__.py')
            if not os.path.exists(init_filename):
                os.makedirs(final_dir, exist_ok=True)
                with open(init_filename, 'w') as f:
                    f.write("#")

    # Copy over all conftest.py files
    for filename in files:
        if filename == 'conftest.py':
            final_file = os.path.relpath(
                os.path.join(root.replace('astropy', 'astropy_tests'), filename), ROOT)
            shutil.copy2(os.path.join(root, filename), final_file)

# Add the top-level __init__.py file
with open(os.path.join('astropy_tests', '__init__.py'), 'w') as f:
    f.write("#")

# Copy the top-level conftest.py
shutil.copy2(os.path.join(ROOT, 'astropy', 'conftest.py'),
             os.path.join('astropy_tests', 'conftest.py'))

# We skip a few tests, which are generally ones that rely on explicitly
# checking the name of the current module (which ends up starting with
# astropy_tests rather than astropy).
SKIP_TESTS = ['test_exception_logging_origin',
              'test_log',
              'test_configitem',
              'test_config_noastropy_fallback',
              'test_no_home',
              'test_path',
              'test_rename_path',
              'test_data_name_third_party_package',
              'test_pkg_finder',
              'test_wcsapi_extension',
              'test_find_current_module_bundle',
              'test_download_parallel_fills_cache']

# Run the tests!
sys.exit(pytest.main(['astropy_tests',
                      '-k ' + ' and '.join('not ' + test for test in SKIP_TESTS)],
                     plugins=['pytest_doctestplus.plugin',
                              'pytest_openfiles.plugin',
                              'pytest_remotedata.plugin',
                              'pytest_mpl.plugin',
                              'pytest_astropy_header.display']))
| bsd-3-clause |
thjashin/tensorflow | tensorflow/contrib/factorization/__init__.py | 37 | 1192 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops and modules related to factorization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.factorization.python.ops.clustering_ops import *
from tensorflow.contrib.factorization.python.ops.factorization_ops import *
from tensorflow.contrib.factorization.python.ops.gmm import *
from tensorflow.contrib.factorization.python.ops.gmm_ops import *
# pylint: enable=wildcard-import
| apache-2.0 |
SteveXiSong/ECE757-SnoopingPredictions | src/arch/x86/isa/insts/simd128/integer/save_and_restore_state/save_and_restore_state.py | 20 | 6635 | # Copyright (c) 2013 Andreas Sandberg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
# Register usage:
# t1, t2 == temporaries
# t7 == base address (RIP or SIB)
loadX87RegTemplate = '''
ld t1, seg, %(mode)s, "DISPLACEMENT + 32 + 16 * %(idx)i", dataSize=8
ld t2, seg, %(mode)s, "DISPLACEMENT + 32 + 16 * %(idx)i + 8", dataSize=2
cvtint_fp80 st(%(idx)i), t1, t2
'''
storeX87RegTemplate = '''
cvtfp80h_int t1, st(%(idx)i)
cvtfp80l_int t2, st(%(idx)i)
st t1, seg, %(mode)s, "DISPLACEMENT + 32 + 16 * %(idx)i", dataSize=8
st t2, seg, %(mode)s, "DISPLACEMENT + 32 + 16 * %(idx)i + 8", dataSize=2
'''
loadXMMRegTemplate = '''
ldfp "InstRegIndex(FLOATREG_XMM_LOW(%(idx)i))", seg, %(mode)s, \
"DISPLACEMENT + 160 + 16 * %(idx)i", dataSize=8
ldfp "InstRegIndex(FLOATREG_XMM_HIGH(%(idx)i))", seg, %(mode)s, \
"DISPLACEMENT + 160 + 16 * %(idx)i + 8", dataSize=8
'''
storeXMMRegTemplate = '''
stfp "InstRegIndex(FLOATREG_XMM_LOW(%(idx)i))", seg, %(mode)s, \
"DISPLACEMENT + 160 + 16 * %(idx)i", dataSize=8
stfp "InstRegIndex(FLOATREG_XMM_HIGH(%(idx)i))", seg, %(mode)s, \
"DISPLACEMENT + 160 + 16 * %(idx)i + 8", dataSize=8
'''
loadAllDataRegs = \
"".join([loadX87RegTemplate % { "idx" : i, "mode" : "%(mode)s" }
for i in range(8)]) + \
"".join([loadXMMRegTemplate % { "idx" : i, "mode" : "%(mode)s" }
for i in range(16)])
storeAllDataRegs = \
"".join([storeX87RegTemplate % { "idx" : i, "mode" : "%(mode)s" }
for i in range(8)]) + \
"".join([storeXMMRegTemplate % { "idx" : i, "mode" : "%(mode)s" }
for i in range(16)])
fxsaveCommonTemplate = """
rdval t1, fcw
st t1, seg, %(mode)s, "DISPLACEMENT + 0", dataSize=2
# FSW includes TOP when read
rdval t1, fsw
st t1, seg, %(mode)s, "DISPLACEMENT + 2", dataSize=2
# FTW
rdxftw t1
st t1, seg, %(mode)s, "DISPLACEMENT + 4", dataSize=1
rdval t1, "InstRegIndex(MISCREG_FOP)"
st t1, seg, %(mode)s, "DISPLACEMENT + 6", dataSize=2
rdval t1, "InstRegIndex(MISCREG_MXCSR)"
st t1, seg, %(mode)s, "DISPLACEMENT + 16 + 8", dataSize=4
# MXCSR_MASK, software assumes the default (0xFFBF) if 0.
limm t1, 0xFFFF
st t1, seg, %(mode)s, "DISPLACEMENT + 16 + 12", dataSize=4
""" + storeAllDataRegs
fxsave32Template = """
rdval t1, "InstRegIndex(MISCREG_FIOFF)"
st t1, seg, %(mode)s, "DISPLACEMENT + 8", dataSize=4
rdval t1, "InstRegIndex(MISCREG_FISEG)"
st t1, seg, %(mode)s, "DISPLACEMENT + 12", dataSize=2
rdval t1, "InstRegIndex(MISCREG_FOOFF)"
st t1, seg, %(mode)s, "DISPLACEMENT + 16 + 0", dataSize=4
rdval t1, "InstRegIndex(MISCREG_FOSEG)"
st t1, seg, %(mode)s, "DISPLACEMENT + 16 + 4", dataSize=2
"""
fxsave64Template = """
rdval t1, "InstRegIndex(MISCREG_FIOFF)"
st t1, seg, %(mode)s, "DISPLACEMENT + 8", dataSize=8
rdval t1, "InstRegIndex(MISCREG_FOOFF)"
st t1, seg, %(mode)s, "DISPLACEMENT + 16 + 0", dataSize=8
"""
fxrstorCommonTemplate = """
ld t1, seg, %(mode)s, "DISPLACEMENT + 0", dataSize=2
wrval fcw, t1
# FSW includes TOP when read
ld t1, seg, %(mode)s, "DISPLACEMENT + 2", dataSize=2
wrval fsw, t1
srli t1, t1, 11, dataSize=2
andi t1, t1, 0x7, dataSize=2
wrval "InstRegIndex(MISCREG_X87_TOP)", t1
# FTW
ld t1, seg, %(mode)s, "DISPLACEMENT + 4", dataSize=1
wrxftw t1
ld t1, seg, %(mode)s, "DISPLACEMENT + 6", dataSize=2
wrval "InstRegIndex(MISCREG_FOP)", t1
ld t1, seg, %(mode)s, "DISPLACEMENT + 16 + 8", dataSize=4
wrval "InstRegIndex(MISCREG_MXCSR)", t1
""" + loadAllDataRegs
fxrstor32Template = """
ld t1, seg, %(mode)s, "DISPLACEMENT + 8", dataSize=4
wrval "InstRegIndex(MISCREG_FIOFF)", t1
ld t1, seg, %(mode)s, "DISPLACEMENT + 12", dataSize=2
wrval "InstRegIndex(MISCREG_FISEG)", t1
ld t1, seg, %(mode)s, "DISPLACEMENT + 16 + 0", dataSize=4
wrval "InstRegIndex(MISCREG_FOOFF)", t1
ld t1, seg, %(mode)s, "DISPLACEMENT + 16 + 4", dataSize=2
wrval "InstRegIndex(MISCREG_FOSEG)", t1
"""
fxrstor64Template = """
limm t2, 0, dataSize=8
ld t1, seg, %(mode)s, "DISPLACEMENT + 8", dataSize=8
wrval "InstRegIndex(MISCREG_FIOFF)", t1
wrval "InstRegIndex(MISCREG_FISEG)", t2
ld t1, seg, %(mode)s, "DISPLACEMENT + 16 + 0", dataSize=8
wrval "InstRegIndex(MISCREG_FOOFF)", t1
wrval "InstRegIndex(MISCREG_FOSEG)", t2
"""
microcode = '''
def macroop FXSAVE_M {
''' + fxsave32Template % { "mode" : "sib" } + '''
};
def macroop FXSAVE_P {
rdip t7
''' + fxsave32Template % { "mode" : "riprel" } + '''
};
def macroop FXSAVE64_M {
''' + fxsave64Template % { "mode" : "sib" } + '''
};
def macroop FXSAVE64_P {
rdip t7
''' + fxsave64Template % { "mode" : "riprel" } + '''
};
def macroop FXRSTOR_M {
''' + fxrstor32Template % { "mode" : "sib" } + '''
};
def macroop FXRSTOR_P {
rdip t7
''' + fxrstor32Template % { "mode" : "riprel" } + '''
};
def macroop FXRSTOR64_M {
''' + fxrstor64Template % { "mode" : "sib" } + '''
};
def macroop FXRSTOR64_P {
rdip t7
''' + fxrstor64Template % { "mode" : "riprel" } + '''
};
'''
| bsd-3-clause |
pedro2d10/SickRage-FR | lib/httplib2/iri2uri.py | 885 | 3850 | """
iri2uri
Converts an IRI to a URI.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
__history__ = """
"""
import urlparse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to enocde and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
# (low, high) code-point ranges, sorted ascending, that make up the
# 'ucschar' and 'iprivate' productions of RFC 3987 (see comment above).
# Characters inside any range must be %-escaped when converting IRI->URI.
escape_range = [
    (0xA0, 0xD7FF ),
    (0xE000, 0xF8FF ),
    (0xF900, 0xFDCF ),
    (0xFDF0, 0xFFEF),
    (0x10000, 0x1FFFD ),
    (0x20000, 0x2FFFD ),
    (0x30000, 0x3FFFD),
    (0x40000, 0x4FFFD ),
    (0x50000, 0x5FFFD ),
    (0x60000, 0x6FFFD),
    (0x70000, 0x7FFFD ),
    (0x80000, 0x8FFFD ),
    (0x90000, 0x9FFFD),
    (0xA0000, 0xAFFFD ),
    (0xB0000, 0xBFFFD ),
    (0xC0000, 0xCFFFD),
    (0xD0000, 0xDFFFD ),
    (0xE1000, 0xEFFFD),
    (0xF0000, 0xFFFFD ),
    (0x100000, 0x10FFFD)
]
def encode(c):
    """Return c percent-encoded (one escape per UTF-8 octet) when its
    code point falls inside one of the RFC 3987 escape ranges, otherwise
    return c unchanged."""
    code_point = ord(c)
    for low, high in escape_range:
        if code_point < low:
            # Ranges are sorted ascending, so no later range can match.
            break
        if low <= code_point <= high:
            return "".join(["%%%2X" % ord(octet) for octet in c.encode('utf-8')])
    return c
def iri2uri(uri):
    """Convert an IRI to a URI. Note that IRIs must be
    passed in a unicode strings. That is, do not utf-8 encode
    the IRI before passing it into the function."""
    # Python-2-only code path: 'unicode' and the 'urlparse' module do not
    # exist under Python 3.  Byte strings are returned untouched.
    if isinstance(uri ,unicode):
        (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)
        # The host part is encoded with IDNA (punycode) rather than %-escaped.
        authority = authority.encode('idna')
        # For each character in 'ucschar' or 'iprivate'
        # 1. encode as utf-8
        # 2. then %-encode each octet of that utf-8
        uri = urlparse.urlunsplit((scheme, authority, path, query, fragment))
        uri = "".join([encode(c) for c in uri])
    return uri
# Minimal self-test; run this module directly to execute it.
if __name__ == "__main__":
    import unittest
    class Test(unittest.TestCase):
        def test_uris(self):
            """Test that URIs are invariant under the transformation."""
            invariant = [
                u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
                u"http://www.ietf.org/rfc/rfc2396.txt",
                u"ldap://[2001:db8::7]/c=GB?objectClass?one",
                u"mailto:John.Doe@example.com",
                u"news:comp.infosystems.www.servers.unix",
                u"tel:+1-816-555-1212",
                u"telnet://192.0.2.16:80/",
                u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
            for uri in invariant:
                self.assertEqual(uri, iri2uri(uri))
        def test_iri(self):
            """ Test that the right type of escaping is done for each part of the URI."""
            self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
            self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
            self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
            self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
            self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))
    unittest.main()
| gpl-3.0 |
Quintin-Z/rt-thread | tools/win32spawn.py | 66 | 6761 | #
# File : win32spawn.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2015, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2015-01-20 Bernard Add copyright information
#
import os
import threading
import Queue
# Windows import
import win32file
import win32pipe
import win32api
import win32con
import win32security
import win32process
import win32event
class Win32Spawn(object):
    """Run a command through %COMSPEC% on Windows using the raw win32
    API, streaming the child's stdout/stderr into a thread-safe queue."""
    def __init__(self, cmd, shell=False):
        # NOTE(review): 'shell' is accepted but never used -- the command
        # is always wrapped with "%COMSPEC% /c".
        self.queue = Queue.Queue()
        self.is_terminated = False
        # Auto-reset event used to ask a blocked wait() to kill the child.
        self.wake_up_event = win32event.CreateEvent(None, 0, 0, None)
        exec_dir = os.getcwd()
        comspec = os.environ.get("COMSPEC", "cmd.exe")
        cmd = comspec + ' /c ' + cmd
        win32event.ResetEvent(self.wake_up_event)
        currproc = win32api.GetCurrentProcess()
        # Create inheritable pipes for the child's stdio; keep
        # non-inheritable duplicates for ourselves and close the
        # originals so only the child holds the inheritable ends.
        sa = win32security.SECURITY_ATTRIBUTES()
        sa.bInheritHandle = 1
        child_stdout_rd, child_stdout_wr = win32pipe.CreatePipe(sa, 0)
        child_stdout_rd_dup = win32api.DuplicateHandle(currproc, child_stdout_rd, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(child_stdout_rd)
        child_stderr_rd, child_stderr_wr = win32pipe.CreatePipe(sa, 0)
        child_stderr_rd_dup = win32api.DuplicateHandle(currproc, child_stderr_rd, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(child_stderr_rd)
        child_stdin_rd, child_stdin_wr = win32pipe.CreatePipe(sa, 0)
        child_stdin_wr_dup = win32api.DuplicateHandle(currproc, child_stdin_wr, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(child_stdin_wr)
        startup_info = win32process.STARTUPINFO()
        startup_info.hStdInput = child_stdin_rd
        startup_info.hStdOutput = child_stdout_wr
        startup_info.hStdError = child_stderr_wr
        startup_info.dwFlags = win32process.STARTF_USESTDHANDLES
        cr_flags = 0
        cr_flags = win32process.CREATE_NEW_PROCESS_GROUP
        env = os.environ.copy()
        self.h_process, h_thread, dw_pid, dw_tid = win32process.CreateProcess(None, cmd, None, None, 1,
                                                                              cr_flags, env, os.path.abspath(exec_dir),
                                                                              startup_info)
        win32api.CloseHandle(h_thread)
        # The child now owns its ends of the pipes; drop our copies.
        win32file.CloseHandle(child_stdin_rd)
        win32file.CloseHandle(child_stdout_wr)
        win32file.CloseHandle(child_stderr_wr)
        self.__child_stdout = child_stdout_rd_dup
        self.__child_stderr = child_stderr_rd_dup
        self.__child_stdin = child_stdin_wr_dup
        self.exit_code = -1
    def close(self):
        """Close every handle still held by this object."""
        win32file.CloseHandle(self.__child_stdout)
        win32file.CloseHandle(self.__child_stderr)
        win32file.CloseHandle(self.__child_stdin)
        win32api.CloseHandle(self.h_process)
        win32api.CloseHandle(self.wake_up_event)
    # NOTE(review): the two defs below are missing the 'self' parameter,
    # so calling them as instance methods raises TypeError (and 'self'
    # in their bodies would be unbound anyway).  They look like they
    # should read 'def kill_subprocess(self):' / 'def sleep(self, secs):'
    # -- confirm against callers before changing.
    def kill_subprocess():
        win32event.SetEvent(self.wake_up_event)
    def sleep(secs):
        win32event.ResetEvent(self.wake_up_event)
        timeout = int(1000 * secs)
        val = win32event.WaitForSingleObject(self.wake_up_event, timeout)
        if val == win32event.WAIT_TIMEOUT:
            return True
        else:
            # The wake_up_event must have been signalled
            return False
    def get(self, block=True, timeout=None):
        """Pop one buffered chunk of child output (see Queue.get)."""
        return self.queue.get(block=block, timeout=timeout)
    def qsize(self):
        # Number of output chunks currently buffered.
        return self.queue.qsize()
    def __wait_for_child(self):
        """Block until the child exits or a kill is requested; return
        True for natural exit, False if the child was terminated."""
        # kick off threads to read from stdout and stderr of the child process
        threading.Thread(target=self.__do_read, args=(self.__child_stdout, )).start()
        threading.Thread(target=self.__do_read, args=(self.__child_stderr, )).start()
        while True:
            # block waiting for the process to finish or the interrupt to happen
            handles = (self.wake_up_event, self.h_process)
            val = win32event.WaitForMultipleObjects(handles, 0, win32event.INFINITE)
            if val >= win32event.WAIT_OBJECT_0 and val < win32event.WAIT_OBJECT_0 + len(handles):
                handle = handles[val - win32event.WAIT_OBJECT_0]
                if handle == self.wake_up_event:
                    win32api.TerminateProcess(self.h_process, 1)
                    win32event.ResetEvent(self.wake_up_event)
                    return False
                elif handle == self.h_process:
                    # the process has ended naturally
                    return True
                else:
                    assert False, "Unknown handle fired"
            else:
                assert False, "Unexpected return from WaitForMultipleObjects"
    # Wait for job to finish. Since this method blocks, it can to be called from another thread.
    # If the application wants to kill the process, it should call kill_subprocess().
    def wait(self):
        """Block until the child finishes; True iff it exited with 0."""
        if not self.__wait_for_child():
            # it's been killed
            result = False
        else:
            # normal termination
            self.exit_code = win32process.GetExitCodeProcess(self.h_process)
            result = self.exit_code == 0
        self.close()
        self.is_terminated = True
        return result
    # This method gets called on a worker thread to read from either a stderr
    # or stdout thread from the child process.
    def __do_read(self, handle):
        bytesToRead = 1024
        while 1:
            try:
                finished = 0
                hr, data = win32file.ReadFile(handle, bytesToRead, None)
                if data:
                    self.queue.put_nowait(data)
            except win32api.error:
                # Pipe closed: the child exited, stop reading.
                finished = 1
            if finished:
                return
    def start_pipe(self):
        """Run wait() on a background thread so the caller isn't blocked."""
        def worker(pipe):
            return pipe.wait()
        thrd = threading.Thread(target=worker, args=(self, ))
        thrd.start()
| gpl-2.0 |
Nikolas1814/Code4Bulgaria | webServer/getSignals/views.py | 1 | 2967 | from django.views.decorators.csrf import csrf_exempt
import json

from django.db.models import Q
from django.http import Http404, HttpResponse
from django.shortcuts import render
from django.templatetags.static import static

from register.models import AgencyList
from register.models import Petitions
@csrf_exempt
def getSignals(request):
    """Return, as JSON text, up to a page of petition signals for the
    agency named in the POSTed JSON body.

    Body keys: "agencyName", and "minimalId" (-1 requests the newest
    page; otherwise only petitions with id <= minimalId are paged).
    """
    if request.method != 'POST':
        # Http404 is imported from django.http at the top of the module.
        raise Http404('Only POSTs are allowed')
    requestJson = json.loads(request.body)
    print requestJson
    # Default answer when the agency is unknown.
    response = '{"status" : "no_such_category"}'
    if AgencyList.objects.filter(AgencyName = requestJson["agencyName"]):
        if int(requestJson["minimalId"]) == -1:
            response = getElements(requestJson["agencyName"])
        else:
            response = getElementsById(requestJson["agencyName"], requestJson["minimalId"])
    print response
    return HttpResponse(response, content_type='text/json')
def getElements(agencyName):
    """Serialize the newest petitions for agencyName into the ad-hoc
    JSON page format consumed by the mobile client.

    NOTE(review): the JSON is built by plain string concatenation with
    no escaping, so petition text containing '"' yields invalid JSON --
    consider json.dumps.
    NOTE(review): the loop breaks on the 6th row *before* serializing
    it, so a full page carries 5 signals and "minimalId" names the 6th
    row, which is then re-delivered as the first row of the next page
    (fetched with id__lte in getElementsById).  Confirm this paging
    overlap is intended.
    """
    status = '{'
    signals = 0
    minId = 0
    for index in Petitions.objects.filter(AgencyName = agencyName).order_by("-date"):
        signals +=1
        minId = index.id
        if(signals == 6):
            break
        status += '"signal' + str(signals) + '" : '
        status += '{"petitionType" : "' + index.petitionType + '", "petitionDescription" : "' + index.petitionDescription
        status += '", "citizenName" : "' + index.citizenName + '", "citizenMiddleName" : "' + index.citizenMiddleName
        status += '", "citizenLastName" : "' + index.citizenLastName + '", "citizenAddres" : "' + index.citizenAddres
        status += '", "date" : "' + str(index.date) + '", "petition" : "' + index.petition
        status += '", "petitionData" : "' + static(index.petitionData) + '", "latitude" : "' + str(index.latitude)
        status += '", "longitude" : "' + str(index.longitude) + '"}'
        status += ', '
    if signals < 6:
        status += '"status" : "no_new_signals"}'
    else:
        status = status[:-2] #delete last comma
        status += ', "minimalId" : "' + str(minId) + '"}'
    return status
def getElementsById(agencyName,Id):
    """Same serialization as getElements, but page from the petition
    whose id is Id downwards (id__lte), for fetching older pages.

    NOTE(review): this duplicates getElements almost verbatim -- the two
    should share a serializer helper.  Same unescaped-JSON and
    break-before-sixth-row caveats as getElements.
    """
    status = '{'
    signals = 0
    minId = 0
    for index in Petitions.objects.filter(AgencyName = agencyName, id__lte= Id).order_by("-date"):
        signals +=1
        minId = index.id
        if(signals == 6):
            break
        status += '"signal' + str(signals) + '" : '
        status += '{"petitionType" : "' + index.petitionType + '", "petitionDescription" : "' + index.petitionDescription
        status += '", "citizenName" : "' + index.citizenName + '", "citizenMiddleName" : "' + index.citizenMiddleName
        status += '", "citizenLastName" : "' + index.citizenLastName + '", "citizenAddres" : "' + index.citizenAddres
        status += '", "date" : "' + str(index.date) + '", "petition" : "' + index.petition
        status += '", "petitionData" : "' + static(index.petitionData) + '", "latitude" : "' + str(index.latitude)
        status += '", "longitude" : "' + str(index.longitude) + '"}'
        status += ', '
    if signals < 6:
        status += '"status" : "no_new_signals"}'
    else:
        status = status[:-2] #delete last comma
        status += ', "minimalId" : "' + str(minId) + '"}'
    return status
nekulin/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/pipes.py | 59 | 9638 | """Conversion pipeline templates.
The problem:
------------
Suppose you have some data that you want to convert to another format,
such as from GIF image format to PPM image format. Maybe the
conversion involves several steps (e.g. piping it through compress or
uuencode). Some of the conversion steps may require that their input
is a disk file, others may be able to read standard input; similar for
their output. The input to the entire conversion may also be read
from a disk file or from an open file, and similar for its output.
The module lets you construct a pipeline template by sticking one or
more conversion steps together. It will take care of creating and
removing temporary files if they are necessary to hold intermediate
data. You can then use the template to do conversions from many
different sources to many different destinations. The temporary
file names used are different each time the template is used.
The templates are objects so you can create templates for many
different conversion steps and store them in a dictionary, for
instance.
Directions:
-----------
To create a template:
t = Template()
To add a conversion step to a template:
t.append(command, kind)
where kind is a string of two characters: the first is '-' if the
command reads its standard input or 'f' if it requires a file; the
second likewise for the output. The command must be valid /bin/sh
syntax. If input or output files are required, they are passed as
$IN and $OUT; otherwise, it must be possible to use the command in
a pipeline.
To add a conversion step at the beginning:
t.prepend(command, kind)
To convert a file to another file using a template:
sts = t.copy(infile, outfile)
If infile or outfile are the empty string, standard input is read or
standard output is written, respectively. The return value is the
exit status of the conversion pipeline.
To open a file for reading or writing through a conversion pipeline:
fp = t.open(file, mode)
where mode is 'r' to read the file, or 'w' to write it -- just like
for the built-in function open() or for os.popen().
To create a new template object initialized to a given one:
t2 = t.clone()
For an example, see the function test() at the end of the file.
""" # '
import re
import os
import tempfile
import string
__all__ = ["Template"]
# Conversion step kinds.  The two characters encode how a step consumes
# its input and produces its output: 'f' = needs a real file,
# '-' = can use stdin/stdout, '.' = no stream on that side.
FILEIN_FILEOUT = 'ff' # Must read & write real files
STDIN_FILEOUT = '-f' # Must write a real file
FILEIN_STDOUT = 'f-' # Must read a real file
STDIN_STDOUT = '--' # Normal pipeline element
SOURCE = '.-' # Must be first, writes stdout
SINK = '-.' # Must be last, reads stdin
stepkinds = [FILEIN_FILEOUT, STDIN_FILEOUT, FILEIN_STDOUT, STDIN_STDOUT, \
             SOURCE, SINK]
class Template:
    """Class representing a pipeline template."""
    # NOTE: Python-2-only code ('raise Exc, msg' syntax, print statement).
    def __init__(self):
        """Template() returns a fresh pipeline template."""
        self.debugging = 0
        self.reset()
    def __repr__(self):
        """t.__repr__() implements repr(t)."""
        return '<Template instance, steps=%r>' % (self.steps,)
    def reset(self):
        """t.reset() restores a pipeline template to its initial state."""
        self.steps = []
    def clone(self):
        """t.clone() returns a new pipeline template with identical
        initial state as the current one."""
        t = Template()
        t.steps = self.steps[:]
        t.debugging = self.debugging
        return t
    def debug(self, flag):
        """t.debug(flag) turns debugging on or off."""
        self.debugging = flag
    def append(self, cmd, kind):
        """t.append(cmd, kind) adds a new step at the end."""
        # Validate cmd/kind and the pipeline invariants: SOURCE may only
        # be first, SINK only last, and file-using steps must reference
        # the $IN/$OUT shell variables substituted by makepipeline().
        if type(cmd) is not type(''):
            raise TypeError, \
                  'Template.append: cmd must be a string'
        if kind not in stepkinds:
            raise ValueError, \
                  'Template.append: bad kind %r' % (kind,)
        if kind == SOURCE:
            raise ValueError, \
                  'Template.append: SOURCE can only be prepended'
        if self.steps and self.steps[-1][1] == SINK:
            raise ValueError, \
                  'Template.append: already ends with SINK'
        if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
            raise ValueError, \
                  'Template.append: missing $IN in cmd'
        if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
            raise ValueError, \
                  'Template.append: missing $OUT in cmd'
        self.steps.append((cmd, kind))
    def prepend(self, cmd, kind):
        """t.prepend(cmd, kind) adds a new step at the front."""
        # Mirror image of append(): same checks with SOURCE/SINK swapped.
        if type(cmd) is not type(''):
            raise TypeError, \
                  'Template.prepend: cmd must be a string'
        if kind not in stepkinds:
            raise ValueError, \
                  'Template.prepend: bad kind %r' % (kind,)
        if kind == SINK:
            raise ValueError, \
                  'Template.prepend: SINK can only be appended'
        if self.steps and self.steps[0][1] == SOURCE:
            raise ValueError, \
                  'Template.prepend: already begins with SOURCE'
        if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
            raise ValueError, \
                  'Template.prepend: missing $IN in cmd'
        if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
            raise ValueError, \
                  'Template.prepend: missing $OUT in cmd'
        self.steps.insert(0, (cmd, kind))
    def open(self, file, rw):
        """t.open(file, rw) returns a pipe or file object open for
        reading or writing; the file is the other end of the pipeline."""
        if rw == 'r':
            return self.open_r(file)
        if rw == 'w':
            return self.open_w(file)
        raise ValueError, \
              'Template.open: rw must be \'r\' or \'w\', not %r' % (rw,)
    def open_r(self, file):
        """t.open_r(file) and t.open_w(file) implement
        t.open(file, 'r') and t.open(file, 'w') respectively."""
        if not self.steps:
            return open(file, 'r')
        if self.steps[-1][1] == SINK:
            raise ValueError, \
                  'Template.open_r: pipeline ends width SINK'
        cmd = self.makepipeline(file, '')
        return os.popen(cmd, 'r')
    def open_w(self, file):
        # Writing counterpart of open_r().
        if not self.steps:
            return open(file, 'w')
        if self.steps[0][1] == SOURCE:
            raise ValueError, \
                  'Template.open_w: pipeline begins with SOURCE'
        cmd = self.makepipeline('', file)
        return os.popen(cmd, 'w')
    def copy(self, infile, outfile):
        # Run the whole pipeline via the shell; returns the exit status.
        return os.system(self.makepipeline(infile, outfile))
    def makepipeline(self, infile, outfile):
        # Build the shell command text; with debugging on, echo it and
        # run the shell with tracing enabled (set -x).
        cmd = makepipeline(infile, self.steps, outfile)
        if self.debugging:
            print cmd
            cmd = 'set -x; ' + cmd
        return cmd
def makepipeline(infile, steps, outfile):
    """Build the /bin/sh command text for a conversion pipeline.

    infile and outfile are filenames ('' means stdin / stdout) and
    steps is a list of (command, kind) pairs as held in Template.steps.
    Returns the complete shell script, including temp-file cleanup.
    """
    # Build a list with for each command:
    #   [input filename or '', command string, kind, output filename or '']
    # (Fix: the original named this local 'list', shadowing the builtin;
    # a later loop variable likewise shadowed 'file'.)
    cmds = []
    for cmd, kind in steps:
        cmds.append(['', cmd, kind, ''])
    #
    # Make sure there is at least one step
    #
    if not cmds:
        cmds.append(['', 'cat', '--', ''])
    #
    # Take care of the input and output ends
    #
    [cmd, kind] = cmds[0][1:3]
    if kind[0] == 'f' and not infile:
        # First step needs a real file but we're fed stdin: buffer it.
        cmds.insert(0, ['', 'cat', '--', ''])
    cmds[0][0] = infile
    #
    [cmd, kind] = cmds[-1][1:3]
    if kind[1] == 'f' and not outfile:
        # Last step writes a real file but we must emit stdout: buffer it.
        cmds.append(['', 'cat', '--', ''])
    cmds[-1][-1] = outfile
    #
    # Invent temporary files to connect stages that need files
    #
    garbage = []
    for i in range(1, len(cmds)):
        lkind = cmds[i-1][2]
        rkind = cmds[i][2]
        if lkind[1] == 'f' or rkind[0] == 'f':
            (fd, temp) = tempfile.mkstemp()
            os.close(fd)
            garbage.append(temp)
            cmds[i-1][-1] = cmds[i][0] = temp
    #
    # Decorate each command with $IN/$OUT assignments or redirections.
    for item in cmds:
        [inf, cmd, kind, outf] = item
        if kind[1] == 'f':
            cmd = 'OUT=' + quote(outf) + '; ' + cmd
        if kind[0] == 'f':
            cmd = 'IN=' + quote(inf) + '; ' + cmd
        if kind[0] == '-' and inf:
            cmd = cmd + ' <' + quote(inf)
        if kind[1] == '-' and outf:
            cmd = cmd + ' >' + quote(outf)
        item[1] = cmd
    #
    # Join stages: '|' when connected by a pipe, newline when the data
    # passes through a temp file instead.
    cmdlist = cmds[0][1]
    for item in cmds[1:]:
        [cmd, kind] = item[1:3]
        if item[0] == '':
            if 'f' in kind:
                # Group so the $IN/$OUT assignment stays with its command.
                cmd = '{ ' + cmd + '; }'
            cmdlist = cmdlist + ' |\n' + cmd
        else:
            cmdlist = cmdlist + '\n' + cmd
    #
    # Arrange for the temp files to be removed on exit or signal.
    if garbage:
        rmcmd = 'rm -f'
        for temp_name in garbage:
            rmcmd = rmcmd + ' ' + quote(temp_name)
        trapcmd = 'trap ' + quote(rmcmd + '; exit') + ' 1 2 3 13 14 15'
        cmdlist = trapcmd + '\n' + cmdlist + '\n' + rmcmd
    #
    return cmdlist
# Reliably quote a string as a single argument for /bin/sh
_safechars = string.ascii_letters + string.digits + '!@%_-+=:,./' # Safe unquoted
_funnychars = '"`$\\' # Unsafe inside "double quotes"
def quote(file):
    """Return *file* quoted so that /bin/sh treats it as one word.

    Strings made entirely of safe characters pass through untouched;
    strings without an apostrophe are wrapped in single quotes; anything
    else is wrapped in double quotes with the shell-special characters
    backslash-escaped.
    """
    if all(c in _safechars for c in file):
        return file
    if "'" not in file:
        return "'" + file + "'"
    escaped = []
    for ch in file:
        if ch in _funnychars:
            ch = '\\' + ch
        escaped.append(ch)
    return '"' + "".join(escaped) + '"'
| apache-2.0 |
shakamunyi/neutron-dvr | neutron/tests/unit/agent/l2population_rpc_base.py | 2 | 5420 | # Copyright (C) 2014 VA Linux Systems Japan K.K.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Fumihiko Kakuma, VA Linux Systems Japan K.K.
import collections
from neutron.agent import l2population_rpc
from neutron.plugins.openvswitch.agent import ovs_neutron_agent
from neutron.tests import base
class FakeNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin):
    """Concrete stand-in agent for the tests below: implements every
    abstract hook of L2populationRpcCallBackTunnelMixin as a no-op so
    the mixin's own logic can be exercised (and the hooks mocked)."""
    def fdb_add(self, context, fdb_entries):
        pass
    def fdb_remove(self, context, fdb_entries):
        pass
    def add_fdb_flow(self, port_info, remote_ip, lvm, ofport):
        pass
    def del_fdb_flow(self, port_info, remote_ip, lvm, ofport):
        pass
    def setup_tunnel_port(self, remote_ip, network_type):
        pass
    def cleanup_tunnel_port(self, tun_ofport, tunnel_type):
        pass
class TestL2populationRpcCallBackTunnelMixinBase(base.BaseTestCase):
    """Shared fixture data for l2population RPC callback tests.

    Builds three fake tunnel ports, three local VLAN mappings and the
    matching fdb/update dictionaries that subclasses feed into the mixin.
    """
    def setUp(self):
        super(TestL2populationRpcCallBackTunnelMixinBase, self).setUp()
        self.fakeagent = FakeNeutronAgent()
        # Lightweight records for a tunnel port and a local VLAN mapping.
        Port = collections.namedtuple('Port', 'ip, ofport')
        LVM = collections.namedtuple(
            'LVM', 'net, vlan, phys, segid, mac, ip, vif, port')
        self.local_ip = '127.0.0.1'
        self.type_gre = 'gre'
        # Three remote tunnel endpoints and the ofport lookup table keyed
        # by tunnel type then remote IP.
        self.ports = [Port(ip='10.1.0.1', ofport='ofport1'),
                      Port(ip='10.1.0.2', ofport='ofport2'),
                      Port(ip='10.1.0.3', ofport='ofport3')]
        self.ofports = {
            self.type_gre: {
                self.ports[0].ip: self.ports[0].ofport,
                self.ports[1].ip: self.ports[1].ofport,
                self.ports[2].ip: self.ports[2].ofport,
            }
        }
        # One LVM record per network; index i of self.lvms pairs with
        # index i of self.ports throughout the fixtures below.
        self.lvms = [LVM(net='net1', vlan=1, phys='phys1', segid='tun1',
                         mac='mac1', ip='1.1.1.1', vif='vifid1',
                         port='port1'),
                     LVM(net='net2', vlan=2, phys='phys2', segid='tun2',
                         mac='mac2', ip='2.2.2.2', vif='vifid2',
                         port='port2'),
                     LVM(net='net3', vlan=3, phys='phys3', segid='tun3',
                         mac='mac3', ip='3.3.3.3', vif='vifid3',
                         port='port3')]
        # agent IP -> list of [mac, ip] pairs known behind that agent.
        self.agent_ports = {
            self.ports[0].ip: [[self.lvms[0].mac, self.lvms[0].ip]],
            self.ports[1].ip: [[self.lvms[1].mac, self.lvms[1].ip]],
            self.ports[2].ip: [[self.lvms[2].mac, self.lvms[2].ip]],
        }
        # Full fdb payload as delivered by the l2population driver:
        # per network, the tunnel type/segment and the per-agent entries.
        self.fdb_entries1 = {
            self.lvms[0].net: {
                'network_type': self.type_gre,
                'segment_id': self.lvms[0].segid,
                'ports': {
                    self.local_ip: [],
                    self.ports[0].ip: [[self.lvms[0].mac, self.lvms[0].ip]]},
            },
            self.lvms[1].net: {
                'network_type': self.type_gre,
                'segment_id': self.lvms[1].segid,
                'ports': {
                    self.local_ip: [],
                    self.ports[1].ip: [[self.lvms[1].mac, self.lvms[1].ip]]},
            },
            self.lvms[2].net: {
                'network_type': self.type_gre,
                'segment_id': self.lvms[2].segid,
                'ports': {
                    self.local_ip: [],
                    self.ports[2].ip: [[self.lvms[2].mac, self.lvms[2].ip]]},
            },
        }
        # Matching LocalVLANMapping objects and the net -> LVM lookup.
        self.lvm1 = ovs_neutron_agent.LocalVLANMapping(
            self.lvms[0].vlan, self.type_gre, self.lvms[0].phys,
            self.lvms[0].segid, {self.lvms[0].vif: self.lvms[0].port})
        self.lvm2 = ovs_neutron_agent.LocalVLANMapping(
            self.lvms[1].vlan, self.type_gre, self.lvms[1].phys,
            self.lvms[1].segid, {self.lvms[1].vif: self.lvms[1].port})
        self.lvm3 = ovs_neutron_agent.LocalVLANMapping(
            self.lvms[2].vlan, self.type_gre, self.lvms[2].phys,
            self.lvms[2].segid, {self.lvms[2].vif: self.lvms[2].port})
        self.local_vlan_map1 = {
            self.lvms[0].net: self.lvm1,
            self.lvms[1].net: self.lvm2,
            self.lvms[2].net: self.lvm3,
        }
        # 'chg_ip' update payload: per network / per agent, the [mac, ip]
        # pairs before and after the change.
        self.upd_fdb_entry1_val = {
            self.lvms[0].net: {
                self.ports[0].ip: {
                    'before': [[self.lvms[0].mac, self.lvms[0].ip]],
                    'after': [[self.lvms[1].mac, self.lvms[1].ip]],
                },
                self.ports[1].ip: {
                    'before': [[self.lvms[0].mac, self.lvms[0].ip]],
                    'after': [[self.lvms[1].mac, self.lvms[1].ip]],
                },
            },
            self.lvms[1].net: {
                self.ports[2].ip: {
                    'before': [[self.lvms[0].mac, self.lvms[0].ip]],
                    'after': [[self.lvms[2].mac, self.lvms[2].ip]],
                },
            },
        }
        self.upd_fdb_entry1 = {'chg_ip': self.upd_fdb_entry1_val}
| apache-2.0 |
thisisshi/cloud-custodian | c7n/resources/secretsmanager.py | 1 | 3412 | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from c7n.manager import resources
from c7n.filters import iamaccess
from c7n.query import QueryResourceManager, TypeInfo
from c7n.tags import RemoveTag, Tag, TagActionFilter, TagDelayedAction
from c7n.utils import local_session
@resources.register('secrets-manager')
class SecretsManager(QueryResourceManager):
    """Cloud Custodian resource for AWS Secrets Manager secrets."""

    permissions = ('secretsmanager:ListSecretVersionIds',)

    class resource_type(TypeInfo):
        # Declarative description of how to enumerate and describe secrets.
        service = 'secretsmanager'
        enum_spec = ('list_secrets', 'SecretList', None)
        detail_spec = ('describe_secret', 'SecretId', 'Name', None)
        config_type = cfn_type = 'AWS::SecretsManager::Secret'
        name = id = 'Name'
        arn = 'ARN'


# Let policies filter secrets whose mark-for-op tag has come due.
SecretsManager.filter_registry.register('marked-for-op', TagActionFilter)
@SecretsManager.filter_registry.register('cross-account')
class CrossAccountAccessFilter(iamaccess.CrossAccountAccessFilter):
    """Filter secrets whose resource policy grants cross-account access."""

    policy_annotation = "c7n:AccessPolicy"
    permissions = ("secretsmanager:GetResourcePolicy",)

    def process(self, resources, event=None):
        session = local_session(self.manager.session_factory)
        self.client = session.client('secretsmanager')
        return super(CrossAccountAccessFilter, self).process(resources)

    def get_resource_policy(self, r):
        """Return the secret's resource policy, caching it on the resource."""
        try:
            return r[self.policy_annotation]
        except KeyError:
            pass
        policy = self.client.get_resource_policy(
            SecretId=r['Name']).get('ResourcePolicy', None)
        r[self.policy_annotation] = policy
        return policy
@SecretsManager.action_registry.register('tag')
class TagSecretsManagerResource(Tag):
    """Action to create tag(s) on a Secret resource

    :example:

    .. code-block:: yaml

        policies:
          - name: tag-secret
            resource: secrets-manager
            actions:
              - type: tag
                key: tag-key
                value: tag-value
    """

    permissions = ('secretsmanager:TagResource',)

    def process_resource_set(self, client, resources, new_tags):
        # TagResource replaces nothing implicitly, so merge the existing
        # tags with the new ones (new values win) before each call.
        for resource in resources:
            merged = {t['Key']: t['Value'] for t in resource.get('Tags', ())}
            merged.update({t['Key']: t['Value'] for t in new_tags})
            client.tag_resource(
                SecretId=resource['ARN'],
                Tags=[{'Key': k, 'Value': v} for k, v in merged.items()])
@SecretsManager.action_registry.register('remove-tag')
class RemoveTagSecretsManagerResource(RemoveTag):
    """Action to remove tag(s) on a Secret resource

    :example:

    .. code-block:: yaml

        policies:
          - name: untag-secret
            resource: secrets-manager
            actions:
              - type: remove-tag
                tags: ['tag-to-be-removed']
    """

    permissions = ('secretsmanager:UntagResource',)

    def process_resource_set(self, client, resources, keys):
        # Untag each secret by ARN; keys is the list of tag keys to drop.
        for resource in resources:
            client.untag_resource(SecretId=resource['ARN'], TagKeys=keys)
@SecretsManager.action_registry.register('mark-for-op')
class MarkSecretForOp(TagDelayedAction):
    """Action to mark a Secret resource for a deferred (delayed) action.

    :example:

    .. code-block:: yaml

        policies:
          - name: mark-secret-for-delete
            resource: secrets-manager
            actions:
              - type: mark-for-op
                op: tag
                days: 1
    """
| apache-2.0 |
julien78910/CouchPotatoServer | libs/xmpp/debug.py | 207 | 14069 | ## debug.py
##
## Copyright (C) 2003 Jacob Lundqvist
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published
## by the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
_version_ = '1.4.0'
"""\
Generic debug class
Other modules can always define extra debug flags for local usage, as long as
they make sure they append them to debug_flags
Also it's always a good thing to prefix local flags with something, to reduce the risk
of colliding flags. Nothing breaks if two flags are identical, but it might
activate unintended debugging.
Flags can be numeric, but that makes analysis harder: on creation it's
not obvious what is activated, and when flag_show is given, the output isn't
really meaningful.
This Debug class can either be initialized and used at app level, or used independently
by the individual classes.
For samples of usage, see samples subdir in distro source, and selftest
in this code
"""
import sys
import traceback
import time
import os
import types
# Enable ANSI colour output only when a TERM environment variable is set.
# dict.has_key() was removed in Python 3; the `in` operator works on both
# Python 2 and 3 and reads better.
colors_enabled = 'TERM' in os.environ
# ANSI escape sequences for terminal colours; chr(27) is ESC.  Used by
# Debug.Show() when colors_enabled is true.
color_none = chr(27) + "[0m"
color_black = chr(27) + "[30m"
color_red = chr(27) + "[31m"
color_green = chr(27) + "[32m"
color_brown = chr(27) + "[33m"
color_blue = chr(27) + "[34m"
color_magenta = chr(27) + "[35m"
color_cyan = chr(27) + "[36m"
color_light_gray = chr(27) + "[37m"
color_dark_gray = chr(27) + "[30;1m"
color_bright_red = chr(27) + "[31;1m"
color_bright_green = chr(27) + "[32;1m"
color_yellow = chr(27) + "[33;1m"
color_bright_blue = chr(27) + "[34;1m"
color_purple = chr(27) + "[35;1m"
color_bright_cyan = chr(27) + "[36;1m"
color_white = chr(27) + "[37;1m"
"""
Define your flags in yor modules like this:
from debug import *
DBG_INIT = 'init' ; debug_flags.append( DBG_INIT )
DBG_CONNECTION = 'connection' ; debug_flags.append( DBG_CONNECTION )
The reason for having a double statement is so we can validate params
and catch all undefined debug flags
This gives us control over all used flags, and makes it easier to allow
global debugging in your code, just do something like
foo = Debug( debug_flags )
group flags, that is a flag in it self containing multiple flags should be
defined without the debug_flags.append() sequence, since the parts are already
in the list, also they must of course be defined after the flags they depend on ;)
example:
DBG_MULTI = [ DBG_INIT, DBG_CONNECTION ]
NoDebug
-------
To speed code up, typically for product releases or such
use this class instead if you globaly want to disable debugging
"""
class NoDebug:
    """Drop-in replacement for Debug that silently discards everything.

    Swap it in (see the commented-out ``Debug=NoDebug`` at the bottom of
    the module) to remove all debugging overhead from a release build.
    """

    # No colour table when debugging is disabled.
    colors = {}

    def __init__(self, *args, **kwargs):
        # Keep the attribute other code may inspect, but track no flags.
        self.debug_flags = []

    def show(self, *args, **kwargs):
        """No-op stand-in for Debug.show()."""
        pass

    def Show(self, *args, **kwargs):
        """No-op stand-in for Debug.Show()."""
        pass

    def is_active(self, flag):
        """Report nothing as active (implicitly returns None)."""
        pass

    def active_set(self, active_flags=None):
        """Accept and ignore flag updates; 0 means nothing was set."""
        return 0


LINE_FEED = '\n'
class Debug:
    """Flag-driven debug logger (Python 2 code: `print` statements, `<>`,
    `has_key`, string exceptions).

    Output goes through show()/Show(); which messages appear is controlled
    by the set of active flags (active_set / is_active).
    """
    def __init__( self,
                  #
                  # active_flags are those that will trigger output
                  #
                  active_flags = None,
                  #
                  # Log file should be file object or file name
                  #
                  log_file = sys.stderr,
                  #
                  # prefix and sufix can either be set globally or per call.
                  # personally I use this to color code debug statements
                  # with prefix = chr(27) + '[34m'
                  # sufix = chr(27) + '[37;1m\n'
                  #
                  prefix = 'DEBUG: ',
                  sufix = '\n',
                  #
                  # If you want unix style timestamps,
                  #  0 disables timestamps
                  #  1 before prefix, good when prefix is a string
                  #  2 after prefix, good when prefix is a color
                  #
                  time_stamp = 0,
                  #
                  # flag_show should normally be off, but can be turned on to get a
                  # good view of what flags are actually used for calls,
                  # if it is not None, it should be a string
                  # flags for current call will be displayed
                  # with flag_show as separator
                  # recommended values would be '-' or ':', but any string goes
                  #
                  flag_show = None,
                  #
                  # If you dont want to validate flags on each call to
                  # show(), set this to 0
                  #
                  validate_flags = 1,
                  #
                  # If you dont want the welcome message, set to 0
                  # default is to show welcome if any flags are active
                  welcome = -1
                  ):
        self.debug_flags = []
        if welcome == -1:
            if active_flags and len(active_flags):
                welcome = 1
            else:
                welcome = 0
        self._remove_dupe_flags()
        if log_file:
            if type( log_file ) is type(''):
                try:
                    self._fh = open(log_file,'w')
                except:
                    # NOTE(review): the format string has a %s but no
                    # argument is supplied, so the literal text is printed.
                    print 'ERROR: can open %s for writing'
                    sys.exit(0)
            else: ## assume its a stream type object
                self._fh = log_file
        else:
            self._fh = sys.stdout
        if time_stamp not in (0,1,2):
            msg2 = '%s' % time_stamp
            # NOTE(review): raising a string is a pre-2.6 idiom; on any
            # modern interpreter this line itself raises TypeError.
            raise 'Invalid time_stamp param', msg2
        self.prefix = prefix
        self.sufix = sufix
        self.time_stamp = time_stamp
        self.flag_show = None # must be initialised after possible welcome
        self.validate_flags = validate_flags
        self.active_set( active_flags )
        if welcome:
            self.show('')
            caller = sys._getframe(1) # used to get name of caller
            try:
                mod_name= ":%s" % caller.f_locals['__name__']
            except:
                mod_name = ""
            self.show('Debug created for %s%s' % (caller.f_code.co_filename,
                                                  mod_name ))
            self.show(' flags defined: %s' % ','.join( self.active ))
        if type(flag_show) in (type(''), type(None)):
            self.flag_show = flag_show
        else:
            msg2 = '%s' % type(flag_show )
            raise 'Invalid type for flag_show!', msg2
    def show( self, msg, flag = None, prefix = None, sufix = None,
              lf = 0 ):
        """
        flag can be of following types:
            None - this msg will always be shown if any debugging is on
            flag - will be shown if flag is active
            (flag1,flag2,,,) - will be shown if any of the given flags
                               are active

        if prefix / sufix are not given, default ones from init will be used

        lf = -1 means strip linefeed if present
        lf = 1  means add linefeed if not present
        """
        if self.validate_flags:
            self._validate_flag( flag )
        if not self.is_active(flag):
            return
        if prefix:
            pre = prefix
        else:
            pre = self.prefix
        if sufix:
            suf = sufix
        else:
            suf = self.sufix
        if self.time_stamp == 2:
            output = '%s%s ' % ( pre,
                                 time.strftime('%b %d %H:%M:%S',
                                               time.localtime(time.time() )),
                                 )
        elif self.time_stamp == 1:
            output = '%s %s' % ( time.strftime('%b %d %H:%M:%S',
                                               time.localtime(time.time() )),
                                 pre,
                                 )
        else:
            output = pre
        if self.flag_show:
            if flag:
                output = '%s%s%s' % ( output, flag, self.flag_show )
            else:
                # this call uses the global default,
                # dont print "None", just show the separator
                output = '%s %s' % ( output, self.flag_show )
        output = '%s%s%s' % ( output, msg, suf )
        if lf:
            # strip/add lf if needed
            last_char = output[-1]
            if lf == 1 and last_char != LINE_FEED:
                output = output + LINE_FEED
            elif lf == -1 and last_char == LINE_FEED:
                output = output[:-1]
        try:
            self._fh.write( output )
        except:
            # unicode strikes again ;)  Fall back to a pure-ASCII copy of
            # the message, replacing non-ASCII characters with '?'.
            s=u''
            for i in range(len(output)):
                if ord(output[i]) < 128:
                    c = output[i]
                else:
                    c = '?'
                s=s+c
            self._fh.write( '%s%s%s' % ( pre, s, suf ))
        self._fh.flush()
    # NOTE(review): this definition is shadowed by the second is_active()
    # defined near the bottom of the class; only that one is in effect.
    def is_active( self, flag ):
        'If given flag(s) should generate output.'
        # try to abort early to quicken code
        if not self.active:
            return 0
        if not flag or flag in self.active:
            return 1
        else:
            # check for multi flag type:
            if type( flag ) in ( type(()), type([]) ):
                for s in flag:
                    if s in self.active:
                        return 1
        return 0
    def active_set( self, active_flags = None ):
        "returns 1 if any flags where actually set, otherwise 0."
        r = 0
        ok_flags = []
        if not active_flags:
            #no debuging at all
            self.active = []
        elif type( active_flags ) in ( types.TupleType, types.ListType ):
            flags = self._as_one_list( active_flags )
            for t in flags:
                if t not in self.debug_flags:
                    sys.stderr.write('Invalid debugflag given: %s\n' % t )
                # NOTE(review): the invalid flag is still appended after
                # the warning above -- it ends up in self.active anyway.
                ok_flags.append( t )
            self.active = ok_flags
            r = 1
        else:
            # assume comma string
            try:
                flags = active_flags.split(',')
            except:
                self.show( '***' )
                self.show( '*** Invalid debug param given: %s' % active_flags )
                self.show( '*** please correct your param!' )
                self.show( '*** due to this, full debuging is enabled' )
                self.active = self.debug_flags
            # NOTE(review): if split() failed above, `flags` is unbound
            # here and this loop raises NameError -- verify intent.
            for f in flags:
                s = f.strip()
                ok_flags.append( s )
            self.active = ok_flags
        self._remove_dupe_flags()
        return r
    def active_get( self ):
        "returns currently active flags."
        return self.active
    def _as_one_list( self, items ):
        """ init param might contain nested lists, typically from group flags.
            This code flattens the structure and removes dupes.
        """
        if type( items ) <> type( [] ) and type( items ) <> type( () ):
            return [ items ]
        r = []
        for l in items:
            if type( l ) == type([]):
                lst2 = self._as_one_list( l )
                for l2 in lst2:
                    self._append_unique_str(r, l2 )
            elif l == None:
                continue
            else:
                self._append_unique_str(r, l )
        return r
    def _append_unique_str( self, lst, item ):
        """filter out any dupes."""
        if type(item) <> type(''):
            msg2 = '%s' % item
            raise 'Invalid item type (should be string)',msg2
        if item not in lst:
            lst.append( item )
        return lst
    def _validate_flag( self, flags ):
        'verify that flag is defined.'
        if flags:
            for f in self._as_one_list( flags ):
                if not f in self.debug_flags:
                    msg2 = '%s' % f
                    raise 'Invalid debugflag given', msg2
    def _remove_dupe_flags( self ):
        """
        if multiple instances of Debug is used in same app,
        some flags might be created multiple time, filter out dupes
        """
        unique_flags = []
        for f in self.debug_flags:
            if f not in unique_flags:
                unique_flags.append(f)
        self.debug_flags = unique_flags
    # Per-instance mapping of flag/prefix -> colour escape; consulted by Show().
    colors={}
    def Show(self, flag, msg, prefix=''):
        # Escape CR/LF so one message stays on one line, and break between
        # adjacent XML tags for readability.
        msg=msg.replace('\r','\\r').replace('\n','\\n').replace('><','>\n <')
        if not colors_enabled: pass
        elif self.colors.has_key(prefix): msg=self.colors[prefix]+msg+color_none
        else: msg=color_none+msg
        if not colors_enabled: prefixcolor=''
        elif self.colors.has_key(flag): prefixcolor=self.colors[flag]
        else: prefixcolor=color_none
        if prefix=='error':
            # Append the current traceback, if any, to error messages.
            _exception = sys.exc_info()
            if _exception[0]:
                msg=msg+'\n'+''.join(traceback.format_exception(_exception[0], _exception[1], _exception[2])).rstrip()
        prefix= self.prefix+prefixcolor+(flag+' '*12)[:12]+' '+(prefix+' '*6)[:6]
        self.show(msg, flag, prefix)
    # NOTE(review): second definition -- shadows the is_active() above.
    # Adds DBG_ALWAYS handling: when 'always' is active, the test inverts
    # (flags NOT in the active set are shown).
    def is_active( self, flag ):
        if not self.active: return 0
        if not flag or flag in self.active and DBG_ALWAYS not in self.active or flag not in self.active and DBG_ALWAYS in self.active : return 1
        return 0
# Special flag consumed by the second is_active() above.
DBG_ALWAYS='always'
##Uncomment this to effectively disable all debugging and all debugging overhead.
#Debug=NoDebug
| gpl-3.0 |
asnorkin/shad_testing_script | test_shad.py | 1 | 1475 | class TestShad:
def test_stress(self, solutions, input_generator, estimator, log):
for idx, input in enumerate(input_generator()):
times, mems, outputs = [], [], []
for solution in solutions:
output, time, mem = estimator(solution, input)
outputs.append(output)
mems.append(mem)
times.append(time)
log.error('[{}]input: {}, outputs: {}'.format(idx, input, outputs))
self._check_all_outputs_are_the_same(input, outputs, solutions, times, mems, log)
def _check_all_outputs_are_the_same(self, input, outputs, solutions, times, mems, log):
if [outputs[0]] * len(outputs) == outputs:
# log.info("Solutions have the same outputs!")
# log.info("Input: {}\n".format(input))
# log.info("Output: {}\n".format(outputs[0]))
log.info("Resources:\n")
for sol, time, mem in zip(solutions, times, mems):
pass
# log.info("\t{sol}[{time}s, {mem}]".format(sol=sol, time=time, mem=mem))
return True
log.error("Solutions have different outputs.\n")
log.error("Input: {}\n".format(input))
log.error("Outputs:\n")
for sol, time, mem, output in zip(solutions, times, mems, outputs):
log.error("\t{sol}[{time}s, {mem}]:\n{output}\n".format(sol=sol, time=time, mem=mem, output=output))
raise ValueError
| gpl-3.0 |
AcademicsToday/academicstoday-django | academicstoday_project/account/migrations/0001_initial.py | 3 | 1592 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the account app.

    Creates PrivateMessage, Student and Teacher tables; the latter two are
    thin profile wrappers around the configured auth user model.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='PrivateMessage',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True)),
                ('title', models.CharField(max_length=127)),
                ('text', models.TextField()),
                ('sent_date', models.DateField(auto_now_add=True, null=True)),
                # Addresses are stored as plain strings, not FKs to users.
                ('to_address', models.CharField(max_length=255)),
                ('from_address', models.CharField(max_length=255)),
            ],
            options={
                'db_table': 'at_private_messages',
            },
        ),
        migrations.CreateModel(
            name='Student',
            fields=[
                ('student_id', models.AutoField(serialize=False, primary_key=True)),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'at_students',
            },
        ),
        migrations.CreateModel(
            name='Teacher',
            fields=[
                ('teacher_id', models.AutoField(serialize=False, primary_key=True)),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'at_teachers',
            },
        ),
    ]
| apache-2.0 |
weiting-chen/manila | manila/tests/integrated/api/client.py | 1 | 7601 | # Copyright (c) 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib
from oslo_log import log
from oslo_serialization import jsonutils
from six.moves.urllib import parse
LOG = log.getLogger(__name__)
class OpenStackApiException(Exception):
    """Base error for calls made through TestOpenStackClient.

    When an HTTP response object is supplied, its status code and body are
    folded into the exception message (reading the body consumes it).
    """

    def __init__(self, message=None, response=None):
        self.response = response
        text = message or 'Unspecified error'
        if response:
            text = ('%(message)s\nStatus Code: %(_status)s\n'
                    'Body: %(_body)s') % {
                "message": text,
                "_status": response.status,
                "_body": response.read(),
            }
        super(OpenStackApiException, self).__init__(text)
class OpenStackApiAuthenticationException(OpenStackApiException):
    """Raised when the API rejects the supplied credentials (HTTP 401)."""

    def __init__(self, response=None, message=None):
        super(OpenStackApiAuthenticationException, self).__init__(
            message or "Authentication error", response)
class OpenStackApiAuthorizationException(OpenStackApiException):
    """Raised when an authenticated request is not permitted (HTTP 401)."""

    def __init__(self, response=None, message=None):
        super(OpenStackApiAuthorizationException, self).__init__(
            message or "Authorization error", response)
class OpenStackApiNotFoundException(OpenStackApiException):
    """Raised when the requested item does not exist (HTTP 404)."""

    def __init__(self, response=None, message=None):
        super(OpenStackApiNotFoundException, self).__init__(
            message or "Item not found", response)
class TestOpenStackClient(object):
    """Simple OpenStack API Client.
    This is a really basic OpenStack API client that is under our control,
    so we can make changes / insert hooks for testing
    """
    def __init__(self, auth_user, auth_key, auth_uri):
        super(TestOpenStackClient, self).__init__()
        # Cached header dict from the last successful authentication;
        # None until _authenticate() has run.
        self.auth_result = None
        self.auth_user = auth_user
        self.auth_key = auth_key
        self.auth_uri = auth_uri
        # default project_id
        self.project_id = 'openstack'
    def request(self, url, method='GET', body=None, headers=None):
        """Perform one raw HTTP(S) request and return the httplib response.

        Caller-supplied headers are merged over a JSON Content-Type default.
        Raises OpenStackApiException for non-http(s) URL schemes.
        """
        _headers = {'Content-Type': 'application/json'}
        _headers.update(headers or {})
        parsed_url = parse.urlparse(url)
        port = parsed_url.port
        hostname = parsed_url.hostname
        scheme = parsed_url.scheme
        if scheme == 'http':
            conn = httplib.HTTPConnection(hostname,
                                          port=port)
        elif scheme == 'https':
            conn = httplib.HTTPSConnection(hostname,
                                           port=port)
        else:
            raise OpenStackApiException("Unknown scheme: %s" % url)
        relative_url = parsed_url.path
        if parsed_url.query:
            relative_url = relative_url + "?" + parsed_url.query
        LOG.info("Doing %(method)s on %(relative_url)s",
                 {"method": method, "relative_url": relative_url})
        if body:
            LOG.info("Body: %s", body)
        conn.request(method, relative_url, body, _headers)
        response = conn.getresponse()
        return response
    def _authenticate(self):
        """Authenticate once and cache the resulting headers.

        Raises OpenStackApiAuthenticationException on HTTP 401.
        """
        if self.auth_result:
            return self.auth_result
        auth_uri = self.auth_uri
        headers = {'X-Auth-User': self.auth_user,
                   'X-Auth-Key': self.auth_key,
                   'X-Auth-Project-Id': self.project_id}
        response = self.request(auth_uri,
                                headers=headers)
        http_status = response.status
        LOG.debug("%(auth_uri)s => code %(http_status)s" %
                  {"auth_uri": auth_uri, "http_status": http_status})
        if http_status == 401:
            raise OpenStackApiAuthenticationException(response=response)
        auth_headers = {}
        for k, v in response.getheaders():
            auth_headers[k] = v
        self.auth_result = auth_headers
        return self.auth_result
    def api_request(self, relative_uri, check_response_status=None, **kwargs):
        """Issue an authenticated request against the management endpoint.

        When check_response_status is given, any status not in that list is
        mapped to the matching OpenStackApi*Exception.
        """
        auth_result = self._authenticate()
        # NOTE(justinsb): httplib 'helpfully' converts headers to lower case
        base_uri = auth_result['x-server-management-url']
        full_uri = '%s/%s' % (base_uri, relative_uri)
        headers = kwargs.setdefault('headers', {})
        headers['X-Auth-Token'] = auth_result['x-auth-token']
        response = self.request(full_uri, **kwargs)
        http_status = response.status
        LOG.debug("%(relative_uri)s => code %(http_status)s" %
                  {"relative_uri": relative_uri, "http_status": http_status})
        if check_response_status:
            if http_status not in check_response_status:
                if http_status == 404:
                    raise OpenStackApiNotFoundException(response=response)
                elif http_status == 401:
                    raise OpenStackApiAuthorizationException(response=response)
                else:
                    raise OpenStackApiException(
                        message="Unexpected status code",
                        response=response)
        return response
    def _decode_json(self, response):
        # Empty bodies decode to "" rather than raising.
        body = response.read()
        LOG.debug("Decoding JSON: %s" % (body))
        if body:
            return jsonutils.loads(body)
        else:
            return ""
    def api_get(self, relative_uri, **kwargs):
        """GET and JSON-decode; only HTTP 200 is accepted by default."""
        kwargs.setdefault('check_response_status', [200])
        response = self.api_request(relative_uri, **kwargs)
        return self._decode_json(response)
    def api_post(self, relative_uri, body, **kwargs):
        """POST a JSON-encoded body and decode the JSON reply."""
        kwargs['method'] = 'POST'
        if body:
            headers = kwargs.setdefault('headers', {})
            headers['Content-Type'] = 'application/json'
            kwargs['body'] = jsonutils.dumps(body)
        kwargs.setdefault('check_response_status', [200, 202])
        response = self.api_request(relative_uri, **kwargs)
        return self._decode_json(response)
    def api_put(self, relative_uri, body, **kwargs):
        """PUT a JSON-encoded body and decode the JSON reply."""
        kwargs['method'] = 'PUT'
        if body:
            headers = kwargs.setdefault('headers', {})
            headers['Content-Type'] = 'application/json'
            kwargs['body'] = jsonutils.dumps(body)
        kwargs.setdefault('check_response_status', [200, 202, 204])
        response = self.api_request(relative_uri, **kwargs)
        return self._decode_json(response)
    def api_delete(self, relative_uri, **kwargs):
        """DELETE; the raw response is returned (no JSON decoding)."""
        kwargs['method'] = 'DELETE'
        kwargs.setdefault('check_response_status', [200, 202, 204])
        return self.api_request(relative_uri, **kwargs)
    def get_shares(self, detail=True):
        """List shares, with full detail unless detail=False."""
        rel_url = '/shares/detail' if detail else '/shares'
        return self.api_get(rel_url)['shares']
| apache-2.0 |
joequery/django | tests/urlpatterns_reverse/erroneous_urls.py | 199 | 1350 | import warnings
from django.conf.urls import url
from django.utils.deprecation import RemovedInDjango110Warning
from . import views
# Test deprecated behavior of passing strings as view to url().
# Some of these can be removed in Django 1.10 as they aren't convertable to
# callables.
# NOTE: every entry below is intentionally invalid (see the per-entry
# comments) -- this module is a negative-test fixture for URL resolution
# error reporting.
with warnings.catch_warnings():
    warnings.filterwarnings('ignore', category=RemovedInDjango110Warning)
    urlpatterns = [
        # View has erroneous import
        url(r'erroneous_inner/$', views.erroneous_view),
        # Module has erroneous import
        url(r'erroneous_outer/$', 'urlpatterns_reverse.erroneous_views_module.erroneous_view'),
        # Module is an unqualified string
        url(r'erroneous_unqualified/$', 'unqualified_view'),
        # View does not exist
        url(r'missing_inner/$', 'urlpatterns_reverse.views.missing_view'),
        # View is not a callable (string import; arbitrary Python object)
        url(r'uncallable-dotted/$', 'urlpatterns_reverse.views.uncallable'),
        # View is not a callable (explicit import; arbitrary Python object)
        url(r'uncallable-object/$', views.uncallable),
        # Module does not exist
        url(r'missing_outer/$', 'urlpatterns_reverse.missing_module.missing_view'),
        # Regex contains an error (refs #6170)
        url(r'(regex_error/$', views.empty_view),
    ]
| bsd-3-clause |
magvugr/AT | EntVirtual/lib/python2.7/site-packages/django/core/cache/__init__.py | 230 | 3806 | """
Caching framework.
This package defines set of cache backends that all conform to a simple API.
In a nutshell, a cache is a set of values -- which can be any object that
may be pickled -- identified by string keys. For the complete API, see
the abstract BaseCache class in django.core.cache.backends.base.
Client code should use the `cache` variable defined here to access the default
cache backend and look up non-default cache backends in the `caches` dict-like
object.
See docs/topics/cache.txt for information on the public API.
"""
from threading import local
from django.conf import settings
from django.core import signals
from django.core.cache.backends.base import (
BaseCache, CacheKeyWarning, InvalidCacheBackendError,
)
from django.utils.module_loading import import_string
__all__ = [
'cache', 'DEFAULT_CACHE_ALIAS', 'InvalidCacheBackendError',
'CacheKeyWarning', 'BaseCache',
]
DEFAULT_CACHE_ALIAS = 'default'
def _create_cache(backend, **kwargs):
    """Instantiate a cache backend.

    ``backend`` is either an alias into settings.CACHES or a dotted path to
    a backend class; extra kwargs override the configured params.  Raises
    InvalidCacheBackendError when neither interpretation works.
    """
    try:
        # Try to get the CACHES entry for the given backend name first
        try:
            conf = settings.CACHES[backend]
        except KeyError:
            try:
                # Trying to import the given backend, in case it's a dotted path
                import_string(backend)
            except ImportError as e:
                raise InvalidCacheBackendError("Could not find backend '%s': %s" % (
                    backend, e))
            location = kwargs.pop('LOCATION', '')
            params = kwargs
        else:
            # Alias found: start from the configured params, let kwargs win,
            # and re-resolve `backend` to the configured BACKEND path.
            params = conf.copy()
            params.update(kwargs)
            backend = params.pop('BACKEND')
            location = params.pop('LOCATION', '')
        backend_cls = import_string(backend)
    except ImportError as e:
        raise InvalidCacheBackendError(
            "Could not find backend '%s': %s" % (backend, e))
    return backend_cls(location, params)
class CacheHandler(object):
    """
    A Cache Handler to manage access to Cache instances.

    Each thread holds its own mapping of alias -> cache instance, created
    lazily on first access, so only one instance of each alias exists per
    thread.
    """
    def __init__(self):
        self._caches = local()

    def __getitem__(self, alias):
        # The thread-local dict may not exist yet on this thread.
        if not hasattr(self._caches, 'caches'):
            self._caches.caches = {}
        thread_caches = self._caches.caches
        if alias in thread_caches:
            return thread_caches[alias]
        if alias not in settings.CACHES:
            raise InvalidCacheBackendError(
                "Could not find config for '%s' in settings.CACHES" % alias
            )
        thread_caches[alias] = cache = _create_cache(alias)
        return cache

    def all(self):
        return getattr(self._caches, 'caches', {}).values()


caches = CacheHandler()
class DefaultCacheProxy(object):
    """
    Proxy access to the default Cache object's attributes.
    This allows the legacy `cache` object to be thread-safe using the new
    ``caches`` API.
    """
    # Every dunder below delegates to the per-thread default cache, looked
    # up freshly on each access so the right thread-local instance is used.
    def __getattr__(self, name):
        return getattr(caches[DEFAULT_CACHE_ALIAS], name)
    def __setattr__(self, name, value):
        return setattr(caches[DEFAULT_CACHE_ALIAS], name, value)
    def __delattr__(self, name):
        return delattr(caches[DEFAULT_CACHE_ALIAS], name)
    def __contains__(self, key):
        return key in caches[DEFAULT_CACHE_ALIAS]
    def __eq__(self, other):
        return caches[DEFAULT_CACHE_ALIAS] == other
    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable on Python 3 -- confirm that is acceptable here.
    def __ne__(self, other):
        return caches[DEFAULT_CACHE_ALIAS] != other
cache = DefaultCacheProxy()
def close_caches(**kwargs):
    """Close every cache this thread has opened (request_finished handler)."""
    # Some caches -- python-memcached in particular -- need to do a cleanup at the
    # end of a request cycle. If not implemented in a particular backend
    # cache.close is a no-op
    for cache in caches.all():
        cache.close()
signals.request_finished.connect(close_caches)
| gpl-3.0 |
ychfan/tensorflow | tensorflow/examples/adding_an_op/zero_out_op_3.py | 190 | 1053 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ZeroOut op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
# Load the compiled custom-op shared library that ships next to this file
# and expose its ZeroOut op as a plain Python callable.
_zero_out_module = tf.load_op_library(
    os.path.join(tf.resource_loader.get_data_files_path(),
                 'zero_out_op_kernel_3.so'))
zero_out = _zero_out_module.zero_out
| apache-2.0 |
imerr/SFMLEngine | scripts/android.py | 1 | 4072 | import os
import sys
import subprocess
import shutil
# Build settings; populated from the command line by main().
config = {}
# commented out architectures don't have their dependencies included in sfml
# so for example libjpeg is missing for arm64-v8a and x86_64
architectures = [
	"armeabi",
	"armeabi-v7a",
	# "arm64-v8a",
	"x86",
	# "x86_64",
	"mips"
]
# Maps an Android ABI name to the NDK platform directory it builds against.
platformArch = {
	"armeabi" : "arch-arm",
	"armeabi-v7a" : "arch-arm",
	"arm64-v8a" : "arch-arm",
	"x86" : "arch-x86",
	"x86_64" : "arch-x86",
	"mips" : "arch-mips"
}
def main():
if len(sys.argv) < 6:
print "Usage: android.py <sourceDir> <buildDir> <NDK> <SDK> <buildTarget> [mode=Debug] [apiLevel=android-9]"
sys.exit(1)
global config, architectures
config["source"] = os.path.abspath(sys.argv[1])
config["build"] = os.path.abspath(sys.argv[2])
config["ndk"] = sys.argv[3]
config["sdk"] = sys.argv[4]
config["target"] = sys.argv[5]
config["mode"] = sys.argv[6] if len(sys.argv) >= 7 else "Debug"
config["apiLevel"] = sys.argv[7] if len(sys.argv) >= 8 else "android-9"
os.environ["ANDROID_NDK"] = config["ndk"]
os.environ["ANDROID_SDK"] = config["sdk"]
os.environ["PATH"] += "{2}{0}/tools{2}{0}/platform-tools{2}{1}".format(config["ndk"], config["sdk"], os.pathsep)
try:
os.mkdir(config["build"])
except OSError: pass
os.chdir(config["build"])
for arch in architectures:
if not cmake(arch):
print "CMake for", arch, "failed"
sys.exit(1)
for arch in architectures:
if not ninja(arch):
print "Building (ninja) for", arch, "failed"
sys.exit(1)
# collect artifacts
if os.path.exists("libs"):
shutil.rmtree("libs")
# copy openal.so..
shutil.copytree(os.path.join(config["source"], "Engine/external/SFML/extlibs/libs-android"), "libs")
for arch in architectures:
collect(arch)
# TODO: dont delete+copy, only sync
print "Copying assets..."
try:
shutil.rmtree("assets")
except OSError:
pass
os.mkdir("assets")
shutil.copytree(os.path.join(config["source"], "assets"), "assets/assets")
print "Copying res..."
try:
shutil.rmtree("res")
except OSError:
pass
shutil.copytree(os.path.join(config["source"], "res"), "res")
shutil.copy2(os.path.join(config["source"], "AndroidManifest.xml"), "AndroidManifest.xml")
p = subprocess.Popen([
os.path.join(config["sdk"], "tools/android.bat"),
"update",
"project",
"--target",
"1".format(config["apiLevel"]),
"--path",
"."
])
if p.wait() != 0:
print "Updating project failed"
sys.exit(1)
p = subprocess.Popen([
"ant.bat",
"debug" if config["mode"] == "Debug" else "debug"
])
if p.wait() != 0:
print "Building apk failed"
sys.exit(1)
def archPath(arch):
	"""Return the name of the per-architecture CMake build directory."""
	return "cmake_%s" % arch
def cmake(arch):
	"""Run the CMake configure step for one Android ABI.

	Returns True on success, or immediately if the per-arch build
	directory already exists (treated as "already configured").
	Side effects: sets SYSROOT, creates and temporarily enters the
	per-arch directory.
	"""
	global config
	print arch
	# An existing build dir means this arch was configured on a prior run.
	if os.path.exists(archPath(arch)):
		return True
	os.environ["SYSROOT"] = config["ndk"] + "/platforms/" + config["apiLevel"] + "/" + platformArch[arch]
	os.mkdir(archPath(arch))
	os.chdir(archPath(arch))
	p = subprocess.Popen([
		"cmake.exe",
		"-DANDROID_ABI=" + arch,
		"-DANDROID_NATIVE_API_LEVEL=" + config["apiLevel"],
		"-DCMAKE_TOOLCHAIN_FILE={0}/Engine/external/SFML/cmake/toolchains/android.toolchain.cmake".format(config["source"]),
		"-Wno-dev",
		"-GNinja",
		"-DCMAKE_BUILD_TYPE="+config["mode"],
		config["source"],
	])
	res = p.wait()
	os.chdir("..")  # back to the build root for the next arch
	return res == 0
def ninja(arch):
	"""Build the configured targets with ninja for one Android ABI.

	Returns False when the arch was never configured (missing build
	directory) or when the ninja invocation exits non-zero.
	"""
	global config
	print arch
	if not os.path.exists(archPath(arch)):
		print "Missing", archPath(arch)
		return False
	os.environ["SYSROOT"] = config["ndk"] + "/platforms/" + config["apiLevel"] + "/" + platformArch[arch]
	os.chdir(archPath(arch))
	p = subprocess.Popen([
		"ninja",
		config["target"],
		"sfml-activity"
	])
	#p.communicate()
	res = p.wait()
	os.chdir("..")  # back to the build root
	return res == 0
def collect(arch):
	"""Copy every built .so under the arch's build tree into libs/<arch>/.

	Assumes libs/<arch>/ already exists (created by the earlier copytree
	of the prebuilt SFML libs).
	"""
	#os.mkdir("libs/"+arch)
	for subdir, dirs, files in os.walk(archPath(arch)):
		for file in files:
			if file.endswith(".so"):
				print "Collecting "+arch+ " " + file
				# sfml-activity check is a workaround for it always trying to load libsfml-activity.so
				shutil.copy2(os.path.join(subdir, file), "libs/" + arch + "/" + ("libsfml-activity.so" if file == "libsfml-activity-d.so" else file))
main() | mit |
zhang-alex/bash-modules | en/Lib/site-packages/pip/operations/freeze.py | 342 | 5194 | from __future__ import absolute_import
import logging
import re
import pip
from pip.req import InstallRequirement
from pip.req.req_file import COMMENT_RE
from pip.utils import get_installed_distributions
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.pkg_resources import RequirementParseError
logger = logging.getLogger(__name__)
def freeze(
        requirement=None,
        find_links=None, local_only=None, user_only=None, skip_regex=None,
        default_vcs=None,
        isolated=False,
        wheel_cache=None,
        skip=()):
    """Yield ``pip freeze`` style requirement lines, one str per line.

    requirement: optional list of requirement-file paths. When given,
        lines found in those files are echoed in file order (options are
        de-duplicated, installed packages are pinned), and any remaining
        installed packages follow a '## ...' marker line.
    skip_regex: regex; matching requirement-file lines are passed through.
    skip: canonical package names excluded from the trailing listing.
    """
    find_links = find_links or []
    skip_match = None
    if skip_regex:
        skip_match = re.compile(skip_regex).search
    # Collect dependency links both from installed dists and from any
    # find-links entries that pin a specific egg.
    dependency_links = []
    for dist in pkg_resources.working_set:
        if dist.has_metadata('dependency_links.txt'):
            dependency_links.extend(
                dist.get_metadata_lines('dependency_links.txt')
            )
    for link in find_links:
        if '#egg=' in link:
            dependency_links.append(link)
    for link in find_links:
        yield '-f %s' % link
    # Map requirement name -> FrozenRequirement for everything installed.
    installations = {}
    for dist in get_installed_distributions(local_only=local_only,
                                            skip=(),
                                            user_only=user_only):
        try:
            req = pip.FrozenRequirement.from_dist(
                dist,
                dependency_links
            )
        except RequirementParseError:
            logger.warning(
                "Could not parse requirement: %s",
                dist.project_name
            )
            continue
        installations[req.name] = req
    if requirement:
        # the options that don't get turned into an InstallRequirement
        # should only be emitted once, even if the same option is in multiple
        # requirements files, so we need to keep track of what has been emitted
        # so that we don't emit it again if it's seen again
        emitted_options = set()
        for req_file_path in requirement:
            with open(req_file_path) as req_file:
                for line in req_file:
                    if (not line.strip() or
                            line.strip().startswith('#') or
                            (skip_match and skip_match(line)) or
                            line.startswith((
                                '-r', '--requirement',
                                '-Z', '--always-unzip',
                                '-f', '--find-links',
                                '-i', '--index-url',
                                '--pre',
                                '--trusted-host',
                                '--process-dependency-links',
                                '--extra-index-url'))):
                        line = line.rstrip()
                        if line not in emitted_options:
                            emitted_options.add(line)
                            yield line
                        continue
                    if line.startswith('-e') or line.startswith('--editable'):
                        if line.startswith('-e'):
                            line = line[2:].strip()
                        else:
                            line = line[len('--editable'):].strip().lstrip('=')
                        line_req = InstallRequirement.from_editable(
                            line,
                            default_vcs=default_vcs,
                            isolated=isolated,
                            wheel_cache=wheel_cache,
                        )
                    else:
                        line_req = InstallRequirement.from_line(
                            COMMENT_RE.sub('', line).strip(),
                            isolated=isolated,
                            wheel_cache=wheel_cache,
                        )
                    if not line_req.name:
                        logger.info(
                            "Skipping line in requirement file [%s] because "
                            "it's not clear what it would install: %s",
                            req_file_path, line.strip(),
                        )
                        logger.info(
                            "  (add #egg=PackageName to the URL to avoid"
                            " this warning)"
                        )
                    elif line_req.name not in installations:
                        logger.warning(
                            "Requirement file [%s] contains %s, but that "
                            "package is not installed",
                            req_file_path, COMMENT_RE.sub('', line).strip(),
                        )
                    else:
                        # Emit the pinned installed version, and drop it so it
                        # is not repeated in the trailing listing below.
                        yield str(installations[line_req.name]).rstrip()
                        del installations[line_req.name]
        yield(
            '## The following requirements were added by '
            'pip freeze:'
        )
    for installation in sorted(
            installations.values(), key=lambda x: x.name.lower()):
        if canonicalize_name(installation.name) not in skip:
            yield str(installation).rstrip()
| mit |
ryfeus/lambda-packs | Keras_tensorflow/source/keras/regularizers.py | 9 | 2034 | from __future__ import absolute_import
import six
from . import backend as K
from .utils.generic_utils import serialize_keras_object
from .utils.generic_utils import deserialize_keras_object
class Regularizer(object):
    """Base class for weight regularizers.

    The base implementation imposes no penalty; concrete subclasses
    override ``__call__`` to return a loss contribution for a tensor.
    """

    def __call__(self, x):
        # No penalty by default.
        return 0.

    @classmethod
    def from_config(cls, config):
        """Re-create a regularizer from its ``get_config()`` dictionary."""
        return cls(**config)
class L1L2(Regularizer):
    """Regularizer applying both L1 and L2 penalties.

    # Arguments
        l1: Float; L1 regularization factor.
        l2: Float; L2 regularization factor.
    """

    def __init__(self, l1=0., l2=0.):
        # Cast once at construction so __call__ works in the backend dtype.
        self.l1 = K.cast_to_floatx(l1)
        self.l2 = K.cast_to_floatx(l2)

    def __call__(self, x):
        penalty = 0.
        if self.l1:
            penalty += K.sum(self.l1 * K.abs(x))
        if self.l2:
            penalty += K.sum(self.l2 * K.square(x))
        return penalty

    def get_config(self):
        """Serializable configuration consumed by ``from_config``."""
        return {'l1': float(self.l1), 'l2': float(self.l2)}
# Aliases.
def l1(l=0.01):
    """Shortcut for an L1-only regularizer with factor ``l``."""
    return L1L2(l1=l)
def l2(l=0.01):
    """Shortcut for an L2-only regularizer with factor ``l``."""
    return L1L2(l2=l)
def l1_l2(l1=0.01, l2=0.01):
    """Shortcut for a combined L1+L2 regularizer with the given factors."""
    return L1L2(l1=l1, l2=l2)
def serialize(regularizer):
    """Serialize a regularizer instance to a JSON-compatible structure."""
    return serialize_keras_object(regularizer)
def deserialize(config, custom_objects=None):
    """Instantiate a regularizer from its serialized config dictionary.

    Looks classes up in this module's globals, plus ``custom_objects``.
    """
    return deserialize_keras_object(config,
                                    module_objects=globals(),
                                    custom_objects=custom_objects,
                                    printable_module_name='regularizer')
def get(identifier):
    """Resolve ``identifier`` to a regularizer instance.

    Accepts None (returns None), a serialized config dict, a class-name
    string, or an already-callable regularizer (returned unchanged).
    Raises ValueError for anything else.
    """
    if identifier is None:
        return None
    if isinstance(identifier, dict):
        return deserialize(identifier)
    if isinstance(identifier, six.string_types):
        # A bare name means "default-constructed instance of that class".
        return deserialize({'class_name': str(identifier), 'config': {}})
    if callable(identifier):
        return identifier
    raise ValueError('Could not interpret regularizer identifier:',
                     identifier)
| mit |
freephys/python_ase | ase/lattice/surface.py | 2 | 10639 | """Helper functions for creating the most common surfaces and related tasks.
The helper functions can create the most common low-index surfaces,
add vacuum layers and add adsorbates.
"""
from math import sqrt
import numpy as np
from ase.atom import Atom
from ase.atoms import Atoms
from ase.data import reference_states, atomic_numbers
def fcc100(symbol, size, a=None, vacuum=0.0):
    """FCC(100) surface.

    symbol: chemical symbol; size: (nx, ny, nlayers); a: lattice constant
    (guessed from reference data when None); vacuum: extra cell height.
    Supported special adsorption sites: 'ontop', 'bridge', 'hollow'."""
    return surface(symbol, 'fcc', '100', size, a, None, vacuum)
def fcc110(symbol, size, a=None, vacuum=0.0):
    """FCC(110) surface.

    Arguments match fcc100.
    Supported special adsorption sites: 'ontop', 'longbridge',
    'shortbridge','hollow'."""
    return surface(symbol, 'fcc', '110', size, a, None, vacuum)
def bcc100(symbol, size, a=None, vacuum=0.0):
    """BCC(100) surface.

    Arguments match fcc100.
    Supported special adsorption sites: 'ontop', 'bridge', 'hollow'."""
    return surface(symbol, 'bcc', '100', size, a, None, vacuum)
def bcc110(symbol, size, a=None, vacuum=0.0, orthogonal=False):
    """BCC(110) surface.

    Supported special adsorption sites: 'ontop', 'longbridge',
    'shortbridge', 'hollow'.
    Use *orthogonal=True* to get an orthogonal unit cell - works only
    for size=(i,j,k) with j even."""
    return surface(symbol, 'bcc', '110', size, a, None, vacuum, orthogonal)
def bcc111(symbol, size, a=None, vacuum=0.0, orthogonal=False):
    """BCC(111) surface.

    Supported special adsorption sites: 'ontop'.
    Use *orthogonal=True* to get an orthogonal unit cell - works only
    for size=(i,j,k) with j even."""
    return surface(symbol, 'bcc', '111', size, a, None, vacuum, orthogonal)
def fcc111(symbol, size, a=None, vacuum=0.0, orthogonal=False):
    """FCC(111) surface.

    Supported special adsorption sites: 'ontop', 'bridge', 'fcc' and 'hcp'.
    Use *orthogonal=True* to get an orthogonal unit cell - works only
    for size=(i,j,k) with j even."""
    return surface(symbol, 'fcc', '111', size, a, None, vacuum, orthogonal)
def hcp0001(symbol, size, a=None, c=None, vacuum=0.0, orthogonal=False):
    """HCP(0001) surface.

    c is the second HCP lattice constant (ideal c/a assumed when None).
    Supported special adsorption sites: 'ontop', 'bridge', 'fcc' and 'hcp'.
    Use *orthogonal=True* to get an orthogonal unit cell - works only
    for size=(i,j,k) with j even."""
    return surface(symbol, 'hcp', '0001', size, a, c, vacuum, orthogonal)
def add_adsorbate(slab, adsorbate, height, position=(0, 0), offset=None,
                  mol_index=0):
    """Add an adsorbate to a surface.

    This function adds an adsorbate to a slab.  If the slab is
    produced by one of the utility functions in ase.lattice.surface, it
    is possible to specify the position of the adsorbate by a keyword
    (the supported keywords depend on which function was used to
    create the slab).

    If the adsorbate is a molecule, the atom indexed by the mol_index
    optional argument is positioned on top of the adsorption position
    on the surface, and it is the responsibility of the user to orient
    the adsorbate in a sensible way.

    This function can be called multiple times to add more than one
    adsorbate.

    Parameters:

    slab: The surface onto which the adsorbate should be added.

    adsorbate:  The adsorbate. Must be one of the following three types:
        A string containing the chemical symbol for a single atom.
        An atom object.
        An atoms object (for a molecular adsorbate).

    height: Height above the surface.

    position: The x-y position of the adsorbate, either as a tuple of
        two numbers or as a keyword (if the surface is produced by one
        of the functions in ase.lattice.surfaces).

    offset (default: None): Offsets the adsorbate by a number of unit
        cells. Mostly useful when adding more than one adsorbate.

    mol_index (default: 0): If the adsorbate is a molecule, index of
        the atom to be positioned above the location specified by the
        position argument.

    Note *position* is given in absolute xy coordinates (or as
    a keyword), whereas offset is specified in unit cells.  This
    can be used to give the positions in units of the unit cell by
    using *offset* instead.
    """
    info = slab.adsorbate_info
    if 'cell' not in info:
        # Fall back to the slab's own xy cell when no adsorbate-site cell
        # was recorded by the surface builder.
        info['cell'] = slab.get_cell()[:2,:2]
    pos = np.array([0.0, 0.0]) # (x, y) part
    spos = np.array([0.0, 0.0]) # part relative to unit cell
    if offset is not None:
        spos += np.asarray(offset, float)
    if isinstance(position, str):
        # A site-name:
        if 'sites' not in info:
            raise TypeError('If the atoms are not made by an ' +
                            'ase.lattice.surface function, ' +
                            'position cannot be a name.')
        if position not in info['sites']:
            raise TypeError('Adsorption site %s not supported.' % position)
        spos += info['sites'][position]
    else:
        pos += position
    # Fold the fractional (unit-cell) part into absolute xy coordinates.
    pos += np.dot(spos, info['cell'])
    # Convert the adsorbate to an Atoms object
    if isinstance(adsorbate, Atoms):
        ads = adsorbate
    elif isinstance(adsorbate, Atom):
        ads = Atoms([adsorbate])
    else:
        # Hope it is a useful string or something like that
        ads = Atoms(adsorbate)
    # Get the z-coordinate:
    try:
        a = info['top layer atom index']
    except KeyError:
        # Highest atom defines the top layer; cache the index for reuse.
        a = slab.positions[:, 2].argmax()
        info['top layer atom index']= a
    z = slab.positions[a, 2] + height
    # Move adsorbate into position
    ads.translate([pos[0], pos[1], z] - ads.positions[mol_index])
    # Attach the adsorbate
    slab.extend(ads)
def surface(symbol, structure, face, size, a, c, vacuum, orthogonal=True):
    """Function to build often used surfaces.

    Don't call this function directly - use fcc100, fcc110, bcc111, ..."""
    Z = atomic_numbers[symbol]
    if a is None:
        # Guess the lattice constant from the element's reference state,
        # but only when the reference symmetry matches the requested one.
        sym = reference_states[Z]['symmetry'].lower()
        if sym != structure:
            raise ValueError("Can't guess lattice constant for %s-%s!" %
                             (structure, symbol))
        a = reference_states[Z]['a']
    if structure == 'hcp' and c is None:
        if reference_states[Z]['symmetry'].lower() == 'hcp':
            c = reference_states[Z]['a']
        else:
            # Ideal c/a ratio for HCP.
            c = sqrt(8 / 3.0) * a
    # Start from a rectangular grid of fractional positions; the
    # face-specific branches below shift rows/layers into place.
    positions = np.empty((size[2], size[1], size[0], 3))
    positions[..., 0] = np.arange(size[0]).reshape((1, 1, -1))
    positions[..., 1] = np.arange(size[1]).reshape((1, -1, 1))
    positions[..., 2] = np.arange(size[2]).reshape((-1, 1, 1))
    numbers = np.ones(size[0] * size[1] * size[2], int) * Z
    # Tag each atom with its layer number, counted from the top (1 = top).
    tags = np.empty((size[2], size[1], size[0]), int)
    tags[:] = np.arange(size[2], 0, -1).reshape((-1, 1, 1))
    slab = Atoms(numbers,
                 tags=tags.ravel(),
                 pbc=(True, True, False),
                 cell=size)
    surface_cell = None
    sites = {'ontop': (0, 0)}
    surf = structure + face
    if surf == 'fcc100':
        cell = (sqrt(0.5), sqrt(0.5), 0.5)
        # Shift every second layer (from the top down) by half a cell.
        positions[-2::-2, ..., :2] += 0.5
        sites.update({'hollow': (0.5, 0.5), 'bridge': (0.5, 0)})
    elif surf == 'fcc110':
        cell = (1.0, sqrt(0.5), sqrt(0.125))
        positions[-2::-2, ..., :2] += 0.5
        sites.update({'hollow': (0.5, 0.5), 'longbridge': (0.5, 0),
                      'shortbridge': (0, 0.5)})
    elif surf == 'bcc100':
        cell = (1.0, 1.0, 0.5)
        positions[-2::-2, ..., :2] += 0.5
        sites.update({'hollow': (0.5, 0.5), 'bridge': (0.5, 0)})
    else:
        # Faces with a non-rectangular primitive cell; an orthogonal cell
        # only exists when the second repeat count is even.
        if orthogonal and size[1] % 2 == 1:
            raise ValueError(("Can't make orthorhombic cell with size=%r.  " %
                              (tuple(size),)) +
                             'Second number in size must be even.')
        if surf == 'fcc111':
            cell = (sqrt(0.5), sqrt(0.375), 1 / sqrt(3))
            if orthogonal:
                positions[-1::-3, 1::2, :, 0] += 0.5
                positions[-2::-3, 1::2, :, 0] += 0.5
                positions[-3::-3, 1::2, :, 0] -= 0.5
                positions[-2::-3, ..., :2] += (0.0, 2.0 / 3)
                positions[-3::-3, ..., :2] += (0.5, 1.0 / 3)
            else:
                positions[-2::-3, ..., :2] += (-1.0 / 3, 2.0 / 3)
                positions[-3::-3, ..., :2] += (1.0 / 3, 1.0 / 3)
            sites.update({'bridge': (0.5, 0), 'fcc': (1.0 / 3, 1.0 / 3),
                          'hcp': (2.0 / 3, 2.0 / 3)})
        elif surf == 'hcp0001':
            cell = (1.0, sqrt(0.75), 0.5 * c / a)
            if orthogonal:
                positions[:, 1::2, :, 0] += 0.5
                positions[-2::-2, ..., :2] += (0.0, 2.0 / 3)
            else:
                positions[-2::-2, ..., :2] += (-1.0 / 3, 2.0 / 3)
            sites.update({'bridge': (0.5, 0), 'fcc': (1.0 / 3, 1.0 / 3),
                          'hcp': (2.0 / 3, 2.0 / 3)})
        elif surf == 'bcc110':
            cell = (1.0, sqrt(0.5), sqrt(0.5))
            if orthogonal:
                positions[:, 1::2, :, 0] += 0.5
                positions[-2::-2, ..., :2] += (0.0, 1.0)
            else:
                positions[-2::-2, ..., :2] += (-0.5, 1.0)
            sites.update({'shortbridge': (0, 0.5), 'longbridge': (0.5, 0)})
        elif surf == 'bcc111':
            cell = (sqrt(2), sqrt(1.5), sqrt(3) / 6)
            if orthogonal:
                positions[-1::-3, 1::2, :, 0] += 0.5
                positions[-2::-3, 1::2, :, 0] += 0.5
                positions[-3::-3, 1::2, :, 0] -= 0.5
                positions[-2::-3, ..., :2] += (0.0, 2.0 / 3)
                positions[-3::-3, ..., :2] += (0.5, 1.0 / 3)
            else:
                positions[-2::-3, ..., :2] += (-1.0 / 3, 2.0 / 3)
                positions[-3::-3, ..., :2] += (1.0 / 3, 1.0 / 3)
            sites.update({'hollow': (1.0 / 3, 1.0 / 3)})
        # The adsorbate-site cell for these faces is the skewed 2D cell.
        surface_cell = a * np.array([(cell[0], 0),
                                     (cell[0] / 2, cell[1])])
        if not orthogonal:
            cell = np.array([(cell[0], 0, 0),
                             (cell[0] / 2, cell[1], 0),
                             (0, 0, cell[2])])
    if surface_cell is None:
        surface_cell = a * np.diag(cell[:2])
    if isinstance(cell, tuple):
        cell = np.diag(cell)
    slab.set_positions(positions.reshape((-1, 3)))
    # Scale fractional positions into the physical cell and add vacuum on top.
    slab.set_cell([a * v * n for v, n in zip(cell, size)], scale_atoms=True)
    slab.cell[2, 2] += vacuum
    slab.adsorbate_info['cell'] = surface_cell
    slab.adsorbate_info['sites'] = sites
    return slab
| gpl-3.0 |
vizual54/MissionPlanner | Lib/email/generator.py | 53 | 14294 | # Copyright (C) 2001-2010 Python Software Foundation
# Contact: email-sig@python.org
"""Classes to generate plain text from a message object tree."""
__all__ = ['Generator', 'DecodedGenerator']
import re
import sys
import time
import random
import warnings
from cStringIO import StringIO
from email.header import Header
UNDERSCORE = '_'
NL = '\n'
fcre = re.compile(r'^From ', re.MULTILINE)
def _is8bitstring(s):
if isinstance(s, str):
try:
unicode(s, 'us-ascii')
except UnicodeError:
return True
return False
class Generator:
    """Generates output from a Message object tree.

    This basic generator writes the message to the given file object as plain
    text.
    """
    #
    # Public interface
    #

    def __init__(self, outfp, mangle_from_=True, maxheaderlen=78):
        """Create the generator for message flattening.

        outfp is the output file-like object for writing the message to.  It
        must have a write() method.

        Optional mangle_from_ is a flag that, when True (the default), escapes
        From_ lines in the body of the message by putting a `>' in front of
        them.

        Optional maxheaderlen specifies the longest length for a non-continued
        header.  When a header line is longer (in characters, with tabs
        expanded to 8 spaces) than maxheaderlen, the header will split as
        defined in the Header class.  Set maxheaderlen to zero to disable
        header wrapping.  The default is 78, as recommended (but not required)
        by RFC 2822.
        """
        self._fp = outfp
        self._mangle_from_ = mangle_from_
        self._maxheaderlen = maxheaderlen

    def write(self, s):
        """Write the string s to the underlying output file object."""
        # Just delegate to the file object
        self._fp.write(s)

    def flatten(self, msg, unixfrom=False):
        """Print the message object tree rooted at msg to the output file
        specified when the Generator instance was created.

        unixfrom is a flag that forces the printing of a Unix From_ delimiter
        before the first object in the message tree.  If the original message
        has no From_ delimiter, a `standard' one is crafted.  By default, this
        is False to inhibit the printing of any From_ delimiter.

        Note that for subobjects, no From_ line is printed.
        """
        if unixfrom:
            ufrom = msg.get_unixfrom()
            if not ufrom:
                ufrom = 'From nobody ' + time.ctime(time.time())
            print >> self._fp, ufrom
        self._write(msg)

    def clone(self, fp):
        """Clone this generator with the exact same options."""
        return self.__class__(fp, self._mangle_from_, self._maxheaderlen)

    #
    # Protected interface - undocumented ;/
    #

    def _write(self, msg):
        # We can't write the headers yet because of the following scenario:
        # say a multipart message includes the boundary string somewhere in
        # its body.  We'd have to calculate the new boundary /before/ we write
        # the headers so that we can write the correct Content-Type:
        # parameter.
        #
        # The way we do this, so as to make the _handle_*() methods simpler,
        # is to cache any subpart writes into a StringIO.  The we write the
        # headers and the StringIO contents.  That way, subpart handlers can
        # Do The Right Thing, and can still modify the Content-Type: header if
        # necessary.
        oldfp = self._fp
        try:
            self._fp = sfp = StringIO()
            self._dispatch(msg)
        finally:
            # Restore the real output stream even if a handler raised.
            self._fp = oldfp
        # Write the headers.  First we see if the message object wants to
        # handle that itself.  If not, we'll do it generically.
        meth = getattr(msg, '_write_headers', None)
        if meth is None:
            self._write_headers(msg)
        else:
            meth(self)
        self._fp.write(sfp.getvalue())

    def _dispatch(self, msg):
        # Get the Content-Type: for the message, then try to dispatch to
        # self._handle_<maintype>_<subtype>().  If there's no handler for the
        # full MIME type, then dispatch to self._handle_<maintype>().  If
        # that's missing too, then dispatch to self._writeBody().
        main = msg.get_content_maintype()
        sub = msg.get_content_subtype()
        specific = UNDERSCORE.join((main, sub)).replace('-', '_')
        meth = getattr(self, '_handle_' + specific, None)
        if meth is None:
            generic = main.replace('-', '_')
            meth = getattr(self, '_handle_' + generic, None)
            if meth is None:
                meth = self._writeBody
        meth(msg)

    #
    # Default handlers
    #

    def _write_headers(self, msg):
        # Emit each header, wrapping long values via the Header class
        # unless wrapping is disabled (maxheaderlen == 0).
        for h, v in msg.items():
            print >> self._fp, '%s:' % h,
            if self._maxheaderlen == 0:
                # Explicit no-wrapping
                print >> self._fp, v
            elif isinstance(v, Header):
                # Header instances know what to do
                print >> self._fp, v.encode()
            elif _is8bitstring(v):
                # If we have raw 8bit data in a byte string, we have no idea
                # what the encoding is.  There is no safe way to split this
                # string.  If it's ascii-subset, then we could do a normal
                # ascii split, but if it's multibyte then we could break the
                # string.  There's no way to know so the least harm seems to
                # be to not split the string and risk it being too long.
                print >> self._fp, v
            else:
                # Header's got lots of smarts, so use it.  Note that this is
                # fundamentally broken though because we lose idempotency when
                # the header string is continued with tabs.  It will now be
                # continued with spaces.  This was reversedly broken before we
                # fixed bug 1974.  Either way, we lose.
                print >> self._fp, Header(
                    v, maxlinelen=self._maxheaderlen, header_name=h).encode()
        # A blank line always separates headers from body
        print >> self._fp

    #
    # Handlers for writing types and subtypes
    #

    def _handle_text(self, msg):
        # Write a text payload, optionally mangling body "From " lines.
        payload = msg.get_payload()
        if payload is None:
            return
        if not isinstance(payload, basestring):
            raise TypeError('string payload expected: %s' % type(payload))
        if self._mangle_from_:
            payload = fcre.sub('>From ', payload)
        self._fp.write(payload)

    # Default body handler
    _writeBody = _handle_text

    def _handle_multipart(self, msg):
        # The trick here is to write out each part separately, merge them all
        # together, and then make sure that the boundary we've chosen isn't
        # present in the payload.
        msgtexts = []
        subparts = msg.get_payload()
        if subparts is None:
            subparts = []
        elif isinstance(subparts, basestring):
            # e.g. a non-strict parse of a message with no starting boundary.
            self._fp.write(subparts)
            return
        elif not isinstance(subparts, list):
            # Scalar payload
            subparts = [subparts]
        for part in subparts:
            s = StringIO()
            g = self.clone(s)
            g.flatten(part, unixfrom=False)
            msgtexts.append(s.getvalue())
        # BAW: What about boundaries that are wrapped in double-quotes?
        boundary = msg.get_boundary()
        if not boundary:
            # Create a boundary that doesn't appear in any of the
            # message texts.
            alltext = NL.join(msgtexts)
            boundary = _make_boundary(alltext)
            msg.set_boundary(boundary)
        # If there's a preamble, write it out, with a trailing CRLF
        if msg.preamble is not None:
            print >> self._fp, msg.preamble
        # dash-boundary transport-padding CRLF
        print >> self._fp, '--' + boundary
        # body-part
        if msgtexts:
            self._fp.write(msgtexts.pop(0))
        # *encapsulation
        # --> delimiter transport-padding
        # --> CRLF body-part
        for body_part in msgtexts:
            # delimiter transport-padding CRLF
            print >> self._fp, '\n--' + boundary
            # body-part
            self._fp.write(body_part)
        # close-delimiter transport-padding
        self._fp.write('\n--' + boundary + '--')
        if msg.epilogue is not None:
            print >> self._fp
            self._fp.write(msg.epilogue)

    def _handle_multipart_signed(self, msg):
        # The contents of signed parts has to stay unmodified in order to keep
        # the signature intact per RFC1847 2.1, so we disable header wrapping.
        # RDM: This isn't enough to completely preserve the part, but it helps.
        old_maxheaderlen = self._maxheaderlen
        try:
            self._maxheaderlen = 0
            self._handle_multipart(msg)
        finally:
            self._maxheaderlen = old_maxheaderlen

    def _handle_message_delivery_status(self, msg):
        # We can't just write the headers directly to self's file object
        # because this will leave an extra newline between the last header
        # block and the boundary.  Sigh.
        blocks = []
        for part in msg.get_payload():
            s = StringIO()
            g = self.clone(s)
            g.flatten(part, unixfrom=False)
            text = s.getvalue()
            lines = text.split('\n')
            # Strip off the unnecessary trailing empty line
            if lines and lines[-1] == '':
                blocks.append(NL.join(lines[:-1]))
            else:
                blocks.append(text)
        # Now join all the blocks with an empty line.  This has the lovely
        # effect of separating each block with an empty line, but not adding
        # an extra one after the last one.
        self._fp.write(NL.join(blocks))

    def _handle_message(self, msg):
        s = StringIO()
        g = self.clone(s)
        # The payload of a message/rfc822 part should be a multipart sequence
        # of length 1.  The zeroth element of the list should be the Message
        # object for the subpart.  Extract that object, stringify it, and
        # write it out.
        # Except, it turns out, when it's a string instead, which happens when
        # and only when HeaderParser is used on a message of mime type
        # message/rfc822.  Such messages are generated by, for example,
        # Groupwise when forwarding unadorned messages.  (Issue 7970.)  So
        # in that case we just emit the string body.
        payload = msg.get_payload()
        if isinstance(payload, list):
            g.flatten(msg.get_payload(0), unixfrom=False)
            payload = s.getvalue()
        self._fp.write(payload)
_FMT = '[Non-text (%(type)s) part of message omitted, filename %(filename)s]'
class DecodedGenerator(Generator):
    """Generates a text representation of a message.

    Like the Generator base class, except that non-text parts are substituted
    with a format string representing the part.
    """
    def __init__(self, outfp, mangle_from_=True, maxheaderlen=78, fmt=None):
        """Like Generator.__init__() except that an additional optional
        argument is allowed.

        Walks through all subparts of a message.  If the subpart is of main
        type `text', then it prints the decoded payload of the subpart.

        Otherwise, fmt is a format string that is used instead of the message
        payload.  fmt is expanded with the following keywords (in
        %(keyword)s format):

        type       : Full MIME type of the non-text part
        maintype   : Main MIME type of the non-text part
        subtype    : Sub-MIME type of the non-text part
        filename   : Filename of the non-text part
        description: Description associated with the non-text part
        encoding   : Content transfer encoding of the non-text part

        The default value for fmt is None, meaning

        [Non-text (%(type)s) part of message omitted, filename %(filename)s]
        """
        Generator.__init__(self, outfp, mangle_from_, maxheaderlen)
        if fmt is None:
            self._fmt = _FMT
        else:
            self._fmt = fmt

    def _dispatch(self, msg):
        # Walk every leaf part: print decoded text parts, skip multipart
        # containers, and substitute the format string for everything else.
        for part in msg.walk():
            maintype = part.get_content_maintype()
            if maintype == 'text':
                print >> self, part.get_payload(decode=True)
            elif maintype == 'multipart':
                # Just skip this
                pass
            else:
                print >> self, self._fmt % {
                    'type'       : part.get_content_type(),
                    'maintype'   : part.get_content_maintype(),
                    'subtype'    : part.get_content_subtype(),
                    'filename'   : part.get_filename('[no filename]'),
                    'description': part.get('Content-Description',
                                            '[no description]'),
                    'encoding'   : part.get('Content-Transfer-Encoding',
                                            '[no encoding]'),
                    }
# Helper
_width = len(repr(sys.maxint-1))
_fmt = '%%0%dd' % _width
def _make_boundary(text=None):
    """Craft a random MIME boundary.

    If `text` is given, append a numeric suffix until the boundary does
    not occur (as a '--boundary' delimiter line) anywhere in the text.
    """
    token = random.randrange(sys.maxint)
    base = ('=' * 15) + (_fmt % token) + '=='
    if text is None:
        return base
    candidate = base
    suffix = 0
    while re.search('^--' + re.escape(candidate) + '(--)?$', text,
                    re.MULTILINE):
        candidate = base + '.' + str(suffix)
        suffix += 1
    return candidate
| gpl-3.0 |
oceanobservatories/mi-dataset | mi/dataset/parser/test/test_parad_j_cspp.py | 3 | 8159 | """
@package mi.dataset.parser.test.test_parad_j_cspp
@file marine-integrations/mi/dataset/parser/test/test_parad_j_cspp.py
@author Joe Padula
@brief Test code for a parad_j_cspp data parser
"""
import os
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.dataset.test.test_parser import BASE_RESOURCE_PATH, ParserUnitTestCase
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.parser.cspp_base import \
METADATA_PARTICLE_CLASS_KEY, \
DATA_PARTICLE_CLASS_KEY
from mi.dataset.parser.parad_j_cspp import \
ParadJCsppParser, \
ParadJCsppInstrumentTelemeteredDataParticle, \
ParadJCsppMetadataTelemeteredDataParticle, \
ParadJCsppInstrumentRecoveredDataParticle, \
ParadJCsppMetadataRecoveredDataParticle
log = get_logger()
RESOURCE_PATH = os.path.join(BASE_RESOURCE_PATH,
'parad_j', 'cspp', 'resource')
O_MODE = 'rU' # Universal Open mode
@attr('UNIT', group='mi')
class ParadJCsppParserUnitTestCase(ParserUnitTestCase):
"""
parad_j_cspp Parser unit test suite
"""
def setUp(self):
ParserUnitTestCase.setUp(self)
self._telemetered_parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.parad_j_cspp',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
METADATA_PARTICLE_CLASS_KEY: ParadJCsppMetadataTelemeteredDataParticle,
DATA_PARTICLE_CLASS_KEY: ParadJCsppInstrumentTelemeteredDataParticle,
}
}
self._recovered_parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.parad_j_cspp',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
METADATA_PARTICLE_CLASS_KEY: ParadJCsppMetadataRecoveredDataParticle,
DATA_PARTICLE_CLASS_KEY: ParadJCsppInstrumentRecoveredDataParticle,
}
}
# Define test data particles and their associated timestamps which will be
# compared with returned results
self.file_ingested_value = None
def particle_to_yml(self, particles, filename, mode='w'):
"""
This is added as a testing helper, not actually as part of the parser tests. Since the same particles
will be used for the driver test it is helpful to write them to .yml in the same form they need in the
results.yml fids here.
"""
# open write append, if you want to start from scratch manually delete this fid
fid = open(os.path.join(RESOURCE_PATH, filename), mode)
fid.write('header:\n')
fid.write(" particle_object: 'MULTIPLE'\n")
fid.write(" particle_type: 'MULTIPLE'\n")
fid.write('data:\n')
for i in range(0, len(particles)):
particle_dict = particles[i].generate_dict()
fid.write(' - _index: %d\n' % (i+1))
fid.write(' particle_object: %s\n' % particles[i].__class__.__name__)
fid.write(' particle_type: %s\n' % particle_dict.get('stream_name'))
fid.write(' internal_timestamp: %f\n' % particle_dict.get('internal_timestamp'))
for val in particle_dict.get('values'):
if isinstance(val.get('value'), float):
fid.write(' %s: %16.16f\n' % (val.get('value_id'), val.get('value')))
elif isinstance(val.get('value'), str):
fid.write(' %s: \'%s\'\n' % (val.get('value_id'), val.get('value')))
else:
fid.write(' %s: %s\n' % (val.get('value_id'), val.get('value')))
fid.close()
def create_yml(self):
"""
This utility creates a yml file
"""
fid = open(os.path.join(RESOURCE_PATH, '11079364_PPD_PARS.txt'), O_MODE)
stream_handle = fid
parser = ParadJCsppParser(self._telemetered_parser_config,
stream_handle,
self.exception_callback)
particles = parser.get_records(20)
self.particle_to_yml(particles, '11079364_PPD_PARS_telem.yml')
fid.close()
def test_simple(self):
"""
Read test data and pull out data particles
Assert that the results are those we expected.
"""
# Recovered
file_path = os.path.join(RESOURCE_PATH, '11079364_PPB_PARS.txt')
stream_handle = open(file_path, O_MODE)
parser = ParadJCsppParser(self._recovered_parser_config,
stream_handle,
self.exception_callback)
particles = parser.get_records(20)
log.debug("*** test_simple Num particles %s", len(particles))
# check all the values against expected results.
self.assert_particles(particles, "11079364_PPB_PARS_recov.yml", RESOURCE_PATH)
stream_handle.close()
# Telemetered
file_path = os.path.join(RESOURCE_PATH, '11079364_PPD_PARS.txt')
stream_handle = open(file_path, O_MODE)
parser = ParadJCsppParser(self._telemetered_parser_config,
stream_handle,
self.exception_callback)
particles = parser.get_records(20)
log.debug("*** test_simple Num particles %s", len(particles))
# check all the values against expected results.
self.assert_particles(particles, "11079364_PPD_PARS_telem.yml", RESOURCE_PATH)
stream_handle.close()
def test_get_many(self):
"""
Read test data and pull out multiple data particles at one time.
Assert that the results are those we expected.
"""
file_path = os.path.join(RESOURCE_PATH, '11079364_PPB_PARS.txt')
stream_handle = open(file_path, O_MODE)
# Note: since the recovered and telemetered parser and particles are common
# to each other, testing one is sufficient, will be completely tested
# in driver tests
parser = ParadJCsppParser(self._recovered_parser_config,
stream_handle,
self.exception_callback)
# try to get 2000 particles, there are only 194 data records
# so should get 195 including the meta data
particles = parser.get_records(2000)
log.debug("*** test_get_many Num particles %s", len(particles))
self.assertEqual(len(particles), 195)
stream_handle.close()
def test_bad_data(self):
"""
Ensure that bad data is skipped when it exists and a RecoverableSampleException is thrown.
"""
file_path = os.path.join(RESOURCE_PATH, '11079364_BAD_PPB_PARS.txt')
stream_handle = open(file_path, O_MODE)
log.debug(self.exception_callback_value)
parser = ParadJCsppParser(self._recovered_parser_config,
stream_handle,
self.exception_callback)
parser.get_records(1)
log.debug("Exception callback value: %s", self.exception_callback_value)
self.assertTrue(self.exception_callback_value is not None)
# 14 bad records
self.assertEqual(len(self.exception_callback_value), 14)
stream_handle.close()
    def test_additional_column(self):
        """
        Ensure that additional column of data will cause an exception.
        """
        # Input file is the good file with an unexpected extra column in each
        # data record, which should fail to parse as a valid sample.
        file_path = os.path.join(RESOURCE_PATH, '11079364_PPB_PARS_ADDED_COLUMN.txt')
        stream_handle = open(file_path, O_MODE)
        log.debug(self.exception_callback_value)
        parser = ParadJCsppParser(self._recovered_parser_config,
                                  stream_handle,
                                  self.exception_callback)
        parser.get_records(1)
        log.debug("Exception callback value: %s", self.exception_callback_value)
        # At least one recoverable-sample exception must have been reported.
        self.assertTrue(self.exception_callback_value is not None)
stream_handle.close() | bsd-2-clause |
othercriteria/StochasticBlockmodel | old/rasch_normal_bayes.py | 1 | 7147 | #!/usr/bin/env python
import numpy as np
import numexpr as ne
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from sklearn.linear_model import LogisticRegression
# Parameters
# Simulation/inference settings: network size, edge-noise and prior
# precisions, true effect sizes, node-subset sizes to fit on, and switches
# for the fits and plots below.
params = { 'N': 200,
           'edge_precision': 1.0,
           'prior_precision': 0.01,
           'alpha_sd': 2.0,
           'beta_shank': 3.0,
           'num_shank': 8,
           'beta_self': 4.0,
           'kappa': -0.5,
           'N_subs': [10, 25, 40, 55, 70],
           'num_fits': 10,
           'logistic_fit_alpha': True,
           'plot_heatmap': False }
# Set random seed for reproducible output
np.random.seed(137)
# Calculate edge means from parameters and covariates
def edge_means(alpha, beta, kappa, x):
    """Return the N x N matrix of edge means.

    mu[i,j] = alpha[0,i] + alpha[1,j] + dot(x[i,j,:], beta) + kappa

    Parameters:
      alpha: (2,N) array; row 0 = sender (out) effects, row 1 = receiver (in)
      beta:  (B,) covariate coefficients
      kappa: scalar baseline
      x:     (N,N,B) edge covariates
    """
    # Broadcast the sender effect down rows and the receiver effect across
    # columns. BUG FIX: the original added alpha[0,j] to columns, so the
    # receiver effects alpha[1] (generated, centered, plotted, and fitted
    # elsewhere in this script) never influenced the simulated network.
    mu = alpha[0][:, np.newaxis] + alpha[1][np.newaxis, :]
    mu += np.dot(x, beta)
    mu += kappa
    return mu
# Inverse-logit
def sigma(x):
    """Inverse-logit (logistic) function, applied elementwise."""
    denom = 1.0 + np.exp(-x)
    return 1.0 / denom
# Procedure to find posterior mean and covariance via Bayesian inference
def infer_normal(A, x):
    """Bayesian linear regression for the Gaussian-edge model.

    Builds a design matrix over all N*N directed dyads with one column per
    edge covariate, an intercept, and one-hot sender/receiver effect columns,
    then returns the Gaussian posterior (m_N, S_N) over the coefficient
    vector [beta (B), kappa (1), alpha_out (N), alpha_in (N)].

    A: (N,N) real-valued adjacency matrix; x: (N,N,B) edge covariates.
    Prior and edge-noise precisions are read from the module-level `params`.
    """
    N = A.shape[0]
    B = x.shape[2]
    t = A.reshape((N*N,))
    # BUG FIX: the original referenced the globals N_sub/x_sub instead of the
    # local N/x (and sized Phi with 2*N_sub but the prior with 2*N), which
    # only worked because callers happened to pass those same globals in.
    Phi = np.zeros((N*N, (B + 1 + 2 * N)))
    for b in range(B):
        Phi[:, b] = x[:, :, b].reshape((N*N,))
    Phi[:, B] = 1.0
    for i in range(N):
        phi_row = np.zeros((N, N))
        phi_row[i, :] = 1.0
        Phi[:, B + 1 + i] = phi_row.reshape((N*N,))
    for j in range(N):
        phi_col = np.zeros((N, N))
        phi_col[:, j] = 1.0
        Phi[:, B + 1 + N + j] = phi_col.reshape((N*N,))
    Phi_trans = np.transpose(Phi)
    # Standard Gaussian posterior: S_N^-1 = prior + noise * Phi^T Phi,
    # m_N = noise * S_N Phi^T t.
    S_N_inv = (params['prior_precision'] * np.eye(B + 1 + 2 * N) +
               params['edge_precision'] * np.dot(Phi_trans, Phi))
    S_N = np.linalg.inv(S_N_inv)
    m_N = params['edge_precision'] * np.dot(S_N, np.dot(Phi_trans, t))
    return m_N, S_N
# Procedure to find MLE via logistic regression
def infer_logistic(A, x, fit_alpha = False):
    """Fit the logistic (binary-edge) model by penalized MLE.

    A: (N,N) binary adjacency matrix; x: (N,N,B) edge covariates.
    When fit_alpha is True, one-hot sender/receiver columns are added to the
    design. Returns a dict with 'alpha' (2,N), 'beta' (B,), 'kappa' (scalar
    intercept), and 'S_N', a Laplace-approximation posterior covariance.
    """
    N = A.shape[0]
    B = x.shape[2]
    # L2 penalty with C = 1/prior_precision mirrors the Gaussian prior used
    # by the normal model; the intercept plays the role of kappa.
    lr = LogisticRegression(fit_intercept = True,
             C = 1.0 / params['prior_precision'], penalty = 'l2')
    y = A.reshape((N*N,))
    if fit_alpha:
        Phi = np.zeros((N*N,(B + 2*N)))
    else:
        Phi = np.zeros((N*N,B))
    # NOTE(review): this assignment appears dead -- column 0 is immediately
    # overwritten by the covariate loop below, and the intercept is fit
    # separately (fit_intercept=True) anyway.
    Phi[:,0] = 1.0
    for b in range(B):
        Phi[:,b] = x[:,:,b].reshape((N*N,))
    if fit_alpha:
        # One-hot indicator columns for each sender (row) and receiver (col).
        for i in range(N):
            phi_row = np.zeros((N,N))
            phi_row[i,:] = 1.0
            Phi[:,B + i] = phi_row.reshape((N*N,))
        for j in range(N):
            phi_col = np.zeros((N,N))
            phi_col[:,j] = 1.0
            Phi[:,B + N + j] = phi_col.reshape((N*N,))
    lr.fit(Phi, y)
    coefs = lr.coef_[0]
    intercept = lr.intercept_[0]
    alpha = np.zeros((2,N))
    out = {'alpha': alpha, 'beta': coefs[0:B], 'kappa': intercept}
    if fit_alpha:
        out['alpha'][0] = coefs[B:(B + N)]
        out['alpha'][1] = coefs[(B + N):(B + 2*N)]
    # Compute posterior covariance via Laplace approximation
    # Rebuild the design with an explicit intercept column (so kappa appears
    # in the Hessian), then invert prior precision + observed information.
    if fit_alpha:
        S_0_inv = params['prior_precision'] * np.eye(B + 1 + 2*N)
        Phi_kappa = np.empty((N*N,(B + 1 + 2*N)))
        Phi_kappa[:,(B + 1):(B + 1 + 2*N)] = Phi[:,B:(B + 2*N)]
        w = np.empty(B + 1 + 2*N)
        w[(B + 1):(B + 1 + 2*N)] = coefs[B:(B + 2*N)]
    else:
        S_0_inv = params['prior_precision'] * np.eye(B + 1)
        Phi_kappa = np.empty((N*N,(B + 1)))
        w = np.empty(B + 1)
    Phi_kappa[:,0:B] = Phi[:,0:B]
    Phi_kappa[:,B] = 1.0
    w[0:B] = coefs[0:B]
    w[B] = intercept
    C = 0.0
    # Observed information: sum over dyads of y(1-y) * phi phi^T at the MLE.
    for i in range(N*N):
        y = sigma(np.dot(w, Phi_kappa[i,:]))
        C += y * (1.0 - y) * (np.outer(Phi_kappa[i,:], Phi_kappa[i,:]))
    S_N = np.linalg.inv(S_0_inv + C)
    out['S_N'] = S_N
    return out
# Generate random network, using randomly generated latent parameters
if params['alpha_sd'] > 0.0:
    # Sender (row 0) and receiver (row 1) effects, centered to zero mean so
    # they are identifiable against the intercept kappa.
    alpha = np.random.normal(0, params['alpha_sd'], (2,params['N']))
    alpha[0] -= np.mean(alpha[0])
    alpha[1] -= np.mean(alpha[1])
else:
    alpha = np.zeros((2,params['N']))
beta = np.array([params['beta_shank'], params['beta_self']])
# Each node gets a random shank; edge covariates are "same shank" and
# "self edge" indicators.
shank = np.random.randint(0, params['num_shank'], params['N'])
x = np.empty((params['N'],params['N'],2))
for i in range(params['N']):
    for j in range(params['N']):
        x[i,j,0] = (shank[i] == shank[j])
        x[i,j,1] = (i == j)
kappa = params['kappa']
mu = edge_means(alpha, beta, kappa, x)
# Two observed networks from the same means: Gaussian-valued (A_n) and
# binary with logistic link (A_l).
A_n = np.random.normal(mu, np.sqrt(1.0 / params['edge_precision']))
A_l = np.random.random((params['N'],params['N'])) < sigma(mu)
# Show heatmap of the underlying network
if params['plot_heatmap']:
    # Four figures showing the same pair of matrices (Gaussian A_n on the
    # left, binary A_l on the right) under different node orderings.
    plt.figure()
    plt.subplot(1,2,1)
    plt.imshow(A_n)
    plt.subplot(1,2,2)
    plt.imshow(A_l)
    plt.title('Unordered')
    plt.figure()
    # Sorting nodes by shank makes the same-shank block structure visible.
    o = np.argsort(shank)
    plt.subplot(1,2,1)
    plt.imshow(A_n[o][:,o])
    plt.subplot(1,2,2)
    plt.imshow(A_l[o][:,o])
    plt.title('Grouped by shank')
    plt.figure()
    o = np.argsort(alpha[0])
    plt.subplot(1,2,1)
    plt.imshow(A_n[o][:,o])
    plt.subplot(1,2,2)
    plt.imshow(A_l[o][:,o])
    plt.title('Ordered by alpha_out')
    plt.figure()
    o = np.argsort(alpha[1])
    plt.subplot(1,2,1)
    plt.imshow(A_n[o][:,o])
    plt.subplot(1,2,2)
    plt.imshow(A_l[o][:,o])
    plt.title('Ordered by alpha_in')
    plt.figure()
    o = np.argsort(np.sum(alpha, axis = 0))
    plt.subplot(1,2,1)
    plt.imshow(A_n[o][:,o])
    plt.subplot(1,2,2)
    plt.imshow(A_l[o][:,o])
    plt.title('Ordered by alpha_total')
# Convenience functions for plotting
#
# Finding the right settings for Ellipse is surprisingly tricky so I follow:
# http://scikit-learn.org/stable/auto_examples/plot_lda_qda.html
def make_axis(f, n, title):
    """Add subplot n (0-based) to figure f, centered on the true beta.

    The grid is 2 rows (normal model on top, logistic below) by one column
    per subset size in params['N_subs'].
    """
    ax = f.add_subplot(2, len(params['N_subs']), (n+1), aspect = 'equal')
    ax.set_xlim(beta[0] - 2.0, beta[0] + 2.0)
    ax.set_ylim(beta[1] - 2.0, beta[1] + 2.0)
    ax.set_xlabel('beta_shank')
    ax.set_ylabel('beta_self')
    ax.set_title(title)
    return ax
def draw_ellipse(a, m, S):
    """Draw a one-standard-deviation covariance ellipse on axis `a`.

    m is the 2-vector mean (ellipse center) and S the 2x2 covariance; the
    axes/orientation come from the eigendecomposition of S (see the
    scikit-learn LDA/QDA plotting example referenced above).
    """
    v, w = np.linalg.eigh(S)
    u = w[0] / np.linalg.norm(w[0])
    angle = (180.0 / np.pi) * np.arctan(u[1] / u[0])
    e = Ellipse(m, 2.0 * np.sqrt(v[0]), 2.0 * np.sqrt(v[1]),
                180.0 + angle, color = 'k')
    a.add_artist(e)
    # BUG FIX: clip to the axis passed in (`a`), not the global loop
    # variable `ax`, which only happened to alias the right axis at the
    # existing call sites.
    e.set_clip_box(a.bbox)
    e.set_alpha(0.5)
# Fit model to subset of data, displaying beta posterior
fig = plt.figure()
inds = np.arange(params['N'])
for n, N_sub in enumerate(params['N_subs']):
    for num_fit in range(params['num_fits']):
        # Draw a fresh random subset of N_sub nodes for each fit.
        np.random.shuffle(inds)
        sub = inds[0:N_sub]
        # Sample subnetwork
        A_n_sub = A_n[sub][:,sub]
        A_l_sub = A_l[sub][:,sub]
        x_sub = x[sub][:,sub]
        # Fit normal model
        # The first two posterior coordinates are (beta_shank, beta_self).
        m_N, S_N = infer_normal(A_n_sub, x_sub)
        ax = make_axis(fig, n, 'Normal (N_sub = %d)' % N_sub)
        draw_ellipse(ax, m_N[0:2], S_N[0:2,0:2])
        # Fit logistic model
        fit = infer_logistic(A_l_sub, x_sub, params['logistic_fit_alpha'])
        ax = make_axis(fig, len(params['N_subs']) + n, 'Logistic')
        draw_ellipse(ax, fit['beta'], fit['S_N'][0:2,0:2])
# Display all pending graphs
plt.show()
| mit |
jsmartin/shade | shade/tests/unit/test_domain_params.py | 1 | 2899 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os_client_config as occ
import bunch
import shade
from shade import exc
from shade.tests.unit import base
class TestDomainParams(base.TestCase):
    """Tests for OpenStackCloud._get_identity_params keystone v2/v3 handling."""
    def setUp(self):
        super(TestDomainParams, self).setUp()
        self.cloud = shade.openstack_cloud()
    @mock.patch.object(occ.cloud_config.CloudConfig, 'get_api_version')
    @mock.patch.object(shade.OpenStackCloud, '_get_project')
    def test_identity_params_v3(self, mock_get_project, mock_api_version):
        # Identity v3: the project id is returned as 'default_project' and
        # the domain id is passed through under 'domain'.
        mock_get_project.return_value = bunch.Bunch(id=1234)
        mock_api_version.return_value = '3'
        ret = self.cloud._get_identity_params(domain_id='5678', project='bar')
        self.assertIn('default_project', ret)
        self.assertEqual(ret['default_project'], 1234)
        self.assertIn('domain', ret)
        self.assertEqual(ret['domain'], '5678')
    @mock.patch.object(occ.cloud_config.CloudConfig, 'get_api_version')
    @mock.patch.object(shade.OpenStackCloud, '_get_project')
    def test_identity_params_v3_no_domain(
            self, mock_get_project, mock_api_version):
        # Identity v3 without a domain id must raise: the domain is
        # mandatory for v3 identity operations.
        mock_get_project.return_value = bunch.Bunch(id=1234)
        mock_api_version.return_value = '3'
        self.assertRaises(
            exc.OpenStackCloudException,
            self.cloud._get_identity_params,
            domain_id=None, project='bar')
    @mock.patch.object(occ.cloud_config.CloudConfig, 'get_api_version')
    @mock.patch.object(shade.OpenStackCloud, '_get_project')
    def test_identity_params_v2(self, mock_get_project, mock_api_version):
        # Identity v2: the project maps to 'tenant_id' and any domain id
        # is ignored (v2 has no domains).
        mock_get_project.return_value = bunch.Bunch(id=1234)
        mock_api_version.return_value = '2'
        ret = self.cloud._get_identity_params(domain_id='foo', project='bar')
        self.assertIn('tenant_id', ret)
        self.assertEqual(ret['tenant_id'], 1234)
        self.assertNotIn('domain', ret)
    @mock.patch.object(shade.OpenStackCloud, '_get_project')
    def test_identity_params_v2_no_domain(self, mock_get_project):
        # Identity v2 without a domain id behaves the same as with one.
        mock_get_project.return_value = bunch.Bunch(id=1234)
        self.cloud.api_versions = dict(identity='2')
        ret = self.cloud._get_identity_params(domain_id=None, project='bar')
        self.assertIn('tenant_id', ret)
        self.assertEqual(ret['tenant_id'], 1234)
        self.assertNotIn('domain', ret)
| apache-2.0 |
scottdangelo/RemoveVolumeMangerLocks | cinder/tests/unit/test_backup_tsm.py | 21 | 12778 | # Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Tests for volume backup to IBM Tivoli Storage Manager (TSM).
"""
import json
import os
import posix
from oslo_concurrency import processutils as putils
from oslo_utils import timeutils
from cinder.backup.drivers import tsm
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder import test
from cinder import utils
SIM = None
VOLUME_PATH = '/dev/null'
class TSMBackupSimulator(object):
    """Simulates TSM dsmc command.
    The simulator simulates the execution of the 'dsmc' command.
    This allows the TSM backup test to succeed even if TSM is not installed.
    """
    def __init__(self):
        # Maps backed-up path -> list of {'date', 'active'} entries,
        # newest last; only the newest entry is 'active'.
        self._backup_list = {}
        # Paths that currently have a simulated hard link ('ln'/'rm').
        self._hardlinks = []
        # error_injection() sets these to force the next command to fail.
        self._next_cmd_error = {
            'backup': '',
        }
        self._intro_msg = ('IBM Tivoli Storage Manager\n'
                           'Command Line Backup-Archive Client Interface\n'
                           '...\n\n')
    def _cmd_backup(self, **kwargs):
        # simulates the execution of the dsmc backup command
        ret_msg = self._intro_msg
        path = kwargs['path']
        ret_msg += ('Image backup of volume \'%s\'\n\n'
                    'Total number of objects inspected: 1\n'
                    % path)
        if self._next_cmd_error['backup'] == 'fail':
            # Injected failure: emit dsmc-style error text and exit code 12.
            ret_msg += ('ANS1228E Sending of object \'%s\' '
                        'failed\n' % path)
            ret_msg += ('ANS1063E The specified path is not a valid file '
                        'system or logical volume name.')
            self._next_cmd_error['backup'] = ''
            retcode = 12
        else:
            ret_msg += 'Total number of objects backed up: 1'
            if path not in self._backup_list:
                self._backup_list[path] = []
            else:
                # A new backup supersedes the previously active one.
                self._backup_list[path][-1]['active'] = False
            date = timeutils.utcnow()
            datestr = date.strftime("%m/%d/%Y %H:%M:%S")
            self._backup_list[path].append({'date': datestr, 'active': True})
            retcode = 0
        return (ret_msg, '', retcode)
    def _backup_exists(self, path):
        # Return 'OK', or a dsmc-style error if path was never backed up.
        if path not in self._backup_list:
            return ('ANS4000E Error processing \'%s\': file space does '
                    'not exist.' % path)
        return 'OK'
    def _cmd_restore(self, **kwargs):
        # simulates the execution of the dsmc restore command
        ret_msg = self._intro_msg
        path = kwargs['path']
        exists = self._backup_exists(path)
        if exists == 'OK':
            ret_msg += ('Total number of objects restored: 1\n'
                        'Total number of objects failed: 0')
            retcode = 0
        else:
            ret_msg += exists
            retcode = 12
        return (ret_msg, '', retcode)
    def _cmd_delete(self, **kwargs):
        # simulates the execution of the dsmc delete command
        ret_msg = self._intro_msg
        path = kwargs['path']
        exists = self._backup_exists(path)
        if exists == 'OK':
            ret_msg += ('Total number of objects deleted: 1\n'
                        'Total number of objects failed: 0')
            retcode = 0
            # Remove the newest backup; drop the path entirely once empty.
            index = len(self._backup_list[path]) - 1
            del self._backup_list[path][index]
            if not len(self._backup_list[path]):
                del self._backup_list[path]
        else:
            ret_msg += exists
            retcode = 12
        return (ret_msg, '', retcode)
    def _cmd_to_dict(self, arg_list):
        """Convert command for kwargs (assumes a properly formed command)."""
        ret = {'cmd': arg_list[0],
               'type': arg_list[1],
               'path': arg_list[-1]}
        # The remaining arguments are bare flags or 'key=value' tokens.
        for i in range(2, len(arg_list) - 1):
            arg = arg_list[i].split('=')
            if len(arg) == 1:
                ret[arg[0]] = True
            else:
                ret[arg[0]] = arg[1]
        return ret
    def _exec_dsmc_cmd(self, cmd):
        """Simulates the execution of the dsmc command."""
        cmd_switch = {'backup': self._cmd_backup,
                      'restore': self._cmd_restore,
                      'delete': self._cmd_delete}
        kwargs = self._cmd_to_dict(cmd)
        if kwargs['cmd'] != 'dsmc' or kwargs['type'] not in cmd_switch:
            raise putils.ProcessExecutionError(exit_code=1,
                                               stdout='',
                                               stderr='Not dsmc command',
                                               cmd=' '.join(cmd))
        out, err, ret = cmd_switch[kwargs['type']](**kwargs)
        return (out, err, ret)
    def exec_cmd(self, cmd):
        """Simulates the execution of dsmc, rm, and ln commands."""
        if cmd[0] == 'dsmc':
            out, err, ret = self._exec_dsmc_cmd(cmd)
        elif cmd[0] == 'ln':
            # 'ln <src> <dest>': fail if the link already exists.
            dest = cmd[2]
            out = ''
            if dest in self._hardlinks:
                err = ('ln: failed to create hard link `%s\': '
                       'File exists' % dest)
                ret = 1
            else:
                self._hardlinks.append(dest)
                err = ''
                ret = 0
        elif cmd[0] == 'rm':
            # 'rm -f <dest>': fail if no such simulated link exists.
            dest = cmd[2]
            out = ''
            if dest not in self._hardlinks:
                err = ('rm: cannot remove `%s\': No such file or '
                       'directory' % dest)
                ret = 1
            else:
                index = self._hardlinks.index(dest)
                del self._hardlinks[index]
                err = ''
                ret = 0
        else:
            raise putils.ProcessExecutionError(exit_code=1,
                                               stdout='',
                                               stderr='Unsupported command',
                                               cmd=' '.join(cmd))
        return (out, err, ret)
    def error_injection(self, cmd, error):
        # Arrange for the next invocation of `cmd` to fail with `error`.
        self._next_cmd_error[cmd] = error
def fake_exec(*cmd, **kwargs):
    """Stand-in for utils.execute that routes commands to the simulator.

    Mirrors processutils semantics: raises ProcessExecutionError on a
    non-zero return code unless check_exit_code is falsy.
    """
    # Support only bool
    check_exit_code = kwargs.pop('check_exit_code', True)
    global SIM
    out, err, ret = SIM.exec_cmd(cmd)
    if ret and check_exit_code:
        raise putils.ProcessExecutionError(
            exit_code=-1,
            stdout=out,
            stderr=err,
            cmd=' '.join(cmd))
    return (out, err)
def fake_stat_image(path):
    """Fake os.stat that always reports a block-device st_mode (0o60660)."""
    # Fields: st_mode, st_ino, st_dev, st_nlink, st_uid, st_gid, st_size,
    # st_atime, st_mtime, st_ctime.
    stat_fields = (25008, 5753, 5, 1, 0, 6, 0,
                   1375881199, 1375881197, 1375881197)
    return posix.stat_result(stat_fields)
def fake_stat_file(path):
    # Simulate stat so that st_mode (the first value in the sequence)
    # reports a regular file (33188 == 0o100644). The original comment
    # claiming "block device" was copy-pasted from fake_stat_image and
    # wrong; this fake backs the 'file' backup-mode tests.
    return posix.stat_result((33188, 5753, 5, 1, 0, 6, 0,
                              1375881199, 1375881197, 1375881197))
def fake_stat_illegal(path):
    # Simulate stat so that st_mode (the first value in the sequence) is
    # neither a regular file nor a block device (17407 == 0o41777, a
    # directory-type mode), which the backup driver must reject as an
    # invalid backup mode. The original "block device" comment was
    # copy-pasted and wrong.
    return posix.stat_result((17407, 5753, 5, 1, 0, 6, 0,
                              1375881199, 1375881197, 1375881197))
class BackupTSMTestCase(test.TestCase):
    """Exercises the TSM backup driver against the dsmc simulator."""
    def setUp(self):
        super(BackupTSMTestCase, self).setUp()
        global SIM
        # Fresh simulator per test; fake_exec routes the driver's shell
        # commands into it, and os.stat defaults to the block-device fake.
        SIM = TSMBackupSimulator()
        self.sim = SIM
        self.ctxt = context.get_admin_context()
        self.driver = tsm.TSMBackupDriver(self.ctxt)
        self.stubs.Set(utils, 'execute', fake_exec)
        self.stubs.Set(os, 'stat', fake_stat_image)
    def _create_volume_db_entry(self, volume_id):
        # Minimal volume row the driver needs to resolve the volume.
        vol = {'id': volume_id,
               'size': 1,
               'status': 'available'}
        return db.volume_create(self.ctxt, vol)['id']
    def _create_backup_db_entry(self, backup_id, mode):
        # 'file' mode backs up VOLUME_PATH; any other mode is given a
        # per-backup fake device path.
        if mode == 'file':
            backup_path = VOLUME_PATH
        else:
            backup_path = '/dev/backup-%s' % backup_id
        service_metadata = json.dumps({'backup_mode': mode,
                                       'backup_path': backup_path})
        backup = {'id': backup_id,
                  'size': 1,
                  'container': 'test-container',
                  'volume_id': '1234-5678-1234-8888',
                  'service_metadata': service_metadata,
                  'user_id': 'user-id',
                  'project_id': 'project-id',
                  }
        return db.backup_create(self.ctxt, backup)['id']
    def test_backup_image(self):
        # Full round trip in 'image' (block device) mode: two good backups,
        # one injected failure, then restore and delete both.
        volume_id = '1234-5678-1234-7777'
        mode = 'image'
        self._create_volume_db_entry(volume_id)
        backup_id1 = 123
        backup_id2 = 456
        backup_id3 = 666
        self._create_backup_db_entry(backup_id1, mode)
        self._create_backup_db_entry(backup_id2, mode)
        self._create_backup_db_entry(backup_id3, mode)
        with open(VOLUME_PATH, 'w+') as volume_file:
            # Create two backups of the volume
            backup1 = objects.Backup.get_by_id(self.ctxt, backup_id1)
            self.driver.backup(backup1, volume_file)
            backup2 = objects.Backup.get_by_id(self.ctxt, backup_id2)
            self.driver.backup(backup2, volume_file)
            # Create a backup that fails
            fail_back = objects.Backup.get_by_id(self.ctxt, backup_id3)
            self.sim.error_injection('backup', 'fail')
            self.assertRaises(exception.InvalidBackup,
                              self.driver.backup, fail_back, volume_file)
            # Try to restore one, then the other
            self.driver.restore(backup1, volume_id, volume_file)
            self.driver.restore(backup2, volume_id, volume_file)
            # Delete both backups
            self.driver.delete(backup2)
            self.driver.delete(backup1)
    def test_backup_file(self):
        # Same round trip in 'file' mode, with os.stat faked to report a
        # regular file instead of a block device.
        volume_id = '1234-5678-1234-8888'
        mode = 'file'
        self.stubs.Set(os, 'stat', fake_stat_file)
        self._create_volume_db_entry(volume_id)
        backup_id1 = 123
        backup_id2 = 456
        self._create_backup_db_entry(backup_id1, mode)
        self._create_backup_db_entry(backup_id2, mode)
        with open(VOLUME_PATH, 'w+') as volume_file:
            # Create two backups of the volume
            backup1 = objects.Backup.get_by_id(self.ctxt, 123)
            self.driver.backup(backup1, volume_file)
            backup2 = objects.Backup.get_by_id(self.ctxt, 456)
            self.driver.backup(backup2, volume_file)
            # Create a backup that fails
            self._create_backup_db_entry(666, mode)
            fail_back = objects.Backup.get_by_id(self.ctxt, 666)
            self.sim.error_injection('backup', 'fail')
            self.assertRaises(exception.InvalidBackup,
                              self.driver.backup, fail_back, volume_file)
            # Try to restore one, then the other
            self.driver.restore(backup1, volume_id, volume_file)
            self.driver.restore(backup2, volume_id, volume_file)
            # Delete both backups
            self.driver.delete(backup1)
            self.driver.delete(backup2)
    def test_backup_invalid_mode(self):
        # A mode that is neither 'file' nor 'image' must be rejected by
        # backup, restore, and delete alike.
        volume_id = '1234-5678-1234-9999'
        mode = 'illegal'
        self.stubs.Set(os, 'stat', fake_stat_illegal)
        self._create_volume_db_entry(volume_id)
        backup_id1 = 123
        self._create_backup_db_entry(backup_id1, mode)
        with open(VOLUME_PATH, 'w+') as volume_file:
            # Create two backups of the volume
            backup1 = objects.Backup.get_by_id(self.ctxt, 123)
            self.assertRaises(exception.InvalidBackup,
                              self.driver.backup, backup1, volume_file)
            self.assertRaises(exception.InvalidBackup,
                              self.driver.restore,
                              backup1,
                              volume_id,
                              volume_file)
            self.assertRaises(exception.InvalidBackup,
                              self.driver.delete, backup1)
| apache-2.0 |
joshuajan/odoo | addons/portal_project_issue/__openerp__.py | 375 | 1713 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Portal Issue',
    'version': '0.1',
    'complexity': 'easy',
    'description': """
This module adds issue menu and features to your portal if project_issue and portal are installed.
==================================================================================================
    """,
    'author': 'OpenERP SA',
    'depends': ['project_issue','portal'],
    'data': [
        'security/portal_security.xml',
        'security/ir.model.access.csv',
        'portal_project_issue_view.xml',
        'views/portal_project_issue.xml',
    ],
    'installable': True,
    'auto_install': True,
    # BUG FIX: the original listed 'category' twice ('Tools' near the top
    # and 'Hidden' here); the later entry wins in a dict literal, so
    # 'Hidden' was the effective value and the duplicate has been removed.
    'category': 'Hidden',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dkagedal/stgit | stgit/commands/repair.py | 3 | 8365 | # -*- coding: utf-8 -*-
__copyright__ = """
Copyright (C) 2006, Karl Hasselström <kha@treskal.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see http://www.gnu.org/licenses/.
"""
import sys, os
from stgit.argparse import opt
from stgit.commands.common import *
from stgit.utils import *
from stgit.out import *
from stgit.run import *
from stgit import stack, git
help = 'Fix StGit metadata if branch was modified with git commands'
kind = 'stack'
usage = ['']
description = """
If you modify an StGit stack (branch) with some git commands -- such
as commit, pull, merge, and rebase -- you will leave the StGit
metadata in an inconsistent state. In that situation, you have two
options:
1. Use "stg undo" to undo the effect of the git commands. (If you
know what you are doing and want more control, "git reset" or
similar will work too.)
2. Use "stg repair". This will fix up the StGit metadata to
accommodate the modifications to the branch. Specifically, it will
do the following:
* If you have made regular git commits on top of your stack of
StGit patches, "stg repair" makes new StGit patches out of
them, preserving their contents.
* However, merge commits cannot become patches; if you have
committed a merge on top of your stack, "repair" will simply
mark all patches below the merge unapplied, since they are no
longer reachable. If this is not what you want, use "stg
undo" to get rid of the merge and run "stg repair" again.
* The applied patches are supposed to be precisely those that
are reachable from the branch head. If you have used e.g.
"git reset" to move the head, some applied patches may no
longer be reachable, and some unapplied patches may have
become reachable. "stg repair" will correct the appliedness
of such patches.
"stg repair" will fix these inconsistencies reliably, so as long
as you like what it does, you have no reason to avoid causing
them in the first place. For example, you might find it
convenient to make commits with a graphical tool and then have
"stg repair" make proper patches of the commits.
NOTE: If using git commands on the stack was a mistake, running "stg
repair" is _not_ what you want. In that case, what you want is option
(1) above."""
args = []
options = []
directory = DirectoryGotoToplevel(log = True)
class Commit(object):
    """A node in the commit DAG, with lazily loaded commit data."""
    def __init__(self, id):
        self.id = id
        self.parents = set()
        self.children = set()
        self.patch = None
        self.__commit = None
    @property
    def commit(self):
        """The git commit object for this id, fetched on first access."""
        if not self.__commit:
            self.__commit = git.get_commit(self.id)
        return self.__commit
    def __str__(self):
        text = self.id
        if self.patch:
            text = '%s (%s)' % (self.id, self.patch)
        return text
    def __repr__(self):
        return '<%s>' % str(self)
def read_commit_dag(branch):
    """Build the commit DAG for the whole repository.

    Returns (commits, patches): `commits` maps sha1 -> Commit for every
    commit reachable from any ref, with parent/child links filled in;
    `patches` is the set of Commits carrying a StGit patch ref
    (refs/patches/<branch>/<name>, excluding the .log refs).
    """
    out.start('Reading commit DAG')
    commits = {}
    patches = set()
    # rev-list --parents emits "<sha1> <parent1> <parent2> ..." per commit.
    for line in Run('git', 'rev-list', '--parents', '--all').output_lines():
        cs = line.split()
        for id in cs:
            if not id in commits:
                commits[id] = Commit(id)
        for id in cs[1:]:
            commits[cs[0]].parents.add(commits[id])
            commits[id].children.add(commits[cs[0]])
    # Attach patch names from this branch's patch refs.
    # NOTE(review): `re` is not imported in this module directly; it is
    # presumably provided by one of the wildcard imports above -- confirm.
    for line in Run('git', 'show-ref').output_lines():
        id, ref = line.split()
        m = re.match(r'^refs/patches/%s/(.+)$' % re.escape(branch), ref)
        if m and not m.group(1).endswith('.log'):
            c = commits[id]
            c.patch = m.group(1)
            patches.add(c)
    out.done()
    return commits, patches
def func(parser, options, args):
    """Repair inconsistencies in StGit metadata.

    Walks first-parent history from HEAD, turning plain commits stacked on
    top of patches into new patches, demoting patches hidden behind a merge
    to unapplied, and rewriting the applied/unapplied/hidden lists to match
    what is actually reachable.
    """
    orig_applied = crt_series.get_applied()
    orig_unapplied = crt_series.get_unapplied()
    orig_hidden = crt_series.get_hidden()
    if crt_series.get_protected():
        raise CmdException(
            'This branch is protected. Modification is not permitted.')
    # Find commits that aren't patches, and applied patches.
    # Follow single-parent history from HEAD: patch commits become the
    # applied list; non-patch commits are queued, and committed to
    # `patchify` once a patch is found beneath them.
    head = git.get_commit(git.get_head()).get_id_hash()
    commits, patches = read_commit_dag(crt_series.get_name())
    c = commits[head]
    patchify = [] # commits to definitely patchify
    maybe_patchify = [] # commits to patchify if we find a patch below them
    applied = []
    while len(c.parents) == 1:
        parent, = c.parents
        if c.patch:
            applied.append(c)
            patchify.extend(maybe_patchify)
            maybe_patchify = []
        else:
            maybe_patchify.append(c)
        c = parent
    # The walk went top-down; flip both lists into bottom-up stack order.
    applied.reverse()
    patchify.reverse()
    # Find patches hidden behind a merge.
    # The loop above stopped at the first merge commit (>1 parent); any
    # patch only reachable through it can no longer be applied.
    merge = c
    todo = set([c])
    seen = set()
    hidden = set()
    while todo:
        c = todo.pop()
        seen.add(c)
        todo |= c.parents - seen
        if c.patch:
            hidden.add(c)
    if hidden:
        out.warn(('%d patch%s are hidden below the merge commit'
                  % (len(hidden), ['es', ''][len(hidden) == 1])),
                 '%s,' % merge.id, 'and will be considered unapplied.')
    # Make patches of any linear sequence of commits on top of a patch.
    names = set(p.patch for p in patches)
    def name_taken(name):
        # Used by make_patch_name to avoid clashing with existing patches.
        return name in names
    if applied and patchify:
        out.start('Creating %d new patch%s'
                  % (len(patchify), ['es', ''][len(patchify) == 1]))
        for p in patchify:
            name = make_patch_name(p.commit.get_log(), name_taken)
            out.info('Creating patch %s from commit %s' % (name, p.id))
            aname, amail, adate = name_email_date(p.commit.get_author())
            cname, cmail, cdate = name_email_date(p.commit.get_committer())
            parent, = p.parents
            # Register the existing commit as a patch without rewriting it.
            crt_series.new_patch(
                name, can_edit = False, commit = False,
                top = p.id, bottom = parent.id, message = p.commit.get_log(),
                author_name = aname, author_email = amail, author_date = adate,
                committer_name = cname, committer_email = cmail)
            p.patch = name
            applied.append(p)
            names.add(name)
        out.done()
    # Figure out hidden
    orig_patches = orig_applied + orig_unapplied + orig_hidden
    orig_applied_name_set = set(orig_applied)
    orig_unapplied_name_set = set(orig_unapplied)
    orig_hidden_name_set = set(orig_hidden)
    orig_patches_name_set = set(orig_patches)
    hidden = [p for p in patches if p.patch in orig_hidden_name_set]
    # Write the applied/unapplied files.
    out.start('Checking patch appliedness')
    unapplied = patches - set(applied) - set(hidden)
    applied_name_set = set(p.patch for p in applied)
    unapplied_name_set = set(p.patch for p in unapplied)
    hidden_name_set = set(p.patch for p in hidden)
    patches_name_set = set(p.patch for p in patches)
    # Report every appliedness transition relative to the stored state.
    for name in orig_patches_name_set - patches_name_set:
        out.info('%s is gone' % name)
    for name in applied_name_set - orig_applied_name_set:
        out.info('%s is now applied' % name)
    for name in unapplied_name_set - orig_unapplied_name_set:
        out.info('%s is now unapplied' % name)
    for name in hidden_name_set - orig_hidden_name_set:
        out.info('%s is now hidden' % name)
    orig_order = dict(zip(orig_patches, xrange(len(orig_patches))))
    # Keep the original series order where possible; patches not present
    # in the original series sort last, alphabetically.
    def patchname_cmp(p1, p2):
        i1 = orig_order.get(p1, len(orig_order))
        i2 = orig_order.get(p2, len(orig_order))
        return cmp((i1, p1), (i2, p2))
    crt_series.set_applied(p.patch for p in applied)
    crt_series.set_unapplied(sorted(unapplied_name_set, cmp = patchname_cmp))
    crt_series.set_hidden(sorted(hidden_name_set, cmp = patchname_cmp))
    out.done()
| gpl-2.0 |
SteveHNH/ansible | lib/ansible/modules/storage/infinidat/infini_fs.py | 29 | 4499 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Gregory Shulov (gregory.shulov@gmail.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: infini_fs
version_added: 2.3
short_description: Create, Delete or Modify filesystems on Infinibox
description:
- This module creates, deletes or modifies filesystems on Infinibox.
author: Gregory Shulov (@GR360RY)
options:
name:
description:
- File system name.
required: true
state:
description:
- Creates/Modifies file system when present or removes when absent.
required: false
default: present
choices: [ "present", "absent" ]
size:
description:
- File system size in MB, GB or TB units. See examples.
required: false
pool:
description:
- Pool that will host file system.
required: true
extends_documentation_fragment:
- infinibox
requirements:
- capacity
'''
EXAMPLES = '''
- name: Create new file system named foo under pool named bar
infini_fs:
name: foo
size: 1TB
pool: bar
state: present
user: admin
password: secret
system: ibox001
'''
RETURN = '''
'''
try:
from capacity import KiB, Capacity
HAS_CAPACITY = True
except ImportError:
HAS_CAPACITY = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec
@api_wrapper
def get_pool(module, system):
    """Return the pool named in module.params['pool'], or None if not found."""
    try:
        return system.pools.get(name=module.params['pool'])
    # BUG FIX: narrowed from a bare `except:`, which would also swallow
    # SystemExit/KeyboardInterrupt; a failed lookup still maps to None.
    except Exception:
        return None
@api_wrapper
def get_filesystem(module, system):
    """Return the filesystem named in module.params['name'], or None if not found."""
    try:
        return system.filesystems.get(name=module.params['name'])
    # BUG FIX: narrowed from a bare `except:`, which would also swallow
    # SystemExit/KeyboardInterrupt; a failed lookup still maps to None.
    except Exception:
        return None
@api_wrapper
def create_filesystem(module, system):
    """Create Filesystem"""
    # Honor Ansible check mode: report changed=True without creating.
    if not module.check_mode:
        filesystem = system.filesystems.create(name=module.params['name'], pool=get_pool(module, system))
        if module.params['size']:
            # Infinibox sizes must be aligned up to 64 KiB granularity.
            size = Capacity(module.params['size']).roundup(64 * KiB)
            filesystem.update_size(size)
    module.exit_json(changed=True)
@api_wrapper
def update_filesystem(module, filesystem):
    """Update Filesystem"""
    changed = False
    if module.params['size']:
        # Compare against the 64 KiB-rounded requested size; only resize
        # (outside of check mode) when the sizes differ.
        size = Capacity(module.params['size']).roundup(64 * KiB)
        if filesystem.get_size() != size:
            if not module.check_mode:
                filesystem.update_size(size)
            changed = True
    module.exit_json(changed=changed)
@api_wrapper
def delete_filesystem(module, filesystem):
    """ Delete Filesystem"""
    # Check mode still reports changed=True without actually deleting.
    if not module.check_mode:
        filesystem.delete()
    module.exit_json(changed=True)
def main():
    """Ansible entry point: validate parameters and dispatch on state."""
    # Shared Infinibox connection options plus this module's own parameters.
    argument_spec = infinibox_argument_spec()
    argument_spec.update(
        dict(
            name = dict(required=True),
            state = dict(default='present', choices=['present', 'absent']),
            pool = dict(required=True),
            size = dict()
        )
    )
    module = AnsibleModule(argument_spec, supports_check_mode=True)
    if not HAS_INFINISDK:
        module.fail_json(msg='infinisdk is required for this module')
    if not HAS_CAPACITY:
        module.fail_json(msg='The capacity python library is required for this module')
    # Validate the size string up front so we fail before touching the system.
    if module.params['size']:
        try:
            Capacity(module.params['size'])
        except:
            module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')
    state = module.params['state']
    system = get_system(module)
    pool = get_pool(module, system)
    filesystem = get_filesystem(module, system)
    if pool is None:
        module.fail_json(msg='Pool {} not found'.format(module.params['pool']))
    # Dispatch on desired state versus whether the filesystem exists.
    if state == 'present' and not filesystem:
        create_filesystem(module, system)
    elif state == 'present' and filesystem:
        update_filesystem(module, filesystem)
    elif state == 'absent' and filesystem:
        delete_filesystem(module, filesystem)
    elif state == 'absent' and not filesystem:
        module.exit_json(changed=False)
if __name__ == '__main__':
main()
| gpl-3.0 |
programming086/omim | tools/unix/diff_features.py | 3 | 1034 | #!/usr/bin/python
import sys, re
# One line of type_statistics output, e.g.
#   "12. highway|residential|: size = 123; count = 45; length = 6.7 m; area = 8.9 m"
# group(1) = type name, group(3) = feature count (the only fields used below).
RE_STAT = re.compile(r'(?:\d+\. )?([\w:|-]+?)\|: size = (\d+); count = (\d+); length = ([0-9.e+-]+) m; area = ([0-9.e+-]+) m.\s*')
def parse_and_add(data, line):
    """Parse one type_statistics line; record the feature count keyed by type name."""
    match = RE_STAT.match(line)
    if match is not None:
        data[match.group(1)] = int(match.group(3))
# --- Python 2 command-line script body ---
if len(sys.argv) < 3:
    print 'This tool compares type_statistics output for feature sizes'
    print 'Usage: {0} <output_new> <output_old> [threshold_in_%]'.format(sys.argv[0])
    sys.exit(0)
# data1 = "old" stats (argv[2]); data2 = "new" stats (argv[1]).
data1 = {}
with open(sys.argv[2], 'r') as f:
    for line in f:
        parse_and_add(data1, line)
data2 = {}
with open(sys.argv[1], 'r') as f:
    for line in f:
        parse_and_add(data2, line)
# threshold is a ratio: "[threshold_in_%]" of 5 -> 1.05; default 100% -> 2.0.
threshold = (int(sys.argv[3]) if len(sys.argv) > 3 else 100) / 100.0 + 1
# Minimum absolute count difference worth reporting.
min_diff = 40
for k in data1:
    v1 = int(data1[k])
    if k in data2:
        v2 = int(data2[k])
        # NOTE(review): `or` binds looser than `and`, so this reads as
        # v1==0 or v2==0 or (ratio > threshold and abs(v1-v2) > min_diff).
        # If min_diff was meant to gate the zero cases too, parentheses are
        # missing — confirm intent before changing.
        if v1 == 0 or v2 == 0 or max(v1, v2) / float(min(v1, v2)) > threshold and abs(v1 - v2) > min_diff:
            print '{0}: {1} to {2}'.format(k, v1, v2)
    elif v1 > min_diff:
        # Type disappeared entirely from the new output.
        print '- not found: {0}, {1}'.format(k, v1)
| apache-2.0 |
ehogan/iris | lib/iris/tests/unit/util/test_squeeze.py | 17 | 2185 | # (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Test function :func:`iris.util.squeeze`."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import unittest
import iris
import iris.tests.stock as stock
class Test(tests.IrisTest):
    """Unit tests for :func:`iris.util.squeeze`."""
    def setUp(self):
        # A 2D cube carrying scalar coordinates that can be promoted to axes.
        self.cube = stock.simple_2d_w_multidim_and_scalars()
    def test_no_change(self):
        # A cube with no length-1 dimensions comes back unchanged.
        self.assertEqual(self.cube, iris.util.squeeze(self.cube))
    def test_squeeze_one_dim(self):
        promoted = iris.util.new_axis(self.cube, scalar_coord='an_other')
        squeezed = iris.util.squeeze(promoted)
        self.assertEqual(self.cube, squeezed)
    def test_squeeze_two_dims(self):
        promoted_once = iris.util.new_axis(self.cube, scalar_coord='an_other')
        promoted_twice = iris.util.new_axis(promoted_once,
                                            scalar_coord='air_temperature')
        self.assertEqual(self.cube, iris.util.squeeze(promoted_twice))
    def test_squeeze_one_anonymous_dim(self):
        promoted = iris.util.new_axis(self.cube)
        self.assertEqual(self.cube, iris.util.squeeze(promoted))
    def test_squeeze_to_scalar_cube(self):
        scalar_cube = self.cube[0, 0]
        promoted = iris.util.new_axis(scalar_cube)
        self.assertEqual(scalar_cube, iris.util.squeeze(promoted))
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 |
Mafarricos/Mafarricos-xbmc-addons | script.module.addonsresolver/resources/libs/parsers/kmedia.py | 3 | 1041 | # -*- coding: UTF-8 -*-
# by Mafarricos
# email: MafaStudios@gmail.com
# This program is free software: GNU General Public License
import os,urllib
import links,search
from resources.libs import basic
def createstrm(name,imdbid,year,url):
    """Write .strm files that launch the KMediaTorrent add-on for a title.

    NOTE(review): behaviour depends entirely on the project-local ``links``,
    ``search`` and ``basic`` helpers; the notes below are inferred from their
    call sites and should be confirmed against those modules.  The ``name``,
    ``year`` and ``url`` parameters are unused in this body — presumably kept
    for signature parity with sibling parsers.
    """
    addon_id = links.link().kmediatorrent_id
    addon_path = os.path.join(links.link().installfolder,addon_id)
    addon_getsettings = links.link().getSetting("kmediatorrent_enabled")
    addon_pos = links.link().getSetting("kmediatorrent_pos")
    # Zero-pad the position so .strm files sort correctly ("7" -> "07").
    if len(addon_pos) == 1: addon_pos = '0'+addon_pos
    srtmBasePath = links.link().strmPath
    addonplay = links.link().kmediatorrent_play
    # Auto-disable the integration if the add-on directory is missing.
    if not os.path.exists(addon_path) and addon_getsettings == 'true': links.link().setSetting("kmediatorrent_enabled",'false')
    if addon_getsettings == 'true':
        # One .strm file per magnet/quality pair found for this IMDB id.
        qual,magnet = search.ytssearch(imdbid)
        if magnet:
            for i in range(0,len(magnet)):
                strmPath = os.path.join(srtmBasePath,addon_pos+'.'+addon_id+'.'+qual[i]+'.strm')
                playurl = addonplay % (urllib.quote_plus(magnet[i]))
                basic.writefile(strmPath,'w',playurl)
shams169/pythonProject | ContactsDir/env/lib/python3.6/site-packages/gunicorn/instrument/statsd.py | 11 | 4462 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
"Bare-bones implementation of statsD's protocol, client-side"
import socket
import logging
from re import sub
from gunicorn.glogging import Logger
from gunicorn import six
# Instrumentation constants: keys looked up in the `extra` dict passed to
# Statsd.log(), and the metric-type values dispatched on there.
METRIC_VAR = "metric"        # key holding the metric name
VALUE_VAR = "value"          # key holding the metric value
MTYPE_VAR = "mtype"          # key holding one of the three types below
GAUGE_TYPE = "gauge"
COUNTER_TYPE = "counter"
HISTOGRAM_TYPE = "histogram"
class Statsd(Logger):
    """statsD-based instrumentation, that passes as a logger.

    Every log call is forwarded to the parent Logger; error-level calls are
    additionally counted, and log() emits gauges/counters/histograms for
    records carrying metric/value/mtype entries in `extra`.
    """
    def __init__(self, cfg):
        """host, port: statsD server (cfg.statsd_host)."""
        Logger.__init__(self, cfg)
        # Normalise the prefix so it ends with exactly one trailing dot.
        # (Raw string avoids the invalid "\g" escape warning on Python 3.12+.)
        self.prefix = sub(r"^(.+[^.]+)\.*$", r"\g<1>.", cfg.statsd_prefix)
        try:
            host, port = cfg.statsd_host
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.sock.connect((host, int(port)))
        except Exception:
            # Metrics are best-effort: keep serving even without a socket.
            self.sock = None

    # Log errors and warnings: forward to Logger, then count the event.
    def critical(self, msg, *args, **kwargs):
        Logger.critical(self, msg, *args, **kwargs)
        self.increment("gunicorn.log.critical", 1)

    def error(self, msg, *args, **kwargs):
        Logger.error(self, msg, *args, **kwargs)
        self.increment("gunicorn.log.error", 1)

    def warning(self, msg, *args, **kwargs):
        Logger.warning(self, msg, *args, **kwargs)
        self.increment("gunicorn.log.warning", 1)

    def exception(self, msg, *args, **kwargs):
        Logger.exception(self, msg, *args, **kwargs)
        self.increment("gunicorn.log.exception", 1)

    # Special treatment for info, the most common log level: route through
    # log() so metric-bearing records are emitted without being counted.
    def info(self, msg, *args, **kwargs):
        self.log(logging.INFO, msg, *args, **kwargs)

    # skip the run-of-the-mill logs
    def debug(self, msg, *args, **kwargs):
        self.log(logging.DEBUG, msg, *args, **kwargs)

    def log(self, lvl, msg, *args, **kwargs):
        """Emit a statistic if metric, value and type are present in `extra`,
        then forward any non-empty message to the parent Logger.
        """
        try:
            extra = kwargs.get("extra", None)
            if extra is not None:
                metric = extra.get(METRIC_VAR, None)
                value = extra.get(VALUE_VAR, None)
                typ = extra.get(MTYPE_VAR, None)
                if metric and value and typ:
                    if typ == GAUGE_TYPE:
                        self.gauge(metric, value)
                    elif typ == COUNTER_TYPE:
                        self.increment(metric, value)
                    elif typ == HISTOGRAM_TYPE:
                        self.histogram(metric, value)
                    else:
                        pass

            # Log to parent logger only if there is something to say
            if msg is not None and len(msg) > 0:
                Logger.log(self, lvl, msg, *args, **kwargs)
        except Exception:
            Logger.warning(self, "Failed to log to statsd", exc_info=True)

    # access logging
    def access(self, resp, req, environ, request_time):
        """Measure request duration (request_time is a datetime.timedelta)
        and count requests, total and per status code.
        """
        Logger.access(self, resp, req, environ, request_time)
        duration_in_ms = request_time.seconds * 1000 + float(request_time.microseconds) / 10 ** 3
        status = resp.status
        if isinstance(status, str):
            # e.g. "200 OK" -> 200
            status = int(status.split(None, 1)[0])
        self.histogram("gunicorn.request.duration", duration_in_ms)
        self.increment("gunicorn.requests", 1)
        self.increment("gunicorn.request.status.%d" % status, 1)

    # statsD methods — you can use those directly if you want.
    # Wire format: "<prefix><name>:<value>|<type>[|@<rate>]".
    def gauge(self, name, value):
        self._sock_send("{0}{1}:{2}|g".format(self.prefix, name, value))

    def increment(self, name, value, sampling_rate=1.0):
        self._sock_send("{0}{1}:{2}|c|@{3}".format(self.prefix, name, value, sampling_rate))

    def decrement(self, name, value, sampling_rate=1.0):
        # Bug fix: the format string read "{0){1}:-{2}|c|@{3}" — the "{0)"
        # typo made str.format raise ValueError, so every decrement metric
        # was silently dropped by _sock_send's error handling upstream.
        self._sock_send("{0}{1}:-{2}|c|@{3}".format(self.prefix, name, value, sampling_rate))

    def histogram(self, name, value):
        self._sock_send("{0}{1}:{2}|ms".format(self.prefix, name, value))

    def _sock_send(self, msg):
        # Best-effort UDP send; statsd failures must never break logging.
        try:
            if isinstance(msg, six.text_type):
                msg = msg.encode("ascii")
            if self.sock:
                self.sock.send(msg)
        except Exception:
            Logger.warning(self, "Error sending message to statsd", exc_info=True)
| mit |
Sup3Roque/Pancas | plugin.video.loganaddon/oneplay.py | 101 | 12474 | ## ONLY FOR NOOBS :D
##CONVERSION OF following encryption by shani into python
## only decryption function is implemented
'''
* jQuery JavaScript Library v1.4.2
* http://jquery.com/
*
* Copyright 2010, John Resig
* Dual licensed under the MIT or GPL Version 2 licenses.
* http://jquery.org/license
*
* Includes Sizzle.js
* http://sizzlejs.com/
* Copyright 2010, The Dojo Foundation
* Released under the MIT, BSD, and GPL Licenses.
*
* Date: Sat Feb 13 22:33:48 2010 -0500
'''
import urllib
import base64
import re,urllib2,cookielib
def decode(r):
    """Base64-decode string *r* into a list of ints, truncated to a multiple of 16.

    Faithful port of the site's JavaScript decoder: characters outside the
    alphabet (including '=' padding) map to -1, and those -1 values flow
    through the shift arithmetic on purpose.  The tail is cut so the result
    is a whole number of 16-byte AES blocks.
    """
    alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    r = r.replace("\n", "")
    out = []
    for start in range(0, len(r), 4):
        # Decode one 4-character group into three byte values.
        quad = []
        for offset in range(4):
            try:
                quad.append(alphabet.index(r[start + offset]))
            except (ValueError, IndexError):
                # Missing or non-alphabet character (e.g. '=' padding).
                quad.append(-1)
        out.append(quad[0] << 2 | quad[1] >> 4)
        out.append((15 & quad[1]) << 4 | quad[2] >> 2)
        out.append((3 & quad[2]) << 6 | quad[3])
    return out[0: len(out) - (len(out) % 16)]
'''
def fun_e:
return unescape(encodeURIComponent(e))
} catch (r) {
throw "Error utf"
}
'''
def func_u(e):
    """Return the character codes of string *e* as a list of ints."""
    return [ord(ch) for ch in e]
def fun_A(e, r):
    """Split hex string *e* into integers of *r* hex digits each.

    Fixed to use floor division (``//``): the original ``/`` yields floats
    under Python 3, breaking both the list size and the list index while
    behaving identically on the Python 2 ints used here.
    """
    f = [None] * (len(e) // r)
    for n in range(0, len(e), r):
        f[n // r] = int(e[n:n + r], 16)
    return f
'''L inner functions
'''
# --- MD5 primitives, ported line-for-line from the JavaScript original ---
# func_L_r: 32-bit left-rotate.  Python ints are unbounded so bits above 31
# survive here; callers mask back to 32 bits inside func_L_n, matching the
# JS behaviour where << truncates.
def func_L_r(e, r):
    return e << r | e >> 32 - r ##change>>>
# func_L_n: 32-bit modular addition emulated with bit tricks (the JS source
# avoided 32-bit overflow this way).  The mask/xor cascade reproduces carry
# propagation into bit 31 — order and precedence are load-bearing.
def func_L_n(e, r):
    c = 2147483648 & e
    t = 2147483648 & r
    n = 1073741824 & e
    f = 1073741824 & r
    a = (1073741823 & e) + (1073741823 & r)
    return (2147483648 ^ a ^ c ^ t) if n & f else ( (3221225472 ^ a ^ c ^ t if 1073741824 & a else 1073741824 ^ a ^ c ^ t ) if n | f else a ^ c ^ t)
# MD5 auxiliary function F.
def func_L_f(e, r, n):
    return e & r | ~e & n
# MD5 auxiliary function G.
def func_L_c(e, r, n):
    return e & n | r & ~n
# MD5 auxiliary function H.
def func_L_t(e, r, n):
    return e ^ r ^ n
# MD5 auxiliary function I.
def func_L_a(e, r, n):
    return r ^ (e | ~n)
# Round operations FF/GG/HH/II: add, rotate, add — one per MD5 round type.
def func_L_o(e, c, t, a, o, d, u):
    e = func_L_n(e, func_L_n(func_L_n(func_L_f(c, t, a), o), u))
    return func_L_n(func_L_r(e, d), c)
def func_L_d(e, f, t, a, o, d, u):
    e = func_L_n(e, func_L_n(func_L_n(func_L_c(f, t, a), o), u))
    return func_L_n(func_L_r(e, d), f)
def func_L_u(e, f, c, a, o, d, u):
    e = func_L_n(e, func_L_n(func_L_n(func_L_t(f, c, a), o), u))
    return func_L_n(func_L_r(e, d), f)
def func_L_i(e, f, c, t, o, d, u):
    e = func_L_n(e, func_L_n(func_L_n(func_L_a(f, c, t), o), u))
    return func_L_n(func_L_r(e, d), f)
def func_L_b(e):
    """Pack byte list *e* into little-endian 32-bit MD5 message words,
    appending the 0x80 terminator and the bit-length field.

    Fixed to use floor division (``//``): the original ``/`` produces float
    list indices under Python 3 (TypeError) while ``//`` is identical on
    Python 2 ints.  The odd ``[0]*(n+1)`` buffer size from the original is
    preserved — it only holds enough words for the inputs this script feeds
    it (len >= ~15), which is why the length-field stores are guarded.
    """
    n = len(e)
    f = n + 8
    c = (f - f % 64) // 64          # number of complete 64-byte chunks
    t = 16 * (c + 1)                # total words after padding
    a = [0] * (n + 1)
    o = 0
    d = 0
    for d in range(0, n):
        r = (d - d % 4) // 4
        o = 8 * (d % 4)
        a[r] = a[r] | e[d] << o
    d += 1
    r = (d - d % 4) // 4
    o = 8 * (d % 4)
    a[r] = a[r] | 128 << o          # 0x80 terminator bit
    a[t - 2] = n << 3               # message length in bits (low word)
    try:
        a[t - 1] = n >> 29          # high word of bit length (JS >>>)
    except IndexError:
        # Narrowed from a bare except: only the short-buffer case is expected.
        pass
    return a
def func_L_h(e):
    """Unpack 32-bit word *e* into four little-endian byte values."""
    return [255 & (e >> 8 * n) for n in range(4)]
def func_L(e):
    """MD5 digest of byte list *e*, returned as a list of 16 byte values.

    Hand-unrolled port of the JavaScript MD5: *m* packs the four initial
    state words followed by the 64 per-step sine constants; func_L_b pads
    the message; func_L_o/_d/_u/_i are the four MD5 round operations.
    Statement order and the literal constants are exactly RFC 1321's
    schedule — do not reorder.
    """
    l=0
    v=0
    S = [];
    m = fun_A("67452301efcdab8998badcfe10325476d76aa478e8c7b756242070dbc1bdceeef57c0faf4787c62aa8304613fd469501698098d88b44f7afffff5bb1895cd7be6b901122fd987193a679438e49b40821f61e2562c040b340265e5a51e9b6c7aad62f105d02441453d8a1e681e7d3fbc821e1cde6c33707d6f4d50d87455a14eda9e3e905fcefa3f8676f02d98d2a4c8afffa39428771f6816d9d6122fde5380ca4beea444bdecfa9f6bb4b60bebfbc70289b7ec6eaa127fad4ef308504881d05d9d4d039e6db99e51fa27cf8c4ac5665f4292244432aff97ab9423a7fc93a039655b59c38f0ccc92ffeff47d85845dd16fa87e4ffe2ce6e0a30143144e0811a1f7537e82bd3af2352ad7d2bbeb86d391", 8);
    # print m
    # print 'eeeeeeeeeeeeeeeeeeeeee',e
    S = func_L_b(e);
    # print 'S is ',S
    # y, k, M, x are the MD5 state words A, B, C, D.
    y = m[0]; k = m[1]; M = m[2]; x = m[3]
    for l in range(0, len(S),16):
        # Save state to add back after the 64 steps of this chunk.
        v = y; s = k; p = M; g = x;
        # Round 1 (F function).
        y = func_L_o(y, k, M, x, S[l + 0], 7, m[4])
        x = func_L_o(x, y, k, M, S[l + 1], 12, m[5])
        M = func_L_o(M, x, y, k, S[l + 2], 17, m[6])
        k = func_L_o(k, M, x, y, S[l + 3], 22, m[7])
        y = func_L_o(y, k, M, x, S[l + 4], 7, m[8])
        x = func_L_o(x, y, k, M, S[l + 5], 12, m[9])
        M = func_L_o(M, x, y, k, S[l + 6], 17, m[10])
        k = func_L_o(k, M, x, y, S[l + 7], 22, m[11])
        y = func_L_o(y, k, M, x, S[l + 8], 7, m[12])
        x = func_L_o(x, y, k, M, S[l + 9], 12, m[13])
        M = func_L_o(M, x, y, k, S[l + 10], 17, m[14])
        k = func_L_o(k, M, x, y, S[l + 11], 22, m[15])
        y = func_L_o(y, k, M, x, S[l + 12], 7, m[16])
        x = func_L_o(x, y, k, M, S[l + 13], 12, m[17])
        M = func_L_o(M, x, y, k, S[l + 14], 17, m[18])
        k = func_L_o(k, M, x, y, S[l + 15], 22, m[19])
        # Round 2 (G function).
        y = func_L_d(y, k, M, x, S[l + 1], 5, m[20])
        x = func_L_d(x, y, k, M, S[l + 6], 9, m[21])
        M = func_L_d(M, x, y, k, S[l + 11], 14, m[22])
        k = func_L_d(k, M, x, y, S[l + 0], 20, m[23])
        y = func_L_d(y, k, M, x, S[l + 5], 5, m[24])
        x = func_L_d(x, y, k, M, S[l + 10], 9, m[25])
        M = func_L_d(M, x, y, k, S[l + 15], 14, m[26])
        k = func_L_d(k, M, x, y, S[l + 4], 20, m[27])
        y = func_L_d(y, k, M, x, S[l + 9], 5, m[28])
        x = func_L_d(x, y, k, M, S[l + 14], 9, m[29])
        M = func_L_d(M, x, y, k, S[l + 3], 14, m[30])
        k = func_L_d(k, M, x, y, S[l + 8], 20, m[31])
        y = func_L_d(y, k, M, x, S[l + 13], 5, m[32])
        x = func_L_d(x, y, k, M, S[l + 2], 9, m[33])
        M = func_L_d(M, x, y, k, S[l + 7], 14, m[34])
        k = func_L_d(k, M, x, y, S[l + 12], 20, m[35])
        # Round 3 (H function).
        y = func_L_u(y, k, M, x, S[l + 5], 4, m[36])
        x = func_L_u(x, y, k, M, S[l + 8], 11, m[37])
        M = func_L_u(M, x, y, k, S[l + 11], 16, m[38])
        k = func_L_u(k, M, x, y, S[l + 14], 23, m[39])
        y = func_L_u(y, k, M, x, S[l + 1], 4, m[40])
        x = func_L_u(x, y, k, M, S[l + 4], 11, m[41])
        M = func_L_u(M, x, y, k, S[l + 7], 16, m[42])
        k = func_L_u(k, M, x, y, S[l + 10], 23, m[43])
        y = func_L_u(y, k, M, x, S[l + 13], 4, m[44])
        x = func_L_u(x, y, k, M, S[l + 0], 11, m[45])
        M = func_L_u(M, x, y, k, S[l + 3], 16, m[46])
        k = func_L_u(k, M, x, y, S[l + 6], 23, m[47])
        y = func_L_u(y, k, M, x, S[l + 9], 4, m[48])
        x = func_L_u(x, y, k, M, S[l + 12], 11, m[49])
        M = func_L_u(M, x, y, k, S[l + 15], 16, m[50])
        k = func_L_u(k, M, x, y, S[l + 2], 23, m[51])
        # Round 4 (I function).
        y = func_L_i(y, k, M, x, S[l + 0], 6, m[52])
        x = func_L_i(x, y, k, M, S[l + 7], 10, m[53])
        M = func_L_i(M, x, y, k, S[l + 14], 15, m[54])
        k = func_L_i(k, M, x, y, S[l + 5], 21, m[55])
        y = func_L_i(y, k, M, x, S[l + 12], 6, m[56])
        x = func_L_i(x, y, k, M, S[l + 3], 10, m[57])
        M = func_L_i(M, x, y, k, S[l + 10], 15, m[58])
        k = func_L_i(k, M, x, y, S[l + 1], 21, m[59])
        y = func_L_i(y, k, M, x, S[l + 8], 6, m[60])
        x = func_L_i(x, y, k, M, S[l + 15], 10, m[61])
        M = func_L_i(M, x, y, k, S[l + 6], 15, m[62])
        k = func_L_i(k, M, x, y, S[l + 13], 21, m[63])
        y = func_L_i(y, k, M, x, S[l + 4], 6, m[64])
        x = func_L_i(x, y, k, M, S[l + 11], 10, m[65])
        M = func_L_i(M, x, y, k, S[l + 2], 15, m[66])
        k = func_L_i(k, M, x, y, S[l + 9], 21, m[67])
        # Add this chunk's result back into the running state.
        y = func_L_n(y, v)
        k = func_L_n(k, s)
        M = func_L_n(M, p)
        x = func_L_n(x, g)
    # print 'y is ' ,y,func_L_h(y)
    # Serialize state words A,B,C,D to 16 little-endian bytes.
    return func_L_h(y)+func_L_h(k)+ func_L_h(M)+func_L_h(x)
def func_h(n, f):
    """Derive AES key and IV from key bytes *n* and salt bytes *f*.

    OpenSSL EVP_BytesToKey with MD5: hash key+salt, then repeatedly hash
    (previous digest + key + salt), concatenating digests until there is
    enough material for a 32-byte key (a) and a 16-byte IV (o).
    """
    c=0
    e = 14                      # key size selector from the JS original
    r = 8                       # key length in 32-bit words (8*4 = 32 bytes)
    t = 3 if e >= 12 else 2     # number of MD5 digests needed (3*16 = 48 bytes)
    a = []
    o = []
    d = [None]*t
    u = [],
    # NOTE(review): the trailing comma above makes u a tuple ([],) — harmless,
    # since u is overwritten with d[0] before first use.
    i = n+ f;
    # print 'n is',n
    # print 'f is',f
    # print 'i is',i
    # print 'func_L(i)'
    #print func_L(i)
    #return '',''
    d[0] = func_L(i)
    # print 'dddddddddddddddd',d
    u = d[0]
    # print 'uuuuuuuuuuuuuuuu',u
    #print u
    for c in range(1,t):
        # Each further digest hashes the previous digest plus key+salt.
        d[c] = func_L( d[c - 1]+i )
        u+=(d[c])
    # print u
    a = u[0: 4 * r]             # first 32 bytes -> AES-256 key
    o = u[4 * r: 4 * r + 16]    # next 16 bytes -> CBC IV
    return a,o
def decrypt(val,key):
    """Decrypt base64 ciphertext *val* (OpenSSL salted layout) with passphrase *key*.

    Bytes 8-15 of the decoded data carry the salt; func_h derives the
    AES-CBC key/IV, and the bundled pyaes module performs the decryption.
    """
    f= decode(val);
    c=f[8:16]                   # salt (bytes 0-7 are the "Salted__" marker)
    k=func_u(key);
    a,o= func_h(k, c)           # a = key bytes, o = IV bytes
    # print 'aaaaaaaaaaaaaaaaa is ',a
    # print 'oooooooooooooooo is ',o
    #print c
    f=f[16:]                    # actual ciphertext starts after marker+salt
    key=a
    iv=o
    # print len(key)
    # Convert the int lists into byte strings for pyaes.
    key2=""
    for k in range(0,len(key)):
        key2+=chr(key[k])
    iv2=""
    for k in range(0,len(iv)):
        iv2+=chr(iv[k])
    f2=""
    for k in range(0,len(f)):
        f2+=chr(f[k])
    import pyaes
    decryptor = pyaes.new(key2, pyaes.MODE_CBC, IV=iv2)
    # NOTE(review): the raw int list *f* (not the f2 string built above) is
    # what gets decrypted; confirm pyaes accepts it before refactoring.
    # Trailing NUL padding is stripped from the plaintext.
    return decryptor.decrypt(f).replace('\x00', '')
def getCaptchaUrl(page_data):
    """Extract the captcha image URL hidden in nested jQuery.dec(...) calls.

    The page wraps the markup in one or more layers of the site's
    jQuery.dec(ciphertext, key) encryption; peel them with decrypt() until
    an <img src="..."> appears (Python 2: uses print statements).
    """
    patt='jQuery.dec\("(.*?)", "(.*?)"'
    print page_data
    txt,key=re.findall(patt,page_data)[0]
    decText=decrypt(txt,key);
    print 'first dec',decText
    iloop=0
    # Pages may nest several layers; cap at 5 to avoid spinning forever.
    while 'jQuery.dec(' in decText and iloop<5:
        iloop+=1
        txt,key=re.findall(patt,decText)[0]
        # print 'txt\n',txt
        # print 'key\n',key
        decText=decrypt(txt,key);
    print 'final dec',decText
    img_pat='<img src="(.*?)"'
    img_url=re.findall(img_pat,decText)[0]
    # Relative captcha paths are served from the embed endpoint.
    if not img_url.startswith('http'):
        img_url='http://oneplay.tv/embed/'+img_url
    print 'catcha url',img_url
    return img_url
def getUrl(url, cookieJar=None,post=None, timeout=20, headers=None, returnResponse=False, noredirect=False):
    """Fetch *url* via urllib2 (Python 2) with optional cookies, POST body
    and extra headers; return the body, or the response object when
    returnResponse is True.

    NOTE(review): the *noredirect* flag is accepted but never used here.
    """
    cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
    opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
    # opener = urllib2.install_opener(opener)
    req = urllib2.Request(url)
    # Spoof a desktop browser UA; the site rejects the default urllib2 agent.
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
    if headers:
        for h,hv in headers:
            req.add_header(h,hv)
    # A non-None *post* makes this a POST request.
    response = opener.open(req,post,timeout=timeout)
    if returnResponse: return response
    link=response.read()
    response.close()
    return link;
def decrypt_oneplaypage(page_url, cookieJar):
    """Fetch (or accept pre-fetched) oneplay embed HTML and decrypt its payload.

    *page_url* may be either a URL (fetched with getUrl) or the raw page text.

    SECURITY NOTE(review): the variable scraped from the page is executed
    with exec() — that is arbitrary code execution on attacker-controlled
    remote content and should be replaced with a real literal parser
    (e.g. ast.literal_eval).
    """
    if page_url.startswith("http"):
        page_data= getUrl(page_url,cookieJar)
    else:
        page_data=page_url
    # print page_data
    # Grab the bracketed list assigned to $(document)[...] in the page JS.
    patt='\$\(document\)\[(.*?\])'
    myvar=''
    var_dec='myvar='+re.findall(patt,page_data)[0]
    # print var_dec
    exec(var_dec)
    # print myvar
    data='';key=''
    # Heuristic: the ciphertext is the long element (>100 chars), the key
    # is the 10-character element.
    for i in range(len(myvar)):
        if len(myvar[i])>100: data=myvar[i];#
        if len(myvar[i])==10: key=myvar[i]
    # print myvar[1],myvar[3]
    s=decrypt (data,key)
    print s
    return s
#print decrypt_oneplaypage('http://oneplay.tv/embed/?i=94&n=VH1&w=100%&h=480' ,None)
#print decrypt("U2FsdGVkX19zHnMuwv6Wdv8ap6pV/ZMTPd5y2B0B3WZ3N1YDS+9e3aqW/6vue+AjizICMLtuQJ2JUgYygfhQ4gRvcukV444ns3HvnpYRQ2Oy3Bse5k+NgRDAorrdZpLMQjyZIfdIhJVdIi0PeGTqGcwxAfGdaFYLc6aQNctw/6wFnCfF4VYkjEK+DK/3D0tyln8k+VmsQXZ1B4+W7sWYTsHhLUwZgErXwpfRrdJ2aWh8P+/u7vroK1Gj6DADKePq/f4dSEDZL41lSpjy21h0RIZznrk9mONDfAEpuvKxyLtXIlVVnikHXSU4jK26YP7aYIei6SciPrhU3XHmxerF95lGUWyw7tMRMxwk8kZ+6CqrpiCjjfjCXvYW68VlBOdBYKXB00HxZiNFpcNwKJ5NjUmY2SgzPmsIbfWy3dyHca68diVog8+BgSYRMPbUX8VFPGiqbC+WLMofb+JpfztQul0WLP913p27uE6Rnvwcj9xRe8Log0iRWEk885clRnerE6Czumbn2PgN/5tczOX3qG2I4uU48Zymh3ckK0pxml6Gx6mD5g4eFHubharIpD6D8D3mtZo4oiPvgB3xYZQqIZKjqbn6gkPsPQMZP1Y1+fNHALpPCAKlnSCR1wJ2ekZRatuPI0ud8+nqoIN4+HhCTQixLXdZQPPAdX5vrDyGWz8TDZTNIjas1nkg/eAhCXruQgbnP/5KpFQqmJOCY//MPhbLmWgLne8+Eu294DbPTlXyiIa70D70sjLv7M1QB0snBFUeOvZlSTqPbWOpwDxBrRLBChTG8VPYdwwZNuOAGJfpK3M+8tReRYQ5iujc5FeF/0hDDEPHPthSfrKSPmQOdw==","lnYKqHhwrA")
| gpl-2.0 |
Peddle/hue | desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/SelfTest/Cipher/test_pkcs1_oaep.py | 113 | 17292 | # -*- coding: utf-8 -*-
#
# SelfTest/Cipher/test_pkcs1_oaep.py: Self-test for PKCS#1 OAEP encryption
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
from __future__ import nested_scopes
__revision__ = "$Id$"
import unittest
from Crypto.SelfTest.st_common import list_test_cases, a2b_hex, b2a_hex
from Crypto.Util.py3compat import *
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP as PKCS
from Crypto.Hash import MD2,MD5,SHA as SHA1,SHA256,RIPEMD
from Crypto import Random
def rws(t):
    """Remove white spaces, tabs, and new lines from a string."""
    return t.replace('\n', '').replace('\t', '').replace(' ', '')
def t2b(t):
    """Convert a text string with bytes in hex form to a byte string."""
    stripped = rws(t)
    if len(stripped) % 2 == 1:
        raise ValueError("Even number of characters expected")
    return a2b_hex(stripped)
class PKCS1_OAEP_Tests(unittest.TestCase):
    """Self-tests for PKCS#1 OAEP: known-answer vectors plus round-trips.

    Fixes applied:
    - ``testEncryptDecrypt1`` was defined twice; the second definition
      silently shadowed the first (variable-length round-trip) test, which
      also called nonexistent module-level ``PKCS.encrypt``/``decrypt``.
      The first test now uses the ``PKCS.new()`` cipher API and the later
      tests are renumbered 2/3/4 so all of them run.
    - ``randGen.__call__`` sliced ``data[idx:N]`` instead of
      ``data[idx:idx+N]``; correct for the first call only.
    """

    def setUp(self):
        self.rng = Random.new().read
        self.key1024 = RSA.generate(1024, self.rng)

    # List of tuples with test data for PKCS#1 OAEP
    # Each tuple is made up by:
    #       Item #0: dictionary with RSA key component
    #       Item #1: plaintext
    #       Item #2: ciphertext
    #       Item #3: random data (=seed)
    #       Item #4: hash object
    _testData = (
            #
            # From in oaep-int.txt to be found in
            # ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
            #
            (
            # Private key
            {
                'n':'''bb f8 2f 09 06 82 ce 9c 23 38 ac 2b 9d a8 71 f7
                36 8d 07 ee d4 10 43 a4 40 d6 b6 f0 74 54 f5 1f
                b8 df ba af 03 5c 02 ab 61 ea 48 ce eb 6f cd 48
                76 ed 52 0d 60 e1 ec 46 19 71 9d 8a 5b 8b 80 7f
                af b8 e0 a3 df c7 37 72 3e e6 b4 b7 d9 3a 25 84
                ee 6a 64 9d 06 09 53 74 88 34 b2 45 45 98 39 4e
                e0 aa b1 2d 7b 61 a5 1f 52 7a 9a 41 f6 c1 68 7f
                e2 53 72 98 ca 2a 8f 59 46 f8 e5 fd 09 1d bd cb''',
                # Public key
                'e':'11',
                # In the test vector, only p and q were given...
                # d is computed offline as e^{-1} mod (p-1)(q-1)
                'd':'''a5dafc5341faf289c4b988db30c1cdf83f31251e0
                668b42784813801579641b29410b3c7998d6bc465745e5c3
                92669d6870da2c082a939e37fdcb82ec93edac97ff3ad595
                0accfbc111c76f1a9529444e56aaf68c56c092cd38dc3bef
                5d20a939926ed4f74a13eddfbe1a1cecc4894af9428c2b7b
                8883fe4463a4bc85b1cb3c1'''
            }
            ,
            # Plaintext
            '''d4 36 e9 95 69 fd 32 a7 c8 a0 5b bc 90 d3 2c 49''',
            # Ciphertext
            '''12 53 e0 4d c0 a5 39 7b b4 4a 7a b8 7e 9b f2 a0
            39 a3 3d 1e 99 6f c8 2a 94 cc d3 00 74 c9 5d f7
            63 72 20 17 06 9e 52 68 da 5d 1c 0b 4f 87 2c f6
            53 c1 1d f8 23 14 a6 79 68 df ea e2 8d ef 04 bb
            6d 84 b1 c3 1d 65 4a 19 70 e5 78 3b d6 eb 96 a0
            24 c2 ca 2f 4a 90 fe 9f 2e f5 c9 c1 40 e5 bb 48
            da 95 36 ad 87 00 c8 4f c9 13 0a de a7 4e 55 8d
            51 a7 4d df 85 d8 b5 0d e9 68 38 d6 06 3e 09 55''',
            # Random
            '''aa fd 12 f6 59 ca e6 34 89 b4 79 e5 07 6d de c2
            f0 6c b5 8f''',
            # Hash
            SHA1,
            ),

            #
            # From in oaep-vect.txt to be found in Example 1.1
            # ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
            #
            (
            # Private key
            {
                'n':'''a8 b3 b2 84 af 8e b5 0b 38 70 34 a8 60 f1 46 c4
                91 9f 31 87 63 cd 6c 55 98 c8 ae 48 11 a1 e0 ab
                c4 c7 e0 b0 82 d6 93 a5 e7 fc ed 67 5c f4 66 85
                12 77 2c 0c bc 64 a7 42 c6 c6 30 f5 33 c8 cc 72
                f6 2a e8 33 c4 0b f2 58 42 e9 84 bb 78 bd bf 97
                c0 10 7d 55 bd b6 62 f5 c4 e0 fa b9 84 5c b5 14
                8e f7 39 2d d3 aa ff 93 ae 1e 6b 66 7b b3 d4 24
                76 16 d4 f5 ba 10 d4 cf d2 26 de 88 d3 9f 16 fb''',
                'e':'''01 00 01''',
                'd':'''53 33 9c fd b7 9f c8 46 6a 65 5c 73 16 ac a8 5c
                55 fd 8f 6d d8 98 fd af 11 95 17 ef 4f 52 e8 fd
                8e 25 8d f9 3f ee 18 0f a0 e4 ab 29 69 3c d8 3b
                15 2a 55 3d 4a c4 d1 81 2b 8b 9f a5 af 0e 7f 55
                fe 73 04 df 41 57 09 26 f3 31 1f 15 c4 d6 5a 73
                2c 48 31 16 ee 3d 3d 2d 0a f3 54 9a d9 bf 7c bf
                b7 8a d8 84 f8 4d 5b eb 04 72 4d c7 36 9b 31 de
                f3 7d 0c f5 39 e9 cf cd d3 de 65 37 29 ea d5 d1 '''
            }
            ,
            # Plaintext
            '''66 28 19 4e 12 07 3d b0 3b a9 4c da 9e f9 53 23
            97 d5 0d ba 79 b9 87 00 4a fe fe 34''',
            # Ciphertext
            '''35 4f e6 7b 4a 12 6d 5d 35 fe 36 c7 77 79 1a 3f
            7b a1 3d ef 48 4e 2d 39 08 af f7 22 fa d4 68 fb
            21 69 6d e9 5d 0b e9 11 c2 d3 17 4f 8a fc c2 01
            03 5f 7b 6d 8e 69 40 2d e5 45 16 18 c2 1a 53 5f
            a9 d7 bf c5 b8 dd 9f c2 43 f8 cf 92 7d b3 13 22
            d6 e8 81 ea a9 1a 99 61 70 e6 57 a0 5a 26 64 26
            d9 8c 88 00 3f 84 77 c1 22 70 94 a0 d9 fa 1e 8c
            40 24 30 9c e1 ec cc b5 21 00 35 d4 7a c7 2e 8a''',
            # Random
            '''18 b7 76 ea 21 06 9d 69 77 6a 33 e9 6b ad 48 e1
            dd a0 a5 ef''',
            SHA1
            ),

            #
            # From in oaep-vect.txt to be found in Example 2.1
            # ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
            #
            (
            # Private key
            {
                'n':'''01 94 7c 7f ce 90 42 5f 47 27 9e 70 85 1f 25 d5
                e6 23 16 fe 8a 1d f1 93 71 e3 e6 28 e2 60 54 3e
                49 01 ef 60 81 f6 8c 0b 81 41 19 0d 2a e8 da ba
                7d 12 50 ec 6d b6 36 e9 44 ec 37 22 87 7c 7c 1d
                0a 67 f1 4b 16 94 c5 f0 37 94 51 a4 3e 49 a3 2d
                de 83 67 0b 73 da 91 a1 c9 9b c2 3b 43 6a 60 05
                5c 61 0f 0b af 99 c1 a0 79 56 5b 95 a3 f1 52 66
                32 d1 d4 da 60 f2 0e da 25 e6 53 c4 f0 02 76 6f
                45''',
                'e':'''01 00 01''',
                'd':'''08 23 f2 0f ad b5 da 89 08 8a 9d 00 89 3e 21 fa
                4a 1b 11 fb c9 3c 64 a3 be 0b aa ea 97 fb 3b 93
                c3 ff 71 37 04 c1 9c 96 3c 1d 10 7a ae 99 05 47
                39 f7 9e 02 e1 86 de 86 f8 7a 6d de fe a6 d8 cc
                d1 d3 c8 1a 47 bf a7 25 5b e2 06 01 a4 a4 b2 f0
                8a 16 7b 5e 27 9d 71 5b 1b 45 5b dd 7e ab 24 59
                41 d9 76 8b 9a ce fb 3c cd a5 95 2d a3 ce e7 25
                25 b4 50 16 63 a8 ee 15 c9 e9 92 d9 24 62 fe 39'''
            },
            # Plaintext
            '''8f f0 0c aa 60 5c 70 28 30 63 4d 9a 6c 3d 42 c6
            52 b5 8c f1 d9 2f ec 57 0b ee e7''',
            # Ciphertext
            '''01 81 af 89 22 b9 fc b4 d7 9d 92 eb e1 98 15 99
            2f c0 c1 43 9d 8b cd 49 13 98 a0 f4 ad 3a 32 9a
            5b d9 38 55 60 db 53 26 83 c8 b7 da 04 e4 b1 2a
            ed 6a ac df 47 1c 34 c9 cd a8 91 ad dc c2 df 34
            56 65 3a a6 38 2e 9a e5 9b 54 45 52 57 eb 09 9d
            56 2b be 10 45 3f 2b 6d 13 c5 9c 02 e1 0f 1f 8a
            bb 5d a0 d0 57 09 32 da cf 2d 09 01 db 72 9d 0f
            ef cc 05 4e 70 96 8e a5 40 c8 1b 04 bc ae fe 72
            0e''',
            # Random
            '''8c 40 7b 5e c2 89 9e 50 99 c5 3e 8c e7 93 bf 94
            e7 1b 17 82''',
            SHA1
            ),

            #
            # From in oaep-vect.txt to be found in Example 10.1
            # ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
            #
            (
            # Private key
            {
                'n':'''ae 45 ed 56 01 ce c6 b8 cc 05 f8 03 93 5c 67 4d
                db e0 d7 5c 4c 09 fd 79 51 fc 6b 0c ae c3 13 a8
                df 39 97 0c 51 8b ff ba 5e d6 8f 3f 0d 7f 22 a4
                02 9d 41 3f 1a e0 7e 4e be 9e 41 77 ce 23 e7 f5
                40 4b 56 9e 4e e1 bd cf 3c 1f b0 3e f1 13 80 2d
                4f 85 5e b9 b5 13 4b 5a 7c 80 85 ad ca e6 fa 2f
                a1 41 7e c3 76 3b e1 71 b0 c6 2b 76 0e de 23 c1
                2a d9 2b 98 08 84 c6 41 f5 a8 fa c2 6b da d4 a0
                33 81 a2 2f e1 b7 54 88 50 94 c8 25 06 d4 01 9a
                53 5a 28 6a fe b2 71 bb 9b a5 92 de 18 dc f6 00
                c2 ae ea e5 6e 02 f7 cf 79 fc 14 cf 3b dc 7c d8
                4f eb bb f9 50 ca 90 30 4b 22 19 a7 aa 06 3a ef
                a2 c3 c1 98 0e 56 0c d6 4a fe 77 95 85 b6 10 76
                57 b9 57 85 7e fd e6 01 09 88 ab 7d e4 17 fc 88
                d8 f3 84 c4 e6 e7 2c 3f 94 3e 0c 31 c0 c4 a5 cc
                36 f8 79 d8 a3 ac 9d 7d 59 86 0e aa da 6b 83 bb''',
                'e':'''01 00 01''',
                'd':'''05 6b 04 21 6f e5 f3 54 ac 77 25 0a 4b 6b 0c 85
                25 a8 5c 59 b0 bd 80 c5 64 50 a2 2d 5f 43 8e 59
                6a 33 3a a8 75 e2 91 dd 43 f4 8c b8 8b 9d 5f c0
                d4 99 f9 fc d1 c3 97 f9 af c0 70 cd 9e 39 8c 8d
                19 e6 1d b7 c7 41 0a 6b 26 75 df bf 5d 34 5b 80
                4d 20 1a dd 50 2d 5c e2 df cb 09 1c e9 99 7b be
                be 57 30 6f 38 3e 4d 58 81 03 f0 36 f7 e8 5d 19
                34 d1 52 a3 23 e4 a8 db 45 1d 6f 4a 5b 1b 0f 10
                2c c1 50 e0 2f ee e2 b8 8d ea 4a d4 c1 ba cc b2
                4d 84 07 2d 14 e1 d2 4a 67 71 f7 40 8e e3 05 64
                fb 86 d4 39 3a 34 bc f0 b7 88 50 1d 19 33 03 f1
                3a 22 84 b0 01 f0 f6 49 ea f7 93 28 d4 ac 5c 43
                0a b4 41 49 20 a9 46 0e d1 b7 bc 40 ec 65 3e 87
                6d 09 ab c5 09 ae 45 b5 25 19 01 16 a0 c2 61 01
                84 82 98 50 9c 1c 3b f3 a4 83 e7 27 40 54 e1 5e
                97 07 50 36 e9 89 f6 09 32 80 7b 52 57 75 1e 79'''
            },
            # Plaintext
            '''8b ba 6b f8 2a 6c 0f 86 d5 f1 75 6e 97 95 68 70
            b0 89 53 b0 6b 4e b2 05 bc 16 94 ee''',
            # Ciphertext
            '''53 ea 5d c0 8c d2 60 fb 3b 85 85 67 28 7f a9 15
            52 c3 0b 2f eb fb a2 13 f0 ae 87 70 2d 06 8d 19
            ba b0 7f e5 74 52 3d fb 42 13 9d 68 c3 c5 af ee
            e0 bf e4 cb 79 69 cb f3 82 b8 04 d6 e6 13 96 14
            4e 2d 0e 60 74 1f 89 93 c3 01 4b 58 b9 b1 95 7a
            8b ab cd 23 af 85 4f 4c 35 6f b1 66 2a a7 2b fc
            c7 e5 86 55 9d c4 28 0d 16 0c 12 67 85 a7 23 eb
            ee be ff 71 f1 15 94 44 0a ae f8 7d 10 79 3a 87
            74 a2 39 d4 a0 4c 87 fe 14 67 b9 da f8 52 08 ec
            6c 72 55 79 4a 96 cc 29 14 2f 9a 8b d4 18 e3 c1
            fd 67 34 4b 0c d0 82 9d f3 b2 be c6 02 53 19 62
            93 c6 b3 4d 3f 75 d3 2f 21 3d d4 5c 62 73 d5 05
            ad f4 cc ed 10 57 cb 75 8f c2 6a ee fa 44 12 55
            ed 4e 64 c1 99 ee 07 5e 7f 16 64 61 82 fd b4 64
            73 9b 68 ab 5d af f0 e6 3e 95 52 01 68 24 f0 54
            bf 4d 3c 8c 90 a9 7b b6 b6 55 32 84 eb 42 9f cc''',
            # Random
            '''47 e1 ab 71 19 fe e5 6c 95 ee 5e aa d8 6f 40 d0
            aa 63 bd 33''',
            SHA1
            ),
    )

    def testEncrypt1(self):
        # Verify encryption using all test vectors
        for test in self._testData:
            # Build the key
            comps = [ long(rws(test[0][x]),16) for x in ('n','e') ]
            key = RSA.construct(comps)

            # RNG that takes its random numbers from a pool given
            # at initialization
            class randGen:
                def __init__(self, data):
                    self.data = data
                    self.idx = 0
                def __call__(self, N):
                    # Bug fix: the slice was data[self.idx:N], which is only
                    # correct on the first call (idx == 0).
                    r = self.data[self.idx:self.idx + N]
                    self.idx += N
                    return r

            # The real test
            key._randfunc = randGen(t2b(test[3]))
            cipher = PKCS.new(key, test[4])
            ct = cipher.encrypt(t2b(test[1]))
            self.assertEqual(ct, t2b(test[2]))

    def testEncrypt2(self):
        # Verify that encryption fails if plaintext is too long
        pt = '\x00'*(128-2*20-2+1)
        cipher = PKCS.new(self.key1024)
        self.assertRaises(ValueError, cipher.encrypt, pt)

    def testDecrypt1(self):
        # Verify decryption using all test vectors
        for test in self._testData:
            # Build the key
            comps = [ long(rws(test[0][x]),16) for x in ('n','e','d') ]
            key = RSA.construct(comps)
            # The real test
            cipher = PKCS.new(key, test[4])
            pt = cipher.decrypt(t2b(test[2]))
            self.assertEqual(pt, t2b(test[1]))

    def testDecrypt2(self):
        # Simplest possible negative tests
        for ct_size in (127,128,129):
            cipher = PKCS.new(self.key1024)
            self.assertRaises(ValueError, cipher.decrypt, bchr(0x00)*ct_size)

    def testEncryptDecrypt1(self):
        # Encrypt/Decrypt messages of length [0..128-2*20-2].
        # Fixed: this test originally called nonexistent module-level
        # PKCS.encrypt/PKCS.decrypt, and the bug was hidden because a second
        # method of the same name shadowed this one.
        cipher = PKCS.new(self.key1024)
        for pt_len in xrange(0, 128-2*20-2):
            pt = self.rng(pt_len)
            ct = cipher.encrypt(pt)
            pt2 = cipher.decrypt(ct)
            self.assertEqual(pt, pt2)

    def testEncryptDecrypt2(self):
        # Was a duplicate "testEncryptDecrypt1"; renumbered so both run.
        # Helper function to monitor what's requested from RNG
        global asked
        def localRng(N):
            global asked
            asked += N
            return self.rng(N)

        # Verify that OAEP is friendly to all hashes
        for hashmod in (MD2,MD5,SHA1,SHA256,RIPEMD):
            # Verify that encrypt() asks for as many random bytes
            # as the hash output size
            asked = 0
            pt = self.rng(40)
            self.key1024._randfunc = localRng
            cipher = PKCS.new(self.key1024, hashmod)
            ct = cipher.encrypt(pt)
            self.assertEqual(cipher.decrypt(ct), pt)
            self.failUnless(asked > hashmod.digest_size)

    def testEncryptDecrypt3(self):
        # Renumbered from testEncryptDecrypt2 (see class docstring).
        # Verify that OAEP supports labels
        pt = self.rng(35)
        xlabel = self.rng(22)
        cipher = PKCS.new(self.key1024, label=xlabel)
        ct = cipher.encrypt(pt)
        self.assertEqual(cipher.decrypt(ct), pt)

    def testEncryptDecrypt4(self):
        # Renumbered from testEncryptDecrypt3 (see class docstring).
        # Verify that encrypt() uses the custom MGF
        global mgfcalls
        # Helper function to monitor what's requested from MGF
        def newMGF(seed,maskLen):
            global mgfcalls
            mgfcalls += 1
            return bchr(0x00)*maskLen
        mgfcalls = 0
        pt = self.rng(32)
        cipher = PKCS.new(self.key1024, mgfunc=newMGF)
        ct = cipher.encrypt(pt)
        self.assertEqual(mgfcalls, 2)
        self.assertEqual(cipher.decrypt(ct), pt)
def get_tests(config={}):
    """Collect the OAEP test cases for the PyCrypto test runner."""
    return list(list_test_cases(PKCS1_OAEP_Tests))
if __name__ == '__main__':
    # Run the suite when this module is executed directly.
    def suite():
        return unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| apache-2.0 |
tuxfux-hlp-notes/python-batches | archieves/batch-60/files/myenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/charsetgroupprober.py | 2929 | 3791 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .charsetprober import CharSetProber
class CharSetGroupProber(CharSetProber):
    """Delegates detection to a pool of child probers and tracks the best one."""

    def __init__(self):
        CharSetProber.__init__(self)
        self._mActiveNum = 0
        self._mProbers = []
        self._mBestGuessProber = None

    def reset(self):
        """Reset every child prober and mark it active again."""
        CharSetProber.reset(self)
        self._mActiveNum = 0
        self._mBestGuessProber = None
        for child in self._mProbers:
            if not child:
                continue
            child.reset()
            child.active = True
            self._mActiveNum += 1

    def get_charset_name(self):
        """Name of the best guess so far, or None if nothing has matched."""
        if not self._mBestGuessProber:
            # get_confidence() has the side effect of electing a best prober.
            self.get_confidence()
            if not self._mBestGuessProber:
                return None
        return self._mBestGuessProber.get_charset_name()

    def feed(self, aBuf):
        """Feed data to all active children; stop early on a definite answer."""
        for child in self._mProbers:
            if not child or not child.active:
                continue
            status = child.feed(aBuf)
            if not status:
                continue
            if status == constants.eFoundIt:
                # One child is certain: remember it and report our state.
                self._mBestGuessProber = child
                return self.get_state()
            if status == constants.eNotMe:
                child.active = False
                self._mActiveNum -= 1
                if self._mActiveNum <= 0:
                    # Every child has ruled itself out.
                    self._mState = constants.eNotMe
                    return self.get_state()
        return self.get_state()

    def get_confidence(self):
        """Highest confidence among active children; elects _mBestGuessProber."""
        state = self.get_state()
        if state == constants.eFoundIt:
            return 0.99
        if state == constants.eNotMe:
            return 0.01
        best = 0.0
        self._mBestGuessProber = None
        for child in self._mProbers:
            if not child:
                continue
            if not child.active:
                if constants._debug:
                    sys.stderr.write(child.get_charset_name()
                                     + ' not active\n')
                continue
            conf = child.get_confidence()
            if constants._debug:
                sys.stderr.write('%s confidence = %s\n' %
                                 (child.get_charset_name(), conf))
            if best < conf:
                best = conf
                self._mBestGuessProber = child
        if not self._mBestGuessProber:
            return 0.0
        return best
| gpl-3.0 |
gwwfps/boxrps | werkzeug/test.py | 25 | 31018 | # -*- coding: utf-8 -*-
"""
werkzeug.test
~~~~~~~~~~~~~
This module implements a client to WSGI applications for testing.
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import urllib
import urlparse
import mimetypes
from time import time
from random import random
from itertools import chain
from tempfile import TemporaryFile
from cStringIO import StringIO
from cookielib import CookieJar
from urllib2 import Request as U2Request
from werkzeug._internal import _empty_stream, _get_environ
from werkzeug.wrappers import BaseRequest
from werkzeug.urls import url_encode, url_fix, iri_to_uri
from werkzeug.wsgi import get_host, get_current_url
from werkzeug.datastructures import FileMultiDict, MultiDict, \
CombinedMultiDict, Headers, FileStorage
def stream_encode_multipart(values, use_tempfile=True, threshold=1024 * 500,
                            boundary=None, charset='utf-8'):
    """Encode a dict of values (either strings or file descriptors or
    :class:`FileStorage` objects.) into a multipart encoded string stored
    in a file descriptor.

    :param values: mapping (or ``MultiDict``) of field name -> value; a value
        is either a string or a file-like object with a ``read`` method.
    :param use_tempfile: if true, spill the body to a real temporary file
        once it grows past ``threshold`` bytes (keeps memory bounded).
    :param threshold: in-memory size limit in bytes before spilling to disk.
    :param boundary: multipart boundary; generated from time+random if None.
    :param charset: encoding applied to unicode form values.
    :returns: tuple ``(stream, length, boundary)`` with the stream rewound
        to position 0.
    """
    if boundary is None:
        boundary = '---------------WerkzeugFormPart_%s%s' % (time(), random())
    # _closure holds [stream, total_length, on_disk]; a mutable list is used
    # so the nested write() can rebind entries (Python 2 has no `nonlocal`).
    _closure = [StringIO(), 0, False]
    if use_tempfile:
        def write(string):
            stream, total_length, on_disk = _closure
            if on_disk:
                stream.write(string)
            else:
                length = len(string)
                # Switch to a TemporaryFile the first time the in-memory
                # body would grow past the threshold.
                if length + _closure[1] <= threshold:
                    stream.write(string)
                else:
                    new_stream = TemporaryFile('wb+')
                    new_stream.write(stream.getvalue())
                    new_stream.write(string)
                    _closure[0] = new_stream
                    _closure[2] = True
                _closure[1] = total_length + length
    else:
        write = _closure[0].write
    if not isinstance(values, MultiDict):
        values = MultiDict(values)
    # NOTE: the loop deliberately reuses the name `values` for the per-key
    # value list; the outer mapping is not needed after this point.
    for key, values in values.iterlists():
        for value in values:
            write('--%s\r\nContent-Disposition: form-data; name="%s"' %
                  (boundary, key))
            reader = getattr(value, 'read', None)
            if reader is not None:
                # File-like value: emit filename / content-type headers and
                # stream the payload in 16 KiB chunks.
                filename = getattr(value, 'filename',
                                   getattr(value, 'name', None))
                content_type = getattr(value, 'content_type', None)
                if content_type is None:
                    content_type = filename and \
                        mimetypes.guess_type(filename)[0] or \
                        'application/octet-stream'
                if filename is not None:
                    write('; filename="%s"\r\n' % filename)
                else:
                    write('\r\n')
                write('Content-Type: %s\r\n\r\n' % content_type)
                while 1:
                    chunk = reader(16384)
                    if not chunk:
                        break
                    write(chunk)
            else:
                # Plain form field: encode unicode with the given charset.
                if isinstance(value, unicode):
                    value = value.encode(charset)
                write('\r\n\r\n' + value)
            write('\r\n')
    write('--%s--\r\n' % boundary)
    length = int(_closure[0].tell())
    _closure[0].seek(0)
    return _closure[0], length, boundary
def encode_multipart(values, boundary=None, charset='utf-8'):
    """Like `stream_encode_multipart` but returns ``(boundary, data)``
    where data is the fully buffered multipart body as a bytestring.
    """
    stream, _length, boundary = stream_encode_multipart(
        values, use_tempfile=False, boundary=boundary, charset=charset)
    return boundary, stream.read()
def File(fd, filename=None, mimetype=None):
    """Backwards compat."""
    from warnings import warn
    message = ('werkzeug.test.File is deprecated, use the '
               'EnvironBuilder or FileStorage instead')
    warn(DeprecationWarning(message))
    return FileStorage(fd, filename=filename, content_type=mimetype)
class _TestCookieHeaders(object):
"""A headers adapter for cookielib
"""
def __init__(self, headers):
self.headers = headers
def getheaders(self, name):
headers = []
name = name.lower()
for k, v in self.headers:
if k.lower() == name:
headers.append(v)
return headers
class _TestCookieResponse(object):
    """Minimal stand-in for a httplib.HTTPResponse so that cookielib can
    read cookies out of our test responses.
    """

    def __init__(self, headers):
        self.headers = _TestCookieHeaders(headers)

    def info(self):
        # cookielib only calls info() to get at the headers object.
        return self.headers
class _TestCookieJar(CookieJar):
    """A cookielib.CookieJar that shuttles cookies between WSGI environs
    and WSGI application responses.
    """

    def inject_wsgi(self, environ):
        """Serialize the stored cookies into the environ's cookie header."""
        pairs = ['%s=%s' % (cookie.name, cookie.value) for cookie in self]
        if pairs:
            environ['HTTP_COOKIE'] = ', '.join(pairs)

    def extract_wsgi(self, environ, headers):
        """Pull the response's Set-Cookie headers into the jar."""
        self.extract_cookies(
            _TestCookieResponse(headers),
            U2Request(get_current_url(environ)),
        )
def _iter_data(data):
    """Yield every (key, value) pair from a dict or MultiDict, expanding
    multi-value entries.  Used to iterate over the data passed to the
    :class:`EnvironBuilder`.
    """
    if isinstance(data, MultiDict):
        for key, value_list in data.iterlists():
            for value in value_list:
                yield key, value
    else:
        for key, value in data.iteritems():
            if isinstance(value, list):
                for item in value:
                    yield key, item
            else:
                yield key, value
class EnvironBuilder(object):
    """This class can be used to conveniently create a WSGI environment
    for testing purposes.  It can be used to quickly create WSGI environments
    or request objects from arbitrary data.

    The signature of this class is also used in some other places as of
    Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`,
    :meth:`Client.open`).  Because of this most of the functionality is
    available through the constructor alone.

    Files and regular form data can be manipulated independently of each
    other with the :attr:`form` and :attr:`files` attributes, but are
    passed with the same argument to the constructor: `data`.

    `data` can be any of these values:

    -   a `str`: If it's a string it is converted into a :attr:`input_stream`,
        the :attr:`content_length` is set and you have to provide a
        :attr:`content_type`.
    -   a `dict`: If it's a dict the keys have to be strings and the values
        any of the following objects:

        -   a :class:`file`-like object.  These are converted into
            :class:`FileStorage` objects automatically.
        -   a tuple.  The :meth:`~FileMultiDict.add_file` method is called
            with the tuple items as positional arguments.

    .. versionadded:: 0.6
       `path` and `base_url` can now be unicode strings that are encoded using
       the :func:`iri_to_uri` function.

    :param path: the path of the request.  In the WSGI environment this will
                 end up as `PATH_INFO`.  If the `query_string` is not defined
                 and there is a question mark in the `path` everything after
                 it is used as query string.
    :param base_url: the base URL is a URL that is used to extract the WSGI
                     URL scheme, host (server name + server port) and the
                     script root (`SCRIPT_NAME`).
    :param query_string: an optional string or dict with URL parameters.
    :param method: the HTTP method to use, defaults to `GET`.
    :param input_stream: an optional input stream.  Do not specify this and
                         `data`.  As soon as an input stream is set you can't
                         modify :attr:`args` and :attr:`files` unless you
                         set the :attr:`input_stream` to `None` again.
    :param content_type: The content type for the request.  As of 0.5 you
                         don't have to provide this when specifying files
                         and form data via `data`.
    :param content_length: The content length for the request.  You don't
                           have to specify this when providing data via
                           `data`.
    :param errors_stream: an optional error stream that is used for
                          `wsgi.errors`.  Defaults to :data:`stderr`.
    :param multithread: controls `wsgi.multithread`.  Defaults to `False`.
    :param multiprocess: controls `wsgi.multiprocess`.  Defaults to `False`.
    :param run_once: controls `wsgi.run_once`.  Defaults to `False`.
    :param headers: an optional list or :class:`Headers` object of headers.
    :param data: a string or dict of form data.  See explanation above.
    :param environ_base: an optional dict of environment defaults.
    :param environ_overrides: an optional dict of environment overrides.
    :param charset: the charset used to encode unicode data.
    """

    #: the server protocol to use.  defaults to HTTP/1.1
    server_protocol = 'HTTP/1.1'

    #: the wsgi version to use.  defaults to (1, 0)
    wsgi_version = (1, 0)

    #: the default request class for :meth:`get_request`
    request_class = BaseRequest

    def __init__(self, path='/', base_url=None, query_string=None,
                 method='GET', input_stream=None, content_type=None,
                 content_length=None, errors_stream=None, multithread=False,
                 multiprocess=False, run_once=False, headers=None, data=None,
                 environ_base=None, environ_overrides=None, charset='utf-8'):
        # Split an inline query string off the path unless one was given.
        if query_string is None and '?' in path:
            path, query_string = path.split('?', 1)
        self.charset = charset
        if isinstance(path, unicode):
            path = iri_to_uri(path, charset)
        self.path = path
        if base_url is not None:
            if isinstance(base_url, unicode):
                base_url = iri_to_uri(base_url, charset)
            else:
                base_url = url_fix(base_url, charset)
        self.base_url = base_url
        if isinstance(query_string, basestring):
            self.query_string = query_string
        else:
            if query_string is None:
                query_string = MultiDict()
            elif not isinstance(query_string, MultiDict):
                query_string = MultiDict(query_string)
            self.args = query_string
        self.method = method
        if headers is None:
            headers = Headers()
        elif not isinstance(headers, Headers):
            headers = Headers(headers)
        self.headers = headers
        self.content_type = content_type
        if errors_stream is None:
            errors_stream = sys.stderr
        self.errors_stream = errors_stream
        self.multithread = multithread
        self.multiprocess = multiprocess
        self.run_once = run_once
        self.environ_base = environ_base
        self.environ_overrides = environ_overrides
        self.input_stream = input_stream
        self.content_length = content_length
        self.closed = False
        if data:
            # `data` and `input_stream` are mutually exclusive.
            if input_stream is not None:
                raise TypeError('can\'t provide input stream and data')
            if isinstance(data, basestring):
                self.input_stream = StringIO(data)
                if self.content_length is None:
                    self.content_length = len(data)
            else:
                for key, value in _iter_data(data):
                    if isinstance(value, (tuple, dict)) or \
                       hasattr(value, 'read'):
                        self._add_file_from_data(key, value)
                    else:
                        self.form.setlistdefault(key).append(value)

    def _add_file_from_data(self, key, value):
        """Called in the EnvironBuilder to add files from the data dict."""
        if isinstance(value, tuple):
            self.files.add_file(key, *value)
        elif isinstance(value, dict):
            from warnings import warn
            warn(DeprecationWarning('it\'s no longer possible to pass dicts '
                                    'as `data`.  Use tuples or FileStorage '
                                    'objects instead'), stacklevel=2)
            # BUG FIX: removed a stray `args = v` statement that referenced
            # an undefined name and raised a NameError whenever a dict was
            # passed as a file value.
            value = dict(value)
            mimetype = value.pop('mimetype', None)
            if mimetype is not None:
                value['content_type'] = mimetype
            self.files.add_file(key, **value)
        else:
            self.files.add_file(key, value)

    def _get_base_url(self):
        return urlparse.urlunsplit((self.url_scheme, self.host,
                                    self.script_root, '', '')).rstrip('/') + '/'

    def _set_base_url(self, value):
        if value is None:
            # Defaults when no base URL is given.  (A redundant duplicate
            # `scheme = 'http'` assignment was removed here.)
            scheme = 'http'
            netloc = 'localhost'
            script_root = ''
        else:
            scheme, netloc, script_root, qs, anchor = urlparse.urlsplit(value)
            if qs or anchor:
                raise ValueError('base url must not contain a query string '
                                 'or fragment')
        self.script_root = script_root.rstrip('/')
        self.host = netloc
        self.url_scheme = scheme

    base_url = property(_get_base_url, _set_base_url, doc='''
        The base URL is a URL that is used to extract the WSGI
        URL scheme, host (server name + server port) and the
        script root (`SCRIPT_NAME`).''')
    del _get_base_url, _set_base_url

    def _get_content_type(self):
        ct = self.headers.get('Content-Type')
        if ct is None and not self._input_stream:
            # Pick a sensible default for body-carrying methods.
            if self.method in ('POST', 'PUT'):
                if self._files:
                    return 'multipart/form-data'
                return 'application/x-www-form-urlencoded'
            return None
        return ct

    def _set_content_type(self, value):
        if value is None:
            self.headers.pop('Content-Type', None)
        else:
            self.headers['Content-Type'] = value

    content_type = property(_get_content_type, _set_content_type, doc='''
        The content type for the request.  Reflected from and to the
        :attr:`headers`.  Do not set if you set :attr:`files` or
        :attr:`form` for auto detection.''')
    del _get_content_type, _set_content_type

    def _get_content_length(self):
        return self.headers.get('Content-Length', type=int)

    def _set_content_length(self, value):
        if value is None:
            self.headers.pop('Content-Length', None)
        else:
            self.headers['Content-Length'] = str(value)

    content_length = property(_get_content_length, _set_content_length, doc='''
        The content length as integer.  Reflected from and to the
        :attr:`headers`.  Do not set if you set :attr:`files` or
        :attr:`form` for auto detection.''')
    del _get_content_length, _set_content_length

    def form_property(name, storage, doc):
        # Helper executed at class-creation time; builds a lazily created
        # storage attribute that is mutually exclusive with `input_stream`.
        key = '_' + name
        def getter(self):
            if self._input_stream is not None:
                raise AttributeError('an input stream is defined')
            rv = getattr(self, key)
            if rv is None:
                rv = storage()
                setattr(self, key, rv)
            return rv
        def setter(self, value):
            self._input_stream = None
            setattr(self, key, value)
        return property(getter, setter, doc)

    form = form_property('form', MultiDict, doc='''
        A :class:`MultiDict` of form values.''')
    files = form_property('files', FileMultiDict, doc='''
        A :class:`FileMultiDict` of uploaded files.  You can use the
        :meth:`~FileMultiDict.add_file` method to add new files to the
        dict.''')
    del form_property

    def _get_input_stream(self):
        return self._input_stream

    def _set_input_stream(self, value):
        self._input_stream = value
        self._form = self._files = None

    input_stream = property(_get_input_stream, _set_input_stream, doc='''
        An optional input stream.  If you set this it will clear
        :attr:`form` and :attr:`files`.''')
    del _get_input_stream, _set_input_stream

    def _get_query_string(self):
        if self._query_string is None:
            if self._args is not None:
                return url_encode(self._args, charset=self.charset)
            return ''
        return self._query_string

    def _set_query_string(self, value):
        self._query_string = value
        self._args = None

    query_string = property(_get_query_string, _set_query_string, doc='''
        The query string.  If you set this to a string :attr:`args` will
        no longer be available.''')
    del _get_query_string, _set_query_string

    def _get_args(self):
        if self._query_string is not None:
            raise AttributeError('a query string is defined')
        if self._args is None:
            self._args = MultiDict()
        return self._args

    def _set_args(self, value):
        self._query_string = None
        self._args = value

    args = property(_get_args, _set_args, doc='''
        The URL arguments as :class:`MultiDict`.''')
    del _get_args, _set_args

    @property
    def server_name(self):
        """The server name (read-only, use :attr:`host` to set)"""
        return self.host.split(':', 1)[0]

    @property
    def server_port(self):
        """The server port as integer (read-only, use :attr:`host` to set)"""
        pieces = self.host.split(':', 1)
        if len(pieces) == 2 and pieces[1].isdigit():
            return int(pieces[1])
        elif self.url_scheme == 'https':
            return 443
        return 80

    def __del__(self):
        self.close()

    def close(self):
        """Closes all files.  If you put real :class:`file` objects into the
        :attr:`files` dict you can call this method to automatically close
        them all in one go.
        """
        if self.closed:
            return
        try:
            files = self.files.itervalues()
        except AttributeError:
            files = ()
        for f in files:
            try:
                f.close()
            except Exception:
                # Best effort: one file failing to close must not prevent
                # the remaining ones from being closed.
                pass
        self.closed = True

    def get_environ(self):
        """Return the built environ."""
        input_stream = self.input_stream
        content_length = self.content_length
        content_type = self.content_type
        if input_stream is not None:
            # Derive the content length from the stream without consuming it.
            start_pos = input_stream.tell()
            input_stream.seek(0, 2)
            end_pos = input_stream.tell()
            input_stream.seek(start_pos)
            content_length = end_pos - start_pos
        elif content_type == 'multipart/form-data':
            values = CombinedMultiDict([self.form, self.files])
            input_stream, content_length, boundary = \
                stream_encode_multipart(values, charset=self.charset)
            content_type += '; boundary="%s"' % boundary
        elif content_type == 'application/x-www-form-urlencoded':
            values = url_encode(self.form, charset=self.charset)
            content_length = len(values)
            input_stream = StringIO(values)
        else:
            input_stream = _empty_stream
        result = {}
        if self.environ_base:
            result.update(self.environ_base)

        def _path_encode(x):
            if isinstance(x, unicode):
                x = x.encode(self.charset)
            return urllib.unquote(x)

        result.update({
            'REQUEST_METHOD':       self.method,
            'SCRIPT_NAME':          _path_encode(self.script_root),
            'PATH_INFO':            _path_encode(self.path),
            'QUERY_STRING':         self.query_string,
            'SERVER_NAME':          self.server_name,
            'SERVER_PORT':          str(self.server_port),
            'HTTP_HOST':            self.host,
            'SERVER_PROTOCOL':      self.server_protocol,
            'CONTENT_TYPE':         content_type or '',
            'CONTENT_LENGTH':       str(content_length or '0'),
            'wsgi.version':         self.wsgi_version,
            'wsgi.url_scheme':      self.url_scheme,
            'wsgi.input':           input_stream,
            'wsgi.errors':          self.errors_stream,
            'wsgi.multithread':     self.multithread,
            'wsgi.multiprocess':    self.multiprocess,
            'wsgi.run_once':        self.run_once
        })
        for key, value in self.headers.to_list(self.charset):
            result['HTTP_%s' % key.upper().replace('-', '_')] = value
        if self.environ_overrides:
            result.update(self.environ_overrides)
        return result

    def get_request(self, cls=None):
        """Returns a request with the data.  If the request class is not
        specified :attr:`request_class` is used.

        :param cls: The request wrapper to use.
        """
        if cls is None:
            cls = self.request_class
        return cls(self.get_environ())
class ClientRedirectError(Exception):
    """Raised when follow_redirects=True is used with the :cls:`Client`
    and a redirect loop is detected.
    """
class Client(object):
    """This class allows to send requests to a wrapped application.

    The response wrapper can be a class or factory function that takes
    three arguments: app_iter, status and headers.  The default response
    wrapper just returns a tuple.

    Example::

        class ClientResponse(BaseResponse):
            ...

        client = Client(MyApplication(), response_wrapper=ClientResponse)

    The use_cookies parameter indicates whether cookies should be stored and
    sent for subsequent requests.  This is True by default, but passing False
    will disable this behaviour.

    .. versionadded:: 0.5
       `use_cookies` is new in this version.  Older versions did not provide
       builtin cookie support.
    """

    def __init__(self, application, response_wrapper=None, use_cookies=True):
        self.application = application
        if response_wrapper is None:
            # Default wrapper: just hand back the raw (app_iter, status,
            # headers) triple.
            response_wrapper = lambda a, s, h: (a, s, h)
        self.response_wrapper = response_wrapper
        if use_cookies:
            self.cookie_jar = _TestCookieJar()
        else:
            self.cookie_jar = None
        # Lazily created secondary client used for following redirects.
        self.redirect_client = None

    def open(self, *args, **kwargs):
        """Takes the same arguments as the :class:`EnvironBuilder` class with
        some additions:  You can provide a :class:`EnvironBuilder` or a WSGI
        environment as only argument instead of the :class:`EnvironBuilder`
        arguments and two optional keyword arguments (`as_tuple`, `buffered`)
        that change the type of the return value or the way the application is
        executed.

        .. versionchanged:: 0.5
           If a dict is provided as file in the dict for the `data` parameter
           the content type has to be called `content_type` now instead of
           `mimetype`.  This change was made for consistency with
           :class:`werkzeug.FileWrapper`.

           The `follow_redirects` parameter was added to :func:`open`.

        Additional parameters:

        :param as_tuple: Returns a tuple in the form ``(environ, result)``
        :param buffered: Set this to True to buffer the application run.
                         This will automatically close the application for
                         you as well.
        :param follow_redirects: Set this to True if the `Client` should
                                 follow HTTP redirects.
        """
        as_tuple = kwargs.pop('as_tuple', False)
        buffered = kwargs.pop('buffered', False)
        follow_redirects = kwargs.pop('follow_redirects', False)
        environ = None
        # A single positional argument may already be a builder or a
        # ready-made WSGI environ.
        if not kwargs and len(args) == 1:
            if isinstance(args[0], EnvironBuilder):
                environ = args[0].get_environ()
            elif isinstance(args[0], dict):
                environ = args[0]
        if environ is None:
            builder = EnvironBuilder(*args, **kwargs)
            try:
                environ = builder.get_environ()
            finally:
                builder.close()
        # Send stored cookies with the request, then collect new ones from
        # the response.
        if self.cookie_jar is not None:
            self.cookie_jar.inject_wsgi(environ)
        rv = run_wsgi_app(self.application, environ, buffered=buffered)
        if self.cookie_jar is not None:
            self.cookie_jar.extract_wsgi(environ, rv[2])
        # handle redirects
        redirect_chain = []
        status_code = int(rv[1].split(None, 1)[0])
        while status_code in (301, 302, 303, 305, 307) and follow_redirects:
            if not self.redirect_client:
                # assume that we're not using the user defined response wrapper
                # so that we don't need any ugly hacks to get the status
                # code from the response.
                self.redirect_client = Client(self.application)
                self.redirect_client.cookie_jar = self.cookie_jar
            redirect = dict(rv[2])['Location']
            scheme, netloc, script_root, qs, anchor = urlparse.urlsplit(redirect)
            base_url = urlparse.urlunsplit((scheme, netloc, '', '', '')).rstrip('/') + '/'
            # Only same-host redirects are supported by the test client.
            host = get_host(create_environ('/', base_url, query_string=qs)).split(':', 1)[0]
            if get_host(environ).split(':', 1)[0] != host:
                raise RuntimeError('%r does not support redirect to '
                                   'external targets' % self.__class__)
            redirect_chain.append((redirect, status_code))
            # the redirect request should be a new request, and not be based on
            # the old request
            redirect_kwargs = {}
            redirect_kwargs.update({
                'path': script_root,
                'base_url': base_url,
                'query_string': qs,
                'as_tuple': True,
                'buffered': buffered,
                'follow_redirects': False,
            })
            environ, rv = self.redirect_client.open(**redirect_kwargs)
            status_code = int(rv[1].split(None, 1)[0])
            # Prevent loops
            if redirect_chain[-1] in redirect_chain[0:-1]:
                raise ClientRedirectError("loop detected")
        response = self.response_wrapper(*rv)
        if as_tuple:
            return environ, response
        return response

    def get(self, *args, **kw):
        """Like open but method is enforced to GET."""
        kw['method'] = 'GET'
        return self.open(*args, **kw)

    def post(self, *args, **kw):
        """Like open but method is enforced to POST."""
        kw['method'] = 'POST'
        return self.open(*args, **kw)

    def head(self, *args, **kw):
        """Like open but method is enforced to HEAD."""
        kw['method'] = 'HEAD'
        return self.open(*args, **kw)

    def put(self, *args, **kw):
        """Like open but method is enforced to PUT."""
        kw['method'] = 'PUT'
        return self.open(*args, **kw)

    def delete(self, *args, **kw):
        """Like open but method is enforced to DELETE."""
        kw['method'] = 'DELETE'
        return self.open(*args, **kw)

    def __repr__(self):
        return '<%s %r>' % (
            self.__class__.__name__,
            self.application
        )
def create_environ(*args, **kwargs):
    """Create a new WSGI environ dict based on the values passed.  The first
    parameter should be the path of the request which defaults to '/'.  The
    second one can either be an absolute path (in that case the host is
    localhost:80) or a full path to the request with scheme, netloc port and
    the path to the script.

    This accepts the same arguments as the :class:`EnvironBuilder`
    constructor.

    .. versionchanged:: 0.5
       This function is now a thin wrapper over :class:`EnvironBuilder` which
       was added in 0.5.  The `headers`, `environ_base`, `environ_overrides`
       and `charset` parameters were added.
    """
    builder = EnvironBuilder(*args, **kwargs)
    try:
        environ = builder.get_environ()
    finally:
        # Always release any file handles the builder opened.
        builder.close()
    return environ
def run_wsgi_app(app, environ, buffered=False):
    """Return a tuple in the form (app_iter, status, headers) of the
    application output.  This works best if you pass it an application that
    returns an iterator all the time.

    Sometimes applications may use the `write()` callable returned
    by the `start_response` function.  This tries to resolve such edge
    cases automatically.  But if you don't get the expected output you
    should set `buffered` to `True` which enforces buffering.

    If passed an invalid WSGI application the behavior of this function is
    undefined.  Never pass non-conforming WSGI applications to this function.

    :param app: the application to execute.
    :param buffered: set to `True` to enforce buffering.
    :return: tuple in the form ``(app_iter, status, headers)``
    """
    environ = _get_environ(environ)
    # `response` receives [status, headers] once start_response is called;
    # `buffer` collects anything the app pushes through the write() callable.
    response = []
    buffer = []

    def start_response(status, headers, exc_info=None):
        if exc_info is not None:
            # Re-raise exceptions forwarded by the application (PEP 333).
            raise exc_info[0], exc_info[1], exc_info[2]
        response[:] = [status, headers]
        return buffer.append

    app_iter = app(environ, start_response)
    # when buffering we emit the close call early and convert the
    # application iterator into a regular list
    if buffered:
        close_func = getattr(app_iter, 'close', None)
        try:
            app_iter = list(app_iter)
        finally:
            if close_func is not None:
                close_func()
    # otherwise we iterate the application iter until we have
    # a response, chain the already received data with the already
    # collected data and wrap it in a new `ClosingIterator` if
    # we have a close callable.
    else:
        while not response:
            buffer.append(app_iter.next())
        if buffer:
            close_func = getattr(app_iter, 'close', None)
            # ClosingIterator is imported at the bottom of this module to
            # avoid a circular import with werkzeug.wsgi.
            app_iter = chain(buffer, app_iter)
            if close_func is not None:
                app_iter = ClosingIterator(app_iter, close_func)
    return app_iter, response[0], response[1]
from werkzeug.wsgi import ClosingIterator
| mit |
h2oai/h2o-3 | h2o-py/h2o/model/regression.py | 2 | 5205 | # -*- encoding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
# noinspection PyUnresolvedReferences
from h2o.utils.compatibility import * # NOQA
from h2o.exceptions import H2OValueError
from h2o.model.extensions import has_extension
from h2o.model.model_base import ModelBase
from h2o.utils.shared_utils import _colmean
from h2o.utils.typechecks import assert_is_type
class H2ORegressionModel(ModelBase):

    def _make_model(self):
        # Factory hook used by the framework to build a fresh wrapper instance.
        return H2ORegressionModel()

    def plot(self, timestep="AUTO", metric="AUTO", **kwargs):
        """
        Plots training set (and validation set if available) scoring history for an H2ORegressionModel. The timestep
        and metric arguments are restricted to what is available in its scoring history.

        :param timestep: A unit of measurement for the x-axis.
        :param metric: A unit of measurement for the y-axis.
        :returns: A scoring history plot.

        :examples:

        >>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
        >>> r = cars[0].runif()
        >>> train = cars[r > .2]
        >>> valid = cars[r <= .2]
        >>> response_col = "economy"
        >>> distribution = "gaussian"
        >>> predictors = ["displacement","power","weight","acceleration","year"]
        >>> gbm = H2OGradientBoostingEstimator(nfolds=3,
        ...                                    distribution=distribution,
        ...                                    fold_assignment="Random")
        >>> gbm.train(x=predictors,
        ...           y=response_col,
        ...           training_frame=train,
        ...           validation_frame=valid)
        >>> gbm.plot(timestep="AUTO", metric="AUTO",)
        """
        if not has_extension(self, 'ScoringHistory'):
            raise H2OValueError("Scoring history plot is not available for this type of model (%s)." % self.algo)
        valid_metrics = self._allowed_metrics('regression')
        if valid_metrics is not None:
            # metric must be 'AUTO' or one of the model's allowed regression
            # metrics; assert_is_type raises H2OTypeError on violation.
            # (Bug fix: the explanatory message previously trailed this call
            # after a comma, which only built a throwaway tuple -- it was
            # never attached to the raised error.)
            assert_is_type(metric, 'AUTO', *valid_metrics)
        if metric == "AUTO":
            metric = self._default_metric('regression') or 'AUTO'
        self.scoring_history_plot(timestep=timestep, metric=metric, **kwargs)
def _mean_var(frame, weights=None):
    """
    Compute the (weighted) mean and variance.

    :param frame: Single column H2OFrame
    :param weights: optional weights column
    :returns: The (weighted) mean and variance
    """
    mean = _colmean(frame)
    variance = frame.var()
    return mean, variance
def h2o_mean_absolute_error(y_actual, y_predicted, weights=None):
    """
    Mean absolute error regression loss.

    :param y_actual: H2OFrame of actual response.
    :param y_predicted: H2OFrame of predicted response.
    :param weights: (Optional) sample weights
    :returns: mean absolute error loss (best is 0.0).
    """
    ModelBase._check_targets(y_actual, y_predicted)
    residuals = y_predicted - y_actual
    return _colmean(residuals.abs())
def h2o_mean_squared_error(y_actual, y_predicted, weights=None):
    """
    Mean squared error regression loss.

    :param y_actual: H2OFrame of actual response.
    :param y_predicted: H2OFrame of predicted response.
    :param weights: (Optional) sample weights
    :returns: mean squared error loss (best is 0.0).
    """
    ModelBase._check_targets(y_actual, y_predicted)
    squared_residuals = (y_predicted - y_actual) ** 2
    return _colmean(squared_residuals)
def h2o_median_absolute_error(y_actual, y_predicted):
    """
    Median absolute error regression loss.

    :param y_actual: H2OFrame of actual response.
    :param y_predicted: H2OFrame of predicted response.
    :returns: median absolute error loss (best is 0.0)
    """
    ModelBase._check_targets(y_actual, y_predicted)
    residuals = y_predicted - y_actual
    return residuals.abs().median()
def h2o_explained_variance_score(y_actual, y_predicted, weights=None):
    """
    Explained variance regression score function.

    :param y_actual: H2OFrame of actual response.
    :param y_predicted: H2OFrame of predicted response.
    :param weights: (Optional) sample weights
    :returns: the explained variance score.
    """
    ModelBase._check_targets(y_actual, y_predicted)
    _, var_residual = _mean_var(y_actual - y_predicted, weights)
    _, var_actual = _mean_var(y_actual, weights)
    if var_actual == 0.0:
        # Degenerate case (constant response): perfect fit scores 1, else 0.
        return 1. if var_residual == 0 else 0.
    return 1 - var_residual / var_actual
def h2o_r2_score(y_actual, y_predicted, weights=1.):
    """
    R-squared (coefficient of determination) regression score function.

    :param y_actual: H2OFrame of actual response.
    :param y_predicted: H2OFrame of predicted response.
    :param weights: (Optional) sample weights
    :returns: R-squared (best is 1.0, lower is worse).
    """
    ModelBase._check_targets(y_actual, y_predicted)
    # R^2 = 1 - SS_residual / SS_total (both weighted).
    residual_ss = (weights * (y_actual - y_predicted) ** 2).sum().flatten()
    total_ss = (weights * (y_actual - _colmean(y_actual)) ** 2).sum().flatten()
    if total_ss == 0.0:
        # Constant response: 0/0 => 1, otherwise 0.
        return 1. if residual_ss == 0. else 0.
    return 1 - residual_ss / total_ss
| apache-2.0 |
yunhaowang/IDP-APA | utilities/py_idpapa_sam2gpd.py | 1 | 3045 | #!/usr/bin/env python
import sys,re,time,argparse
def main(args):
    """Run the SAM -> gpd conversion using parsed CLI arguments (open file
    handles on args.input / args.output)."""
    # print >>sys.stdout, "Start analysis: " + time.strftime("%a,%d %b %Y %H:%M:%S")
    convert(args.input,args.output)
    # print >>sys.stdout, "Finish analysis: " + time.strftime("%a,%d %b %Y %H:%M:%S")
def extract_exon_length_from_cigar(cigar):
    """Return the reference span (in bases) described by a CIGAR string.

    Matches (M) and deletions (D) both consume reference bases, so the
    length on the reference is the sum of their operation lengths.
    Insertions, soft clips etc. consume no reference bases and are ignored.

    :param cigar: CIGAR string, e.g. "10M2D20M"
    :returns: total reference length covered (int, 0 if no M/D operations)
    """
    # sum() over the regex hits replaces the original manual accumulation
    # loops (which seeded each list with a "0" sentinel to survive no-match).
    match_len = sum(int(m) for m in re.findall(r"(\d+)M", cigar))
    deletion_len = sum(int(d) for d in re.findall(r"(\d+)D", cigar))
    return match_len + deletion_len
def extract_soft_clip_from_cigar(cigar):
    """Return the number of soft-clipped bases at each end of a CIGAR string.

    Soft clips (S) can only occur at the ends of a CIGAR, so a single
    anchored search per end replaces the original findall-and-sum loops.

    :param cigar: CIGAR string, e.g. "5S10M3S"
    :returns: (left_clip, right_clip) tuple of ints; 0 where no clip present
    """
    left = re.search(r"^(\d+)S", cigar)
    right = re.search(r"(\d+)S$", cigar)
    return (int(left.group(1)) if left else 0,
            int(right.group(1)) if right else 0)
def convert(sam_file,gpd_file):
    """Convert an open SAM file handle to gpd format written to gpd_file.

    Only aligned reads (rname != "*") carrying a strand tag (XS:A:) are
    converted; exon boundaries are reconstructed from the CIGAR string,
    with N operations treated as intron gaps. Both handles are closed on
    completion.

    NOTE: uses the Python 2 ``print >>file`` statement; this module is
    Python 2 only.
    """
    for line in sam_file:
        if line[0] != "@":  # skip SAM header lines
            qname,flag,rname,pos,mapq,cigar,rnext,pnext,tlen,seq = line.strip().split("\t")[:10]
            tag = "\t".join(line.strip().split("\t")[11:])
            if rname != "*" and re.search(r"XS:A:(\S)",tag):
                # Left/right soft-clip lengths, reported as "left_right".
                s5,s3 = extract_soft_clip_from_cigar(cigar)
                sf = str(s5)+"_"+str(s3)
                strand = (re.search(r"XS:A:(\S)",tag)).group(1)
                cigar_n_l = 0            # intron (N op) length preceding the current exon
                exon_length = 0
                exon_start = int(pos)-1  # SAM pos is 1-based; gpd is 0-based
                exon_end = 0
                exon_start_list = []
                exon_end_list = []
                if "N" in cigar:
                    # Spliced alignment: walk the CIGAR exon by exon. The "N"
                    # re-appended below restores the intron marker consumed by split().
                    for exon in cigar.split("N"):
                        exon = exon + "N"
                        exon_start = exon_start + exon_length + cigar_n_l
                        exon_length = extract_exon_length_from_cigar(exon)
                        exon_end = exon_start + exon_length
                        if re.search(r"(\d+)N",exon):
                            cigar_n_l = int((re.search(r"(\d+)N",exon)).group(1))
                        exon_start_list.append(str(exon_start))
                        exon_end_list.append(str(exon_end))
                else:
                    # Unspliced alignment: a single exon.
                    exon_start = exon_start
                    exon_length = extract_exon_length_from_cigar(cigar)
                    exon_end = exon_start + exon_length
                    exon_start_list.append(str(exon_start))
                    exon_end_list.append(str(exon_end))
                # Trailing empty element produces the gpd trailing-comma convention;
                # the exon count below compensates with len(...) - 1.
                exon_start_list.append("")
                exon_end_list.append("")
                print >>gpd_file, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (qname,qname,rname,strand,str(int(pos)-1),str(exon_end),mapq,sf,str(len(exon_start_list)-1),",".join(exon_start_list),",".join(exon_end_list))
    sam_file.close()
    gpd_file.close()
def do_inputs():
    """Parse command line arguments for the sam -> gpd conversion.

    Output gpd columns:
        1.  read id
        2.  read id
        3.  chromosome id
        4.  strand
        5.  start site of alignment
        6.  end site of alignment
        7.  MAPQ
        8.  Number of nucleotides that are softly-clipped by aligner; left_right
        9.  exon count
        10. exon start set
        11. exon end set

    :returns: argparse.Namespace with open ``input`` (sam, read mode) and
        ``output`` (gpd, write mode) file handles.
    """
    # The column description above previously lived in an unused local
    # variable (output_gpd_format); it is now part of this docstring.
    parser = argparse.ArgumentParser(description="Function: convert sam to gpd.",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-i','--input',type=argparse.FileType('r'),required=True,help="Input: sam file")
    parser.add_argument('-o','--output',type=argparse.FileType('w'),required=True,help="Output: gpd file")
    args = parser.parse_args()
    return args
if __name__=="__main__":
    # CLI entry point: parse arguments, then run the conversion.
    args = do_inputs()
    main(args)
| apache-2.0 |
dongguangming/jsonpickle | tests/bson_test.py | 2 | 2313 | """Test serializing pymongo bson structures"""
import datetime
import pickle
import unittest
import jsonpickle
from helper import SkippableTest
bson = None
class Object(object):
    """Picklable helper whose state is rebuilt from its constructor args."""

    def __init__(self, offset):
        # Store the offset as a timedelta of `offset` days.
        self.offset = datetime.timedelta(offset)

    def __getinitargs__(self):
        # Tell (json)pickle to re-invoke __init__ with this argument tuple.
        return (self.offset,)
class BSONTestCase(SkippableTest):
    """Round-trip tests for pymongo/bson types through jsonpickle.

    Every test is skipped when the ``bson`` package is not importable.
    """

    def setUp(self):
        # Import lazily so the suite still loads without pymongo installed.
        global bson
        try:
            bson = __import__('bson.tz_util')
            self.should_skip = False
        except ImportError:
            self.should_skip = True

    def test_FixedOffsetSerializable(self):
        # FixedOffset (a tzinfo subclass) must survive a jsonpickle round trip.
        if self.should_skip:
            return self.skip('bson is not installed')
        fo = bson.tz_util.FixedOffset(-60*5, 'EST')
        serialized = jsonpickle.dumps(fo)
        restored = jsonpickle.loads(serialized)
        self.assertEqual(vars(restored), vars(fo))

    def test_timedelta(self):
        if self.should_skip:
            return self.skip('bson is not installed')
        td = datetime.timedelta(-1, 68400)
        serialized = jsonpickle.dumps(td)
        restored = jsonpickle.loads(serialized)
        self.assertEqual(restored, td)

    def test_stdlib_pickle(self):
        # Sanity check: the same object also works with stdlib pickle.
        if self.should_skip:
            return self.skip('bson is not installed')
        fo = bson.tz_util.FixedOffset(-60*5, 'EST')
        serialized = pickle.dumps(fo)
        restored = pickle.loads(serialized)
        self.assertEqual(vars(restored), vars(fo))

    def test_nested_objects(self):
        # Objects relying on __getinitargs__ must reconstruct correctly.
        if self.should_skip:
            return self.skip('bson is not installed')
        o = Object(99)
        serialized = jsonpickle.dumps(o)
        restored = jsonpickle.loads(serialized)
        self.assertEqual(restored.offset, datetime.timedelta(99))

    def test_datetime_with_fixed_offset(self):
        if self.should_skip:
            return self.skip('bson is not installed')
        fo = bson.tz_util.FixedOffset(-60*5, 'EST')
        dt = datetime.datetime.now().replace(tzinfo=fo)
        serialized = jsonpickle.dumps(dt)
        restored = jsonpickle.loads(serialized)
        self.assertEqual(restored, dt)
def suite():
    """Build the test suite for this module.

    ``unittest.makeSuite`` is deprecated and removed in Python 3.13; the
    TestLoader API below is the supported equivalent and uses the same
    default 'test' method prefix the old call did.
    """
    tests = unittest.TestSuite()
    tests.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(BSONTestCase))
    return tests
if __name__ == '__main__':
    # Allow running this module directly as a test script.
    unittest.main()
| bsd-3-clause |
fiduswriter/fiduswriter | fiduswriter/document/emails.py | 1 | 6864 | from django.core.mail import send_mail
from django.conf import settings
from django.utils.translation import ugettext as _
from base.html_email import html_email
def send_share_notification(
        document_title,
        owner,
        link,
        collaborator_name,
        collaborator_email,
        rights,
        change
):
    """Email a collaborator that a document was shared with them, or that
    their access rights on an existing share changed.

    :param document_title: Title of the document; '' becomes 'Untitled'.
    :param owner: Display name of the sharing user.
    :param link: URL for opening the document.
    :param collaborator_name: Display name of the recipient.
    :param collaborator_email: Recipient email address.
    :param rights: Human-readable access rights granted.
    :param change: True when updating rights on an existing share,
        False for a brand new share.
    """
    if len(document_title) == 0:
        document_title = _('Untitled')
    if change:
        # Rights changed on an existing share.
        message_text = _(
            ('Hey %(collaborator_name)s,\n%(owner)s has changed your access '
             'rights to %(rights)s on the document \'%(document_title)s\'. '
             '\nOpen the document: %(link)s')
        ) % {
            'owner': owner,
            'rights': rights,
            'collaborator_name': collaborator_name,
            'link': link,
            'document_title': document_title
        }
        body_html_intro = _(
            ('<p>Hey %(collaborator_name)s,<br>%(owner)s has changed your '
             'access rights to %(rights)s on the document '
             '\'%(document_title)s\'.</p>')
        ) % {
            'owner': owner,
            'rights': rights,
            'collaborator_name': collaborator_name,
            'document_title': document_title
        }
    else:
        # First-time share.
        message_text = _(
            ('Hey %(collaborator_name)s,\n%(owner)s has shared the document '
             '\'%(document_title)s\' with you and given you %(rights)s access '
             'rights. '
             '\nOpen document: %(link)s')
        ) % {
            'owner': owner,
            'rights': rights,
            'collaborator_name': collaborator_name,
            'link': link,
            'document_title': document_title
        }
        body_html_intro = _(
            ('<p>Hey %(collaborator_name)s,<br>%(owner)s has shared the '
             'document \'%(document_title)s\' with you and given you '
             '%(rights)s access rights.</p>')
        ) % {
            'owner': owner,
            'rights': rights,
            'collaborator_name': collaborator_name,
            'document_title': document_title
        }
    # HTML body: headline, intro, details table, call-to-action button.
    body_html = (
        '<h1>%(document_title)s %(shared)s</h1>'
        '%(body_html_intro)s'
        '<table>'
        '<tr><td>'
        '%(Document)s'
        '</td><td>'
        '<b>%(document_title)s</b>'
        '</td></tr>'
        '<tr><td>'
        '%(Author)s'
        '</td><td>'
        '%(owner)s'
        '</td></tr>'
        '<tr><td>'
        '%(AccessRights)s'
        '</td><td>'
        '%(rights)s'
        '</td></tr>'
        '</table>'
        '<div class="actions"><a class="button" href="%(link)s">'
        '%(AccessTheDocument)s'
        '</a></div>'
    ) % {
        'shared': _('shared'),
        'body_html_intro': body_html_intro,
        'Document': _('Document'),
        'document_title': document_title,
        'Author': _('Author'),
        'owner': owner,
        'AccessRights': _('Access Rights'),
        'rights': rights,
        'link': link,
        'AccessTheDocument': _('Access the document')
    }
    # fail_silently=True: sharing must not break if the mail backend is down.
    send_mail(
        _('Document shared:') +
        ' ' +
        document_title,
        message_text,
        settings.DEFAULT_FROM_EMAIL,
        [collaborator_email],
        fail_silently=True,
        html_message=html_email(body_html)
    )
def send_comment_notification(
        notification_type,
        commentator,
        collaborator_name,
        collaborator_email,
        link,
        document_title,
        comment_text,
        comment_html
):
    """Notify a collaborator that they were mentioned in, or assigned to,
    a comment on a document.

    :param notification_type: 'mention' for a mention notification; any
        other value is treated as a comment assignment.
    :param commentator: Display name of the commenting user.
    :param collaborator_name: Display name of the recipient.
    :param collaborator_email: Recipient email address.
    :param link: URL for opening the document.
    :param document_title: Title of the commented document.
    :param comment_text: Plain-text version of the comment.
    :param comment_html: HTML version of the comment.
    """
    if notification_type == 'mention':
        message_text = _(
            ('Hey %(collaborator_name)s,\n%(commentator)s has mentioned you '
             'in a comment in the document \'%(document)s\':'
             '\n\n%(comment_text)s'
             '\n\nGo to the document here: %(link)s')
        ) % {
            'commentator': commentator,
            'collaborator_name': collaborator_name,
            'link': link,
            'document': document_title,
            'comment_text': comment_text
        }
        body_html_title = _(
            ('Hey %(collaborator_name)s,<br>%(commentator)s has mentioned '
             'you in a comment in the document \'%(document_title)s\'.')
        ) % {
            'commentator': commentator,
            'collaborator_name': collaborator_name,
            'document_title': document_title
        }
        message_title = _('Comment on :') + ' ' + document_title
    else:
        message_text = _(
            ('Hey %(collaborator_name)s,\n%(commentator)s has assigned you to '
             'a comment in the document \'%(document)s\':\n\n%(comment_text)s'
             '\n\nGo to the document here: %(link)s')
        ) % {
            'commentator': commentator,
            'collaborator_name': collaborator_name,
            'link': link,
            'document': document_title,
            'comment_text': comment_text
        }
        body_html_title = _(
            ('Hey %(collaborator_name)s,<br>%(commentator)s has assigned you '
             'to a comment in the document \'%(document_title)s\'.')
        ) % {
            'commentator': commentator,
            'collaborator_name': collaborator_name,
            'document_title': document_title
        }
        message_title = _('Comment assignment on :') + ' ' + document_title
    # A preliminary body_html assignment used to sit here; it was dead code,
    # unconditionally overwritten by the assignment below, and has been removed.
    body_html = (
        '<h1>%(body_html_title)s</h1>'
        '<table>'
        '<tr><td>'
        '%(Document)s'
        '</td><td>'
        '<b>%(document_title)s</b>'
        '</td></tr>'
        '<tr><td>'
        '%(Author)s'
        '</td><td>'
        '%(commentator)s'
        '</td></tr>'
        '<tr><td>'
        '%(Comment)s'
        '</td><td>'
        '%(comment_html)s'
        '</td></tr>'
        '</table>'
        '<div class="actions"><a class="button" href="%(link)s">'
        '%(AccessTheDocument)s'
        '</a></div>'
    ) % {
        'body_html_title': body_html_title,
        'Document': _('Document'),
        'document_title': document_title,
        'Author': _('Author'),
        'commentator': commentator,
        'Comment': _('Comment'),
        'comment_html': comment_html,
        'link': link,
        'AccessTheDocument': _('Access the document')
    }
    # fail_silently=True: notifications must not break the comment workflow.
    send_mail(
        message_title,
        message_text,
        settings.DEFAULT_FROM_EMAIL,
        [collaborator_email],
        fail_silently=True,
        html_message=html_email(body_html)
    )
| agpl-3.0 |
sandias42/mlware | models/SVM.py | 1 | 1693 | from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import SGDClassifier
#from sklearn.model_selection import cross_val_score
from scipy.io import mmread
import numpy as np
# Canonical ordering of malware class labels; the index of a name in this
# list is the integer class id used for training targets and predictions.
malware_classes = ["Agent", "AutoRun", "FraudLoad", "FraudPack", "Hupigon", "Krap",
                   "Lipler", "Magania", "None", "Poison", "Swizzor", "Tdss",
                   "VB", "Virut", "Zbot"]
# a function for writing predictions in the required format
def write_predictions(predictions, ids, outfile):
    """Write a Kaggle-style submission CSV.

    Assumes len(predictions) == len(ids); predictions[i] is the index of the
    predicted class within the malware_classes list above for the executable
    corresponding to ids[i]. outfile will be overwritten.
    """
    with open(outfile, "w+") as f:
        # Header row, then one "<id>,<class index>" row per executable.
        f.write("Id,Prediction\n")
        for history_id, prediction in zip(ids, predictions):
            f.write("%s,%d\n" % (history_id, prediction))
def classes_to_Y(classes):
    """Map class-name strings to integer labels via malware_classes ordering.

    :param classes: iterable of class-name strings (must all appear in
        malware_classes; otherwise ValueError is raised by index()).
    :returns: numpy array of integer label indices
    """
    # List comprehension replaces the manual append loop.
    return np.array([malware_classes.index(cls) for cls in classes])
# load training classes
classes = np.load("../data/features/train_classes.npy")
# convert csr to a numpy array
sparse = np.load("/n/regal/scrb152/Students/sandias42/cs181/bow.npy")
# pull out training examples: training rows come first, test rows follow
X = sparse[:classes.shape[0],:]
X_test = sparse[classes.shape[0]:,:]
# NOTE: Python 2 print statements; this script is Python 2 only.
print X_test.shape
Y = classes_to_Y(classes)
# modified_huber loss yields a smoothed linear SVM with predict_proba support.
model = SGDClassifier(n_jobs=-1, n_iter=100, verbose=1, loss="modified_huber")
model.fit(X,Y)
test_pred = model.predict(X_test)
print test_pred
test_ids = np.load("../data/features/test_ids.npy")
print test_ids
write_predictions(test_pred, test_ids, "../predictions/sgd_bow.csv")
| mit |
jhsenjaliya/incubator-airflow | airflow/contrib/hooks/sqoop_hook.py | 5 | 12255 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains a sqoop 1.x hook
"""
import subprocess
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.log.logging_mixin import LoggingMixin
class SqoopHook(BaseHook, LoggingMixin):
    """
    This hook is a wrapper around the sqoop 1 binary. To be able to use the hook
    it is required that "sqoop" is in the PATH.

    Additional arguments that can be passed via the 'extra' JSON field of the
    sqoop connection:
    * job_tracker: Job tracker local|jobtracker:port.
    * namenode: Namenode.
    * lib_jars: Comma separated jar files to include in the classpath.
    * files: Comma separated files to be copied to the map reduce cluster.
    * archives: Comma separated archives to be unarchived on the compute
        machines.
    * password_file: Path to file containing the password.

    :param conn_id: Reference to the sqoop connection.
    :type conn_id: str
    :param verbose: Set sqoop to verbose.
    :type verbose: bool
    :param num_mappers: Number of map tasks to import in parallel.
    :type num_mappers: str
    :param properties: Properties to set via the -D argument
    :type properties: dict
    """

    def __init__(self, conn_id='sqoop_default', verbose=False,
                 num_mappers=None, hcatalog_database=None,
                 hcatalog_table=None, properties=None):
        # No mutable types in the default parameters
        if properties is None:
            properties = {}
        self.conn = self.get_connection(conn_id)
        connection_parameters = self.conn.extra_dejson
        self.job_tracker = connection_parameters.get('job_tracker', None)
        self.namenode = connection_parameters.get('namenode', None)
        self.libjars = connection_parameters.get('libjars', None)
        self.files = connection_parameters.get('files', None)
        self.archives = connection_parameters.get('archives', None)
        self.password_file = connection_parameters.get('password_file', None)
        self.hcatalog_database = hcatalog_database
        self.hcatalog_table = hcatalog_table
        self.verbose = verbose
        self.num_mappers = num_mappers
        self.properties = properties

    def get_conn(self):
        # The hook shells out to the sqoop binary; there is no persistent
        # connection object to return.
        pass

    def cmd_mask_password(self, cmd):
        """Return a copy of ``cmd`` with the value after --password masked.

        Works on a copy so that masking (for logging) never alters the
        command that is actually executed.
        """
        cmd = list(cmd)
        try:
            password_index = cmd.index('--password')
            cmd[password_index + 1] = 'MASKED'
        except ValueError:
            self.log.debug("No password in sqoop cmd")
        return cmd

    def Popen(self, cmd, **kwargs):
        """
        Execute a sqoop command, streaming its output to the log.

        :param cmd: command to remotely execute (list of arguments)
        :param kwargs: extra arguments to Popen (see subprocess.Popen)
        :return: None; raises AirflowException on a non-zero exit code
        """
        # Log the masked command so a --password value never reaches the logs.
        masked_cmd = ' '.join(self.cmd_mask_password(cmd))
        self.log.info("Executing command: %s", masked_cmd)
        sp = subprocess.Popen(cmd,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT,
                              **kwargs)

        for line in iter(sp.stdout):
            self.log.info(line.strip())

        sp.wait()

        self.log.info("Command exited with return code %s", sp.returncode)

        if sp.returncode:
            # Interpolate into the message (the original passed the command
            # as a second exception argument, producing a tuple message),
            # and report the masked form to avoid leaking credentials.
            raise AirflowException("Sqoop command failed: %s" % masked_cmd)

    def _prepare_command(self, export=False):
        # Build the common prefix shared by import and export commands.
        if export:
            connection_cmd = ["sqoop", "export"]
        else:
            connection_cmd = ["sqoop", "import"]

        if self.verbose:
            connection_cmd += ["--verbose"]
        if self.job_tracker:
            connection_cmd += ["-jt", self.job_tracker]
        if self.conn.login:
            connection_cmd += ["--username", self.conn.login]
        if self.conn.password:
            connection_cmd += ["--password", self.conn.password]
        if self.password_file:
            connection_cmd += ["--password-file", self.password_file]
        if self.libjars:
            connection_cmd += ["-libjars", self.libjars]
        if self.files:
            connection_cmd += ["-files", self.files]
        if self.namenode:
            connection_cmd += ["-fs", self.namenode]
        if self.archives:
            connection_cmd += ["-archives", self.archives]
        if self.num_mappers:
            connection_cmd += ["--num-mappers", str(self.num_mappers)]
        if self.hcatalog_database:
            connection_cmd += ["--hcatalog-database", self.hcatalog_database]
        if self.hcatalog_table:
            connection_cmd += ["--hcatalog-table", self.hcatalog_table]
        for key, value in self.properties.items():
            connection_cmd += ["-D", "{}={}".format(key, value)]

        connection_cmd += ["--connect", "{}:{}/{}".format(
            self.conn.host,
            self.conn.port,
            self.conn.schema
        )]

        return connection_cmd

    def _get_export_format_argument(self, file_type='text'):
        # Map the logical file type onto sqoop's --as-* flag.
        if file_type == "avro":
            return ["--as-avrodatafile"]
        elif file_type == "sequence":
            return ["--as-sequencefile"]
        elif file_type == "parquet":
            return ["--as-parquetfile"]
        elif file_type == "text":
            return ["--as-textfile"]
        else:
            raise AirflowException("Argument file_type should be 'avro', "
                                   "'sequence', 'parquet' or 'text'.")

    def _import_cmd(self, target_dir, append, file_type, split_by, direct,
                    driver):
        # Shared argument assembly for table and query imports.
        cmd = self._prepare_command(export=False)

        cmd += ["--target-dir", target_dir]

        if append:
            cmd += ["--append"]

        cmd += self._get_export_format_argument(file_type)

        if split_by:
            cmd += ["--split-by", split_by]

        if direct:
            cmd += ["--direct"]

        if driver:
            cmd += ["--driver", driver]

        return cmd

    def import_table(self, table, target_dir, append=False, file_type="text",
                     columns=None, split_by=None, where=None, direct=False,
                     driver=None):
        """
        Imports table from remote location to target dir. Arguments are
        copies of direct sqoop command line arguments

        :param table: Table to read
        :param target_dir: HDFS destination dir
        :param append: Append data to an existing dataset in HDFS
        :param file_type: "avro", "sequence", "text" or "parquet".
            Imports data to into the specified format. Defaults to text.
        :param columns: <col,col,col...> Columns to import from table
        :param split_by: Column of the table used to split work units
        :param where: WHERE clause to use during import
        :param direct: Use direct connector if exists for the database
        :param driver: Manually specify JDBC driver class to use
        """
        cmd = self._import_cmd(target_dir, append, file_type, split_by, direct,
                               driver)

        cmd += ["--table", table]

        if columns:
            cmd += ["--columns", columns]
        if where:
            cmd += ["--where", where]

        self.Popen(cmd)

    def import_query(self, query, target_dir,
                     append=False, file_type="text",
                     split_by=None, direct=None, driver=None):
        """
        Imports a specific query from the rdbms to hdfs

        :param query: Free format query to run
        :param target_dir: HDFS destination dir
        :param append: Append data to an existing dataset in HDFS
        :param file_type: "avro", "sequence", "text" or "parquet"
            Imports data to hdfs into the specified format. Defaults to text.
        :param split_by: Column of the table used to split work units
        :param direct: Use direct import fast path
        :param driver: Manually specify JDBC driver class to use
        """
        cmd = self._import_cmd(target_dir, append, file_type, split_by, direct,
                               driver)
        cmd += ["--query", query]

        self.Popen(cmd)

    def _export_cmd(self, table, export_dir, input_null_string,
                    input_null_non_string, staging_table, clear_staging_table,
                    enclosed_by, escaped_by, input_fields_terminated_by,
                    input_lines_terminated_by, input_optionally_enclosed_by,
                    batch, relaxed_isolation):
        # Assemble the argument list for `sqoop export`.
        cmd = self._prepare_command(export=True)

        if input_null_string:
            cmd += ["--input-null-string", input_null_string]

        if input_null_non_string:
            cmd += ["--input-null-non-string", input_null_non_string]

        if staging_table:
            cmd += ["--staging-table", staging_table]

        if clear_staging_table:
            cmd += ["--clear-staging-table"]

        if enclosed_by:
            cmd += ["--enclosed-by", enclosed_by]

        if escaped_by:
            cmd += ["--escaped-by", escaped_by]

        if input_fields_terminated_by:
            cmd += ["--input-fields-terminated-by", input_fields_terminated_by]

        if input_lines_terminated_by:
            cmd += ["--input-lines-terminated-by", input_lines_terminated_by]

        if input_optionally_enclosed_by:
            cmd += ["--input-optionally-enclosed-by",
                    input_optionally_enclosed_by]

        if batch:
            cmd += ["--batch"]

        if relaxed_isolation:
            cmd += ["--relaxed-isolation"]

        if export_dir:
            cmd += ["--export-dir", export_dir]

        # The required option
        cmd += ["--table", table]

        return cmd

    def export_table(self, table, export_dir, input_null_string,
                     input_null_non_string, staging_table,
                     clear_staging_table, enclosed_by,
                     escaped_by, input_fields_terminated_by,
                     input_lines_terminated_by,
                     input_optionally_enclosed_by, batch,
                     relaxed_isolation):
        """
        Exports Hive table to remote location. Arguments are copies of direct
        sqoop command line Arguments

        :param table: Table remote destination
        :param export_dir: Hive table to export
        :param input_null_string: The string to be interpreted as null for
            string columns
        :param input_null_non_string: The string to be interpreted as null
            for non-string columns
        :param staging_table: The table in which data will be staged before
            being inserted into the destination table
        :param clear_staging_table: Indicate that any data present in the
            staging table can be deleted
        :param enclosed_by: Sets a required field enclosing character
        :param escaped_by: Sets the escape character
        :param input_fields_terminated_by: Sets the field separator character
        :param input_lines_terminated_by: Sets the end-of-line character
        :param input_optionally_enclosed_by: Sets a field enclosing character
        :param batch: Use batch mode for underlying statement execution
        :param relaxed_isolation: Transaction isolation to read uncommitted
            for the mappers
        """
        cmd = self._export_cmd(table, export_dir, input_null_string,
                               input_null_non_string, staging_table,
                               clear_staging_table, enclosed_by, escaped_by,
                               input_fields_terminated_by,
                               input_lines_terminated_by,
                               input_optionally_enclosed_by, batch,
                               relaxed_isolation)

        self.Popen(cmd)
| apache-2.0 |
apanimesh061/YDC | Python Files/dbscan_labels.py | 1 | 1206 | #-------------------------------------------------------------------------------
# Name: dbscan_labels
# Purpose: Reading the DBSCAN Cluster labels
#
# Author: Animesh Pandey
#
# Created: 20/04/2014
# Copyright: (c) Animesh Pandey 2014
#-------------------------------------------------------------------------------
##with open('dbscan_labels.dat', 'r') as f:
## content = f.readlines()
##
##labels = [int(float(e.strip())) for e in content]
##
##print labels
import json, sys, csv, urllib2
# Convert the first 100 records of review.json into quoted CSV rows.
filename = 'review.csv'
ofile = open(filename, "w+")
count = 0
with open(filename[:-4] + '.json') as f:
    for l in f:
        count += 1
        line = l.strip()
        json_data = json.loads(line)
        csv_string = []
        # NOTE(review): Python 2 only -- relies on `unicode` and on slicing
        # dict.keys(); the first key of each record is deliberately skipped.
        for k in json_data.keys()[1:]:
            if not isinstance(json_data[k], unicode):
                csv_string.append(str(json_data[k]))
            else:
                # Drop any non-ASCII characters from unicode values.
                csv_string.append(str(json_data[k].encode('utf-8').decode('ascii', 'ignore')))
        # Quote every field and strip embedded newlines from the row.
        row = ','.join(['"' + str(i).encode('utf-8') + '"' for i in csv_string])
        ofile.write(row.replace('\n', '') + '\n')
        if count == 100:
            # NOTE(review): sys.exit() here skips ofile.close(); the output is
            # only flushed because interpreter shutdown closes the handle.
            sys.exit(0)
ofile.close()
| mit |
loafbaker/django_ecommerce2 | orders/mixins.py | 1 | 4637 | from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.core.validators import validate_email
from django.utils.decorators import method_decorator
from carts.mixins import TokenMixin
from carts.models import Cart
from .models import UserCheckout, Order
User = get_user_model()
# API Mixins
class UserCheckoutAPIMixin(TokenMixin):
    """API helper that resolves (or creates) the UserCheckout for a request
    and packages the result, including a signed token, for the client."""

    def user_failure(self, message=None):
        # Standard failure payload; `message` overrides the generic text.
        data = {
            'message': 'There was an error. Please try again.',
            'success': False
        }
        if message:
            data['message'] = message
        return data

    def get_checkout_data(self, user=None, email=None):
        """Return checkout data for an authenticated user or a guest email.

        An authenticated user's email must match the one supplied (if any),
        and guests may not reuse an email belonging to a registered account.
        On success the payload carries the checkout id, a signed token and a
        braintree client token; on failure it carries a message.
        """
        if email:
            email = email.lower()
        data = {}
        user_checkout = None
        # NOTE(review): is_authenticated() as a method is pre-Django-1.10 API.
        if user is not None and user.is_authenticated():
            if email is not None and email != user.email:
                data = self.user_failure(message='The user data conflicts to the authenticated user. Please try again.')
            else:
                user_checkout, created = UserCheckout.objects.get_or_create(user=user, email=user.email)
        elif email:
            user_exists = User.objects.filter(email=email).exists()
            if user_exists:
                data = self.user_failure(message='This user already exists. Please login to continue.')
            else:
                # NOTE(review): bare except also hides unexpected errors;
                # presumably only ValidationError was intended -- confirm.
                try:
                    validate_email(email)
                    email_is_valid = True
                except:
                    email_is_valid = False
                if email_is_valid:
                    user_checkout, created = UserCheckout.objects.get_or_create(email=email)
                else:
                    data = self.user_failure(message='There was an error when parsing the data. Please enter a valid email.')
        else:
            data = self.user_failure()
        if user_checkout:
            data['success'] = True
            data['braintree_id'] = user_checkout.braintree_id
            data['user_checkout_id'] = user_checkout.id
            # Create custom token
            data['user_checkout_token'] = self.create_token(data)
            # Do not show extra data for user checkout
            del data['braintree_id']
            # Auxiliary token
            data['braintree_client_token'] = user_checkout.get_client_token()
        return data
# Mixins
class LoginRequiredMixin(object):
    """Class-based-view mixin that requires an authenticated user by
    wrapping dispatch() with django's login_required decorator."""

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
class CartOrderMixin(object):
    """View mixin binding the session to a Cart and its pending Order."""

    def get_order(self, *args, **kwargs):
        """Return the session's pending order, creating one for the cart
        when the session has none yet."""
        cart = self.get_cart()
        order_id = self.request.session.get('order_id')
        if order_id:
            # NOTE(review): raises Order.DoesNotExist if the session holds a
            # stale id -- confirm upstream cleanup prevents that.
            new_order = Order.objects.get(id=order_id)
        else:
            new_order = Order.objects.create(cart=cart)
            self.request.session['order_id'] = new_order.id
        return new_order

    def get_cart(self, *args, **kwargs):
        """Return the session's cart, creating one if needed, and ensure a
        cart never leaks between different authenticated users."""
        cart_id = self.request.session.get('cart_id')
        if cart_id is None:
            cart = Cart()
            cart.save()
            cart_id = cart.id
            self.request.session['cart_id'] = cart_id
        cart = Cart.objects.get(id=cart_id)
        if self.request.user.is_authenticated(): # Login user
            # if the cart is not belong to the current login user,
            # start a new cart
            if cart.user is not None and cart.user != self.request.user:
                cart = Cart()
                cart.save()
                self.request.session['cart_id'] = cart.id
            cart.user = self.request.user
            cart.save()
        else: # Guest user
            if cart.user:
                pass # Required Login or remind user to start a new session
        return cart
class UserCheckoutMixin(object):
    """View mixin resolving the UserCheckout tied to the session or user."""

    def get_user_checkout(self, *args, **kwargs):
        """Return the request's UserCheckout, or None for a fresh guest.

        An authenticated user's checkout is looked up (or created) by email
        and cached in the session; guests fall back to the session id.
        """
        user_checkout_id = self.request.session.get('user_checkout_id')
        if self.request.user.is_authenticated():
            user_checkout, created = UserCheckout.objects.get_or_create(email=self.request.user.email)
            if created: # Do not validate if the user and the email match
                user_checkout.user = self.request.user
                user_checkout.save()
            if user_checkout_id != user_checkout.id:
                self.request.session['user_checkout_id'] = user_checkout.id
        elif user_checkout_id:
            user_checkout = UserCheckout.objects.get(id=user_checkout_id)
        else:
            return None
        return user_checkout
koushikcgit/xen | tools/python/genwrap.py | 49 | 11753 | #!/usr/bin/python
import sys,os
import idl
(TYPE_DEFBOOL, TYPE_BOOL, TYPE_INT, TYPE_UINT, TYPE_STRING, TYPE_ARRAY, TYPE_AGGREGATE) = range(7)
def py_type(ty):
    """Classify an idl type into one of the TYPE_* marshalling categories,
    or return None when no builtin mapping exists."""
    if ty == idl.bool:
        return TYPE_BOOL
    if ty.typename == "libxl_defbool":
        return TYPE_DEFBOOL
    if isinstance(ty, idl.Enumeration):
        # Enumerations are marshalled as unsigned integers.
        return TYPE_UINT
    if isinstance(ty, idl.Number):
        return TYPE_INT if ty.signed else TYPE_UINT
    if isinstance(ty, idl.Array):
        return TYPE_ARRAY
    if isinstance(ty, idl.Aggregate):
        return TYPE_AGGREGATE
    if ty == idl.string:
        return TYPE_STRING
    return None
def py_wrapstruct(ty):
    """Emit the C typedef wrapping idl type `ty` in a Python object header."""
    lines = [
        'typedef struct {',
        ' PyObject_HEAD;',
        ' %s obj;' % ty.typename,
        '}Py_%s;' % ty.rawname,
        '',
    ]
    # Trailing empty entry plus the extra newline leaves a blank line after
    # the typedef in the generated source.
    return '\n'.join(lines) + '\n'
def fsanitize(name):
    """Sanitise a function name given a C type: whitespace runs become '_'
    and pointer stars become 'ptr'."""
    # '*' -> 'ptr' introduces no whitespace, so it may safely happen before
    # the split/join collapse.
    return '_'.join(name.replace('*', 'ptr').split())
def py_decls(ty):
    """Emit forward declarations for an aggregate idl type: constructor,
    type-check helper, and custom getter/setter prototypes for fields that
    have no builtin marshalling category."""
    l = []
    if isinstance(ty, idl.Aggregate):
        l.append('_hidden Py_%s *Py%s_New(void);\n'%(ty.rawname, ty.rawname))
        l.append('_hidden int Py%s_Check(PyObject *self);\n'%ty.rawname)
        for f in ty.fields:
            if py_type(f.type) is not None:
                # Builtin categories (including TYPE_DEFBOOL) need no custom
                # accessors. The original also compared against TYPE_DEFBOOL
                # here, but that branch was unreachable: any non-None result
                # has already hit this `continue`.
                continue
            if ty.marshal_out():
                l.append('_hidden PyObject *attrib__%s_get(%s *%s);'%(\
                    fsanitize(f.type.typename), f.type.typename, f.name))
            if ty.marshal_in():
                l.append('_hidden int attrib__%s_set(PyObject *v, %s *%s);'%(\
                    fsanitize(f.type.typename), f.type.typename, f.name))
    return '\n'.join(l) + "\n"
def py_attrib_get(ty, f):
    """Emit the C getter function for field `f` of wrapped type `ty`,
    dispatching on the field's marshalling category."""
    t = py_type(f.type)
    l = []
    l.append('static PyObject *py_%s_%s_get(Py_%s *self, void *priv)'%(ty.rawname, f.name, ty.rawname))
    l.append('{')
    if t == TYPE_BOOL:
        l.append(' PyObject *ret;')
        l.append(' ret = (self->obj.%s) ? Py_True : Py_False;'%f.name)
        l.append(' Py_INCREF(ret);')
        l.append(' return ret;')
    elif t == TYPE_DEFBOOL:
        l.append(' return genwrap__defbool_get(&self->obj.%s);'%f.name)
    elif t == TYPE_INT:
        l.append(' return genwrap__ll_get(self->obj.%s);'%f.name)
    elif t == TYPE_UINT:
        l.append(' return genwrap__ull_get(self->obj.%s);'%f.name)
    elif t == TYPE_STRING:
        l.append(' return genwrap__string_get(&self->obj.%s);'%f.name)
    elif t == TYPE_AGGREGATE or t == TYPE_ARRAY:
        # Nested aggregates/arrays are not yet wrapped; generated code raises.
        l.append(' PyErr_SetString(PyExc_NotImplementedError, "Getting %s");'%ty.typename)
        l.append(' return NULL;')
    else:
        # No builtin category: call the hand-written attrib__* helper.
        tn = f.type.typename
        l.append(' return attrib__%s_get((%s *)&self->obj.%s);'%(fsanitize(tn), tn, f.name))
    l.append('}')
    return '\n'.join(l) + "\n\n"
def py_attrib_set(ty, f):
    """Emit the C setter function for field `f` of wrapped type `ty`,
    dispatching on the field's marshalling category."""
    t = py_type(f.type)
    l = []
    l.append('static int py_%s_%s_set(Py_%s *self, PyObject *v, void *priv)'%(ty.rawname, f.name, ty.rawname))
    l.append('{')
    if t == TYPE_BOOL:
        # None/False (or deletion) clear the flag; anything else sets it.
        l.append(' self->obj.%s = (NULL == v || Py_None == v || Py_False == v) ? 0 : 1;'%f.name)
        l.append(' return 0;')
    elif t == TYPE_DEFBOOL:
        l.append(' return genwrap__defbool_set(v, &self->obj.%s);'%f.name)
    elif t == TYPE_UINT or t == TYPE_INT:
        # Convert via a long long temporary, only storing on success.
        l.append(' %slong long tmp;'%(t == TYPE_UINT and 'unsigned ' or ''))
        l.append(' int ret;')
        if t == TYPE_UINT:
            l.append(' ret = genwrap__ull_set(v, &tmp, (%s)~0);'%f.type.typename)
        else:
            l.append(' ret = genwrap__ll_set(v, &tmp, (%s)~0);'%f.type.typename)
        l.append(' if ( ret >= 0 )')
        l.append(' self->obj.%s = tmp;'%f.name)
        l.append(' return ret;')
    elif t == TYPE_STRING:
        l.append(' return genwrap__string_set(v, &self->obj.%s);'%f.name)
    elif t == TYPE_AGGREGATE or t == TYPE_ARRAY:
        # Nested aggregates/arrays are not yet wrapped; generated code raises.
        l.append(' PyErr_SetString(PyExc_NotImplementedError, "Setting %s");'%ty.typename)
        l.append(' return -1;')
    else:
        # No builtin category: call the hand-written attrib__* helper.
        tn = f.type.typename
        l.append(' return attrib__%s_set(v, (%s *)&self->obj.%s);'%(fsanitize(tn), tn, f.name))
    l.append('}')
    return '\n'.join(l) + "\n\n"
def py_object_def(ty):
    """Emit the full C definition for wrapped aggregate `ty`: dealloc/init/new
    functions, the PyGetSetDef table, the PyTypeObject, and the public
    constructor / type-check helpers."""
    l = []
    if ty.dispose_fn is not None:
        # Call the idl-declared dispose function before freeing the object.
        dtor = ' %s(&self->obj);\n'%ty.dispose_fn
    else:
        dtor = ''
    # Boilerplate lifecycle functions, parameterised by the raw type name.
    funcs="""static void Py%(rawname)s_dealloc(Py_%(rawname)s *self)
{
%(dtor)s self->ob_type->tp_free((PyObject *)self);
}
static int Py%(rawname)s_init(Py_%(rawname)s *self, PyObject *args, PyObject *kwds)
{
memset(&self->obj, 0, sizeof(self->obj));
return genwrap__obj_init((PyObject *)self, args, kwds);
}
static PyObject *Py%(rawname)s_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
Py_%(rawname)s *self = (Py_%(rawname)s *)type->tp_alloc(type, 0);
if (self == NULL)
return NULL;
memset(&self->obj, 0, sizeof(self->obj));
return (PyObject *)self;
}
"""%{'rawname': ty.rawname, 'dtor': dtor}
    # Getter/setter table: one entry per non-private field; getters/setters
    # are emitted as NULL when the type is not marshalled in that direction.
    l.append('static PyGetSetDef Py%s_getset[] = {'%ty.rawname)
    for f in ty.fields:
        if f.type.private:
            continue
        l.append(' { .name = "%s", '%f.name)
        if ty.marshal_out():
            l.append(' .get = (getter)py_%s_%s_get, '%(ty.rawname, f.name))
        else:
            l.append(' .get = (getter)NULL, ')
        if ty.marshal_in():
            l.append(' .set = (setter)py_%s_%s_set,'%(ty.rawname, f.name))
        else:
            l.append(' .set = (setter)NULL,')
        l.append(' },')
    l.append(' { .name = NULL }')
    l.append('};')
    # PyTypeObject plus the public constructor/check helpers; the template
    # interpolates ty.rawname for each of its 15 %s slots.
    struct="""
static PyTypeObject Py%s_Type= {
PyObject_HEAD_INIT(NULL)
0,
PKG ".%s",
sizeof(Py_%s),
0,
(destructor)Py%s_dealloc, /* tp_dealloc */
NULL, /* tp_print */
NULL, /* tp_getattr */
NULL, /* tp_setattr */
NULL, /* tp_compare */
NULL, /* tp_repr */
NULL, /* tp_as_number */
NULL, /* tp_as_sequence */
NULL, /* tp_as_mapping */
NULL, /* tp_hash */
NULL, /* tp_call */
NULL, /* tp_str */
NULL, /* tp_getattro */
NULL, /* tp_setattro */
NULL, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /* tp_flags */
"%s", /* tp_doc */
NULL, /* tp_traverse */
NULL, /* tp_clear */
NULL, /* tp_richcompare */
0, /* tp_weaklistoffset */
NULL, /* tp_iter */
NULL, /* tp_iternext */
NULL, /* tp_methods */
NULL, /* tp_members */
Py%s_getset, /* tp_getset */
NULL, /* tp_base */
NULL, /* tp_dict */
NULL, /* tp_descr_get */
NULL, /* tp_descr_set */
0, /* tp_dictoffset */
(initproc)Py%s_init, /* tp_init */
NULL, /* tp_alloc */
Py%s_new, /* tp_new */
};

Py_%s *Py%s_New(void)
{
return (Py_%s *)Py%s_new(&Py%s_Type, NULL, NULL);
}

int Py%s_Check(PyObject *self)
{
return (self->ob_type == &Py%s_Type);
}
"""%tuple(ty.rawname for x in range(15))
    return funcs + '\n'.join(l) + "\n" + struct
def py_initfuncs(types):
    """Emit the C genwrap__init() function that registers every wrapped type
    with the extension module: enumeration values become module-level int
    constants, aggregates become fully initialised Python type objects.
    """
    out = ['void genwrap__init(PyObject *m)', '{']
    for ty in types:
        if isinstance(ty, idl.Enumeration):
            out.extend('    PyModule_AddIntConstant(m, "%s", %s);' % (v.rawname, v.name)
                       for v in ty.values)
        elif isinstance(ty, idl.Aggregate):
            out.extend([
                '    if (PyType_Ready(&Py%s_Type) >= 0) {' % ty.rawname,
                '        Py_INCREF(&Py%s_Type);' % ty.rawname,
                '        PyModule_AddObject(m, "%s", (PyObject *)&Py%s_Type);' % (ty.rawname, ty.rawname),
                '    }',
            ])
        else:
            raise NotImplementedError("unknown type %s (%s)" % (ty.typename, type(ty)))
    out.append('}')
    return '\n'.join(out) + "\n\n"
def tree_frob(types):
    """Return a shallow copy of *types* with every Aggregate's field list
    pruned to usable fields (those with both a name and a known typename).

    Uses a list comprehension instead of filter(): on Python 3 filter()
    returns a one-shot lazy iterator, but ty.fields is iterated several
    times later (py_object_def, the __main__ loop), so it must stay a list.
    """
    ret = types[:]
    for ty in ret:
        if isinstance(ty, idl.Aggregate):
            ty.fields = [f for f in ty.fields
                         if f.name is not None and f.type.typename is not None]
    return ret
if __name__ == '__main__':
    # Usage: genwrap.py <idl> <decls> <defns>
    # Parses the IDL file and writes two generated files: a C header with
    # the wrapper declarations (<decls>) and a C source with the wrapper
    # definitions (<defns>).
    if len(sys.argv) < 4:
        print >>sys.stderr, "Usage: genwrap.py <idl> <decls> <defns>"
        sys.exit(1)
    (_,types) = idl.parse(sys.argv[1])
    types = tree_frob(types)
    # Phase 1: emit the declarations header.
    decls = sys.argv[2]
    f = open(decls, 'w')
    f.write("""#ifndef __PYXL_TYPES_H
#define __PYXL_TYPES_H
/*
 * DO NOT EDIT.
 *
 * This file is autogenerated by
 * "%s"
 */
#define PKG "xen.lowlevel.xl"
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
#define _hidden __attribute__((visibility("hidden")))
#define _protected __attribute__((visibility("protected")))
#else
#define _hidden
#define _protected
#endif
/* Initialise all types */
_hidden void genwrap__init(PyObject *m);
/* Generic type initialiser */
_hidden int genwrap__obj_init(PyObject *self, PyObject *args, PyObject *kwds);
/* Auto-generated get/set functions for simple data-types */
_hidden int genwrap__string_set(PyObject *v, char **str);
_hidden PyObject *genwrap__string_get(char **str);
_hidden PyObject *genwrap__ull_get(unsigned long long val);
_hidden int genwrap__ull_set(PyObject *v, unsigned long long *val, unsigned long long mask);
_hidden PyObject *genwrap__ll_get(long long val);
_hidden int genwrap__ll_set(PyObject *v, long long *val, long long mask);
_hidden PyObject *genwrap__defbool_get(libxl_defbool *db);
_hidden int genwrap__defbool_set(PyObject *v, libxl_defbool *db);
""" % " ".join(sys.argv))
    for ty in [ty for ty in types if isinstance(ty, idl.Aggregate)]:
        f.write('/* Internal API for %s wrapper */\n'%ty.typename)
        f.write(py_wrapstruct(ty))
        f.write(py_decls(ty))
        f.write('\n')
    f.write('#endif /* __PYXL_TYPES_H */\n')
    f.close()
    # Phase 2: emit the definitions file, which #includes the header above.
    defns = sys.argv[3]
    f = open(defns, 'w')
    f.write("""/*
 * DO NOT EDIT.
 *
 * This file is autogenerated by
 * "%s"
 */
#include <Python.h>
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include "libxl.h" /* gah */
#include "%s"
""" % tuple((' '.join(sys.argv),) + (os.path.split(decls)[-1:]),))
    for ty in types:
        if ty.private:
            continue
        if isinstance(ty, idl.Aggregate):
            f.write('/* Attribute get/set functions for %s */\n'%ty.typename)
            for a in ty.fields:
                if a.type.private:
                    continue
                # Getters/setters only for the directions the type marshals.
                if ty.marshal_out():
                    f.write(py_attrib_get(ty,a))
                if ty.marshal_in():
                    f.write(py_attrib_set(ty,a))
            f.write(py_object_def(ty))
    f.write(py_initfuncs(types))
    f.close()
| gpl-2.0 |
dpgeorge/micropython | tests/wipy/uart_irq.py | 14 | 3569 | """
UART IRQ test for the CC3200 based boards.
"""
from machine import UART
import os
import time
# Board detection: pin maps differ per board (same values here, kept separate
# so each board can be tuned independently).
mch = os.uname().machine
if "LaunchPad" in mch:
    uart_pins = [
        [("GP12", "GP13"), ("GP12", "GP13", "GP7", "GP6")],
        [("GP16", "GP17"), ("GP16", "GP17", "GP7", "GP6")],
    ]
elif "WiPy" in mch:
    uart_pins = [
        [("GP12", "GP13"), ("GP12", "GP13", "GP7", "GP6")],
        [("GP16", "GP17"), ("GP16", "GP17", "GP7", "GP6")],
    ]
else:
    raise Exception("Board not supported!")
# just in case we have stdio duplicated on any of the uarts
os.dupterm(None)
# Two UARTs wired to each other; the test sends on one and receives on the other.
uart0 = UART(0, 1000000, pins=uart_pins[0][0])
uart1 = UART(1, 1000000, pins=uart_pins[1][0])
# Counters incremented by the IRQ handlers below.
uart0_int_count = 0
uart1_int_count = 0
def uart0_handler(uart_o):
    """IRQ callback for UART0: count one RX event per interrupt."""
    global uart0_irq
    global uart0_int_count
    if uart0_irq.flags() & UART.RX_ANY:
        uart0_int_count += 1
def uart1_handler(uart_o):
    """IRQ callback for UART1: count one RX event per interrupt."""
    global uart1_irq
    global uart1_int_count
    if uart1_irq.flags() & UART.RX_ANY:
        uart1_int_count += 1
# Arm RX interrupts on both UARTs, then exercise send/receive in both
# directions; each `print(... == ...)` line is compared against the
# expected-output file by the test harness.
uart0_irq = uart0.irq(trigger=UART.RX_ANY, handler=uart0_handler)
uart1_irq = uart1.irq(trigger=UART.RX_ANY, handler=uart1_handler)
uart0.write(b"123")
# wait for the characters to be received
while not uart1.any():
    pass
time.sleep_us(100)
print(uart1.any() == 3)
print(uart1_int_count > 0)
print(uart1_irq.flags() == 0)
print(uart0_irq.flags() == 0)
print(uart1.read() == b"123")
uart1.write(b"12345")
# wait for the characters to be received
while not uart0.any():
    pass
time.sleep_us(100)
print(uart0.any() == 5)
print(uart0_int_count > 0)
print(uart0_irq.flags() == 0)
print(uart1_irq.flags() == 0)
print(uart0.read() == b"12345")
# do it again
uart1_int_count = 0
uart0.write(b"123")
# wait for the characters to be received
while not uart1.any():
    pass
time.sleep_us(100)
print(uart1.any() == 3)
print(uart1_int_count > 0)
print(uart1_irq.flags() == 0)
print(uart0_irq.flags() == 0)
print(uart1.read() == b"123")
# disable the interrupt: bytes still arrive but the handler must not fire
uart1_irq.disable()
# do it again
uart1_int_count = 0
uart0.write(b"123")
# wait for the characters to be received
while not uart1.any():
    pass
time.sleep_us(100)
print(uart1.any() == 3)
print(uart1_int_count == 0)  # no interrupt triggered this time
print(uart1_irq.flags() == 0)
print(uart0_irq.flags() == 0)
print(uart1.read() == b"123")
# enable the interrupt
uart1_irq.enable()
# do it again
uart1_int_count = 0
uart0.write(b"123")
# wait for the characters to be received
while not uart1.any():
    pass
time.sleep_us(100)
print(uart1.any() == 3)
print(uart1_int_count > 0)
print(uart1_irq.flags() == 0)
print(uart0_irq.flags() == 0)
print(uart1.read() == b"123")
uart1_irq.init(trigger=UART.RX_ANY, handler=None)  # No handler
# do it again
uart1_int_count = 0
uart0.write(b"123")
# wait for the characters to be received
while not uart1.any():
    pass
time.sleep_us(100)
print(uart1.any() == 3)
print(uart1_int_count == 0)  # no interrupt handler called
print(uart1_irq.flags() == 0)
print(uart0_irq.flags() == 0)
print(uart1.read() == b"123")
# check for memory leaks: re-creating the irq objects repeatedly must not
# exhaust the heap
for i in range(0, 1000):
    uart0_irq = uart0.irq(trigger=UART.RX_ANY, handler=uart0_handler)
    uart1_irq = uart1.irq(trigger=UART.RX_ANY, handler=uart1_handler)
# next ones must raise (invalid trigger / unsupported wake mode)
try:
    uart0_irq = uart0.irq(trigger=100, handler=uart0_handler)
except:
    print("Exception")
try:
    uart0_irq = uart0.irq(trigger=0)
except:
    print("Exception")
try:
    uart0_irq = uart0.irq(trigger=UART.RX_ANY, wake=Sleep.SUSPENDED)
except:
    print("Exception")
uart0_irq.disable()
uart1_irq.disable()
| mit |
asm-products/movie-database-service | ani/lib/python2.7/site-packages/django/template/debug.py | 121 | 3754 | from django.template.base import Lexer, Parser, tag_re, NodeList, VariableNode, TemplateSyntaxError
from django.utils.encoding import force_text
from django.utils.html import escape
from django.utils.safestring import SafeData, EscapeData
from django.utils.formats import localize
from django.utils.timezone import template_localtime
class DebugLexer(Lexer):
    """Lexer that stamps every token with its (origin, span) source info."""

    def __init__(self, template_string, origin):
        super(DebugLexer, self).__init__(template_string, origin)

    def tokenize(self):
        "Return a list of tokens from a given template_string"
        template = self.template_string
        tokens = []
        pos = 0
        for match in tag_re.finditer(template):
            start, end = match.span()
            # Literal text between the previous tag (or the start) and this tag.
            if pos < start:
                tokens.append(self.create_token(template[pos:start], (pos, start), False))
            # The tag itself.
            tokens.append(self.create_token(template[start:end], (start, end), True))
            pos = end
        tail = template[pos:]
        if tail:
            tokens.append(self.create_token(tail, (pos, pos + len(tail)), False))
        return tokens

    def create_token(self, token_string, source, in_tag):
        # Delegate to the base lexer, then record where the token came from.
        token = super(DebugLexer, self).create_token(token_string, in_tag)
        token.source = self.origin, source
        return token
class DebugParser(Parser):
    """Parser that tracks open block tags and attaches template-source
    information to syntax and filter/function compilation errors."""

    def __init__(self, lexer):
        super(DebugParser, self).__init__(lexer)
        # Stack of (command, source) pairs for block tags currently open.
        self.command_stack = []

    def enter_command(self, command, token):
        self.command_stack.append((command, token.source))

    def exit_command(self):
        self.command_stack.pop()

    def error(self, token, msg):
        return self.source_error(token.source, msg)

    def source_error(self, source, msg):
        exc = TemplateSyntaxError(msg)
        exc.django_template_source = source
        return exc

    def create_nodelist(self):
        return DebugNodeList()

    def create_variable_node(self, contents):
        return DebugVariableNode(contents)

    def extend_nodelist(self, nodelist, node, token):
        # Remember where each node came from before handing off to the base.
        node.source = token.source
        super(DebugParser, self).extend_nodelist(nodelist, node, token)

    def unclosed_block_tag(self, parse_until):
        command, source = self.command_stack.pop()
        msg = "Unclosed tag '%s'. Looking for one of: %s " % (command, ', '.join(parse_until))
        raise self.source_error(source, msg)

    def compile_filter_error(self, token, e):
        # Only tag the exception if a deeper frame has not already done so.
        if not hasattr(e, 'django_template_source'):
            e.django_template_source = token.source

    def compile_function_error(self, token, e):
        if not hasattr(e, 'django_template_source'):
            e.django_template_source = token.source
class DebugNodeList(NodeList):
    """NodeList that tags rendering errors with the failing node's source."""

    def render_node(self, node, context):
        try:
            return node.render(context)
        except Exception as exc:
            # Attach the template location unless a deeper frame already did.
            if not hasattr(exc, 'django_template_source'):
                exc.django_template_source = node.source
            raise
class DebugVariableNode(VariableNode):
    """Variable node that tags rendering errors with this node's source."""
    def render(self, context):
        # Resolve, then apply timezone/localization/text coercion in order;
        # any of these steps may raise.
        try:
            output = self.filter_expression.resolve(context)
            output = template_localtime(output, use_tz=context.use_tz)
            output = localize(output, use_l10n=context.use_l10n)
            output = force_text(output)
        except UnicodeDecodeError:
            # Undecodable bytes render as an empty string rather than a 500.
            return ''
        except Exception as e:
            if not hasattr(e, 'django_template_source'):
                e.django_template_source = self.source
            raise
        # Escape unless the value is already marked safe; values explicitly
        # marked EscapeData are escaped even with autoescape off.
        if (context.autoescape and not isinstance(output, SafeData)) or isinstance(output, EscapeData):
            return escape(output)
        else:
            return output
| agpl-3.0 |
chrisfilda/edx_platform | common/djangoapps/reverification/migrations/0001_initial.py | 53 | 1644 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration creating the MidcourseReverificationWindow table."""
    def forwards(self, orm):
        # Adding model 'MidcourseReverificationWindow'
        db.create_table('reverification_midcoursereverificationwindow', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
            ('start_date', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True, blank=True)),
            ('end_date', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True, blank=True)),
        ))
        db.send_create_signal('reverification', ['MidcourseReverificationWindow'])
    def backwards(self, orm):
        # Deleting model 'MidcourseReverificationWindow'
        db.delete_table('reverification_midcoursereverificationwindow')
    # Frozen ORM state used by South; auto-generated, do not hand-edit.
    models = {
        'reverification.midcoursereverificationwindow': {
            'Meta': {'object_name': 'MidcourseReverificationWindow'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'end_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
        }
    }
    complete_apps = ['reverification']
tdicola/SmartAlarmClock | SmartAlarmClock/www/temboo/Library/Google/Gmail/SendEmail.py | 2 | 5025 | # -*- coding: utf-8 -*-
###############################################################################
#
# SendEmail
# Sends an email using a specified Gmail account.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class SendEmail(Choreography):
    """Choreography wrapper for the /Library/Google/Gmail/SendEmail Choreo."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the SendEmail Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        Choreography.__init__(self, temboo_session, '/Library/Google/Gmail/SendEmail')
    def new_input_set(self):
        # Factory for the input container used by execute_with_results().
        return SendEmailInputSet()
    def _make_result_set(self, result, path):
        return SendEmailResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        return SendEmailChoreographyExecution(session, exec_id, path)
class SendEmailInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the SendEmail
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """

    def set_AttachmentName(self, value):
        """
        Set the value of the AttachmentName input for this Choreo. ((optional, string) The name of the file to attach to the email.)
        """
        self._set_input('AttachmentName', value)

    def set_AttachmentURL(self, value):
        """
        Set the value of the AttachmentURL input for this Choreo. ((optional, string) URL of a hosted file that you wish to add as an attachment.  Use this instead of a normal Attachment.)
        """
        self._set_input('AttachmentURL', value)

    def set_Attachment(self, value):
        """
        Set the value of the Attachment input for this Choreo. ((optional, string) The Base64 encoded contents of the file to attach to the email.  Use this instead of AttachmentURL.)
        """
        self._set_input('Attachment', value)

    def set_BCC(self, value):
        """
        Set the value of the BCC input for this Choreo. ((optional, string) An email address to BCC on the email you're sending. Can be a comma separated list of email addresses.)
        """
        self._set_input('BCC', value)

    def set_CC(self, value):
        """
        Set the value of the CC input for this Choreo. ((optional, string) An email address to CC on the email you're sending. Can be a comma separated list of email addresses.)
        """
        self._set_input('CC', value)

    def set_FromAddress(self, value):
        """
        Set the value of the FromAddress input for this Choreo. ((conditional, string) The name and email address that the message is being sent from.)
        """
        self._set_input('FromAddress', value)

    def set_MessageBody(self, value):
        """
        Set the value of the MessageBody input for this Choreo. ((required, string) The message body for the email.)
        """
        self._set_input('MessageBody', value)

    def set_Password(self, value):
        """
        Set the value of the Password input for this Choreo. ((required, password) Your Gmail password.)
        """
        self._set_input('Password', value)

    def set_Subject(self, value):
        """
        Set the value of the Subject input for this Choreo. ((required, string) The subject line of the email.)
        """
        self._set_input('Subject', value)

    def set_ToAddress(self, value):
        """
        Set the value of the ToAddress input for this Choreo. ((required, string) The email address that you want to send an email to. Can be a comma separated list of email addresses.)
        """
        self._set_input('ToAddress', value)

    def set_Username(self, value):
        """
        Set the value of the Username input for this Choreo. ((required, string) Your full Google email address e.g., martha.temboo@gmail.com.)
        """
        self._set_input('Username', value)
class SendEmailResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the SendEmail Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    # NOTE(review): the parameter name `str` shadows the builtin; left as-is
    # because renaming it would break any keyword-argument callers.
    def getJSONFromString(self, str):
        return json.loads(str)
    def get_Success(self):
        """
        Retrieve the value for the "Success" output from this Choreo execution. ((boolean) Indicates the result of the SMTP operation. The value will be "true" for a successful request.)
        """
        return self._output.get('Success', None)
class SendEmailChoreographyExecution(ChoreographyExecution):
    """Execution handle for SendEmail; builds the matching result set."""
    def _make_result_set(self, response, path):
        return SendEmailResultSet(response, path)
| mit |
jennywoites/MUSSA | MUSSA_Flask/app/API_Rest/GeneradorPlanCarreras/GeneradorPLE/GeneradorRestricciones.py | 1 | 26404 | from app.API_Rest.GeneradorPlanCarreras.Constantes import *
from app.API_Rest.GeneradorPlanCarreras.my_utils import get_str_cuatrimestre, es_horario_valido_para_el_cuatrimestre
def generar_restriccion_la_materia_debe_cursarse_en_unico_cuatrimestre(arch, parametros):
    """Write to *arch* the constraint that each course is taken in at most one
    quarter (sum over quarters of Y_<course>_<quarter> <= 1), and in exactly
    one quarter (additionally >= 1) when the course is mandatory.
    The emitted lines are `prob += (...)` statements for the generated model.
    """
    arch.write(
        "# La materia i se debe cursar en un unico cuatrimestre. Ademas, si es obligatoria, debe cursarse si o si." + ENTER + ENTER)
    for id_materia in parametros.materias:
        ecuacion = "prob += ("
        for cuatrimestre in range(1, parametros.max_cuatrimestres + 1):
            if cuatrimestre > 1:
                ecuacion += " + "
            variable = "Y_{}_{}".format(id_materia, get_str_cuatrimestre(cuatrimestre))
            ecuacion += variable
        ecuacion_complementaria = ecuacion
        ecuacion += " <= 1)"
        ecuacion_complementaria += " >= 1)"
        # Mandatory courses also get the lower bound, forcing them to be taken.
        if parametros.materias[id_materia].tipo == OBLIGATORIA:
            arch.write(ecuacion_complementaria + ENTER)
        arch.write(ecuacion + ENTER)
        arch.write(ENTER)
    arch.write(ENTER)
def generar_restriccion_valor_cuatrimestre_en_que_se_cursa_la_materia(arch, parametros):
    """Write the constraints defining C<course> as the quarter number in which
    the course is taken: sum(q * Y_<course>_q) == C<course>, emitted as a
    matching <= / >= pair.
    """
    arch.write("# Numero de cuatrimestre en que es cursada la materia" + ENTER + ENTER)
    for id_materia in parametros.plan:
        ecuacion = "prob += ("
        for cuatrimestre in range(1, parametros.max_cuatrimestres + 1):
            if cuatrimestre > 1:
                ecuacion += " + "
            variable = "Y_{}_{}".format(id_materia, get_str_cuatrimestre(cuatrimestre))
            ecuacion += "{}*{}".format(cuatrimestre, variable)
        variable_c_materia = "C{}".format(id_materia)
        ecuacion_complementaria = ecuacion
        ecuacion += "<= {})".format(variable_c_materia)
        ecuacion_complementaria += ">= {})".format(variable_c_materia)
        arch.write(ecuacion + ENTER)
        arch.write(ecuacion_complementaria + ENTER)
        arch.write(ENTER)
    arch.write(ENTER)
def generar_restriccion_cuatrimestre_minimo_en_que_se_puede_cursar_la_materia(arch, parametros):
    """Write lower bounds C<course> >= min_quarter + 1 for courses whose
    pending correlative final exam will only be approved in min_quarter.
    No-op when no such minimums were configured.
    """
    if not parametros.cuatrimestre_minimo_para_materia:
        return
    arch.write("# Valor minimo para el cuatrimestre de una materia debido a que"
               "el final de una correlativa se encuentra pendiente y sera aprobado"
               "en ese cuatrimestre minimo" + ENTER + ENTER)
    for id_materia in parametros.cuatrimestre_minimo_para_materia:
        variable_c_materia = "C{}".format(id_materia)
        # The minimum quarter is the one right after the correlative is approved.
        cuatrimestre_min = parametros.cuatrimestre_minimo_para_materia[id_materia] + 1
        ecuacion = "prob += ({} >= {})".format(variable_c_materia, cuatrimestre_min)
        arch.write(ecuacion + ENTER + ENTER)
    arch.write(ENTER)
def generar_restriccion_correlativas(arch, parametros):
    """Write the precedence constraints between each course and the entries
    listed for it in parametros.plan, dispatching on whether the course is
    mandatory or elective (electives need extra handling because they may
    not be taken at all).
    """
    arch.write("# Los cuatrimestres de las correlativas deben ser menores (cuando la materia se cursa)" + ENTER + ENTER)
    for id_materia in parametros.plan:
        correlativas = parametros.plan[id_materia]
        if not correlativas:
            continue
        materia = parametros.materias[id_materia]
        for id_m_correlativa in correlativas:
            if materia.tipo == OBLIGATORIA:
                escribir_ecuacion_correlativa_depende_de_obligatoria(arch, parametros, id_materia, id_m_correlativa)
            else:
                escribir_ecuacion_correlativa_depende_de_electiva(arch, parametros, id_materia, id_m_correlativa)
    arch.write(ENTER + ENTER)
def escribir_ecuacion_correlativa_depende_de_obligatoria(arch, parametros, id_materia, id_m_correlativa):
    """Write the precedence constraint when the dependent course is mandatory.

    The emitted inequality forces C<id_m_correlativa> >= C<id_materia> + 1,
    i.e. the course listed in plan[id_materia] is scheduled at least one
    quarter after id_materia. When that listed course is an elective, a
    big-M term (INFINITO * (1 - sum(Y))) relaxes the constraint if the
    elective is never taken.
    """
    cuatri_materia = "C{}".format(id_materia)
    cuatri_correlativa = "C{}".format(id_m_correlativa)
    if parametros.materias[id_m_correlativa].tipo == OBLIGATORIA:
        ecuacion = "prob += ({} >= {} + 1)".format(cuatri_correlativa, cuatri_materia)
        arch.write(ecuacion + ENTER)
    else:
        sumatoria = obtener_sumatoria_Y_cuatrimestres_para_materia(parametros, parametros.materias[id_m_correlativa])
        ajuste_electiva_no_cursada = "{} * (1 - ({}))".format(INFINITO, sumatoria)
        ecuacion = "prob += ({} + {} >= {} + 1)".format(cuatri_correlativa, ajuste_electiva_no_cursada, cuatri_materia)
        arch.write(ecuacion + ENTER)
def obtener_sumatoria_Y_cuatrimestres_para_materia(parametros, materia):
    """Return the expression string "Y_<id>_<q1> + Y_<id>_<q2> + ..." summing
    the take-in-quarter binary variables of *materia* over every quarter.

    Built with str.join instead of appending " + " and slicing it back off;
    also naturally yields "" when max_cuatrimestres is 0, matching the
    original slicing behavior.
    """
    terminos = ["Y_{}_{}".format(materia.id_materia, get_str_cuatrimestre(c))
                for c in range(1, parametros.max_cuatrimestres + 1)]
    return " + ".join(terminos)
def escribir_ecuacion_correlativa_depende_de_electiva(arch, parametros, id_materia, id_m_correlativa):
    """Write the precedence constraints when the dependent course is elective:
    big-M terms relax the ordering when either elective is not taken, and a
    second constraint pins C<correlativa> to 0 if id_materia is never taken.
    """
    cuatri_materia = "C{}".format(id_materia)
    cuatri_correlativa = "C{}".format(id_m_correlativa)
    # If the first elective is taken, the dependent's quarter must be greater.
    sumatoria_correlativa = obtener_sumatoria_Y_cuatrimestres_para_materia(parametros,
                                                                           parametros.materias[id_m_correlativa])
    ajuste_electiva_no_cursada = "{} * (1 - ({}))".format(INFINITO, sumatoria_correlativa)
    ec_correlativa = "{} + {}".format(cuatri_correlativa, ajuste_electiva_no_cursada)
    sumatoria_primaria = obtener_sumatoria_Y_cuatrimestres_para_materia(parametros, parametros.materias[id_materia])
    ajuste_electiva_primaria_no_cursada = "(1 * ({}))".format(sumatoria_primaria)
    ec_primer_materia = "{} + {}".format(cuatri_materia, ajuste_electiva_primaria_no_cursada)
    ecuacion = "prob += ({} >= {})".format(ec_correlativa, ec_primer_materia)
    arch.write(ecuacion + ENTER)
    # If the first elective is NOT taken, the course depending on it cannot be taken.
    ecuacion = "prob += ({} <= {} * ({}))".format(cuatri_correlativa, INFINITO, sumatoria_primaria)
    arch.write(ecuacion + ENTER)
def generar_restriccion_maxima_cant_materias_por_cuatrimestre(arch, parametros):
    """Write, per quarter, the cap on simultaneous courses: the sum of all
    Y variables (plan courses plus final-project courses) must not exceed
    parametros.max_cant_materias_por_cuatrimestre.
    """
    arch.write("# La cantidad de materias por cuatrimestre no puede superar un valor maximo" + ENTER + ENTER)
    for cuatrimestre in range(1, parametros.max_cuatrimestres + 1):
        ecuacion = "prob += ("
        for id_materia in parametros.plan:
            variable = "Y_{}_{}".format(id_materia, get_str_cuatrimestre(cuatrimestre))
            ecuacion += variable + " + "
        # [:-2] strips the trailing "+ "; the leftover space is harmless in
        # the generated expression.
        if not parametros.materia_trabajo_final:
            ecuacion = ecuacion[:-2]
        else:
            for materia in parametros.materia_trabajo_final:
                variable = "Y_TP_FINAL_{}_{}_{}".format(materia.id_materia, materia.codigo,
                                                        get_str_cuatrimestre(cuatrimestre))
                ecuacion += variable + " + "
            ecuacion = ecuacion[:-2]
        ecuacion += " <= {})".format(parametros.max_cant_materias_por_cuatrimestre)
        arch.write(ecuacion + ENTER)
    arch.write(ENTER + ENTER)
def generar_restriccion_maximo_cuatrimestres_para_func_objetivo(arch, parametros):
    """Write the constraints making TOTAL_CUATRIMESTRES an upper bound on
    every C<course> and C_TP_FINAL_<id>_<code> variable (i.e. the makespan
    that the objective function minimizes).
    """
    arch.write("#TOTAL_CUATRIMESTRES es el maximo de los Ci de las materias"
               "y de lo C_TP_FINAL_i de las materias de trabajo final" + ENTER + ENTER)
    arch.write("prob += (TOTAL_CUATRIMESTRES >= 0)" + ENTER + ENTER)
    for id_materia in parametros.plan:
        var_materia = "C{}".format(id_materia)
        arch.write("prob += ({} <= TOTAL_CUATRIMESTRES)".format(var_materia) + ENTER)
        arch.write("prob += (-{} <= TOTAL_CUATRIMESTRES)".format(var_materia) + ENTER)
    arch.write(ENTER)
    for materia in parametros.materia_trabajo_final:
        var_materia = "C_TP_FINAL_{}_{}".format(materia.id_materia, materia.codigo)
        arch.write("prob += ({} <= TOTAL_CUATRIMESTRES)".format(var_materia) + ENTER)
        arch.write("prob += (-{} <= TOTAL_CUATRIMESTRES)".format(var_materia) + ENTER)
    arch.write(ENTER)
    arch.write(ENTER)
def generar_restriccion_calculo_creditos_obtenidos_por_cuatrimestre(arch, parametros):
    """Write the running-credits definition: CRED<q> equals the credits of
    everything taken in quarter q plus CRED<q-1> (or the pre-accumulated
    credits for the first quarter), emitted as a <= / >= pair.
    """
    arch.write("# Calculo de creditos al terminar cada cuatrimestre" + ENTER + ENTER)
    for i in range(1, parametros.max_cuatrimestres + 1):
        ecuacion = "prob += ("
        for id_materia in parametros.plan:
            materia = parametros.materias[id_materia]
            variable_Y = "Y_{}_{}".format(id_materia, get_str_cuatrimestre(i))
            ecuacion += "{}*{} + ".format(materia.creditos, variable_Y)
        if not parametros.materia_trabajo_final:
            ecuacion = ecuacion[:-2]  # drop the trailing "+ "
        else:
            for materia in parametros.materia_trabajo_final:
                variable_Y = "Y_TP_FINAL_{}_{}_{}".format(materia.id_materia, materia.codigo, get_str_cuatrimestre(i))
                ecuacion += "{}*{} + ".format(materia.creditos, variable_Y)
            ecuacion = ecuacion[:-2]  # drop the trailing "+ "
        # Chain to the previous quarter's accumulator (or the initial credits).
        if i > 1:
            ecuacion += "+ CRED{}".format(get_str_cuatrimestre(i - 1))
        else:
            ecuacion += "+ {}".format(parametros.creditos_preacumulados)
        arch.write(ecuacion + " <= CRED{})".format(get_str_cuatrimestre(i)) + ENTER)
        arch.write(ecuacion + " >= CRED{})".format(get_str_cuatrimestre(i)) + ENTER)
    arch.write(ENTER)
def generar_restriccion_maxima_cantidad_horas_extra_cursada(arch, parametros):
    """Write, per quarter, the cap on extra study time: the weighted sum of
    half-hours of extra work of everything taken that quarter must not exceed
    parametros.max_horas_extras. The weekly figure stands in for the whole
    quarter, so a single constraint per quarter suffices.
    """
    arch.write("# Maxima cantidad de horas extra cursada. El calculo es por una semana en medias"
               "horas de cursada, pero es la misma restriccion para todo el cuatrimestre" + ENTER + ENTER)
    for i in range(1, parametros.max_cuatrimestres + 1):
        ecuacion = "prob += ("
        for id_materia in parametros.plan:
            materia = parametros.materias[id_materia]
            variable_Y = "Y_{}_{}".format(id_materia, get_str_cuatrimestre(i))
            ecuacion += "{}*{} + ".format(materia.medias_horas_extras_cursada, variable_Y)
        if not parametros.materia_trabajo_final:
            ecuacion = ecuacion[:-2]  # drop the trailing "+ "
        else:
            for materia in parametros.materia_trabajo_final:
                variable_Y = "Y_TP_FINAL_{}_{}_{}".format(materia.id_materia, materia.codigo, get_str_cuatrimestre(i))
                ecuacion += "{}*{} + ".format(materia.medias_horas_extras_cursada, variable_Y)
            ecuacion = ecuacion[:-2]  # drop the trailing "+ "
        arch.write(ecuacion + " <= {})".format(parametros.max_horas_extras) + ENTER)
    arch.write(ENTER)
def generar_restriccion_creditos_minimos_ya_obtenidos_para_cursar(arch, parametros):
    """Write, for courses with a minimum-credits prerequisite, the big-M style
    constraint min_credits * Y_<course>_q <= CRED<q-1>: the course can only be
    taken in quarter q if enough credits were accumulated by the end of q-1
    (the pre-accumulated credits are used for the first quarter).
    """
    arch.write(
        "# Restricciones sobre aquellas materias que requieren creditos minimos para poder cursar" + ENTER + ENTER)
    for id_materia in parametros.plan:
        materia = parametros.materias[id_materia]
        if materia.creditos_minimos_aprobados == 0:
            continue
        for i in range(1, parametros.max_cuatrimestres + 1):
            creditos = "CRED{}".format(get_str_cuatrimestre(i - 1)) if i > 1 else parametros.creditos_preacumulados
            var_Y = "Y_{}_{}".format(id_materia, get_str_cuatrimestre(i))
            arch.write("prob += ({}*{} <= {})".format(materia.creditos_minimos_aprobados, var_Y, creditos) + ENTER)
        arch.write(ENTER)
    arch.write(ENTER)
def generar_restriccion_si_se_elige_un_curso_se_cursa_su_horario_completo(arch, parametros):
    """Write the equalities H_<course>_<section>_<q> == R_<course>_<section>_
    <day>_<slot>_<q> for every time slot of a section: choosing a section
    means attending every one of its scheduled slots, and vice versa.
    Sections not offered in a given quarter are skipped here.
    """
    arch.write(
        "#Si la materia se cursa en ese cuatrimestre en ese curso en particular, entonces se deben cursar todos los horarios del mismo" + ENTER + ENTER)
    for cuatri in range(1, parametros.max_cuatrimestres + 1):
        for id_materia in parametros.horarios:
            cursos = parametros.horarios[id_materia]
            for curso in cursos:
                H = "H_{}_{}_{}".format(id_materia, curso.id_curso, get_str_cuatrimestre(cuatri))
                if not es_horario_valido_para_el_cuatrimestre(parametros, curso, cuatri):
                    continue
                for c_horario in curso.horarios:
                    dia = c_horario.dia
                    franjas = c_horario.get_franjas_utilizadas()
                    for franja in franjas:
                        R = "R_{}_{}_{}_{}_{}".format(id_materia, curso.id_curso, dia, franja,
                                                      get_str_cuatrimestre(cuatri))
                        # <= and >= together force H == R.
                        arch.write("prob += ({} <= {})".format(H, R) + ENTER)
                        arch.write("prob += ({} >= {})".format(H, R) + ENTER)
    arch.write(ENTER)
def generar_restriccion_solo_puede_cursarse_en_un_lugar_al_mismo_tiempo(arch, parametros):
    """Write, for every (day, slot, quarter) cell, the equality between the
    cell occupancy variable <day>_<slot>_<q> and the sum of all R variables
    of sections using that cell — "no time-turners": only one section can
    occupy a given time slot. Cells no section uses are pinned to 0.
    """
    arch.write(
        "#No hay giratiempos: Solo puede cursarse una materia en un unico curso en el mismo horario" + ENTER + ENTER)
    for cuatrimestre in range(1, parametros.max_cuatrimestres + 1):
        for dia in parametros.dias:
            for franja in range(parametros.franja_minima, parametros.franja_maxima + 1):
                ec_suma = ""
                for id_materia in parametros.horarios:
                    for curso in parametros.horarios[id_materia]:
                        if not es_horario_valido_para_el_cuatrimestre(parametros, curso, cuatrimestre):
                            continue
                        if es_horario_restriccion_valido(curso, dia, franja):
                            ec_suma += "R_{}_{}_{}_{}_{} + ".format(id_materia, curso.id_curso, dia, franja,
                                                                    get_str_cuatrimestre(cuatrimestre))
                ec_suma = ec_suma[:-3]
                if not ec_suma:
                    ec_suma = "0"
                # The '{}' placeholder is filled with <= and >= to emit an equality pair.
                ecuacion = "prob += ({}_{}_{} {} ".format(dia, franja, get_str_cuatrimestre(cuatrimestre),
                                                          '{}') + ec_suma + ")"
                arch.write(ecuacion.format("<=") + ENTER)
                arch.write(ecuacion.format(">=") + ENTER)
    arch.write(ENTER)
def es_horario_restriccion_valido(curso, dia, franja):
    """Return True if *curso* occupies the (dia, franja) cell, i.e. one of
    its schedule entries falls on that day and covers that time slot.

    Rewritten with any(): short-circuits on the first match and only
    computes the slot list for entries on the matching day.
    """
    return any(
        horario.dia == dia and franja in horario.get_franjas_utilizadas()
        for horario in curso.horarios
    )
def generar_restriccion_si_la_materia_no_se_cursa_en_ese_cuatrimestre_no_se_cursa_ninguno_de_sus_cursos(arch,
                                                                                                        parametros):
    """Write Y_<course>_<q> >= H_<course>_<section>_<q> for every section:
    if the course is not taken in a quarter, none of its sections can be
    chosen in that quarter.
    """
    arch.write(
        "# Si la materia no se cursa ese cuatrimestre, entonces no puede cursarse en ninguno de los cursos de ese cuatrimestre" + ENTER + ENTER)
    for cuatrimestre in range(1, parametros.max_cuatrimestres + 1):
        for id_materia in parametros.horarios:
            for curso in parametros.horarios[id_materia]:
                Y = "Y_{}_{}".format(id_materia, get_str_cuatrimestre(cuatrimestre))
                R = "H_{}_{}_{}".format(id_materia, curso.id_curso, get_str_cuatrimestre(cuatrimestre))
                ecuacion = "prob += ({} >= {})".format(Y, R)
                arch.write(ecuacion + ENTER)
    arch.write(ENTER)
def generar_restriccion_la_materia_no_puede_cursarse_en_mas_de_un_curso(arch, parametros):
    """Write the per-quarter equality Y_<course>_<q> == sum of its section
    variables H_<course>_<section>_<q>: taking the course means choosing
    exactly one section (the H variables are binary).
    """
    arch.write("# La materia no puede cursarse en mas de un curso en el cuatrimestre" + ENTER + ENTER)
    for cuatrimestre in range(1, parametros.max_cuatrimestres + 1):
        for id_materia in parametros.horarios:
            ecuacion = ""
            for curso in parametros.horarios[id_materia]:
                ecuacion += "H_{}_{}_{} + ".format(id_materia, curso.id_curso, get_str_cuatrimestre(cuatrimestre))
            Y = "Y_{}_{}".format(id_materia, get_str_cuatrimestre(cuatrimestre))
            # ecuacion[:-3] strips the trailing " + ".
            arch.write("prob += ({} <= {})".format(Y, ecuacion[:-3]) + ENTER)
            arch.write("prob += ({} >= {})".format(Y, ecuacion[:-3]) + ENTER)
    arch.write(ENTER)
def generar_restriccion_creditos_minimos_electivas(arch, parametros):
    """Write the bound on total elective credits: the credit-weighted sum of
    all elective Y variables must reach the configured minimum, and (as a
    temporary workaround, see FIXME) exceed it by at most one extra course's
    worth of credits. No-op when the minimum is 0.
    """
    if parametros.creditos_minimos_electivas == 0:
        return
    arch.write("#Se debe realizar un minimo de creditos de materias electivas" + ENTER + ENTER)
    ecuacion = "prob += ("
    for cuatrimestre in range(1, parametros.max_cuatrimestres + 1):
        for id_materia in parametros.materias:
            materia = parametros.materias[id_materia]
            if materia.tipo == OBLIGATORIA:
                continue
            Y = "Y_{}_{}".format(id_materia, get_str_cuatrimestre(cuatrimestre))
            ecuacion += Y + "*" + str(materia.creditos) + " + "
    ecuacion = ecuacion[:-3]
    # FIXME: this doesn't work for some unknown reason. Temporary workaround:
    # cap elective credits at the minimum plus 6 (one extra course).
    # arch.write(ecuacion + " <= CREDITOS_ELECTIVAS)" + ENTER)
    # arch.write(ecuacion + " >= CREDITOS_ELECTIVAS)" + ENTER)
    # arch.write("prob += (CREDITOS_ELECTIVAS >= " + str(parametros.creditos_minimos_electivas) + ")" + ENTER + ENTER)
    CREDITOS_UNA_MATERIA_EXTRA = 6
    arch.write(ecuacion + " >= " + str(parametros.creditos_minimos_electivas) + ")" + ENTER + ENTER)
    arch.write(ecuacion + " <= " + str(parametros.creditos_minimos_electivas + CREDITOS_UNA_MATERIA_EXTRA) + ")"
               + ENTER + ENTER)
def generar_restriccion_creditos_minimos_por_tematica(arch, parametros):
    """Emit a minimum-credit constraint per elective topic (tematica).

    For each configured topic, the summed credits of elective subjects whose
    main topics include it must reach the configured minimum.
    Emits nothing when no per-topic minimums are configured.
    """
    if not parametros.creditos_minimos_tematicas:
        return
    # Bug fix: the implicit string concatenation was missing a space,
    # producing "electivascon" in the generated comment.
    arch.write("#Se debe realizar un minimo de creditos de materias electivas "
               "con diferentes tematicas" + ENTER + ENTER)
    for tematica in parametros.creditos_minimos_tematicas:
        ecuacion = "prob += ("
        for cuatrimestre in range(1, parametros.max_cuatrimestres + 1):
            for id_materia in parametros.materias:
                materia = parametros.materias[id_materia]
                # Only electives tagged with this topic participate.
                if materia.tipo == OBLIGATORIA or tematica not in materia.tematicas_principales:
                    continue
                Y = "Y_{}_{}".format(id_materia, get_str_cuatrimestre(cuatrimestre))
                ecuacion += Y + "*" + str(materia.creditos) + " + "
        # Drop the trailing " + " separator.
        ecuacion = ecuacion[:-3]
        arch.write(ecuacion + " >= " + str(parametros.creditos_minimos_tematicas[tematica]) + ")" + ENTER + ENTER)
    arch.write(ENTER)
def generar_restriccion_no_todos_los_cursos_se_dictan_ambos_cuatrimestres(arch, parametros):
    """Pin to zero the variables of course sections not offered in a semester."""
    arch.write("# No todos los cursos se dictan ambos cuatrimestres" + ENTER + ENTER)
    for cuatrimestre in range(1, parametros.max_cuatrimestres + 1):
        etiqueta = get_str_cuatrimestre(cuatrimestre)
        for id_materia, cursos in parametros.horarios.items():
            for curso in cursos:
                if es_horario_valido_para_el_cuatrimestre(parametros, curso, cuatrimestre):
                    continue
                variable = "H_{}_{}_{}".format(id_materia, curso.id_curso, etiqueta)
                # <= 0 together with >= 0 forces the variable to exactly zero.
                arch.write("prob += ({} <= 0)".format(variable) + ENTER)
                arch.write("prob += ({} >= 0)".format(variable) + ENTER)
    arch.write(ENTER)
def generar_restriccion_horarios_cursos(arch, parametros):
    """Emit every schedule-related constraint family, in a fixed order."""
    familias = (
        generar_restriccion_si_se_elige_un_curso_se_cursa_su_horario_completo,
        generar_restriccion_solo_puede_cursarse_en_un_lugar_al_mismo_tiempo,
        generar_restriccion_si_la_materia_no_se_cursa_en_ese_cuatrimestre_no_se_cursa_ninguno_de_sus_cursos,
        generar_restriccion_la_materia_no_puede_cursarse_en_mas_de_un_curso,
        generar_restriccion_no_todos_los_cursos_se_dictan_ambos_cuatrimestres,
    )
    for generar in familias:
        generar(arch, parametros)
def generar_restriccion_el_trabajo_debe_cursarse_en_unico_cuatrimestre(arch, parametros):
    """Force each part of the final project into exactly one semester."""
    arch.write("# La El trabajo final debe cursar (cada una de sus partes) "
               "en un unico cuatrimestre. Ademas, es obligatorio" + ENTER + ENTER)
    for materia in parametros.materia_trabajo_final:
        variables = [
            "Y_TP_FINAL_{}_{}_{}".format(materia.id_materia, materia.codigo,
                                         get_str_cuatrimestre(c))
            for c in range(1, parametros.max_cuatrimestres + 1)
        ]
        ecuacion = "prob += (" + " + ".join(variables)
        # <= 1 plus >= 1 encodes equality: exactly one semester is chosen.
        arch.write(ecuacion + " <= 1)" + ENTER)
        arch.write(ecuacion + " >= 1)" + ENTER)
        arch.write(ENTER)
    arch.write(ENTER)
def generar_restriccion_valor_cuatrimestre_en_que_se_cursa_el_trabajo_final(arch, parametros):
    """Bind C_TP_FINAL_* to the semester number in which each part is taken.

    The weighted sum sum(c * Y_TP_FINAL_..._c) equals the chosen semester
    because exactly one Y variable per part is 1.
    """
    arch.write("# Numero de cuatrimestre en que son "
               "cursadas las partes del trabajo final" + ENTER + ENTER)
    for materia in parametros.materia_trabajo_final:
        terminos = " + ".join(
            "{}*Y_TP_FINAL_{}_{}_{}".format(c, materia.id_materia, materia.codigo,
                                            get_str_cuatrimestre(c))
            for c in range(1, parametros.max_cuatrimestres + 1)
        )
        variable_c = "C_TP_FINAL_{}_{}".format(materia.id_materia, materia.codigo)
        base = "prob += (" + terminos
        # <= plus >= encodes equality with the C_* variable.
        arch.write(base + "<= {})".format(variable_c) + ENTER)
        arch.write(base + ">= {})".format(variable_c) + ENTER)
        arch.write(ENTER)
    arch.write(ENTER)
def generar_restriccion_creditos_minimos_ya_obtenidos_para_cursar_el_trabajo_final(arch, parametros):
    """Emit minimum accrued-credit constraints for each final-project part.

    For semester i the available credits are those accumulated up to the
    previous semester (CRED<i-1>), or the pre-accumulated credits when i == 1.
    Parts with no minimum configured are skipped.
    """
    arch.write("# Restriccion de creditos minimos para el trabajo final" + ENTER + ENTER)
    for materia in parametros.materia_trabajo_final:
        if materia.creditos_minimos_aprobados == 0:
            continue
        for i in range(1, parametros.max_cuatrimestres + 1):
            creditos = "CRED{}".format(get_str_cuatrimestre(i - 1)) if i > 1 else parametros.creditos_preacumulados
            # Bug fix: get_str_cuatrimestre was applied twice
            # (get_str_cuatrimestre(get_str_cuatrimestre(i))), producing a
            # variable name inconsistent with the Y_TP_FINAL_* variables
            # declared by the other trabajo-final constraints.
            variable_Y = "Y_TP_FINAL_{}_{}_{}".format(materia.id_materia, materia.codigo,
                                                      get_str_cuatrimestre(i))
            arch.write("prob += ({}*{} <= {})".format(materia.creditos_minimos_aprobados, variable_Y, creditos) + ENTER)
        arch.write(ENTER)
    arch.write(ENTER)
def generar_restriccion_las_partes_del_tp_se_deben_hacer_en_cuatrimestres_consecutivos(arch, parametros):
    """Force consecutive parts of the final project into consecutive semesters."""
    arch.write("# Las partes del tp se deben hacer en cuatrimestres consecutivos" + ENTER + ENTER)
    partes = parametros.materia_trabajo_final
    for anterior, actual in zip(partes, partes[1:]):
        c_anterior = "C_TP_FINAL_{}_{}".format(anterior.id_materia, anterior.codigo)
        c_actual = "C_TP_FINAL_{}_{}".format(actual.id_materia, actual.codigo)
        # C_anterior + 1 == C_actual, expressed as a <= / >= pair.
        arch.write("prob += ({} + 1 <= {})".format(c_anterior, c_actual) + ENTER)
        arch.write("prob += ({} + 1 >= {})".format(c_anterior, c_actual) + ENTER)
        arch.write(ENTER)
    arch.write(ENTER)
def generar_restriccion_materias_incompatibles(arch, parametros):
    """At most one subject of each mutually-incompatible group may be taken."""
    arch.write("# Si una materia es incompatible con otra, solo puede "
               "cursarse una de ellas" + ENTER + ENTER)
    for id_materia, ids_incompatibles in parametros.materias_incompatibles.items():
        # The subject itself belongs to its own incompatibility group.
        grupo = ids_incompatibles + [id_materia]
        variables = []
        for id_incompatible in grupo:
            for cuatrimestre in range(1, parametros.max_cuatrimestres + 1):
                variables.append("Y_{}_{}".format(id_incompatible,
                                                  get_str_cuatrimestre(cuatrimestre)))
        arch.write("prob += (" + " + ".join(variables) + " <= 1)" + ENTER)
        arch.write(ENTER)
    arch.write(ENTER)
def generar_restriccion_maxima_cantidad_horas_cursada(arch, parametros):
    """Cap the weekly class-hours load of every semester."""
    arch.write("# Maxima cantidad de horas semanales de cursada (se calcula una semana"
               " por cuatrimestre)" + ENTER + ENTER)
    for cuatrimestre in range(1, parametros.max_cuatrimestres + 1):
        etiqueta = get_str_cuatrimestre(cuatrimestre)
        terminos = []
        for id_materia, cursos in parametros.horarios.items():
            for curso in cursos:
                terminos.append("{}*H_{}_{}_{}".format(curso.medias_horas_cursada,
                                                       id_materia, curso.id_curso, etiqueta))
        arch.write("prob += ({} <= {})".format(" + ".join(terminos),
                                               parametros.max_horas_cursada) + ENTER)
    arch.write(ENTER + ENTER)
def generar_restriccion_trabajo_final(arch, parametros):
    """Emit every constraint family of the final project (trabajo final).

    Does nothing when the plan has no final-project subjects configured.
    """
    if not parametros.materia_trabajo_final:
        return
    familias = (
        generar_restriccion_el_trabajo_debe_cursarse_en_unico_cuatrimestre,
        generar_restriccion_valor_cuatrimestre_en_que_se_cursa_el_trabajo_final,
        generar_restriccion_creditos_minimos_ya_obtenidos_para_cursar_el_trabajo_final,
        generar_restriccion_las_partes_del_tp_se_deben_hacer_en_cuatrimestres_consecutivos,
    )
    for generar in familias:
        generar(arch, parametros)
def generar_restricciones(arch, parametros):
    """Write every constraint of the LP model to *arch*, in a fixed order."""
    familias = (
        generar_restriccion_la_materia_debe_cursarse_en_unico_cuatrimestre,
        generar_restriccion_valor_cuatrimestre_en_que_se_cursa_la_materia,
        generar_restriccion_correlativas,
        generar_restriccion_calculo_creditos_obtenidos_por_cuatrimestre,
        generar_restriccion_creditos_minimos_ya_obtenidos_para_cursar,
        generar_restriccion_maxima_cant_materias_por_cuatrimestre,
        generar_restriccion_maxima_cantidad_horas_extra_cursada,
        generar_restriccion_maxima_cantidad_horas_cursada,
        generar_restriccion_maximo_cuatrimestres_para_func_objetivo,
        generar_restriccion_horarios_cursos,
        generar_restriccion_creditos_minimos_electivas,
        generar_restriccion_materias_incompatibles,
        generar_restriccion_cuatrimestre_minimo_en_que_se_puede_cursar_la_materia,
        generar_restriccion_trabajo_final,
    )
    for generar in familias:
        generar(arch, parametros)
| gpl-3.0 |
Cactuslegs/audacity-of-nope | lib-src/lv2/lv2/plugins/eg04-sampler.lv2/waflib/Tools/cs.py | 133 | 4142 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib import Utils,Task,Options,Logs,Errors
from waflib.TaskGen import before_method,after_method,feature
from waflib.Tools import ccroot
from waflib.Configure import conf
import os,tempfile
# Environment variables consumed by 'cs' task generators (see waflib.Tools.ccroot).
ccroot.USELIB_VARS['cs']=set(['CSFLAGS','ASSEMBLIES','RESOURCES'])
# C# shared libraries keep their file name verbatim (no 'lib' prefix/suffix pattern).
ccroot.lib_patterns['csshlib']=['%s']
@feature('cs')
@before_method('process_source')
def apply_cs(self):
    """Create the C# compilation task for a 'cs' task generator.

    Splits self.source into .cs inputs (fed to a single 'mcs' task) and
    other nodes (left for regular processing), derives the target kind
    (library vs exe) from the output name, and schedules installation.
    """
    cs_nodes=[]
    no_nodes=[]
    # Partition the sources: only *.cs files go to the compiler task.
    for x in self.to_nodes(self.source):
        if x.name.endswith('.cs'):
            cs_nodes.append(x)
        else:
            no_nodes.append(x)
    self.source=no_nodes
    # 'library' when the target name ends in .dll, otherwise an executable.
    bintype=getattr(self,'bintype',self.gen.endswith('.dll')and'library'or'exe')
    self.cs_task=tsk=self.create_task('mcs',cs_nodes,self.path.find_or_declare(self.gen))
    tsk.env.CSTYPE='/target:%s'%bintype
    tsk.env.OUT='/out:%s'%tsk.outputs[0].abspath()
    self.env.append_value('CSFLAGS','/platform:%s'%getattr(self,'platform','anycpu'))
    # Default install locations: executables to BINDIR, libraries to LIBDIR.
    inst_to=getattr(self,'install_path',bintype=='exe'and'${BINDIR}'or'${LIBDIR}')
    if inst_to:
        mod=getattr(self,'chmod',bintype=='exe'and Utils.O755 or Utils.O644)
        self.install_task=self.bld.install_files(inst_to,self.cs_task.outputs[:],env=self.env,chmod=mod)
@feature('cs')
@after_method('apply_cs')
def use_cs(self):
    """Process the 'use' attribute.

    Each name is resolved to a sibling task generator when possible and
    referenced through its output assembly; unknown names are passed
    straight to the compiler as /reference: flags.
    """
    names=self.to_list(getattr(self,'use',[]))
    get=self.bld.get_tgen_by_name
    for x in names:
        try:
            y=get(x)
        except Errors.WafError:
            # Not a known target in this build: assume an external assembly.
            self.env.append_value('CSFLAGS','/reference:%s'%x)
            continue
        y.post()
        # Accept both C# targets (cs_task) and native link targets (link_task).
        tsk=getattr(y,'cs_task',None)or getattr(y,'link_task',None)
        if not tsk:
            self.bld.fatal('cs task has no link task for use %r'%self)
        # Declare the dependency and order the tasks accordingly.
        self.cs_task.dep_nodes.extend(tsk.outputs)
        self.cs_task.set_run_after(tsk)
        self.env.append_value('CSFLAGS','/reference:%s'%tsk.outputs[0].abspath())
@feature('cs')
@after_method('apply_cs','use_cs')
def debug_cs(self):
    """Enable C# debugging information ('csdebug' attribute or env.CSDEBUG).

    Registers the .mdb (mono) or .pdb (MS csc) side file as an extra task
    output and maps the setting to the matching /debug compiler flags.
    """
    csdebug=getattr(self,'csdebug',self.env.CSDEBUG)
    if not csdebug:
        return
    node=self.cs_task.outputs[0]
    if self.env.CS_NAME=='mono':
        out=node.parent.find_or_declare(node.name+'.mdb')
    else:
        out=node.change_ext('.pdb')
    self.cs_task.outputs.append(out)
    try:
        self.install_task.source.append(out)
    except AttributeError:
        # No install task was created (install_path was falsy); nothing to do.
        pass
    if csdebug=='pdbonly':
        val=['/debug+','/debug:pdbonly']
    elif csdebug=='full':
        val=['/debug+','/debug:full']
    else:
        val=['/debug-']
    self.env.append_value('CSFLAGS',val)
class mcs(Task.Task):
    """Task compiling C# sources into one assembly with a single compiler call."""
    color='YELLOW'
    run_str='${MCS} ${CSTYPE} ${CSFLAGS} ${ASS_ST:ASSEMBLIES} ${RES_ST:RESOURCES} ${OUT} ${SRC}'
    def exec_command(self,cmd,**kw):
        """Run the compiler, falling back to a response file (@file) when the
        command line would exceed the ~8k character limit (Windows)."""
        bld=self.generator.bld
        try:
            if not kw.get('cwd',None):
                kw['cwd']=bld.cwd
        except AttributeError:
            # bld.cwd not set yet: default to the variant (build) directory.
            bld.cwd=kw['cwd']=bld.variant_dir
        try:
            tmp=None
            if isinstance(cmd,list)and len(' '.join(cmd))>=8192:
                # Too long for direct execution: write all arguments after the
                # program name to a temporary response file.
                program=cmd[0]
                cmd=[self.quote_response_command(x)for x in cmd]
                (fd,tmp)=tempfile.mkstemp()
                # NOTE(review): writes a str to a raw fd — Python 2 semantics;
                # under Python 3 this would need bytes. Confirm target version.
                os.write(fd,'\r\n'.join(i.replace('\\','\\\\')for i in cmd[1:]))
                os.close(fd)
                cmd=[program,'@'+tmp]
            ret=self.generator.bld.exec_command(cmd,**kw)
        finally:
            # Always remove the temporary response file, best effort.
            if tmp:
                try:
                    os.remove(tmp)
                except OSError:
                    pass
        return ret
    def quote_response_command(self,flag):
        """Quote a single compiler flag for use inside a response file."""
        if flag.lower()=='/noconfig':
            # /noconfig is not allowed inside a response file; drop it.
            return''
        if flag.find(' ')>-1:
            # Quote only the value part of path-carrying options.
            for x in('/r:','/reference:','/resource:','/lib:','/out:'):
                if flag.startswith(x):
                    flag='%s"%s"'%(x,flag[len(x):])
                    break
            else:
                flag='"%s"'%flag
        return flag
def configure(conf):
    """Detect the C# compiler (csc/mcs/gmcs) and set the flag templates.

    CS_NAME is set to 'mono' when the detected binary looks like mcs; the
    debug_cs method uses it to pick the side-file extension (.mdb vs .pdb).
    """
    csc=getattr(Options.options,'cscbinary',None)
    if csc:
        # Compiler path forced by the user via --with-csc-binary.
        conf.env.MCS=csc
    conf.find_program(['csc','mcs','gmcs'],var='MCS')
    conf.env.ASS_ST='/r:%s'
    conf.env.RES_ST='/resource:%s'
    conf.env.CS_NAME='csc'
    if str(conf.env.MCS).lower().find('mcs')>-1:
        conf.env.CS_NAME='mono'
def options(opt):
    """Add the --with-csc-binary option used to force a specific C# compiler."""
    opt.add_option('--with-csc-binary',type='string',dest='cscbinary')
class fake_csshlib(Task.Task):
    """Dummy task representing a prebuilt C# shared library: it never runs,
    only publishes the file signatures of its outputs."""
    color='YELLOW'
    inst_to=None
    def runnable_status(self):
        # Compute file signatures so dependents rebuild when the files change.
        for x in self.outputs:
            x.sig=Utils.h_file(x.abspath())
        return Task.SKIP_ME
@conf
def read_csshlib(self,name,paths=[]):
    """Declare a prebuilt C# shared library, usable via the 'use' attribute.

    NOTE(review): the mutable default ``paths=[]`` is shared across calls;
    it appears to be passed through unmodified, but confirm before relying
    on mutation.
    """
    return self(name=name,features='fake_lib',lib_paths=paths,lib_type='csshlib')
| gpl-2.0 |
ericfc/django | django/contrib/sites/models.py | 316 | 3743 | from __future__ import unicode_literals
import string
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.db import models
from django.db.models.signals import pre_delete, pre_save
from django.http.request import split_domain_port
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
SITE_CACHE = {}
def _simple_domain_name_validator(value):
    """
    Validates that the given value contains no whitespaces to prevent common
    typos.
    """
    if not value:
        return
    # Any whitespace character (space, tab, newline, ...) is rejected.
    if any(char in value for char in string.whitespace):
        raise ValidationError(
            _("The domain name cannot contain any spaces or tabs."),
            code='invalid',
        )
class SiteManager(models.Manager):
    """Manager providing cached lookups of Site objects by id or request host."""
    use_in_migrations = True

    def _get_site_by_id(self, site_id):
        # A cache hit avoids a database query on subsequent calls.
        if site_id not in SITE_CACHE:
            site = self.get(pk=site_id)
            SITE_CACHE[site_id] = site
        return SITE_CACHE[site_id]

    def _get_site_by_request(self, request):
        host = request.get_host()
        try:
            # First attempt to look up the site by host with or without port.
            if host not in SITE_CACHE:
                SITE_CACHE[host] = self.get(domain__iexact=host)
            return SITE_CACHE[host]
        except Site.DoesNotExist:
            # Fallback to looking up site after stripping port from the host.
            domain, port = split_domain_port(host)
            if not port:
                raise
            if domain not in SITE_CACHE:
                SITE_CACHE[domain] = self.get(domain__iexact=domain)
            return SITE_CACHE[domain]

    def get_current(self, request=None):
        """
        Returns the current Site based on the SITE_ID in the project's settings.
        If SITE_ID isn't defined, it returns the site with domain matching
        request.get_host(). The ``Site`` object is cached the first time it's
        retrieved from the database.
        """
        # Imported lazily so the model can be loaded before settings are ready.
        from django.conf import settings
        if getattr(settings, 'SITE_ID', ''):
            site_id = settings.SITE_ID
            return self._get_site_by_id(site_id)
        elif request:
            return self._get_site_by_request(request)
        raise ImproperlyConfigured(
            "You're using the Django \"sites framework\" without having "
            "set the SITE_ID setting. Create a site in your database and "
            "set the SITE_ID setting or pass a request to "
            "Site.objects.get_current() to fix this error."
        )

    def clear_cache(self):
        """Clears the ``Site`` object cache."""
        global SITE_CACHE
        SITE_CACHE = {}
@python_2_unicode_compatible
class Site(models.Model):
    """A single web site (domain + display name) served by this project."""
    # Domain is unique and validated against embedded whitespace.
    domain = models.CharField(_('domain name'), max_length=100,
        validators=[_simple_domain_name_validator], unique=True)
    name = models.CharField(_('display name'), max_length=50)

    objects = SiteManager()

    class Meta:
        db_table = 'django_site'
        verbose_name = _('site')
        verbose_name_plural = _('sites')
        ordering = ('domain',)

    def __str__(self):
        return self.domain
def clear_site_cache(sender, **kwargs):
    """
    Clears the cache (if primed) each time a site is saved or deleted
    """
    site = kwargs['instance']
    db_alias = kwargs['using']
    # Drop the entry keyed by primary key, if present.
    try:
        del SITE_CACHE[site.pk]
    except KeyError:
        pass
    # Drop the entry keyed by domain; the row may already be gone.
    try:
        domain = Site.objects.using(db_alias).get(pk=site.pk).domain
        del SITE_CACHE[domain]
    except (KeyError, Site.DoesNotExist):
        pass


pre_save.connect(clear_site_cache, sender=Site)
pre_delete.connect(clear_site_cache, sender=Site)
| bsd-3-clause |
fenginx/django | django/db/models/functions/datetime.py | 13 | 11071 | from datetime import datetime
from django.conf import settings
from django.db.models.expressions import Func
from django.db.models.fields import (
DateField, DateTimeField, DurationField, Field, IntegerField, TimeField,
)
from django.db.models.lookups import (
Transform, YearExact, YearGt, YearGte, YearLt, YearLte,
)
from django.utils import timezone
class TimezoneMixin:
    """Mixin computing the time zone name used to localize datetimes in SQL."""
    # Explicit tzinfo supplied by the caller; None means "use the current TZ".
    tzinfo = None

    def get_tzname(self):
        # Timezone conversions must happen to the input datetime *before*
        # applying a function. 2015-12-31 23:00:00 -02:00 is stored in the
        # database as 2016-01-01 01:00:00 +00:00. Any results should be
        # based on the input datetime not the stored datetime.
        tzname = None
        if settings.USE_TZ:
            if self.tzinfo is None:
                tzname = timezone.get_current_timezone_name()
            else:
                tzname = timezone._get_timezone_name(self.tzinfo)
        return tzname
class Extract(TimezoneMixin, Transform):
    """Extract a named component (year, month, hour, ...) of a
    date/time/duration expression as an integer, in SQL."""
    lookup_name = None
    output_field = IntegerField()

    def __init__(self, expression, lookup_name=None, tzinfo=None, **extra):
        # Subclasses set lookup_name on the class; the generic Extract
        # requires it as an argument.
        if self.lookup_name is None:
            self.lookup_name = lookup_name
        if self.lookup_name is None:
            raise ValueError('lookup_name must be provided')
        self.tzinfo = tzinfo
        super().__init__(expression, **extra)

    def as_sql(self, compiler, connection):
        sql, params = compiler.compile(self.lhs)
        lhs_output_field = self.lhs.output_field
        # Dispatch to the backend operation matching the source field type.
        if isinstance(lhs_output_field, DateTimeField):
            tzname = self.get_tzname()
            sql = connection.ops.datetime_extract_sql(self.lookup_name, sql, tzname)
        elif isinstance(lhs_output_field, DateField):
            sql = connection.ops.date_extract_sql(self.lookup_name, sql)
        elif isinstance(lhs_output_field, TimeField):
            sql = connection.ops.time_extract_sql(self.lookup_name, sql)
        elif isinstance(lhs_output_field, DurationField):
            if not connection.features.has_native_duration_field:
                raise ValueError('Extract requires native DurationField database support.')
            sql = connection.ops.time_extract_sql(self.lookup_name, sql)
        else:
            # resolve_expression has already validated the output_field so this
            # assert should never be hit.
            assert False, "Tried to Extract from an invalid type."
        return sql, params

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        copy = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
        field = copy.lhs.output_field
        if not isinstance(field, (DateField, DateTimeField, TimeField, DurationField)):
            raise ValueError(
                'Extract input expression must be DateField, DateTimeField, '
                'TimeField, or DurationField.'
            )
        # Passing dates to functions expecting datetimes is most likely a mistake.
        if type(field) == DateField and copy.lookup_name in ('hour', 'minute', 'second'):
            raise ValueError(
                "Cannot extract time component '%s' from DateField '%s'. " % (copy.lookup_name, field.name)
            )
        return copy
class ExtractYear(Extract):
    lookup_name = 'year'


class ExtractIsoYear(Extract):
    """Return the ISO-8601 week-numbering year."""
    lookup_name = 'iso_year'


class ExtractMonth(Extract):
    lookup_name = 'month'


class ExtractDay(Extract):
    lookup_name = 'day'


class ExtractWeek(Extract):
    """
    Return 1-52 or 53, based on ISO-8601, i.e., Monday is the first of the
    week.
    """
    lookup_name = 'week'


class ExtractWeekDay(Extract):
    """
    Return Sunday=1 through Saturday=7.

    To replicate this in Python: (mydatetime.isoweekday() % 7) + 1
    """
    lookup_name = 'week_day'


class ExtractQuarter(Extract):
    lookup_name = 'quarter'


class ExtractHour(Extract):
    lookup_name = 'hour'


class ExtractMinute(Extract):
    lookup_name = 'minute'


class ExtractSecond(Extract):
    lookup_name = 'second'


# Date-based extracts are registered on DateField (and, through field
# inheritance, DateTimeField).
DateField.register_lookup(ExtractYear)
DateField.register_lookup(ExtractMonth)
DateField.register_lookup(ExtractDay)
DateField.register_lookup(ExtractWeekDay)
DateField.register_lookup(ExtractWeek)
DateField.register_lookup(ExtractIsoYear)
DateField.register_lookup(ExtractQuarter)

# Time components are available on TimeField and DateTimeField.
TimeField.register_lookup(ExtractHour)
TimeField.register_lookup(ExtractMinute)
TimeField.register_lookup(ExtractSecond)

DateTimeField.register_lookup(ExtractHour)
DateTimeField.register_lookup(ExtractMinute)
DateTimeField.register_lookup(ExtractSecond)

# Allow year comparisons (exact/gt/gte/lt/lte) directly on the year extracts.
ExtractYear.register_lookup(YearExact)
ExtractYear.register_lookup(YearGt)
ExtractYear.register_lookup(YearGte)
ExtractYear.register_lookup(YearLt)
ExtractYear.register_lookup(YearLte)

ExtractIsoYear.register_lookup(YearExact)
ExtractIsoYear.register_lookup(YearGt)
ExtractIsoYear.register_lookup(YearGte)
ExtractIsoYear.register_lookup(YearLt)
ExtractIsoYear.register_lookup(YearLte)
class Now(Func):
    """Database-side current timestamp."""
    template = 'CURRENT_TIMESTAMP'
    output_field = DateTimeField()

    def as_postgresql(self, compiler, connection, **extra_context):
        # PostgreSQL's CURRENT_TIMESTAMP means "the time at the start of the
        # transaction". Use STATEMENT_TIMESTAMP to be cross-compatible with
        # other databases.
        return self.as_sql(compiler, connection, template='STATEMENT_TIMESTAMP()', **extra_context)
class TruncBase(TimezoneMixin, Transform):
    """Base class for the date/time truncation functions (TruncYear, ...).

    Subclasses set ``kind`` to the unit truncated to; the generated SQL
    depends on the output field type (datetime, date or time).
    """
    kind = None
    tzinfo = None

    def __init__(self, expression, output_field=None, tzinfo=None, is_dst=None, **extra):
        self.tzinfo = tzinfo
        self.is_dst = is_dst
        super().__init__(expression, output_field=output_field, **extra)

    def as_sql(self, compiler, connection):
        inner_sql, inner_params = compiler.compile(self.lhs)
        # Dispatch on the *output* field: it decides which backend op applies.
        if isinstance(self.output_field, DateTimeField):
            tzname = self.get_tzname()
            sql = connection.ops.datetime_trunc_sql(self.kind, inner_sql, tzname)
        elif isinstance(self.output_field, DateField):
            sql = connection.ops.date_trunc_sql(self.kind, inner_sql)
        elif isinstance(self.output_field, TimeField):
            sql = connection.ops.time_trunc_sql(self.kind, inner_sql)
        else:
            raise ValueError('Trunc only valid on DateField, TimeField, or DateTimeField.')
        return sql, inner_params

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        copy = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
        field = copy.lhs.output_field
        # DateTimeField is a subclass of DateField so this works for both.
        assert isinstance(field, (DateField, TimeField)), (
            "%r isn't a DateField, TimeField, or DateTimeField." % field.name
        )
        # If self.output_field was None, then accessing the field will trigger
        # the resolver to assign it to self.lhs.output_field.
        if not isinstance(copy.output_field, (DateField, DateTimeField, TimeField)):
            raise ValueError('output_field must be either DateField, TimeField, or DateTimeField')
        # Passing dates or times to functions expecting datetimes is most
        # likely a mistake.
        class_output_field = self.__class__.output_field if isinstance(self.__class__.output_field, Field) else None
        output_field = class_output_field or copy.output_field
        has_explicit_output_field = class_output_field or field.__class__ is not copy.output_field.__class__
        if type(field) == DateField and (
                isinstance(output_field, DateTimeField) or copy.kind in ('hour', 'minute', 'second', 'time')):
            raise ValueError("Cannot truncate DateField '%s' to %s. " % (
                field.name, output_field.__class__.__name__ if has_explicit_output_field else 'DateTimeField'
            ))
        elif isinstance(field, TimeField) and (
                isinstance(output_field, DateTimeField) or
                copy.kind in ('year', 'quarter', 'month', 'week', 'day', 'date')):
            raise ValueError("Cannot truncate TimeField '%s' to %s. " % (
                field.name, output_field.__class__.__name__ if has_explicit_output_field else 'DateTimeField'
            ))
        return copy

    def convert_value(self, value, expression, connection):
        """Post-process the database value: re-apply time zone information and
        coerce datetimes to date/time when the output field requires it."""
        if isinstance(self.output_field, DateTimeField):
            if not settings.USE_TZ:
                pass
            elif value is not None:
                value = value.replace(tzinfo=None)
                value = timezone.make_aware(value, self.tzinfo, is_dst=self.is_dst)
            elif not connection.features.has_zoneinfo_database:
                raise ValueError(
                    'Database returned an invalid datetime value. Are time '
                    'zone definitions for your database installed?'
                )
        elif isinstance(value, datetime):
            # Bug fix: the original also checked `value is None` here, which
            # can never be true inside this isinstance() branch; the
            # unreachable check was removed.
            if isinstance(self.output_field, DateField):
                value = value.date()
            elif isinstance(self.output_field, TimeField):
                value = value.time()
        return value
class Trunc(TruncBase):
    """Generic truncation: the unit is given at call time via ``kind``."""

    def __init__(self, expression, kind, output_field=None, tzinfo=None, is_dst=None, **extra):
        # ``kind`` must be set before __init__ so TruncBase.as_sql can use it.
        self.kind = kind
        super().__init__(
            expression, output_field=output_field, tzinfo=tzinfo,
            is_dst=is_dst, **extra
        )
class TruncYear(TruncBase):
    kind = 'year'


class TruncQuarter(TruncBase):
    kind = 'quarter'


class TruncMonth(TruncBase):
    kind = 'month'


class TruncWeek(TruncBase):
    """Truncate to midnight on the Monday of the week."""
    kind = 'week'


class TruncDay(TruncBase):
    kind = 'day'


class TruncDate(TruncBase):
    """Extract the date part of a datetime (implemented as a CAST)."""
    kind = 'date'
    lookup_name = 'date'
    output_field = DateField()

    def as_sql(self, compiler, connection):
        # Cast to date rather than truncate to date.
        lhs, lhs_params = compiler.compile(self.lhs)
        tzname = timezone.get_current_timezone_name() if settings.USE_TZ else None
        sql = connection.ops.datetime_cast_date_sql(lhs, tzname)
        return sql, lhs_params


class TruncTime(TruncBase):
    """Extract the time part of a datetime (implemented as a CAST)."""
    kind = 'time'
    lookup_name = 'time'
    output_field = TimeField()

    def as_sql(self, compiler, connection):
        # Cast to time rather than truncate to time.
        lhs, lhs_params = compiler.compile(self.lhs)
        tzname = timezone.get_current_timezone_name() if settings.USE_TZ else None
        sql = connection.ops.datetime_cast_time_sql(lhs, tzname)
        return sql, lhs_params


class TruncHour(TruncBase):
    kind = 'hour'


class TruncMinute(TruncBase):
    kind = 'minute'


class TruncSecond(TruncBase):
    kind = 'second'


# Enable the __date / __time transforms on DateTimeField.
DateTimeField.register_lookup(TruncDate)
DateTimeField.register_lookup(TruncTime)
| bsd-3-clause |
urandu/gumbo-parser | python/gumbo/__init__.py | 15 | 1284 | """Gumbo HTML parser.
These are the Python bindings for Gumbo. All public API classes and functions
are exported from this module. They include:
- CTypes representations of all structs and enums defined in gumbo.h. The
naming convention is to take the C name and strip off the "Gumbo" prefix.
- A low-level wrapper around the gumbo_parse function, returning the classes
exposed above. Usage:
import gumbo
with gumboc.parse(text, **options) as output:
do_stuff_with_doctype(output.document)
do_stuff_with_parse_tree(output.root)
- Higher-level bindings that mimic the API provided by html5lib. Usage:
from gumbo import html5lib
This requires that html5lib be installed (it uses their treebuilders), and is
intended as a drop-in replacement.
- Similarly, higher-level bindings that mimic BeautifulSoup and return
BeautifulSoup objects. For this, use:
import gumbo
soup = gumbo.soup_parse(text, **options)
It will give you back a soup object like BeautifulSoup.BeautifulSoup(text).
"""
from gumbo.gumboc import *
try:
from gumbo import html5lib_adapter as html5lib
except ImportError:
# html5lib not installed
pass
try:
from gumbo.soup_adapter import parse as soup_parse
except ImportError:
# BeautifulSoup not installed
pass
| apache-2.0 |
Paul-Ezell/cinder-1 | cinder/tests/unit/fake_driver.py | 17 | 6348 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.tests.unit.brick import fake_lvm
from cinder.volume import driver
from cinder.volume.drivers import lvm
from cinder.zonemanager import utils as fczm_utils
class FakeISCSIDriver(lvm.LVMISCSIDriver):
    """Logs calls instead of executing."""

    def __init__(self, *args, **kwargs):
        super(FakeISCSIDriver, self).__init__(execute=self.fake_execute,
                                              *args, **kwargs)
        # Fake LVM backend so no real volume group is required.
        self.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False,
                                        None, 'default',
                                        self.fake_execute)

    def check_for_setup_error(self):
        """No setup necessary in fake mode."""
        pass

    def initialize_connection(self, volume, connector):
        # Flatten the admin metadata key/value rows into a plain dict.
        volume_metadata = {}
        for metadata in volume['volume_admin_metadata']:
            volume_metadata[metadata['key']] = metadata['value']
        # An explicit 'attached_mode' wins; otherwise fall back to the
        # 'readonly' flag ('True' string -> ro, anything else -> rw).
        access_mode = volume_metadata.get('attached_mode')
        if access_mode is None:
            access_mode = ('ro'
                           if volume_metadata.get('readonly') == 'True'
                           else 'rw')
        return {'driver_volume_type': 'iscsi',
                'data': {'access_mode': access_mode}}

    def terminate_connection(self, volume, connector, **kwargs):
        pass

    @staticmethod
    def fake_execute(cmd, *_args, **_kwargs):
        """Execute that simply logs the command."""
        return (None, None)
class FakeISERDriver(FakeISCSIDriver):
    """Logs calls instead of executing."""

    def __init__(self, *args, **kwargs):
        super(FakeISERDriver, self).__init__(execute=self.fake_execute,
                                             *args, **kwargs)

    def initialize_connection(self, volume, connector):
        # iSER connections carry no extra data in this fake driver.
        return {
            'driver_volume_type': 'iser',
            'data': {}
        }

    @staticmethod
    def fake_execute(cmd, *_args, **_kwargs):
        """Execute that simply logs the command."""
        return (None, None)
class FakeFibreChannelDriver(driver.FibreChannelDriver):
    """Fake FC driver exercising the Fibre Channel Zone Manager decorators."""

    @fczm_utils.AddFCZone
    def initialize_connection(self, volume, connector):
        # 'fibre_channel' volume type: the AddFCZone decorator should act.
        return {
            'driver_volume_type': 'fibre_channel',
            'data': {
                'initiator_target_map': {'fake_wwn': ['fake_wwn2']},
            }}

    @fczm_utils.AddFCZone
    def no_zone_initialize_connection(self, volume, connector):
        """This shouldn't call the ZM."""
        return {
            'driver_volume_type': 'bogus',
            'data': {
                'initiator_target_map': {'fake_wwn': ['fake_wwn2']},
            }}

    @fczm_utils.RemoveFCZone
    def terminate_connection(self, volume, connector, **kwargs):
        return {
            'driver_volume_type': 'fibre_channel',
            'data': {
                'initiator_target_map': {'fake_wwn': ['fake_wwn2']},
            }}

    @fczm_utils.RemoveFCZone
    def no_zone_terminate_connection(self, volume, connector, **kwargs):
        # 'bogus' type: RemoveFCZone should ignore it.
        return {
            'driver_volume_type': 'bogus',
            'data': {
                'initiator_target_map': {'fake_wwn': ['fake_wwn2']},
            }}
class LoggingVolumeDriver(driver.VolumeDriver):
    """Logs and records calls, for unit tests.

    Every entry point appends a dictionary describing the call to the
    class-level _LOGS list, which tests inspect via all_logs()/logs_like().
    """

    def check_for_setup_error(self):
        pass

    def create_volume(self, volume):
        self.log_action('create_volume', volume)

    def delete_volume(self, volume):
        self.clear_volume(volume)
        self.log_action('delete_volume', volume)

    def clear_volume(self, volume):
        self.log_action('clear_volume', volume)

    def local_path(self, volume):
        raise NotImplementedError()

    def ensure_export(self, context, volume):
        self.log_action('ensure_export', volume)

    def create_export(self, context, volume):
        self.log_action('create_export', volume)

    def remove_export(self, context, volume):
        self.log_action('remove_export', volume)

    def initialize_connection(self, volume, connector):
        self.log_action('initialize_connection', volume)

    def terminate_connection(self, volume, connector):
        self.log_action('terminate_connection', volume)

    def create_export_snapshot(self, context, snapshot):
        self.log_action('create_export_snapshot', snapshot)

    def remove_export_snapshot(self, context, snapshot):
        self.log_action('remove_export_snapshot', snapshot)

    def initialize_connection_snapshot(self, snapshot, connector):
        self.log_action('initialize_connection_snapshot', snapshot)

    def terminate_connection_snapshot(self, snapshot, connector):
        self.log_action('terminate_connection_snapshot', snapshot)

    def create_cloned_volume(self, volume, src_vol):
        self.log_action('create_cloned_volume', volume)

    # Shared across all instances: the recorded call log.
    _LOGS = []

    @staticmethod
    def clear_logs():
        """Reset the recorded call log."""
        LoggingVolumeDriver._LOGS = []

    @staticmethod
    def log_action(action, parameters):
        """Logs the command."""
        log_dictionary = {}
        if parameters:
            log_dictionary = dict(parameters)
        log_dictionary['action'] = action
        LoggingVolumeDriver._LOGS.append(log_dictionary)

    @staticmethod
    def all_logs():
        """Return every recorded call entry."""
        return LoggingVolumeDriver._LOGS

    @staticmethod
    def logs_like(action, **kwargs):
        """Return entries for *action* whose fields match all given kwargs."""
        matches = []
        for entry in LoggingVolumeDriver._LOGS:
            if entry['action'] != action:
                continue
            match = True
            for k, v in kwargs.items():
                if entry.get(k) != v:
                    match = False
                    break
            if match:
                matches.append(entry)
        return matches
| apache-2.0 |
DaniilLeksin/gc | wx/tools/Editra/src/extern/aui/dockart.py | 7 | 46060 | """
Dock art provider code - a dock provider provides all drawing functionality to
the AUI dock manager. This allows the dock manager to have a pluggable look-and-feel.
By default, a :class:`~lib.agw.aui.framemanager` uses an instance of this class called :mod:`~lib.agw.aui.dockart`
which provides bitmap art and a colour scheme that is adapted to the major platforms'
look. You can either derive from that class to alter its behaviour or write a
completely new dock art class. Call :meth:`AuiManager.SetArtProvider() <lib.agw.aui.framemanager.AuiManager.SetArtProvider>`
to make use this new dock art.
"""
__author__ = "Andrea Gavana <andrea.gavana@gmail.com>"
__date__ = "31 March 2009"
import wx
import types
from aui_utilities import BitmapFromBits, StepColour, ChopText, GetBaseColour
from aui_utilities import DrawGradientRectangle, DrawMACCloseButton
from aui_utilities import DarkenBitmap, LightContrastColour
from aui_constants import *
optionActive = 2**14
""" Indicates that a pane is active and should display an active caption (if present). """
# ModernDockArt needs ctypes plus the pywin32 winxptheme module; record
# their availability so themed drawing is attempted only when both import.
_ctypes = False
# Try to import winxptheme for ModernDockArt
if wx.Platform == "__WXMSW__":
    try:
        import ctypes
        import winxptheme
        _ctypes = True
    except ImportError:
        # pywin32/winxptheme not installed: ModernDockArt stays unthemed.
        pass
# -- AuiDefaultDockArt class implementation --
class AuiDefaultDockArt(object):
"""
Dock art provider code - a dock provider provides all drawing functionality to the AUI dock manager.
    This allows the dock manager to have a pluggable look-and-feel.
By default, a :class:`~lib.agw.aui.framemanager.AuiManager` uses an instance of this class called
:class:`AuiDefaultDockArt` which provides bitmap art and a colour scheme that is adapted to the major
platforms' look. You can either derive from that class to alter its behaviour or
write a completely new dock art class.
Call :meth:`AuiManager.SetArtProvider() <lib.agw.aui.framemanager.AuiManager.SetArtProvider>`
    to make use of this new dock art.
**Metric Ordinals**
These are the possible pane dock art settings for :class:`AuiDefaultDockArt`:
================================================ ======================================
Metric Ordinal Constant Description
================================================ ======================================
``AUI_DOCKART_SASH_SIZE`` Customizes the sash size
``AUI_DOCKART_CAPTION_SIZE`` Customizes the caption size
``AUI_DOCKART_GRIPPER_SIZE`` Customizes the gripper size
``AUI_DOCKART_PANE_BORDER_SIZE`` Customizes the pane border size
``AUI_DOCKART_PANE_BUTTON_SIZE`` Customizes the pane button size
``AUI_DOCKART_BACKGROUND_COLOUR`` Customizes the background colour
``AUI_DOCKART_BACKGROUND_GRADIENT_COLOUR`` Customizes the background gradient colour
``AUI_DOCKART_SASH_COLOUR`` Customizes the sash colour
``AUI_DOCKART_ACTIVE_CAPTION_COLOUR`` Customizes the active caption colour
``AUI_DOCKART_ACTIVE_CAPTION_GRADIENT_COLOUR`` Customizes the active caption gradient colour
``AUI_DOCKART_INACTIVE_CAPTION_COLOUR`` Customizes the inactive caption colour
``AUI_DOCKART_INACTIVE_CAPTION_GRADIENT_COLOUR`` Customizes the inactive gradient caption colour
``AUI_DOCKART_ACTIVE_CAPTION_TEXT_COLOUR`` Customizes the active caption text colour
``AUI_DOCKART_INACTIVE_CAPTION_TEXT_COLOUR`` Customizes the inactive caption text colour
``AUI_DOCKART_BORDER_COLOUR`` Customizes the border colour
``AUI_DOCKART_GRIPPER_COLOUR`` Customizes the gripper colour
``AUI_DOCKART_CAPTION_FONT`` Customizes the caption font
``AUI_DOCKART_GRADIENT_TYPE`` Customizes the gradient type (no gradient, vertical or horizontal)
``AUI_DOCKART_DRAW_SASH_GRIP`` Draw a sash grip on the sash
``AUI_DOCKART_HINT_WINDOW_COLOUR`` Customizes the hint window background colour (currently light blue)
================================================ ======================================
**Gradient Types**
These are the possible gradient dock art settings for :class:`AuiDefaultDockArt`:
============================================ ======================================
Gradient Constant Description
============================================ ======================================
``AUI_GRADIENT_NONE`` No gradient on the captions
``AUI_GRADIENT_VERTICAL`` Vertical gradient on the captions
``AUI_GRADIENT_HORIZONTAL`` Horizontal gradient on the captions
============================================ ======================================
**Button States**
These are the possible pane button / :class:`~lib.agw.aui.auibook.AuiNotebook` button /
:class:`~lib.agw.aui.auibar.AuiToolBar` button states:
============================================ ======================================
Button State Constant Description
============================================ ======================================
``AUI_BUTTON_STATE_NORMAL`` Normal button state
``AUI_BUTTON_STATE_HOVER`` Hovered button state
``AUI_BUTTON_STATE_PRESSED`` Pressed button state
``AUI_BUTTON_STATE_DISABLED`` Disabled button state
``AUI_BUTTON_STATE_HIDDEN`` Hidden button state
``AUI_BUTTON_STATE_CHECKED`` Checked button state
============================================ ======================================
**Button Identifiers**
These are the possible pane button / :class:`~lib.agw.aui.auibook.AuiNotebook` button /
:class:`~lib.agw.aui.auibar.AuiToolBar` button identifiers:
============================================ ======================================
Button Identifier Description
============================================ ======================================
``AUI_BUTTON_CLOSE`` Shows a close button on the pane
``AUI_BUTTON_MAXIMIZE_RESTORE`` Shows a maximize/restore button on the pane
``AUI_BUTTON_MINIMIZE`` Shows a minimize button on the pane
``AUI_BUTTON_PIN`` Shows a pin button on the pane
``AUI_BUTTON_OPTIONS`` Shows an option button on the pane (not implemented)
``AUI_BUTTON_WINDOWLIST`` Shows a window list button on the pane (for :class:`~lib.agw.aui.auibook.AuiNotebook`)
``AUI_BUTTON_LEFT`` Shows a left button on the pane (for :class:`~lib.agw.aui.auibook.AuiNotebook`)
``AUI_BUTTON_RIGHT`` Shows a right button on the pane (for :class:`~lib.agw.aui.auibook.AuiNotebook`)
``AUI_BUTTON_UP`` Shows an up button on the pane (not implemented)
``AUI_BUTTON_DOWN`` Shows a down button on the pane (not implemented)
``AUI_BUTTON_CUSTOM1`` Shows a custom button on the pane (not implemented)
``AUI_BUTTON_CUSTOM2`` Shows a custom button on the pane (not implemented)
``AUI_BUTTON_CUSTOM3`` Shows a custom button on the pane (not implemented)
============================================ ======================================
"""
    def __init__(self):
        """ Default class constructor.

        Initializes colours (via :meth:`Init`), the caption font, the stock
        pane-button bitmaps and the default metric values.
        """
        self.Init()
        isMac = wx.Platform == "__WXMAC__"
        # Mac uses the small system font to match native palette captions;
        # other platforms get a fixed 8pt default font.
        if isMac:
            self._caption_font = wx.SMALL_FONT
        else:
            self._caption_font = wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.NORMAL, False)
        self.SetDefaultPaneBitmaps(isMac)
        self._restore_bitmap = wx.BitmapFromXPMData(restore_xpm)
        # default metric values
        self._sash_size = 4
        if isMac:
            # This really should be implemented in wx.SystemSettings
            # There is no way to do this that I am aware outside of using
            # the cocoa python bindings. 8 pixels looks correct on my system
            # so hard coding it for now.
            # How do I translate this?!? Not sure of the below implementation...
            # SInt32 height;
            # GetThemeMetric( kThemeMetricSmallPaneSplitterHeight , &height );
            # self._sash_size = height;
            self._sash_size = 8 # Carbon.Appearance.kThemeMetricPaneSplitterHeight
        elif wx.Platform == "__WXGTK__":
            # Ask the native renderer for the theme's real splitter width.
            self._sash_size = wx.RendererNative.Get().GetSplitterParams(wx.GetTopLevelWindows()[0]).widthSash
        else:
            self._sash_size = 4
        self._caption_size = 19
        self._border_size = 1
        self._button_size = 14
        self._gripper_size = 9
        self._gradient_type = AUI_GRADIENT_VERTICAL
        self._draw_sash = False
    def Init(self):
        """ Initializes the dock art: base colours plus the active-caption
        colours derived from the current system theme. """
        self.SetDefaultColours()
        isMac = wx.Platform == "__WXMAC__"
        # On Mac the system highlight colour stands in for the (non-existent)
        # active-caption system colour.
        if isMac:
            self._active_caption_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT)
        else:
            self._active_caption_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_ACTIVECAPTION)
        self._active_caption_gradient_colour = LightContrastColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT))
        self._active_caption_text_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHTTEXT)
        self._inactive_caption_text_colour = wx.BLACK
    def SetDefaultColours(self, base_colour=None):
        """
        Sets the default colours, which are calculated from the given base colour.

        :param `base_colour`: an instance of :class:`Colour`. If defaulted to ``None``, a colour
         is generated accordingly to the platform and theme.
        """
        if base_colour is None:
            base_colour = GetBaseColour()
        # Progressively darker shades of the base colour (smaller StepColour
        # percentage == darker); used for borders and the gripper dots.
        darker1_colour = StepColour(base_colour, 85)
        darker2_colour = StepColour(base_colour, 75)
        darker3_colour = StepColour(base_colour, 60)
        darker4_colour = StepColour(base_colour, 40)
        self._background_colour = base_colour
        self._background_gradient_colour = StepColour(base_colour, 180)
        self._inactive_caption_colour = darker1_colour
        self._inactive_caption_gradient_colour = StepColour(base_colour, 97)
        self._sash_brush = wx.Brush(base_colour)
        self._background_brush = wx.Brush(base_colour)
        self._border_pen = wx.Pen(darker2_colour)
        self._gripper_brush = wx.Brush(base_colour)
        self._gripper_pen1 = wx.Pen(darker4_colour)
        self._gripper_pen2 = wx.Pen(darker3_colour)
        self._gripper_pen3 = wx.WHITE_PEN
        # Hint window colour comes from the shared aui_constants default.
        self._hint_background_colour = colourHintBackground
def GetMetric(self, id):
"""
Gets the value of a certain setting.
:param integer `id`: can be one of the size values in `Metric Ordinals`.
"""
if id == AUI_DOCKART_SASH_SIZE:
return self._sash_size
elif id == AUI_DOCKART_CAPTION_SIZE:
return self._caption_size
elif id == AUI_DOCKART_GRIPPER_SIZE:
return self._gripper_size
elif id == AUI_DOCKART_PANE_BORDER_SIZE:
return self._border_size
elif id == AUI_DOCKART_PANE_BUTTON_SIZE:
return self._button_size
elif id == AUI_DOCKART_GRADIENT_TYPE:
return self._gradient_type
elif id == AUI_DOCKART_DRAW_SASH_GRIP:
return self._draw_sash
else:
raise Exception("Invalid Metric Ordinal.")
def SetMetric(self, id, new_val):
"""
Sets the value of a certain setting using `new_val`
:param integer `id`: can be one of the size values in `Metric Ordinals`;
:param `new_val`: the new value of the setting.
"""
if id == AUI_DOCKART_SASH_SIZE:
self._sash_size = new_val
elif id == AUI_DOCKART_CAPTION_SIZE:
self._caption_size = new_val
elif id == AUI_DOCKART_GRIPPER_SIZE:
self._gripper_size = new_val
elif id == AUI_DOCKART_PANE_BORDER_SIZE:
self._border_size = new_val
elif id == AUI_DOCKART_PANE_BUTTON_SIZE:
self._button_size = new_val
elif id == AUI_DOCKART_GRADIENT_TYPE:
self._gradient_type = new_val
elif id == AUI_DOCKART_DRAW_SASH_GRIP:
self._draw_sash = new_val
else:
raise Exception("Invalid Metric Ordinal.")
    def GetColor(self, id):
        """
        Gets the colour of a certain setting.

        :param integer `id`: can be one of the colour values in `Metric Ordinals`.
        :raise: `Exception` if `id` is not a known colour ordinal.
        """
        # Brush/pen-backed colours are read back from their brush or pen;
        # everything else is a plain attribute.
        if id == AUI_DOCKART_BACKGROUND_COLOUR:
            return self._background_brush.GetColour()
        elif id == AUI_DOCKART_BACKGROUND_GRADIENT_COLOUR:
            return self._background_gradient_colour
        elif id == AUI_DOCKART_SASH_COLOUR:
            return self._sash_brush.GetColour()
        elif id == AUI_DOCKART_INACTIVE_CAPTION_COLOUR:
            return self._inactive_caption_colour
        elif id == AUI_DOCKART_INACTIVE_CAPTION_GRADIENT_COLOUR:
            return self._inactive_caption_gradient_colour
        elif id == AUI_DOCKART_INACTIVE_CAPTION_TEXT_COLOUR:
            return self._inactive_caption_text_colour
        elif id == AUI_DOCKART_ACTIVE_CAPTION_COLOUR:
            return self._active_caption_colour
        elif id == AUI_DOCKART_ACTIVE_CAPTION_GRADIENT_COLOUR:
            return self._active_caption_gradient_colour
        elif id == AUI_DOCKART_ACTIVE_CAPTION_TEXT_COLOUR:
            return self._active_caption_text_colour
        elif id == AUI_DOCKART_BORDER_COLOUR:
            return self._border_pen.GetColour()
        elif id == AUI_DOCKART_GRIPPER_COLOUR:
            return self._gripper_brush.GetColour()
        elif id == AUI_DOCKART_HINT_WINDOW_COLOUR:
            return self._hint_background_colour
        else:
            raise Exception("Invalid Colour Ordinal.")
    def SetColor(self, id, colour):
        """
        Sets the colour of a certain setting.

        :param integer `id`: can be one of the colour values in `Metric Ordinals`;
        :param `colour`: the new value of the setting.
        :type `colour`: :class:`Colour` or tuple or integer
        :raise: `Exception` if `id` is not a known colour ordinal.
        """
        # Coerce colour names, RGB tuples and packed RGB integers into a
        # wx.Colour (Python 2 type checks: basestring/TupleType/IntType).
        if isinstance(colour, basestring):
            colour = wx.NamedColour(colour)
        elif isinstance(colour, types.TupleType):
            colour = wx.Colour(*colour)
        elif isinstance(colour, types.IntType):
            colour = wx.ColourRGB(colour)
        if id == AUI_DOCKART_BACKGROUND_COLOUR:
            self._background_brush.SetColour(colour)
        elif id == AUI_DOCKART_BACKGROUND_GRADIENT_COLOUR:
            self._background_gradient_colour = colour
        elif id == AUI_DOCKART_SASH_COLOUR:
            self._sash_brush.SetColour(colour)
        elif id == AUI_DOCKART_INACTIVE_CAPTION_COLOUR:
            self._inactive_caption_colour = colour
            if not self._custom_pane_bitmaps and wx.Platform == "__WXMAC__":
                # No custom bitmaps for the pane close button
                # Change the MAC close bitmap colour
                self._inactive_close_bitmap = DrawMACCloseButton(wx.WHITE, colour)
        elif id == AUI_DOCKART_INACTIVE_CAPTION_GRADIENT_COLOUR:
            self._inactive_caption_gradient_colour = colour
        elif id == AUI_DOCKART_INACTIVE_CAPTION_TEXT_COLOUR:
            self._inactive_caption_text_colour = colour
        elif id == AUI_DOCKART_ACTIVE_CAPTION_COLOUR:
            self._active_caption_colour = colour
            if not self._custom_pane_bitmaps and wx.Platform == "__WXMAC__":
                # No custom bitmaps for the pane close button
                # Change the MAC close bitmap colour
                self._active_close_bitmap = DrawMACCloseButton(wx.WHITE, colour)
        elif id == AUI_DOCKART_ACTIVE_CAPTION_GRADIENT_COLOUR:
            self._active_caption_gradient_colour = colour
        elif id == AUI_DOCKART_ACTIVE_CAPTION_TEXT_COLOUR:
            self._active_caption_text_colour = colour
        elif id == AUI_DOCKART_BORDER_COLOUR:
            self._border_pen.SetColour(colour)
        elif id == AUI_DOCKART_GRIPPER_COLOUR:
            # Gripper dots use two derived (lighter) pens of the same colour.
            self._gripper_brush.SetColour(colour)
            self._gripper_pen1.SetColour(StepColour(colour, 40))
            self._gripper_pen2.SetColour(StepColour(colour, 60))
        elif id == AUI_DOCKART_HINT_WINDOW_COLOUR:
            self._hint_background_colour = colour
        else:
            raise Exception("Invalid Colour Ordinal.")
GetColour = GetColor
SetColour = SetColor
def SetFont(self, id, font):
"""
Sets a font setting.
:param integer `id`: must be ``AUI_DOCKART_CAPTION_FONT``;
:param `font`: an instance of :class:`Font`.
"""
if id == AUI_DOCKART_CAPTION_FONT:
self._caption_font = font
def GetFont(self, id):
"""
Gets a font setting.
:param integer `id`: must be ``AUI_DOCKART_CAPTION_FONT``, otherwise :class:`NullFont` is returned.
"""
if id == AUI_DOCKART_CAPTION_FONT:
return self._caption_font
return wx.NullFont
    def DrawSash(self, dc, window, orient, rect):
        """
        Draws a sash between two windows.

        :param `dc`: a :class:`DC` device context;
        :param `window`: an instance of :class:`Window`;
        :param integer `orient`: the sash orientation;
        :param Rect `rect`: the sash rectangle.
        """
        # AG: How do we make this work?!?
        # RendererNative does not use the sash_brush chosen by the user
        # and the rect.GetSize() is ignored as the sash is always drawn
        # 3 pixel wide
        # wx.RendererNative.Get().DrawSplitterSash(window, dc, rect.GetSize(), pos, orient)
        # Fill the whole sash rectangle with the user-configurable brush.
        dc.SetPen(wx.TRANSPARENT_PEN)
        dc.SetBrush(self._sash_brush)
        dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
        # Optionally overlay the dotted grip (AUI_DOCKART_DRAW_SASH_GRIP).
        draw_sash = self.GetMetric(AUI_DOCKART_DRAW_SASH_GRIP)
        if draw_sash:
            self.DrawSashGripper(dc, orient, rect)
    def DrawBackground(self, dc, window, orient, rect):
        """
        Draws a background.

        :param `dc`: a :class:`DC` device context;
        :param `window`: an instance of :class:`Window`;
        :param integer `orient`: the gradient (if any) orientation;
        :param Rect `rect`: the background rectangle.
        """
        dc.SetPen(wx.TRANSPARENT_PEN)
        if wx.Platform == "__WXMAC__":
            # we have to clear first, otherwise we are drawing a light striped pattern
            # over an already darker striped background
            dc.SetBrush(wx.WHITE_BRUSH)
            dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
        # Note: always drawn with a horizontal gradient here, regardless of
        # `orient`; 700 is the fixed gradient extent passed through.
        DrawGradientRectangle(dc, rect, self._background_brush.GetColour(),
                              self._background_gradient_colour,
                              AUI_GRADIENT_HORIZONTAL, rect.x, 700)
    def DrawBorder(self, dc, window, rect, pane):
        """
        Draws the pane border.

        :param `dc`: a :class:`DC` device context;
        :param `window`: an instance of :class:`Window`;
        :param Rect `rect`: the border rectangle;
        :param `pane`: the pane for which the border is drawn.
        """
        drect = wx.Rect(*rect)
        dc.SetPen(self._border_pen)
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        border_width = self.GetMetric(AUI_DOCKART_PANE_BORDER_SIZE)
        if pane.IsToolbar():
            # Toolbars get a raised look: white highlight on the top/left
            # edges, darker border pen on the bottom/right edges.
            for ii in xrange(0, border_width):
                dc.SetPen(wx.WHITE_PEN)
                dc.DrawLine(drect.x, drect.y, drect.x+drect.width, drect.y)
                dc.DrawLine(drect.x, drect.y, drect.x, drect.y+drect.height)
                dc.SetPen(self._border_pen)
                dc.DrawLine(drect.x, drect.y+drect.height-1,
                            drect.x+drect.width, drect.y+drect.height-1)
                dc.DrawLine(drect.x+drect.width-1, drect.y,
                            drect.x+drect.width-1, drect.y+drect.height)
                # Shrink for the next concentric ring of the border.
                drect.Deflate(1, 1)
        else:
            # Plain panes: concentric hollow rectangles in the border colour.
            for ii in xrange(0, border_width):
                dc.DrawRectangle(drect.x, drect.y, drect.width, drect.height)
                drect.Deflate(1, 1)
    def DrawCaptionBackground(self, dc, rect, pane):
        """
        Draws the text caption background in the pane.

        :param `dc`: a :class:`DC` device context;
        :param Rect `rect`: the text caption rectangle;
        :param `pane`: the pane for which the text background is drawn.
        """
        active = pane.state & optionActive
        if self._gradient_type == AUI_GRADIENT_NONE:
            # Flat fill in the (in)active caption colour.
            if active:
                dc.SetBrush(wx.Brush(self._active_caption_colour))
            else:
                dc.SetBrush(wx.Brush(self._inactive_caption_colour))
            dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
        else:
            # Captions drawn on the left edge rotate the gradient 90 degrees.
            switch_gradient = pane.HasCaptionLeft()
            gradient_type = self._gradient_type
            if switch_gradient:
                # Py2 conditional-expression idiom: (cond and [a] or [b])[0]
                gradient_type = (self._gradient_type == AUI_GRADIENT_HORIZONTAL and [AUI_GRADIENT_VERTICAL] or \
                                 [AUI_GRADIENT_HORIZONTAL])[0]
            if active:
                if wx.Platform == "__WXMAC__":
                    # Mac reverses the colour order to match native captions.
                    DrawGradientRectangle(dc, rect, self._active_caption_colour,
                                          self._active_caption_gradient_colour,
                                          gradient_type)
                else:
                    DrawGradientRectangle(dc, rect, self._active_caption_gradient_colour,
                                          self._active_caption_colour,
                                          gradient_type)
            else:
                if wx.Platform == "__WXMAC__":
                    DrawGradientRectangle(dc, rect, self._inactive_caption_gradient_colour,
                                          self._inactive_caption_colour,
                                          gradient_type)
                else:
                    DrawGradientRectangle(dc, rect, self._inactive_caption_colour,
                                          self._inactive_caption_gradient_colour,
                                          gradient_type)
    def DrawIcon(self, dc, rect, pane):
        """
        Draws the icon in the pane caption area.

        :param `dc`: a :class:`DC` device context;
        :param Rect `rect`: the pane caption rectangle;
        :param `pane`: the pane for which the icon is drawn.
        """
        # Draw the icon centered vertically
        if pane.icon.Ok():
            if pane.HasCaptionLeft():
                # Left-side captions run bottom-to-top, so rotate the icon
                # counter-clockwise and anchor it near the caption's bottom.
                bmp = wx.ImageFromBitmap(pane.icon).Rotate90(clockwise=False)
                dc.DrawBitmap(bmp.ConvertToBitmap(), rect.x+(rect.width-pane.icon.GetWidth())/2, rect.y+rect.height-2-pane.icon.GetHeight(), True)
            else:
                dc.DrawBitmap(pane.icon, rect.x+2, rect.y+(rect.height-pane.icon.GetHeight())/2, True)
    def DrawCaption(self, dc, window, text, rect, pane):
        """
        Draws the text in the pane caption.

        :param `dc`: a :class:`DC` device context;
        :param `window`: an instance of :class:`Window`;
        :param string `text`: the text to be displayed;
        :param Rect `rect`: the pane caption rectangle;
        :param `pane`: the pane for which the text is drawn.
        """
        dc.SetPen(wx.TRANSPARENT_PEN)
        dc.SetFont(self._caption_font)
        self.DrawCaptionBackground(dc, rect, pane)
        if pane.state & optionActive:
            dc.SetTextForeground(self._active_caption_text_colour)
        else:
            dc.SetTextForeground(self._inactive_caption_text_colour)
        # Sample string covering ascenders/descenders to get the font height.
        w, h = dc.GetTextExtent("ABCDEFHXfgkj")
        clip_rect = wx.Rect(*rect)
        btns = pane.CountButtons()
        captionLeft = pane.HasCaptionLeft()
        # `variable` is the space available for the caption text: caption
        # length minus fixed offsets, the icon and the pane buttons.
        variable = (captionLeft and [rect.height] or [rect.width])[0]
        variable -= 3      # text offset
        variable -= 2      # button padding
        caption_offset = 0
        if pane.icon:
            if captionLeft:
                caption_offset += pane.icon.GetHeight() + 3
            else:
                caption_offset += pane.icon.GetWidth() + 3
            self.DrawIcon(dc, rect, pane)
        variable -= caption_offset
        variable -= btns*(self._button_size + self._border_size)
        # Truncate with an ellipsis if the text does not fit.
        draw_text = ChopText(dc, text, variable)
        if captionLeft:
            # Left-side captions are drawn rotated 90 degrees, bottom-up.
            dc.DrawRotatedText(draw_text, rect.x+(rect.width/2)-(h/2)-1, rect.y+rect.height-3-caption_offset, 90)
        else:
            dc.DrawText(draw_text, rect.x+3+caption_offset, rect.y+(rect.height/2)-(h/2)-1)
    def RequestUserAttention(self, dc, window, text, rect, pane):
        """
        Requests the user attention by intermittently highlighting the pane caption.

        Flashes the caption three times (six alternating redraws, 350 ms
        apart) by toggling the pane's active flag, then restores the
        original state.

        :param `dc`: a :class:`DC` device context;
        :param `window`: an instance of :class:`Window`;
        :param string `text`: the text to be displayed;
        :param Rect `rect`: the pane caption rectangle;
        :param `pane`: the pane for which we want to attract the user attention.
        """
        state = pane.state
        pane.state &= ~optionActive
        for indx in xrange(6):
            active = (indx%2 == 0 and [True] or [False])[0]
            if active:
                pane.state |= optionActive
            else:
                pane.state &= ~optionActive
            self.DrawCaptionBackground(dc, rect, pane)
            self.DrawCaption(dc, window, text, rect, pane)
            # Yield so the redraw becomes visible before sleeping.
            wx.SafeYield()
            wx.MilliSleep(350)
        pane.state = state
    def DrawGripper(self, dc, window, rect, pane):
        """
        Draws a gripper on the pane.

        :param `dc`: a :class:`DC` device context;
        :param `window`: an instance of :class:`Window`;
        :param Rect `rect`: the pane caption rectangle;
        :param `pane`: the pane for which the gripper is drawn.
        """
        dc.SetPen(wx.TRANSPARENT_PEN)
        dc.SetBrush(self._gripper_brush)
        dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
        # Each grip "dot" is a 3x3 cluster of pixels built from three pens
        # (dark / mid / white) to fake an embossed look, repeated every
        # 4 pixels along the gripper.
        if not pane.HasGripperTop():
            # Vertical gripper: dots run down the left edge.
            y = 4
            while 1:
                dc.SetPen(self._gripper_pen1)
                dc.DrawPoint(rect.x+3, rect.y+y)
                dc.SetPen(self._gripper_pen2)
                dc.DrawPoint(rect.x+3, rect.y+y+1)
                dc.DrawPoint(rect.x+4, rect.y+y)
                dc.SetPen(self._gripper_pen3)
                dc.DrawPoint(rect.x+5, rect.y+y+1)
                dc.DrawPoint(rect.x+5, rect.y+y+2)
                dc.DrawPoint(rect.x+4, rect.y+y+2)
                y = y + 4
                if y > rect.GetHeight() - 4:
                    break
        else:
            # Horizontal gripper: dots run along the top edge.
            x = 4
            while 1:
                dc.SetPen(self._gripper_pen1)
                dc.DrawPoint(rect.x+x, rect.y+3)
                dc.SetPen(self._gripper_pen2)
                dc.DrawPoint(rect.x+x+1, rect.y+3)
                dc.DrawPoint(rect.x+x, rect.y+4)
                dc.SetPen(self._gripper_pen3)
                dc.DrawPoint(rect.x+x+1, rect.y+5)
                dc.DrawPoint(rect.x+x+2, rect.y+5)
                dc.DrawPoint(rect.x+x+2, rect.y+4)
                x = x + 4
                if x > rect.GetWidth() - 4:
                    break
def DrawPaneButton(self, dc, window, button, button_state, _rect, pane):
"""
Draws a pane button in the pane caption area.
:param `dc`: a :class:`DC` device context;
:param `window`: an instance of :class:`Window`;
:param integer `button`: the button to be drawn;
:param integer `button_state`: the pane button state;
:param Rect `_rect`: the pane caption rectangle;
:param `pane`: the pane for which the button is drawn.
"""
if not pane:
return
if button == AUI_BUTTON_CLOSE:
if pane.state & optionActive:
bmp = self._active_close_bitmap
else:
bmp = self._inactive_close_bitmap
elif button == AUI_BUTTON_PIN:
if pane.state & optionActive:
bmp = self._active_pin_bitmap
else:
bmp = self._inactive_pin_bitmap
elif button == AUI_BUTTON_MAXIMIZE_RESTORE:
if pane.IsMaximized():
if pane.state & optionActive:
bmp = self._active_restore_bitmap
else:
bmp = self._inactive_restore_bitmap
else:
if pane.state & optionActive:
bmp = self._active_maximize_bitmap
else:
bmp = self._inactive_maximize_bitmap
elif button == AUI_BUTTON_MINIMIZE:
if pane.state & optionActive:
bmp = self._active_minimize_bitmap
else:
bmp = self._inactive_minimize_bitmap
isVertical = pane.HasCaptionLeft()
rect = wx.Rect(*_rect)
if isVertical:
old_x = rect.x
rect.x = rect.x + (rect.width/2) - (bmp.GetWidth()/2)
rect.width = old_x + rect.width - rect.x - 1
else:
old_y = rect.y
rect.y = rect.y + (rect.height/2) - (bmp.GetHeight()/2)
rect.height = old_y + rect.height - rect.y - 1
if button_state == AUI_BUTTON_STATE_PRESSED:
rect.x += 1
rect.y += 1
if button_state in [AUI_BUTTON_STATE_HOVER, AUI_BUTTON_STATE_PRESSED]:
if pane.state & optionActive:
dc.SetBrush(wx.Brush(StepColour(self._active_caption_colour, 120)))
dc.SetPen(wx.Pen(StepColour(self._active_caption_colour, 70)))
else:
dc.SetBrush(wx.Brush(StepColour(self._inactive_caption_colour, 120)))
dc.SetPen(wx.Pen(StepColour(self._inactive_caption_colour, 70)))
if wx.Platform != "__WXMAC__":
# draw the background behind the button
dc.DrawRectangle(rect.x, rect.y, 15, 15)
else:
# Darker the bitmap a bit
bmp = DarkenBitmap(bmp, self._active_caption_colour, StepColour(self._active_caption_colour, 110))
if isVertical:
bmp = wx.ImageFromBitmap(bmp).Rotate90(clockwise=False).ConvertToBitmap()
# draw the button itself
dc.DrawBitmap(bmp, rect.x, rect.y, True)
    def DrawSashGripper(self, dc, orient, rect):
        """
        Draws a sash gripper on a sash between two windows.

        :param `dc`: a :class:`DC` device context;
        :param integer `orient`: the sash orientation;
        :param Rect `rect`: the sash rectangle.
        """
        dc.SetBrush(self._gripper_brush)
        if orient == wx.HORIZONTAL:  # horizontal sash
            # Dots span the middle half of the sash, one every 5 pixels.
            x = rect.x + int((1.0/4.0)*rect.width)
            xend = rect.x + int((3.0/4.0)*rect.width)
            y = rect.y + (rect.height/2) - 1
            while 1:
                dc.SetPen(self._gripper_pen3)
                dc.DrawRectangle(x, y, 2, 2)
                dc.SetPen(self._gripper_pen2)
                dc.DrawPoint(x+1, y+1)
                x = x + 5
                if x >= xend:
                    break
        else:
            # Vertical sash: same pattern rotated.
            y = rect.y + int((1.0/4.0)*rect.height)
            yend = rect.y + int((3.0/4.0)*rect.height)
            x = rect.x + (rect.width/2) - 1
            while 1:
                dc.SetPen(self._gripper_pen3)
                dc.DrawRectangle(x, y, 2, 2)
                dc.SetPen(self._gripper_pen2)
                dc.DrawPoint(x+1, y+1)
                y = y + 5
                if y >= yend:
                    break
def SetDefaultPaneBitmaps(self, isMac):
"""
Assigns the default pane bitmaps.
:param bool `isMac`: whether we are on wxMAC or not.
"""
if isMac:
self._inactive_close_bitmap = DrawMACCloseButton(wx.WHITE, self._inactive_caption_colour)
self._active_close_bitmap = DrawMACCloseButton(wx.WHITE, self._active_caption_colour)
else:
self._inactive_close_bitmap = BitmapFromBits(close_bits, 16, 16, self._inactive_caption_text_colour)
self._active_close_bitmap = BitmapFromBits(close_bits, 16, 16, self._active_caption_text_colour)
if isMac:
self._inactive_maximize_bitmap = BitmapFromBits(max_bits, 16, 16, wx.WHITE)
self._active_maximize_bitmap = BitmapFromBits(max_bits, 16, 16, wx.WHITE)
else:
self._inactive_maximize_bitmap = BitmapFromBits(max_bits, 16, 16, self._inactive_caption_text_colour)
self._active_maximize_bitmap = BitmapFromBits(max_bits, 16, 16, self._active_caption_text_colour)
if isMac:
self._inactive_restore_bitmap = BitmapFromBits(restore_bits, 16, 16, wx.WHITE)
self._active_restore_bitmap = BitmapFromBits(restore_bits, 16, 16, wx.WHITE)
else:
self._inactive_restore_bitmap = BitmapFromBits(restore_bits, 16, 16, self._inactive_caption_text_colour)
self._active_restore_bitmap = BitmapFromBits(restore_bits, 16, 16, self._active_caption_text_colour)
if isMac:
self._inactive_minimize_bitmap = BitmapFromBits(minimize_bits, 16, 16, wx.WHITE)
self._active_minimize_bitmap = BitmapFromBits(minimize_bits, 16, 16, wx.WHITE)
else:
self._inactive_minimize_bitmap = BitmapFromBits(minimize_bits, 16, 16, self._inactive_caption_text_colour)
self._active_minimize_bitmap = BitmapFromBits(minimize_bits, 16, 16, self._active_caption_text_colour)
self._inactive_pin_bitmap = BitmapFromBits(pin_bits, 16, 16, self._inactive_caption_text_colour)
self._active_pin_bitmap = BitmapFromBits(pin_bits, 16, 16, self._active_caption_text_colour)
self._custom_pane_bitmaps = False
    def SetCustomPaneBitmap(self, bmp, button, active, maximize=False):
        """
        Sets a custom button bitmap for the pane button.

        :param Bitmap `bmp`: the actual bitmap to set (at most 16x16 pixels);
        :param integer `button`: the button identifier;
        :param bool `active`: whether it is the bitmap for the active button or not;
        :param bool `maximize`: used to distinguish between the maximize and restore bitmaps.
        :raise: `Exception` if the bitmap is larger than 16x16.
        """
        if bmp.GetWidth() > 16 or bmp.GetHeight() > 16:
            raise Exception("The input bitmap is too big")
        if button == AUI_BUTTON_CLOSE:
            if active:
                self._active_close_bitmap = bmp
            else:
                self._inactive_close_bitmap = bmp
            if wx.Platform == "__WXMAC__":
                # Stop SetColor() from regenerating the Mac close bitmap
                # and overwriting this custom one.
                self._custom_pane_bitmaps = True
        elif button == AUI_BUTTON_PIN:
            if active:
                self._active_pin_bitmap = bmp
            else:
                self._inactive_pin_bitmap = bmp
        elif button == AUI_BUTTON_MAXIMIZE_RESTORE:
            if maximize:
                if active:
                    self._active_maximize_bitmap = bmp
                else:
                    self._inactive_maximize_bitmap = bmp
            else:
                if active:
                    self._active_restore_bitmap = bmp
                else:
                    self._inactive_restore_bitmap = bmp
        elif button == AUI_BUTTON_MINIMIZE:
            if active:
                self._active_minimize_bitmap = bmp
            else:
                self._inactive_minimize_bitmap = bmp
if _ctypes:
    # ctypes mirrors of the Win32 RECT/SIZE structs, used by ModernDockArt
    # when talking to winxptheme.
    class RECT(ctypes.Structure):
        """ Used to handle :class:`ModernDockArt` on Windows XP/Vista/7. """
        # NOTE(review): fields are unsigned (c_ulong), so negative
        # coordinates wrap around -- see the clamping in
        # ModernDockArt.DrawCaptionBackground.
        _fields_ = [('left', ctypes.c_ulong),('top', ctypes.c_ulong),('right', ctypes.c_ulong),('bottom', ctypes.c_ulong)]
        def dump(self):
            """ Dumps `self` as a :class:`Rect`. """
            return map(int, (self.left, self.top, self.right, self.bottom))
    class SIZE(ctypes.Structure):
        """ Used to handle :class:`ModernDockArt` on Windows XP/Vista/7. """
        _fields_ = [('x', ctypes.c_long),('y', ctypes.c_long)]
class ModernDockArt(AuiDefaultDockArt):
"""
    ModernDockArt is a custom `AuiDockArt` class that implements a look similar to Firefox and other recent applications.
    It uses the `winxptheme <http://sourceforge.net/projects/pywin32/>`_ module and
XP themes whenever possible, so it should look good even if the user has a custom theme.
:note: This dock art is Windows only and will only work if you have installed
Mark Hammond's `pywin32` module (http://sourceforge.net/projects/pywin32/).
"""
    def __init__(self, win):
        """
        Default class constructor.

        :param Window `win`: the window managed by :class:`~lib.agw.aui.framemanager.AuiManager`.
        """
        AuiDefaultDockArt.__init__(self)
        self.win = win
        # Get the size of a small close button (themed)
        hwnd = self.win.GetHandle()
        self.usingTheme = False
        if _ctypes:
            # OpenThemeData returns a falsy handle when no visual theme is
            # active, in which case we fall back to the default drawing.
            self.hTheme1 = winxptheme.OpenThemeData(hwnd, "Window")
            self.usingTheme = True
            if not self.hTheme1:
                self.usingTheme = False
        self._button_size = 13
        self._button_border_size = 3
        self._caption_text_indent = 6
        self._caption_size = 22
        # We only highlight the active pane with the caption text being in bold.
        # So we do not want a special colour for active elements.
        self._active_close_bitmap = self._inactive_close_bitmap
        self.Init()
    def Init(self):
        """ Initializes the dock art.

        Overrides the base colours so active and inactive captions share the
        same colour scheme (activity is shown with bold text instead).
        """
        AuiDefaultDockArt.Init(self)
        self._active_caption_colour = self._inactive_caption_colour
        self._active_caption_text_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_CAPTIONTEXT)
        self._inactive_caption_text_colour = self._active_caption_text_colour
    def DrawCaption(self, dc, window, text, rect, pane):
        """
        Draws the text in the pane caption.

        :param `dc`: a :class:`DC` device context;
        :param `window`: an instance of :class:`Window`;
        :param string `text`: the text to be displayed;
        :param Rect `rect`: the pane caption rectangle;
        :param `pane`: the pane for which the text is drawn.
        """
        dc.SetPen(wx.TRANSPARENT_PEN)
        self.DrawCaptionBackground(dc, rect, pane)
        active = ((pane.state & optionActive) and [True] or [False])[0]
        # Activity is conveyed by weight (bold), not by colour.
        self._caption_font.SetWeight(wx.FONTWEIGHT_BOLD)
        dc.SetFont(self._caption_font)
        if active:
            dc.SetTextForeground(self._active_caption_text_colour)
        else:
            dc.SetTextForeground(self._inactive_caption_text_colour)
        # Sample string covering ascenders/descenders to get the font height.
        w, h = dc.GetTextExtent("ABCDEFHXfgkj")
        clip_rect = wx.Rect(*rect)
        btns = pane.CountButtons()
        captionLeft = pane.HasCaptionLeft()
        # `variable` is the space available for the caption text: caption
        # length minus fixed offsets, the icon and the pane buttons.
        variable = (captionLeft and [rect.height] or [rect.width])[0]
        variable -= 3      # text offset
        variable -= 2      # button padding
        caption_offset = 0
        if pane.icon:
            if captionLeft:
                caption_offset += pane.icon.GetHeight() + 3
            else:
                caption_offset += pane.icon.GetWidth() + 3
            self.DrawIcon(dc, rect, pane)
        # Themed captions need a slightly different vertical centring tweak.
        diff = -2
        if self.usingTheme:
            diff = -1
        variable -= caption_offset
        variable -= btns*(self._button_size + self._button_border_size)
        draw_text = ChopText(dc, text, variable)
        if captionLeft:
            dc.DrawRotatedText(draw_text, rect.x+(rect.width/2)-(h/2)-diff, rect.y+rect.height-3-caption_offset, 90)
        else:
            dc.DrawText(draw_text, rect.x+3+caption_offset, rect.y+(rect.height/2)-(h/2)-diff)
    def DrawCaptionBackground(self, dc, rect, pane):
        """
        Draws the text caption background in the pane.

        Uses the native Windows theme (UxTheme "Window" part 5, states 1/2 =
        active/inactive small caption) when available, otherwise falls back
        to the default gradient drawing.

        :param `dc`: a :class:`DC` device context;
        :param Rect `rect`: the text caption rectangle;
        :param `pane`: the pane for which we are drawing the caption background.
        """
        dc.SetBrush(self._background_brush)
        dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)
        active = ((pane.state & optionActive) and [True] or [False])[0]
        if self.usingTheme:
            rectangle = wx.Rect()
            rc = RECT(rectangle.x, rectangle.y, rectangle.width, rectangle.height)
            # If rect x/y values are negative rc.right/bottom values will overflow and winxptheme.DrawThemeBackground
            # will raise a TypeError. Ensure they are never negative.
            rect.x = max(0, rect.x)
            rect.y = max(0, rect.y)
            # NOTE(review): the top/left field names are swapped here
            # (rc.top gets x, rc.left gets y), but the tuple below is passed
            # as (rc.top, rc.left, ...) i.e. (x, y, right, bottom), so the
            # values DrawThemeBackground receives end up in the correct
            # (left, top, right, bottom) order.
            rc.top = rect.x
            rc.left = rect.y
            rc.right = rect.x + rect.width
            rc.bottom = rect.y + rect.height
            if active:
                winxptheme.DrawThemeBackground(self.hTheme1, dc.GetHDC(), 5, 1, (rc.top, rc.left, rc.right, rc.bottom), None)
            else:
                winxptheme.DrawThemeBackground(self.hTheme1, dc.GetHDC(), 5, 2, (rc.top, rc.left, rc.right, rc.bottom), None)
        else:
            AuiDefaultDockArt.DrawCaptionBackground(self, dc, rect, pane)
def RequestUserAttention(self, dc, window, text, rect, pane):
    """
    Requests the user attention by intermittently highlighting the pane caption.

    :param `dc`: a :class:`DC` device context;
    :param `window`: an instance of :class:`Window`;
    :param string `text`: the text to be displayed;
    :param Rect `rect`: the pane caption rectangle;
    :param `pane`: the pane for which the text is drawn.
    """

    # Remember the caller's pane state so it can be restored afterwards.
    savedState = pane.state
    pane.state &= ~optionActive

    # Six alternating redraws (active, inactive, active, ...) give the
    # caption three visible "blinks", each held for 350 ms.
    for step in xrange(6):

        if step % 2 == 0:
            pane.state |= optionActive
        else:
            pane.state &= ~optionActive

        self.DrawCaptionBackground(dc, rect, pane)
        self.DrawCaption(dc, window, text, rect, pane)
        wx.SafeYield()
        wx.MilliSleep(350)

    pane.state = savedState
def DrawPaneButton(self, dc, window, button, button_state, rect, pane):
    """
    Draws a pane button in the pane caption area.

    :param `dc`: a :class:`DC` device context;
    :param `window`: an instance of :class:`Window`;
    :param integer `button`: the button to be drawn;
    :param integer `button_state`: the pane button state;
    :param Rect `rect`: the pane caption rectangle;
    :param `pane`: the pane for which the button is drawn.

    :raises: `Exception` if `button_state` is not one of the known
     ``AUI_BUTTON_STATE_*`` values.
    """

    if self.usingTheme:

        hTheme = self.hTheme1

        # Compute the themed button rectangle, compensating for borders.
        # (A previous `drect = wx.Rect(...)` computed here was never used
        # and has been removed.)
        # NOTE(review): top/left are filled from rect.x/rect.y respectively
        # (apparently swapped), but the tuple handed to DrawThemeBackground
        # is (rc.top, rc.left, rc.right, rc.bottom), so the ordering works
        # out the same as before.
        rc = RECT(0, 0, 0, 0)

        # Only the sign of the border offset on top/right differs between
        # the two caption orientations; left/bottom are identical.
        if pane.HasCaptionLeft():
            rc.top = rect.x + self._button_border_size
            rc.right = rect.x + self._button_size + self._button_border_size
        else:
            rc.top = rect.x - self._button_border_size
            rc.right = rect.x + self._button_size - self._button_border_size

        rc.left = int(rect.y + 1.5*self._button_border_size)
        rc.bottom = int(rect.y + self._button_size + 1.5*self._button_border_size)

        # Map the AUI button id onto the theme part id used by this art
        # provider (unknown buttons fall back to 15, as before).
        if button == AUI_BUTTON_CLOSE:
            btntype = 19
        elif button == AUI_BUTTON_PIN:
            btntype = 23
        elif button == AUI_BUTTON_MAXIMIZE_RESTORE:
            if not pane.IsMaximized():
                btntype = 17
            else:
                btntype = 21
        else:
            btntype = 15

        # Map the AUI button state onto the theme CBS_* state id. Inactive
        # panes use a parallel set of ids offset by 4 (1/2/3 -> 5/6/7).
        # The old `state = 4  # CBS_DISABLED` initializer was dead code:
        # every path below either assigns state or raises.
        stateMap = {AUI_BUTTON_STATE_NORMAL: 1,    # CBS_NORMAL
                    AUI_BUTTON_STATE_HOVER: 2,     # CBS_HOT
                    AUI_BUTTON_STATE_PRESSED: 3}   # CBS_PUSHED

        if button_state not in stateMap:
            raise Exception("ERROR: Unknown State.")

        state = stateMap[button_state]
        if not (pane.state & optionActive):
            state += 4  # inactive pane variant of the same state

        try:
            winxptheme.DrawThemeBackground(hTheme, dc.GetHDC(), btntype, state, (rc.top, rc.left, rc.right, rc.bottom), None)
        except TypeError:
            # Bad rectangle coordinates can make winxptheme raise TypeError;
            # silently skip drawing in that case (matches previous behaviour).
            return

    else:
        # Fallback to default closebutton if themes are not enabled
        rect2 = wx.Rect(rect.x-4, rect.y+2, rect.width, rect.height)
        AuiDefaultDockArt.DrawPaneButton(self, dc, window, button, button_state, rect2, pane)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.