repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
cctags/pelican-plugins | video_privacy_enhancer/video_privacy_enhancer.py | 35 | 10128 | """
Video Privacy Enhancer
--------------------------
Authored by Jacob Levernier, 2014
Released under the GNU AGPLv3
For more information on this plugin, please see the attached Readme.md file.
"""
"""
LIBRARIES FOR PYTHON TO USE
"""
from pelican import signals # For making this plugin work with Pelican.
import os.path # For checking whether files are present in the filesystem.
import re # For using regular expressions.
import logging
logger = logging.getLogger(__name__) # For using logger.debug() to log errors or other notes.
import urllib
import video_service_thumbnail_url_generating_functions as video_thumbnail_functions # These are functions defined in 'video_service_thumbnail_url_generating_functions.py', which is located in the same directory as this file.
"""
END OF LIBRARIES
"""
"""
SETTINGS
"""
# Do not use a leading or trailing slash below (e.g., use "images/video-thumbnails"):
# Path, relative to Pelican's output root, where downloaded video thumbnails
# are cached and served from.
output_directory_for_thumbnails = "images/video-thumbnails"

# See the note in the Readme file re: adding support for other video services to this list.
# Registry of supported services.  Each entry maps a service name to:
#   - the shortcode it recognizes in article text (e.g. "!youtube(...)"), and
#   - the function that turns a video ID into a thumbnail-download URL.
supported_video_services = {
    "youtube": {
        "shortcode_not_including_exclamation_point": "youtube",
        "function_for_generating_thumbnail_url": video_thumbnail_functions.generate_thumbnail_download_link_youtube,
    },
    "vimeo": {
        "shortcode_not_including_exclamation_point": "vimeo",
        "function_for_generating_thumbnail_url": video_thumbnail_functions.generate_thumbnail_download_link_vimeo,
    },
}
"""
In order for this plugin to work optimally, you need to do just a few things:
1. Enable the plugn in pelicanconf.py (see http://docs.getpelican.com/en/3.3.0/plugins.html for documentation):
PLUGIN_PATH = "/pelican-plugins"
PLUGINS = ["video_privacy_enhancer"]
2a. If necessary, install jQuery on your site (See https://stackoverflow.com/questions/1458349/installing-jquery -- the jQuery base file should go into your Pelican theme's 'static' directory)
2b. Copy the jQuery file in this folder into, for example, your_theme_folder/static/video_privacy_enhancer_jQuery.js, and add a line like this to the <head></head> element of your website's base.html (or equivalent) template:
`<script src="{{ SITEURL }}/theme/video_privacy_enhancer_jquery.js"></script> <!--Load jQuery functions for the Video Privacy Enhancer Pelican plugin -->`
3. Choose a default video embed size and add corresponding CSS to your theme's CSS file:
Youtube allows the following sizes in its embed GUI (as of this writing, in March 2014). I recommend choosing one, and then having the iframe for the actual video embed match it (so that it's a seamless transition). This can be handled with CSS in both cases, so I haven't hard-coded it here:
1280 W x 720 H
853 W x 480 H
640 W x 360 H
560 W x 315 H
Here's an example to add to your CSS file:
```
/* For use with the video-privacy-enhancer Pelican plugin */
img.video-embed-dummy-image,
iframe.embedded_privacy_video {
width: 853px;
max-height: 480px;
/* Center the element on the screen */
display: block;
margin-top: 2em;
margin-bottom: 2em;
margin-left: auto;
margin-right: auto;
}
iframe.embedded_privacy_video {
width: 843px;
height: 480px;
}
```
"""
"""
END SETTINGS
"""
# A function to check whtether output_directory_for_thumbnails (a variable set above in the SETTINGS section) exists. If it doesn't exist, we'll create it.
def check_for_thumbnail_directory(pelican_output_path):
    """Ensure the thumbnail output directory exists, creating it if needed.

    Returns True when the directory exists (or was just created), False when
    checking/creating it raised an error.
    """
    thumbnail_directory = pelican_output_path + "/" + output_directory_for_thumbnails
    try:
        # exists() (unlike isfile()) works for directories as well as files.
        if not os.path.exists(thumbnail_directory):
            os.makedirs(thumbnail_directory)
        return True
    except Exception as e:
        logger.error("Video Privacy Enhancer Plugin: Error in checking if thumbnail folder exists and making the directory if it doesn't: %s", e)
        return False
def download_thumbnail(video_id_from_shortcode, video_thumbnail_url, video_service_name, pelican_output_path):
    """Fetch one video's thumbnail into the local output tree.

    Ensures the thumbnail directory exists, then downloads the image to
    '<output>/<thumb_dir>/<service>_<video_id>.jpg' unless a previously
    downloaded copy is already on disk.
    """
    check_for_thumbnail_directory(pelican_output_path)
    destination = (pelican_output_path + "/" + output_directory_for_thumbnails +
                   "/" + video_service_name + "_" + video_id_from_shortcode + ".jpg")
    # Only hit the network when the thumbnail is not already cached locally.
    if not os.path.exists(destination):
        urllib.urlretrieve(video_thumbnail_url, destination)
# A function to read through each page and post as it comes through from Pelican, find all instances of a shortcode (e.g., `!youtube(...)`) and change it into an HTML <img> element with the video thumbnail.
# 'dictionary_of_services' below should be the dictionary defined in the settings above, which includes the service's name as the dictionary key, and, for each service, has a dictionary containing 'shortcode_to_search_for_not_including_exclamation_point' and 'function_for_generating_thumbnail_url'
def process_shortcodes(data_passed_from_pelican):
    """Replace every video shortcode (e.g. '!youtube(VIDEO_ID)') in a page or
    post with an <img> element showing a locally-hosted thumbnail.

    The <img> carries class "video-embed-dummy-image"; the plugin's jQuery
    snippet swaps it for the real embed iframe when clicked, so the video
    service is only contacted after an explicit user action.

    Parameters:
        data_passed_from_pelican: the content object Pelican passes to
            'content_object_init' subscribers.  Its '_content' attribute is
            rewritten in place ('content' without the underscore is
            read-only, hence the private attribute).
    """
    dictionary_of_services = supported_video_services  # Defined in the settings section above.
    logger.debug("Video Privacy Enhancer Plugin: Using the following dictionary of video services:")
    logger.debug(dictionary_of_services)
    if not data_passed_from_pelican._content:
        # Non-text items (e.g. image files) have no content to rewrite.
        return
    # Loop through services (e.g., youtube, vimeo), processing each in turn.
    # .items() (rather than the Python-2-only .iteritems()) keeps this
    # working under both Python 2 and 3.
    for video_service_name, video_service_information in dictionary_of_services.items():
        logger.debug("Video Privacy Enhancer Plugin: The name of the current service being processed is '" + video_service_name + "'")
        shortcode_to_search_for_not_including_exclamation_point = video_service_information['shortcode_not_including_exclamation_point']
        logger.debug("Video Privacy Enhancer Plugin: Currently looking for the shortcode '" + shortcode_to_search_for_not_including_exclamation_point + "'")
        function_for_generating_thumbnail_url = video_service_information['function_for_generating_thumbnail_url']
        # Every instance of, e.g., '!youtube' followed by anything up to the
        # first ')'.
        all_instances_of_the_shortcode = re.findall(r'\!' + shortcode_to_search_for_not_including_exclamation_point + r'.*?\)', data_passed_from_pelican._content)
        if len(all_instances_of_the_shortcode) > 0:  # Only touch _content when this page actually has shortcodes.
            # BUG FIX: substitutions now accumulate.  Previously each re.sub
            # restarted from the original _content, so when an article
            # contained more than one distinct video only the LAST
            # shortcode's replacement survived.
            replace_shortcode_in_text = data_passed_from_pelican._content
            for shortcode_to_parse in all_instances_of_the_shortcode:
                # Extract what's inside the parentheses of, e.g., '!youtube(...)'.
                video_id_from_shortcode = re.findall(r'(?<=' + shortcode_to_search_for_not_including_exclamation_point + r'\().*?(?=\))', shortcode_to_parse)[0]
                # Pull paths/URLs from the user's pelicanconf.py settings:
                pelican_output_path = data_passed_from_pelican.settings['OUTPUT_PATH']
                pelican_site_url = data_passed_from_pelican.settings['SITEURL']
                # Download the thumbnail if it's not already on the filesystem:
                video_thumbnail_url = function_for_generating_thumbnail_url(video_id_from_shortcode)
                download_thumbnail(video_id_from_shortcode, video_thumbnail_url, video_service_name, pelican_output_path)
                # Replace the shortcode with a clickable placeholder image.
                replace_shortcode_in_text = re.sub(r'\!' + shortcode_to_search_for_not_including_exclamation_point + r'\(' + video_id_from_shortcode + r'\)', r'<img class="video-embed-dummy-image" id="' + video_id_from_shortcode + '" src="' + pelican_site_url + '/' + output_directory_for_thumbnails + '/' + video_service_name + '_' + video_id_from_shortcode + '.jpg" alt="Embedded Video - Click to view" title="Embedded Video - Click to view" embed-service="' + video_service_name + '"></img>', replace_shortcode_in_text)
            # Write the fully-updated text back onto the content object.
            data_passed_from_pelican._content = replace_shortcode_in_text
# Make Pelican work (see http://docs.getpelican.com/en/3.3.0/plugins.html#how-to-create-plugins):
def register():
    """Plugin entry point: have Pelican run process_shortcodes() on each
    article/page as its content object is initialized."""
    signals.content_object_init.connect(process_shortcodes)
| agpl-3.0 |
marmarko/ml101 | tensorflow/examples/skflow/text_classification_character_rnn.py | 6 | 3028 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is an example of using recurrent neural networks over characters
for DBpedia dataset to predict class from description of an entity.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import pandas
import tensorflow as tf
from tensorflow.contrib import learn
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_bool('test_with_fake_data', False,
'Test the example code with fake data.')
MAX_DOCUMENT_LENGTH = 100
HIDDEN_SIZE = 20
def char_rnn_model(x, y):
  """Character level recurrent neural network model to predict classes.

  Args:
    x: byte-encoded documents, shape [batch, MAX_DOCUMENT_LENGTH].
    y: integer class labels (15 DBpedia classes).

  Returns:
    A (predictions dict, loss, train_op) triple in the legacy
    tf.contrib.learn Estimator model_fn format.
  """
  labels_one_hot = tf.one_hot(y, 15, 1, 0)
  # One-hot encode each byte, then split the sequence into a per-timestep
  # list for the (statically unrolled) RNN.
  timestep_inputs = tf.unpack(learn.ops.one_hot_matrix(x, 256), axis=1)
  gru_cell = tf.nn.rnn_cell.GRUCell(HIDDEN_SIZE)
  # Only the final state is kept; it serves as the document encoding.
  _, document_encoding = tf.nn.rnn(gru_cell, timestep_inputs, dtype=tf.float32)
  prediction, loss = learn.models.logistic_regression(document_encoding,
                                                      labels_one_hot)
  train_op = tf.contrib.layers.optimize_loss(
      loss, tf.contrib.framework.get_global_step(),
      optimizer='Adam', learning_rate=0.01)
  return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
def main(unused_argv):
  """Load DBpedia, train the character RNN, and print test accuracy."""
  # Prepare training and testing data.  Column 1 of the data frame holds the
  # entity description text; the target series holds the class index.
  dbpedia = learn.datasets.load_dataset(
      'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
  x_train = pandas.DataFrame(dbpedia.train.data)[1]
  y_train = pandas.Series(dbpedia.train.target)
  x_test = pandas.DataFrame(dbpedia.test.data)[1]
  y_test = pandas.Series(dbpedia.test.target)

  # Process vocabulary: encode each document as a fixed-length sequence of
  # bytes (padded/truncated to MAX_DOCUMENT_LENGTH).
  char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
  x_train = np.array(list(char_processor.fit_transform(x_train)))
  x_test = np.array(list(char_processor.transform(x_test)))

  # Build model from the model_fn defined above.
  classifier = learn.Estimator(model_fn=char_rnn_model)

  # Train and predict.  predict() yields per-example dicts; we keep the
  # 'class' entry produced by char_rnn_model.
  classifier.fit(x_train, y_train, steps=100)
  y_predicted = [
      p['class'] for p in classifier.predict(x_test, as_iterable=True)]
  score = metrics.accuracy_score(y_test, y_predicted)
  print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| bsd-2-clause |
inim4/googletest | test/gtest_xml_output_unittest.py | 1815 | 14580 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import datetime
import errno
import os
import re
import sys
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
# Command-line flags understood by Google Test binaries.
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = "--gtest_output"
# Name Google Test uses when no explicit XML output file is requested.
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
# Helper binary exercised by these tests.
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"

# Whether the build captures stack traces in failure messages; the expected
# XML below is parameterized on this.
SUPPORTS_STACK_TRACES = False

if SUPPORTS_STACK_TRACES:
  STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
  STACK_TRACE_TEMPLATE = ''
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="23" failures="4" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="Fails" status="run" time="*" classname="FailedTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" time="*" classname="MixedResultTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
<testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*" SetUpTestCase="yes" TearDownTestCase="aye">
<testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
<testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
<testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
<testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="HasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
</testsuite>
<testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/0" />
</testsuite>
<testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/1" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/0" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/1" />
</testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}
EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0"
errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
</testsuites>"""
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests">
</testsuites>"""
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)

# Probe the helper binary's test list once at import time to learn whether
# this platform/build supports typed tests.
SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
    [GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
"""
Unit test for Google Test's XML output functionality.
"""
# This test currently breaks on platforms that do not support typed and
# type-parameterized tests, so we don't run it under them.
if SUPPORTS_TYPED_TESTS:
def testNonEmptyXmlOutput(self):
"""
Runs a test program that generates a non-empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)
def testEmptyXmlOutput(self):
"""Verifies XML output for a Google Test binary without actual tests.
Runs a test program that generates an empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_EMPTY_XML, 0)
def testTimestampValue(self):
"""Checks whether the timestamp attribute in the XML output is valid.
Runs a test program that generates an empty XML output, and checks if
the timestamp attribute in the testsuites tag is valid.
"""
actual = self._GetXmlOutput('gtest_no_test_unittest', [], 0)
date_time_str = actual.documentElement.getAttributeNode('timestamp').value
# datetime.strptime() is only available in Python 2.5+ so we have to
# parse the expected datetime manually.
match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
self.assertTrue(
re.match,
'XML datettime string %s has incorrect format' % date_time_str)
date_time_from_xml = datetime.datetime(
year=int(match.group(1)), month=int(match.group(2)),
day=int(match.group(3)), hour=int(match.group(4)),
minute=int(match.group(5)), second=int(match.group(6)))
time_delta = abs(datetime.datetime.now() - date_time_from_xml)
# timestamp value should be near the current local time
self.assertTrue(time_delta < datetime.timedelta(seconds=600),
'time_delta is %s' % time_delta)
actual.unlink()
def testDefaultOutputFile(self):
"""
Confirms that Google Test produces an XML output file with the expected
default name if no name is explicitly specified.
"""
output_file = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_DEFAULT_OUTPUT_FILE)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
'gtest_no_test_unittest')
try:
os.remove(output_file)
except OSError, e:
if e.errno != errno.ENOENT:
raise
p = gtest_test_utils.Subprocess(
[gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG],
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
self.assert_(os.path.isfile(output_file))
def testSuppressedXmlOutput(self):
"""
Tests that no XML file is generated if the default XML listener is
shut down before RUN_ALL_TESTS is invoked.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + 'out.xml')
if os.path.isfile(xml_path):
os.remove(xml_path)
command = [GTEST_PROGRAM_PATH,
'%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path),
'--shut_down_xml']
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
# p.signal is avalable only if p.terminated_by_signal is True.
self.assertFalse(
p.terminated_by_signal,
'%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(1, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, 1))
self.assert_(not os.path.isfile(xml_path))
def testFilteredTestXmlOutput(self):
"""Verifies XML output when a filter is applied.
Runs a test program that executes only some tests and verifies that
non-selected tests do not show up in the XML output.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0,
extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])
def _GetXmlOutput(self, gtest_prog_name, extra_args, expected_exit_code):
"""
Returns the xml output generated by running the program gtest_prog_name.
Furthermore, the program's exit code must be expected_exit_code.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
gtest_prog_name + 'out.xml')
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] +
extra_args)
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assert_(False,
'%s was killed by signal %d' % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(expected_exit_code, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, expected_exit_code))
actual = minidom.parse(xml_path)
return actual
def _TestXmlOutput(self, gtest_prog_name, expected_xml,
expected_exit_code, extra_args=None):
"""
Asserts that the XML document generated by running the program
gtest_prog_name matches expected_xml, a string containing another
XML document. Furthermore, the program's exit code must be
expected_exit_code.
"""
actual = self._GetXmlOutput(gtest_prog_name, extra_args or [],
expected_exit_code)
expected = minidom.parseString(expected_xml)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
gtest_test_utils.Main()
| bsd-3-clause |
sunghan-chang/TizenRT | tools/ttrace_parser/scripts/ttrace.py | 10 | 19521 | #!/usr/bin/env python
###########################################################################
#
# Copyright 2017 Samsung Electronics All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
###########################################################################
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Android system-wide tracing utility.
This is a tool for capturing a trace that includes data from both userland and
the kernel. It creates an HTML file for visualizing the trace.
"""
from __future__ import print_function
import os
import sys
import time
import zlib
import errno
import string
import select
import optparse
import pid_parser
import subprocess
# Names of the CSS/JS assets that get inlined into the generated HTML report.
flattened_css_file = 'style.css'
flattened_js_file = 'tscript.js'

# Serial number of the target device; stays None until set from options.
g_device_serial = None
class OptionParserIgnoreErrors(optparse.OptionParser):
    """An OptionParser that never prints, never exits the process, and
    silently ignores parse errors.

    BUG FIX: the overrides now accept the same parameters as the
    optparse.OptionParser methods they replace.  Previously ``exit`` and the
    ``print_*`` methods took no arguments, so any internal optparse call
    that passed one (e.g. ``self.exit(2, msg)`` or ``print_usage(file)``)
    raised TypeError instead of being ignored.  No-argument calls still work
    exactly as before.
    """

    def error(self, msg):
        # Swallow the parse error instead of printing usage and exiting.
        pass

    def exit(self, status=0, msg=None):
        # Never terminate the process.
        pass

    def print_usage(self, file=None):
        pass

    def print_help(self, file=None):
        pass

    def print_version(self, file=None):
        pass
def compose_html_win(script_dir, options, css, js, templates):
    """Build the HTML trace report from a Windows capture file.

    Reads the raw (zlib-compressed) trace bytes that follow the 'TRACE:'
    marker in options.from_file_win, decompresses them chunk by chunk, and
    writes prefix + escaped trace body + suffix to options.output_file.
    Uses os.open/os.read with os.O_BINARY, so this path is Windows-only.
    """
    data = []
    ret_fd = os.open(options.from_file_win, os.O_RDONLY | os.O_BINARY)
    out = os.read(ret_fd, 4096)
    # Everything after the first 'TRACE:' marker is the trace payload.
    parts = out.split('TRACE:', 1)
    data.append(parts[1])
    # Drain the rest of the file in 4 KiB chunks until EOF.
    while True:
        out = os.read(ret_fd, 4096)
        keepReading = False
        if len(out) > 0:
            keepReading = True
            data.append(out)
        if not keepReading:
            break
    data = ''.join(data)
    # Collapse CRLF line endings added by the capture transport.
    if data.startswith('\r\n'):
        data = data.replace('\r\n', '\n')
    # Drop the single leftover separator character after 'TRACE:'.
    # NOTE(review): indentation reconstructed — presumably this strip is
    # unconditional (it follows the systrace upstream pattern); confirm.
    data = data[1:]
    html_filename = options.output_file
    html_prefix = read_asset(script_dir, 'prefix.html')
    html_suffix = read_asset(script_dir, 'suffix.html')
    html_file = open(html_filename, 'w')
    # The prefix template embeds the CSS, JS, and HTML templates inline.
    html_file.write(html_prefix % (css, js, templates))
    size = 4096
    dec = zlib.decompressobj()
    # Decompress incrementally and escape newlines so the trace text can
    # live inside a JavaScript string literal in the generated HTML.
    for chunk in (data[i:i + size] for i in range(0, len(data), size)):
        decoded_chunk = dec.decompress(chunk)
        html_chunk = decoded_chunk.replace('\n', '\\n\\\n')
        html_file.write(html_chunk)
    html_out = dec.flush().replace('\n', '\\n\\\n')
    # write body
    html_file.write(html_out)
    # write suffix
    html_file.write(html_suffix)
    html_file.close()
    print("\n wrote file://%s\n" % os.path.abspath(options.output_file))
    return
def compose_html(script_dir, options, css, js, templates):
    """Build the HTML trace report from an uncompressed text capture.

    Filters status lines out of options.from_text_file via a temporary
    '<name>composing' file, escapes the remaining trace text, and writes
    prefix + body + suffix to options.output_file.
    """
    html_filename = options.output_file
    html_prefix = read_asset(script_dir, 'prefix.html')
    html_suffix = read_asset(script_dir, 'suffix.html')
    html_file = open(html_filename, 'w')
    html_file.write(html_prefix % (css, js, templates))
    cur_dir = os.getcwd()
    # remove useless 2 lines
    # NOTE(review): 'input'/'output' shadow Python builtins; left unchanged
    # here since this is a documentation-only pass.
    with open(os.path.join(cur_dir, options.from_text_file), "r") as input:
        with open(os.path.join(cur_dir, options.from_text_file + 'composing'), "wb") as output:
            for line in input:
                # Skip capture-status chatter and blank lines; keep only
                # real trace records.
                if "capturing trace" in line:
                    continue
                elif "TRACE:" in line:
                    continue
                elif " done" in line:
                    continue
                elif '\n' == line:
                    continue
                else:
                    output.write(line)
    # case not compressed, boot case
    html_out = read_asset(script_dir, os.path.join(cur_dir, options.from_text_file + 'composing'))
    # Escape newlines so the trace text can live inside a JavaScript string
    # literal in the generated HTML.
    html_out = html_out.replace('\n', '\\n\\\n')
    os.remove(os.path.join(cur_dir, options.from_text_file + 'composing'))
    # write body
    html_file.write(html_out)
    # Write suffix
    html_file.write(html_suffix)
    html_file.close()
    print("\n wrote file://%s\n" % os.path.abspath(options.output_file))
    return
def get_os_cmd(cmdARGS):
    """Run *cmdARGS* (a whitespace-separated command string) and return up
    to 4096 bytes of its stdout, or 0 when stdout was not among the first
    streams to become readable."""
    proc = subprocess.Popen(cmdARGS.split(),
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    streams = [proc.stdout, proc.stderr]
    readable, _, _ = select.select(streams, [], streams)
    if proc.stdout not in readable:
        return 0
    return os.read(proc.stdout.fileno(), 4096)
def sdb_shell(str_param):
    """Run a single sdb command (e.g. 'kill-server', 'shell sync'),
    targeting the device selected by the module-level g_device_serial when
    one is set, then pause briefly so the sdb server can settle.

    The Python-2-only string.join() call was replaced with the equivalent
    ' '.join(); the unused 'global g_options' declaration was dropped
    (reading a module global needs no declaration, and g_options was never
    referenced here anyway).
    """
    cmd_str = ['sdb']
    if g_device_serial:
        cmd_str.extend(['-s', str(g_device_serial)])
    cmd_str.extend([str(str_param)])
    os.system(' '.join(cmd_str))
    os.system('sleep 2')
def is_sdb_available():
    # Probe for a usable sdb device, retrying up to max_no times.
    # Returns the device identifier string on success; exits the process
    # when no device can be reached, or when several devices are attached
    # but no serial was supplied.
    no = 0
    max_no = 10
    sdb_shell('kill-server')
    while(no < max_no):
        str_cmd = get_os_cmd('sdb start-server')
        str_cmd = get_os_cmd('sdb devices')
        os.system('sleep 2')
        # NOTE(review): get_os_cmd() may return the int 0 when stdout was
        # not ready; .split() would then raise — assumes output was read.
        l_devices = str_cmd.split('\n')
        if len(l_devices) > 3:
            # More than one attached device: an explicit serial is required.
            if g_device_serial is None:
                print('Please specify serial with -e option')
                sys.exit(1)
        # Tokens after the banner line; a healthy entry contains 'device'.
        dev_type = str_cmd.split("List of devices attached")[-1].split()
        if 'device' in dev_type:
            print('Ready to connect')
            return dev_type[0]
        else:
            no = no + 1
            print('retry...' + str(no))
            sdb_shell('kill-server')
    if no == max_no:
        print('Could not connect to SDB devices')
        sys.exit(1)
def set_sdb_root():
    # Restart sdb with root privileges (needed before writing
    # /etc/ttrace.conf on the target). Returns 1 on success, 0 when no
    # device type was obtained.
    dev_type = is_sdb_available()
    if dev_type == 0:
        return 0
    sdb_shell('root on')
    # Real hardware (non-emulator) additionally needs the booting mode
    # switched before system files may be modified.
    if not ('emulator' in dev_type):
        sdb_shell('shell change-booting-mode.sh --update')
    print('SDB was rooted!!!')
    return 1
def trace_bootup(cmd):
    # Arm boot-time tracing: write the atrace command into /etc/ttrace.conf
    # on the target, then reboot so tracing starts during boot.
    # The trailing \' closes the shell quote opened by the "'echo" element
    # the caller placed into the argument list.
    if set_sdb_root() == 0:
        return
    print(cmd + ' > /etc/ttrace.conf\'')
    str_cmd = cmd + ' > /etc/ttrace.conf\''
    os.system(str_cmd)
    os.system('sleep 2')
    sdb_shell('shell sync')    # flush the config file to target storage
    sdb_shell('shell reboot')
    sdb_shell('kill-server')
def add_sdb_serial(command, serial):
    """Insert '-s <serial>' right after the program name in *command*
    (mutates the list in place); a None serial leaves it untouched."""
    if serial is None:
        return
    command[1:1] = ['-s', serial]
def main():
    """Command-line entry point.

    Parses options, builds the atrace command line, runs it on the target
    over sdb (or reads a previously captured trace from a file) and writes
    a standalone HTML report embedding the trace-viewer assets.
    """
    global g_device_serial
    usage = "Usage: %prog [options] [category1 [category2 ...]]"
    desc = "Example: %prog -b 32768 -t 15 gfx input view sched freq"
    parser = optparse.OptionParser(usage=usage, description=desc)
    parser.add_option('-o', dest='output_file', help='write HTML to FILE',
                      default='trace.html', metavar='FILE')
    parser.add_option('-t', '--time', dest='trace_time', type='int',
                      help='trace for N seconds', metavar='N')
    parser.add_option('-b', '--buf-size', dest='trace_buf_size', type='int',
                      help='use a trace buffer size of N KB', metavar='N')
    parser.add_option('-l', '--list-categories', dest='list_categories', default=False,
                      action='store_true', help='list the available categories and exit')
    parser.add_option('-u', '--bootup', dest='trace_bootup', default=False,
                      action='store_true', help='trace boot up')
    parser.add_option('--link-assets', dest='link_assets', default=False,
                      action='store_true', help='link to original CSS or JS resources '
                      'instead of embedding them')
    parser.add_option('--from-file', dest='from_file', action='store',
                      help='read the trace from a file (compressed) rather than running a live trace')
    parser.add_option('--from-file-win', dest='from_file_win', action='store',
                      help='read the trace from a file (compressed) rather than running a live trace on windows')
    parser.add_option('--from-text-file', dest='from_text_file', action='store',
                      help='read the trace from a file (not compressed) rather than running a live trace')
    parser.add_option('--asset-dir', dest='asset_dir', default='trace-viewer',
                      type='string', help='')
    parser.add_option('-e', '--serial', dest='device_serial', type='string',
                      help='sdb device serial number')
    parser.add_option('--async_start', dest='async_start', default=False, action='store_true',
                      help='start circular trace and return immediately')
    parser.add_option('--async_dump', dest='async_dump', default=False, action='store_true',
                      help='dump the current contents of circular trace buffer')
    parser.add_option('--async_stop', dest='async_stop', default=False, action='store_true',
                      help='stop tracing and dump the current contents of circular trace buffer')
    parser.add_option('--append', dest='append', default=False, action='store_true',
                      help='append traces to the existing traces. do not clear the trace buffer')
    parser.add_option('--backup', dest='backup', default=False, action='store_true',
                      help='back up the existing traces to /tmp/trace.backup and then clear the trace buffer')
    options, args = parser.parse_args()

    # Build the base command line for the chosen input source.
    if options.list_categories:
        atrace_args = ['sdb', 'shell', 'atrace', '--list_categories']
        expect_trace = False
    elif options.from_file is not None:
        atrace_args = ['cat', options.from_file]
        expect_trace = True
    elif options.from_file_win is not None:
        atrace_args = ['type', options.from_file_win]
        expect_trace = True
    elif options.from_text_file is not None:
        atrace_args = ['cat', options.from_text_file]
        expect_trace = True
    else:
        # Live capture over sdb.
        if options.trace_bootup:
            # "'echo" opens a shell quote that trace_bootup() later closes
            # when redirecting the command into /etc/ttrace.conf.
            atrace_args = ['sdb', 'shell', '\'echo', 'atrace']
            expect_trace = True
        else:
            atrace_args = ['sdb', 'shell', 'atrace', '-z']
            expect_trace = True
        if options.trace_time is not None:
            if options.trace_time > 0:
                atrace_args.extend(['-t', str(options.trace_time)])
            else:
                parser.error('the trace time must be a positive number')
        if options.trace_buf_size is not None:
            if options.trace_buf_size > 0:
                atrace_args.extend(['-b', str(options.trace_buf_size)])
            else:
                parser.error('the trace buffer size must be a positive number')
        # Remaining positional arguments are atrace categories.
        atrace_args.extend(args)
    if atrace_args[0] == 'sdb':
        add_sdb_serial(atrace_args, options.device_serial)
    # Remember the serial for the sdb_shell() helpers.
    if options.device_serial:
        g_device_serial = str(options.device_serial).strip()
    else:
        g_device_serial = None

    # Resolve the trace-viewer assets (CSS/JS/templates): either linked from
    # the source tree (--link-assets) or embedded from pre-flattened files.
    script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
    if options.link_assets:
        src_dir = os.path.join(script_dir, options.asset_dir, 'src')
        build_dir = os.path.join(script_dir, options.asset_dir, 'build')
        js_files, js_flattenizer, css_files, templates = get_assets(src_dir, build_dir)
        css = '\n'.join(linked_css_tag % (os.path.join(src_dir, f)) for f in css_files)
        js = '<script language="javascript">\n%s</script>\n' % js_flattenizer
        js += '\n'.join(linked_js_tag % (os.path.join(src_dir, f)) for f in js_files)
    else:
        css_filename = os.path.join(script_dir, flattened_css_file)
        js_filename = os.path.join(script_dir, flattened_js_file)
        css = compiled_css_tag % (open(css_filename).read())
        js = compiled_js_tag % (open(js_filename).read())
        templates = ''
    html_filename = options.output_file

    if options.trace_bootup:
        # Arm boot tracing, reboot the device, and wait (fixed sleeps) for
        # it to come back before collecting the result with --async_stop.
        print("Trace for bootup")
        atrace_args.extend(['--async_start'])
        trace_bootup(string.join(atrace_args))
        print("Please pull out the usb cable on target")
        os.system('sleep ' + '40')
        print("Please plug the usb cable to target")
        os.system('sleep ' + '20')
        atrace_args.remove('--async_start')
        atrace_args.remove('\'echo')
        atrace_args.extend(['-z', '--async_stop'])
        expect_trace = True

    # File-based inputs are converted directly to HTML and we are done.
    if options.from_text_file:
        compose_html(script_dir, options, css, js, templates)
        return
    elif options.from_file_win:
        compose_html_win(script_dir, options, css, js, templates)
        return
    elif options.from_file:
        print("From file")

    # Pass the async control flags straight through to atrace.
    if options.async_start:
        atrace_args.extend(['--async_start'])
    if options.async_dump:
        atrace_args.extend(['--async_dump'])
    if options.async_stop:
        atrace_args.extend(['--async_stop'])
    if options.append:
        atrace_args.extend(['--append'])
    if options.backup:
        atrace_args.extend(['--backup'])
        # NOTE(review): backup_trace is assigned but never read afterwards.
        backup_trace = True

    sdb = subprocess.Popen(atrace_args, stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
    if options.async_start:
        return

    result = None
    data = []

    # Read the text portion of the output and watch for the 'TRACE:' marker that
    # indicates the start of the trace data.
    while result is None:
        ready = select.select([sdb.stdout, sdb.stderr], [], [sdb.stdout, sdb.stderr])
        if sdb.stderr in ready[0]:
            err = os.read(sdb.stderr.fileno(), 4096)
            sys.stderr.write(err)
            sys.stderr.flush()
        if sdb.stdout in ready[0]:
            out = os.read(sdb.stdout.fileno(), 4096)
            parts = out.split('\nTRACE:', 1)
            txt = parts[0].replace('\r', '')
            if len(parts) == 2:
                # The '\nTRACE:' match stole the last newline from the text, so add it
                # back here.
                txt += '\n'
            sys.stdout.write(txt)
            sys.stdout.flush()
            if len(parts) == 2:
                data.append(parts[1])
                sys.stdout.write("downloading trace...")
                sys.stdout.flush()
                break
        result = sdb.poll()

    # Read and buffer the data portion of the output.
    while True:
        ready = select.select([sdb.stdout, sdb.stderr], [], [sdb.stdout, sdb.stderr])
        keepReading = False
        if sdb.stderr in ready[0]:
            err = os.read(sdb.stderr.fileno(), 4096)
            if len(err) > 0:
                keepReading = True
                sys.stderr.write(err)
                sys.stderr.flush()
        if sdb.stdout in ready[0]:
            out = os.read(sdb.stdout.fileno(), 4096)
            if len(out) > 0:
                keepReading = True
                data.append(out)
        # Stop only once the process exited AND both pipes drained.
        if result is not None and not keepReading:
            break
        result = sdb.poll()

    if result == 0:
        if expect_trace:
            if not data:
                print(('No data was captured. Output file was not ' +
                       'written.'), file=sys.stderr)
                sys.exit(1)
            else:
                # Indicate to the user that the data download is complete.
                print(" done\n")
            data = ''.join(data)
            # Collapse CRLFs that are added by sdb shell.
            if data.startswith('\r\n'):
                data = data.replace('\r\n', '\n')
            # Skip the initial newline.
            data = data[1:]
            html_prefix = read_asset(script_dir, 'prefix.html')
            html_suffix = read_asset(script_dir, 'suffix.html')
            html_file = open(html_filename, 'w')
            # A raw copy of the decompressed trace is kept for pid_parser.
            trace_filename = html_filename + '.trace.raw'
            trace_file = open(trace_filename, 'w')
            html_file.write(html_prefix % (css, js, templates))
            # Decompress the zlib-packed trace in 4 KB chunks, escaping
            # newlines so the payload embeds in a JS string literal.
            size = 4096
            dec = zlib.decompressobj()
            for chunk in (data[i:i + size] for i in range(0, len(data), size)):
                decoded_chunk = dec.decompress(chunk)
                html_chunk = decoded_chunk.replace('\n', '\\n\\\n')
                html_file.write(html_chunk)
                trace_file.write(html_chunk)
            html_out = dec.flush().replace('\n', '\\n\\\n')
            html_file.write(html_out)
            # Write suffix
            html_file.write(html_suffix)
            html_file.close()
            trace_file.close()
            pid_parser.parse(trace_filename)
            os.remove(trace_filename)
            print("\nwrote file://%s\n" % os.path.abspath(options.output_file))
    else:  # i.e. result != 0
        print('sdb returned error code %d' % result, file=sys.stderr)
        sys.exit(1)
def read_asset(src_dir, filename):
    """Return the full text of *filename* located under *src_dir*."""
    asset_path = os.path.join(src_dir, filename)
    with open(asset_path) as asset_file:
        return asset_file.read()
def get_assets(src_dir, build_dir):
    # Import the trace-viewer build helpers that live in build_dir and
    # compute the flattened asset lists.
    # Returns (js_files, js_flattenizer, css_files, templates) where
    # js_flattenizer is a JS snippet marking every module as pre-loaded.
    sys.path.append(build_dir)
    gen = __import__('generate_standalone_timeline_view', {}, {})
    parse_deps = __import__('parse_deps', {}, {})
    gen_templates = __import__('generate_template_contents', {}, {})
    filenames = gen._get_input_filenames()
    load_sequence = parse_deps.calc_load_sequence(filenames, src_dir)
    js_files = []
    js_flattenizer = "window.FLATTENED = {};\n"
    js_flattenizer += "window.FLATTENED_RAW_SCRIPTS = {};\n"
    css_files = []
    for module in load_sequence:
        js_files.append(os.path.relpath(module.filename, src_dir))
        js_flattenizer += "window.FLATTENED['%s'] = true;\n" % module.name
        for dependent_raw_script_name in module.dependent_raw_script_names:
            js_flattenizer += (
                "window.FLATTENED_RAW_SCRIPTS['%s'] = true;\n"
                % dependent_raw_script_name)
        for style_sheet in module.style_sheets:
            css_files.append(os.path.relpath(style_sheet.filename, src_dir))
    templates = gen_templates.generate_templates()
    # Undo the sys.path modification made above.
    sys.path.pop()
    return (js_files, js_flattenizer, css_files, templates)
# HTML wrappers for the viewer assets: "compiled" tags embed the asset text
# inline; "linked" tags reference files on disk (used with --link-assets).
compiled_css_tag = """<style type="text/css">%s</style>"""
compiled_js_tag = """<script language="javascript">%s</script>"""
linked_css_tag = """<link rel="stylesheet" href="%s"></link>"""
linked_js_tag = """<script language="javascript" src="%s"></script>"""

if __name__ == '__main__':
    main()
| apache-2.0 |
bitcoin/bitcoin | test/functional/mempool_updatefromblock.py | 35 | 6203 | #!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool descendants/ancestors information update.
Test mempool update of transaction descendants/ancestors information (count, size)
when transactions have been re-added from a disconnected block to the mempool.
"""
import time
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class MempoolUpdateFromBlockTest(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 1
        # Raised package limits so the dense tournament graph built below is
        # accepted into the mempool.
        self.extra_args = [['-limitdescendantsize=1000', '-limitancestorsize=1000']]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def transaction_graph_test(self, size, n_tx_to_mine=None, start_input_txid='', end_address='', fee=Decimal(0.00100000)):
        """Create an acyclic tournament (a type of directed graph) of transactions and use it for testing.

        Keyword arguments:
        size -- the order N of the tournament which is equal to the number of the created transactions
        n_tx_to_mine -- the number of transaction that should be mined into a block

        If all of the N created transactions tx[0]..tx[N-1] reside in the mempool,
        the following holds:
            the tx[K] transaction:
            - has N-K descendants (including this one), and
            - has K+1 ancestors (including this one)

        More details: https://en.wikipedia.org/wiki/Tournament_(graph_theory)
        """
        # NOTE(review): n_tx_to_mine defaults to None but is used below in a
        # membership test ('tx_count in n_tx_to_mine'); callers must pass a
        # container — confirm the default is ever exercised.
        if not start_input_txid:
            # Default to the coinbase of block 1.
            start_input_txid = self.nodes[0].getblock(self.nodes[0].getblockhash(1))['tx'][0]

        if not end_address:
            end_address = self.nodes[0].getnewaddress()

        first_block_hash = ''
        tx_id = []
        tx_size = []
        self.log.info('Creating {} transactions...'.format(size))
        for i in range(0, size):
            self.log.debug('Preparing transaction #{}...'.format(i))
            # Prepare inputs.
            if i == 0:
                inputs = [{'txid': start_input_txid, 'vout': 0}]
                inputs_value = self.nodes[0].gettxout(start_input_txid, 0)['value']
            else:
                inputs = []
                inputs_value = 0
                for j, tx in enumerate(tx_id[0:i]):
                    # Transaction tx[K] is a child of each of previous transactions tx[0]..tx[K-1] at their output K-1.
                    vout = i - j - 1
                    inputs.append({'txid': tx_id[j], 'vout': vout})
                    inputs_value += self.nodes[0].gettxout(tx, vout)['value']

            self.log.debug('inputs={}'.format(inputs))
            self.log.debug('inputs_value={}'.format(inputs_value))

            # Prepare outputs.
            tx_count = i + 1
            if tx_count < size:
                # Transaction tx[K] is an ancestor of each of subsequent transactions tx[K+1]..tx[N-1].
                n_outputs = size - tx_count
                # Split the remaining value (minus the fee) evenly.
                output_value = ((inputs_value - fee) / Decimal(n_outputs)).quantize(Decimal('0.00000001'))
                outputs = {}
                for _ in range(n_outputs):
                    outputs[self.nodes[0].getnewaddress()] = output_value
            else:
                # Last transaction: sweep everything to the end address.
                output_value = (inputs_value - fee).quantize(Decimal('0.00000001'))
                outputs = {end_address: output_value}

            self.log.debug('output_value={}'.format(output_value))
            self.log.debug('outputs={}'.format(outputs))

            # Create a new transaction.
            unsigned_raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
            signed_raw_tx = self.nodes[0].signrawtransactionwithwallet(unsigned_raw_tx)
            tx_id.append(self.nodes[0].sendrawtransaction(signed_raw_tx['hex']))
            tx_size.append(self.nodes[0].getrawmempool(True)[tx_id[-1]]['vsize'])

            if tx_count in n_tx_to_mine:
                # The created transactions are mined into blocks by batches.
                self.log.info('The batch of {} transactions has been accepted into the mempool.'.format(len(self.nodes[0].getrawmempool())))
                block_hash = self.nodes[0].generate(1)[0]
                if not first_block_hash:
                    first_block_hash = block_hash
                assert_equal(len(self.nodes[0].getrawmempool()), 0)
                self.log.info('All of the transactions from the current batch have been mined into a block.')
            elif tx_count == size:
                # At the end all of the mined blocks are invalidated, and all of the created
                # transactions should be re-added from disconnected blocks to the mempool.
                self.log.info('The last batch of {} transactions has been accepted into the mempool.'.format(len(self.nodes[0].getrawmempool())))
                start = time.time()
                self.nodes[0].invalidateblock(first_block_hash)
                end = time.time()
                assert_equal(len(self.nodes[0].getrawmempool()), size)
                self.log.info('All of the recently mined transactions have been re-added into the mempool in {} seconds.'.format(end - start))

        # Verify the tournament invariants stated in the docstring.
        self.log.info('Checking descendants/ancestors properties of all of the in-mempool transactions...')
        for k, tx in enumerate(tx_id):
            self.log.debug('Check transaction #{}.'.format(k))
            assert_equal(self.nodes[0].getrawmempool(True)[tx]['descendantcount'], size - k)
            assert_equal(self.nodes[0].getrawmempool(True)[tx]['descendantsize'], sum(tx_size[k:size]))
            assert_equal(self.nodes[0].getrawmempool(True)[tx]['ancestorcount'], k + 1)
            assert_equal(self.nodes[0].getrawmempool(True)[tx]['ancestorsize'], sum(tx_size[0:(k + 1)]))

    def run_test(self):
        # Use batch size limited by DEFAULT_ANCESTOR_LIMIT = 25 to not fire "too many unconfirmed parents" error.
        self.transaction_graph_test(size=100, n_tx_to_mine=[25, 50, 75])
# Run the functional test directly when executed as a script.
if __name__ == '__main__':
    MempoolUpdateFromBlockTest().main()
| mit |
xkmato/tracpro | tracpro/profiles/views.py | 1 | 8127 | from __future__ import absolute_import, unicode_literals
from dash.orgs.views import OrgPermsMixin
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import Http404
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from smartmin.views import (
SmartCRUDL, SmartCreateView, SmartListView, SmartReadView, SmartUpdateView)
from .forms import UserForm
class UserFormMixin(object):
    """
    Mixin for views that use a user form
    """
    def get_form_kwargs(self):
        # Expose the requesting user to the form (UserForm expects a 'user'
        # keyword argument).
        kwargs = super(UserFormMixin, self).get_form_kwargs()
        kwargs['user'] = self.request.user
        return kwargs

    def derive_initial(self):
        # Pre-populate full_name from the related profile when editing an
        # existing user.
        initial = super(UserFormMixin, self).derive_initial()
        if self.object:
            initial['full_name'] = self.object.profile.full_name
        return initial

    def post_save(self, obj):
        # Persist the profile fields and any password change after the user
        # row itself was saved.
        obj = super(UserFormMixin, self).post_save(obj)
        data = self.form.cleaned_data
        obj.profile.full_name = data['full_name']
        obj.profile.save()
        # Either field may be present depending on the concrete view's
        # field list (create uses 'password', update uses 'new_password').
        password = data.get('new_password', None) or data.get('password', None)
        if password:
            obj.set_password(password)
            obj.save()
        return obj
class UserFieldsMixin(object):
    # Shared smartmin field accessors for user list/read views.
    def get_full_name(self, obj):
        return obj.profile.full_name

    def get_regions(self, obj):
        # Comma-separated display of the user's regions.
        return ", ".join([force_text(r) for r in obj.regions.all()])
class UserCRUDL(SmartCRUDL):
    # CRUDL for supervisors within an organization.
    model = User
    actions = ('create', 'update', 'read', 'self', 'list')

    class Create(OrgPermsMixin, UserFormMixin, SmartCreateView):
        fields = ('full_name', 'email', 'password', 'confirm_password', 'change_password', 'regions')
        form_class = UserForm
        permission = 'profiles.profile_user_create'
        success_message = _("New supervisor created")
        title = _("Create Supervisor")

        def save(self, obj):
            # Delegate to User.create() so the profile and region
            # assignments are set up consistently.
            org = self.request.user.get_org()
            full_name = self.form.cleaned_data['full_name']
            password = self.form.cleaned_data['password']
            change_password = self.form.cleaned_data['change_password']
            regions = self.form.cleaned_data['regions']
            self.object = User.create(
                org, full_name, obj.email, password, change_password, regions)

    class Update(OrgPermsMixin, UserFormMixin, SmartUpdateView):
        fields = (
            'full_name', 'email', 'new_password', 'confirm_password',
            'regions', 'is_active')
        form_class = UserForm
        permission = 'profiles.profile_user_update'
        success_message = _("Supervisor updated")
        title = _("Edit Supervisor")

        def derive_initial(self):
            # Show the user's current regions in the form.
            initial = super(UserCRUDL.Update, self).derive_initial()
            initial['regions'] = self.object.regions.all()
            return initial

        def post_save(self, obj):
            # Sync region membership after the user row is saved.
            obj = super(UserCRUDL.Update, self).post_save(obj)
            obj.update_regions(self.form.cleaned_data['regions'])
            return obj

    class Self(OrgPermsMixin, UserFormMixin, SmartUpdateView):
        """
        Limited update form for users to edit their own profiles
        """
        form_class = UserForm
        success_url = '@home.home'
        success_message = _("Profile updated")
        title = _("Edit My Profile")

        @classmethod
        def derive_url_pattern(cls, path, action):
            # Fixed URL: there is no pk, the object is the requesting user.
            return r'^profile/self/$'

        def has_permission(self, request, *args, **kwargs):
            # Any authenticated user may edit their own profile.
            return self.request.user.is_authenticated()

        def get_object(self, queryset=None):
            if not self.request.user.has_profile():
                raise Http404(_("User doesn't have a chat profile"))
            return self.request.user

        def pre_save(self, obj):
            # Setting a password clears the forced change_password flag.
            obj = super(UserCRUDL.Self, self).pre_save(obj)
            if 'password' in self.form.cleaned_data:
                self.object.profile.change_password = False
            return obj

        def derive_fields(self):
            # Users flagged for a forced password change must set a new
            # password; everyone else may optionally change it.
            fields = ['full_name', 'email']
            if self.object.profile.change_password:
                fields += ['password']
            else:
                fields += ['new_password']
            return fields + ['confirm_password']

    class Read(OrgPermsMixin, UserFieldsMixin, SmartReadView):
        permission = 'profiles.profile_user_read'

        def derive_title(self):
            if self.object == self.request.user:
                return _("My Profile")
            else:
                return super(UserCRUDL.Read, self).derive_title()

        def derive_fields(self):
            # Admins implicitly cover all regions, so hide that field.
            fields = ['full_name', 'type', 'email']
            if not self.object.is_admin_for(self.request.org):
                fields += ['regions']
            return fields

        def get_queryset(self):
            # only allow access to active users attached to this org
            org = self.request.org
            qs = super(UserCRUDL.Read, self).get_queryset()
            qs = qs.filter(Q(org_editors=org) | Q(org_admins=org))
            qs = qs.filter(is_active=True)
            qs = qs.distinct()
            return qs

        def get_context_data(self, **kwargs):
            # Show an edit button for one's own profile, or for others when
            # the viewer has the update permission.
            context = super(UserCRUDL.Read, self).get_context_data(**kwargs)
            edit_button_url = None
            if self.object == self.request.user:
                edit_button_url = reverse('profiles.user_self')
            elif self.has_org_perm('profiles.profile_user_update'):
                edit_button_url = reverse('profiles.user_update', args=[self.object.pk])
            context['edit_button_url'] = edit_button_url
            return context

        def get_type(self, obj):
            if obj.is_admin_for(self.request.org):
                return _("Administrator")
            else:
                return _("Supervisor")

    class List(OrgPermsMixin, UserFieldsMixin, SmartListView):
        default_order = ('profile__full_name',)
        fields = ('full_name', 'email', 'regions')
        permission = 'profiles.profile_user_list'
        select_related = ('profile',)
        title = _("Supervisors")

        def derive_queryset(self, **kwargs):
            # Restrict to active editors of the requesting org.
            qs = super(UserCRUDL.List, self).derive_queryset(**kwargs)
            qs = qs.filter(pk__in=self.request.org.get_org_editors(), is_active=True)
            return qs
class ManageUserCRUDL(SmartCRUDL):
    """
    CRUDL used only by superusers to manage users outside the context of an organization
    """
    model = User
    model_name = 'Admin'
    path = 'admin'
    actions = ('create', 'update', 'list')

    class Create(OrgPermsMixin, UserFormMixin, SmartCreateView):
        fields = ('full_name', 'email', 'password', 'confirm_password', 'change_password')
        form_class = UserForm

        def save(self, obj):
            # Org is None: these accounts are not tied to an organization.
            full_name = self.form.cleaned_data['full_name']
            password = self.form.cleaned_data['password']
            change_password = self.form.cleaned_data['change_password']
            self.object = User.create(None, full_name, obj.email, password, change_password)

    class Update(OrgPermsMixin, UserFormMixin, SmartUpdateView):
        fields = ('full_name', 'email', 'new_password', 'confirm_password', 'is_active')
        form_class = UserForm

    class List(UserFieldsMixin, SmartListView):
        fields = ('full_name', 'email', 'orgs')
        default_order = ('profile__full_name',)
        select_related = ('profile',)

        def derive_queryset(self, **kwargs):
            # Only active users that actually have a profile.
            qs = super(ManageUserCRUDL.List, self).derive_queryset(**kwargs)
            qs = qs.filter(is_active=True).exclude(profile=None)
            return qs

        def get_orgs(self, obj):
            # Union of the orgs where the user is an administrator or editor.
            orgs = set(obj.org_admins.all()) | set(obj.org_editors.all())
            return ", ".join([force_text(o) for o in orgs])

        def lookup_field_link(self, context, field, obj):
            return reverse('profiles.admin_update', args=[obj.pk])
| bsd-3-clause |
jimi-c/ansible | lib/ansible/modules/windows/win_service.py | 8 | 8121 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Chris Hoffman <choffman@chathamfinancial.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Interface stability/support declaration consumed by Ansible's tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_service
version_added: "1.7"
short_description: Manage and query Windows services
description:
- Manage and query Windows services.
- For non-Windows targets, use the M(service) module instead.
options:
dependencies:
description:
- A list of service dependencies to set for this particular service.
- This should be a list of service names and not the display name of the
service.
- This works by C(dependency_action) to either add/remove or set the
services in this list.
type: list
version_added: "2.3"
dependency_action:
description:
- Used in conjunction with C(dependencies) to either add the dependencies to
  the existing service dependencies.
- Remove the dependencies from the existing dependencies.
- Set the dependencies to only the values in the list replacing the
existing dependencies.
choices: [ add, remove, set ]
default: set
version_added: "2.3"
desktop_interact:
description:
- Whether to allow the service user to interact with the desktop.
- This should only be set to C(yes) when using the LocalSystem username.
type: bool
default: 'no'
version_added: "2.3"
description:
description:
- The description to set for the service.
version_added: "2.3"
display_name:
description:
- The display name to set for the service.
version_added: "2.3"
force_dependent_services:
description:
- If C(yes), stopping or restarting a service with dependent services will
force the dependent services to stop or restart also.
- If C(no), stopping or restarting a service with dependent services may
fail.
type: bool
default: 'no'
version_added: "2.3"
name:
description:
- Name of the service.
- If only the name parameter is specified, the module will report
on whether the service exists or not without making any changes.
required: yes
path:
description:
- The path to the executable to set for the service.
version_added: "2.3"
password:
description:
- The password to set the service to start as.
- This and the C(username) argument must be supplied together.
- If specifying LocalSystem, NetworkService or LocalService this field
must be an empty string and not null.
version_added: "2.3"
start_mode:
description:
- Set the startup type for the service.
- C(delayed) added in Ansible 2.3
choices: [ auto, delayed, disabled, manual ]
state:
description:
- C(started)/C(stopped)/C(absent)/C(pause) are idempotent actions that will not run
commands unless necessary.
- C(restarted) will always bounce the service.
- C(absent) added in Ansible 2.3
- C(pause) was added in Ansible 2.4
- Only services that support the paused state can be paused, you can
check the return value C(can_pause_and_continue).
- You can only pause a service that is already started.
choices: [ absent, paused, started, stopped, restarted ]
username:
description:
- The username to set the service to start as.
- This and the C(password) argument must be supplied together when using
a local or domain account.
- Set to C(LocalSystem) to use the SYSTEM account.
version_added: "2.3"
notes:
- For non-Windows targets, use the M(service) module instead.
author:
- Chris Hoffman (@chrishoffman)
'''
EXAMPLES = r'''
- name: Restart a service
win_service:
name: spooler
state: restarted
- name: Set service startup mode to auto and ensure it is started
win_service:
name: spooler
start_mode: auto
state: started
- name: Pause a service
win_service:
name: Netlogon
state: paused
# a new service will also default to the following values:
# - username: LocalSystem
# - state: stopped
# - start_mode: auto
- name: Create a new service
win_service:
name: service name
path: C:\temp\test.exe
- name: Create a new service with extra details
win_service:
name: service name
path: C:\temp\test.exe
display_name: Service Name
description: A test service description
- name: Remove a service
win_service:
name: service name
state: absent
- name: Check if a service is installed
win_service:
name: service name
register: service_info
- name: Set the log on user to a domain account
win_service:
name: service name
state: restarted
username: DOMAIN\User
password: Password
- name: Set the log on user to a local account
win_service:
name: service name
state: restarted
username: .\Administrator
password: Password
- name: Set the log on user to Local System
win_service:
name: service name
state: restarted
username: LocalSystem
password: ""
- name: Set the log on user to Local System and allow it to interact with the desktop
win_service:
name: service name
state: restarted
username: LocalSystem
password: ""
desktop_interact: yes
- name: Set the log on user to Network Service
win_service:
name: service name
state: restarted
username: NT AUTHORITY\NetworkService
password: ""
- name: Set the log on user to Local Service
win_service:
name: service name
state: restarted
username: NT AUTHORITY\LocalService
password: ""
- name: Set dependencies to ones only in the list
win_service:
name: service name
dependencies: ['service1', 'service2']
- name: Add dependencies to existing dependencies
win_service:
name: service name
dependencies: ['service1', 'service2']
dependency_action: add
- name: Remove dependencies from existing dependencies
win_service:
name: service name
dependencies: ['service1', 'service2']
dependency_action: remove
'''
RETURN = r'''
exists:
description: Whether the service exists or not.
returned: success
type: boolean
sample: true
name:
description: The service name or id of the service.
returned: success and service exists
type: string
sample: CoreMessagingRegistrar
display_name:
description: The display name of the installed service.
returned: success and service exists
type: string
sample: CoreMessaging
state:
description: The current running status of the service.
returned: success and service exists
type: string
sample: stopped
start_mode:
description: The startup type of the service.
returned: success and service exists
type: string
sample: manual
path:
description: The path to the service executable.
returned: success and service exists
type: string
sample: C:\Windows\system32\svchost.exe -k LocalServiceNoNetwork
can_pause_and_continue:
description: Whether the service can be paused and unpaused.
returned: success and service exists
type: bool
sample: True
description:
description: The description of the service.
returned: success and service exists
type: string
sample: Manages communication between system components.
username:
description: The username that runs the service.
returned: success and service exists
type: string
sample: LocalSystem
desktop_interact:
description: Whether the current user is allowed to interact with the desktop.
returned: success and service exists
type: boolean
sample: False
dependencies:
description: A list of services that this service depends on.
returned: success and service exists
type: list
sample: ['service1', 'service2']
depended_by:
description: A list of services that depend on this service.
returned: success and service exists
type: list
sample: ['service1']
'''
| gpl-3.0 |
fivejjs/pyspider | pyspider/message_queue/redis_queue.py | 34 | 3013 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<roy@binux.me>
# http://binux.me
# Created on 2015-04-27 22:48:04
import time
import redis
import umsgpack
from six.moves import queue as BaseQueue
class RedisQueue(object):
    """
    A Queue like message built over redis

    Mirrors the queue.Queue interface (qsize/empty/full, put/get and their
    *_nowait variants) on top of a redis list; payloads are serialized with
    umsgpack.
    """
    Empty = BaseQueue.Empty
    Full = BaseQueue.Full
    max_timeout = 0.3  # upper bound for one blocking-retry sleep, in seconds

    def __init__(self, name, host='localhost', port=6379, db=0,
                 maxsize=0, lazy_limit=True):
        """
        Constructor for RedisQueue

        maxsize:    an integer that sets the upperbound limit on the number of
                    items that can be placed in the queue.
        lazy_limit: redis queue is shared via instance, a lazy size limit is used
                    for better performance.
        """
        self.name = name
        self.redis = redis.StrictRedis(host=host, port=port, db=db)
        self.maxsize = maxsize
        self.lazy_limit = lazy_limit
        self.last_qsize = 0

    def qsize(self):
        # Cache the length; put_nowait()'s lazy limit check reuses it to
        # avoid an extra round-trip to redis.
        self.last_qsize = self.redis.llen(self.name)
        return self.last_qsize

    def empty(self):
        if self.qsize() == 0:
            return True
        else:
            return False

    def full(self):
        if self.maxsize and self.qsize() >= self.maxsize:
            return True
        else:
            return False

    def put_nowait(self, obj):
        if self.lazy_limit and self.last_qsize < self.maxsize:
            # Trust the cached size and skip the authoritative full() check.
            pass
        elif self.full():
            raise self.Full
        # rpush returns the new list length, refreshing the cached size.
        self.last_qsize = self.redis.rpush(self.name, umsgpack.packb(obj))
        return True

    def put(self, obj, block=True, timeout=None):
        """Put obj on the queue; raises Full when non-blocking or timed out."""
        if not block:
            # BUG FIX: this path previously called self.put_nowait() without
            # the object, which always raised TypeError.
            return self.put_nowait(obj)

        start_time = time.time()
        while True:
            try:
                return self.put_nowait(obj)
            except self.Full:
                if timeout:
                    lasted = time.time() - start_time
                    if timeout > lasted:
                        time.sleep(min(self.max_timeout, timeout - lasted))
                    else:
                        raise
                else:
                    time.sleep(self.max_timeout)

    def get_nowait(self):
        ret = self.redis.lpop(self.name)
        if ret is None:
            raise self.Empty
        return umsgpack.unpackb(ret)

    def get(self, block=True, timeout=None):
        """Remove and return an item; raises Empty when non-blocking or timed out."""
        if not block:
            return self.get_nowait()

        start_time = time.time()
        while True:
            try:
                return self.get_nowait()
            except self.Empty:
                if timeout:
                    lasted = time.time() - start_time
                    if timeout > lasted:
                        time.sleep(min(self.max_timeout, timeout - lasted))
                    else:
                        raise
                else:
                    time.sleep(self.max_timeout)


Queue = RedisQueue  # backwards-compatible alias
| apache-2.0 |
leonardowolf/bookfree | flask/lib/python2.7/site-packages/pip/_vendor/colorama/win32.py | 535 | 5365 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
# from winbase.h
# Standard device handle IDs, from winbase.h.
STDOUT = -11
STDERR = -12
try:
    import ctypes
    from ctypes import LibraryLoader
    windll = LibraryLoader(ctypes.WinDLL)
    from ctypes import wintypes
except (AttributeError, ImportError):
    # Not on Windows (or ctypes has no WinDLL): expose no-op stubs so this
    # module can be imported unconditionally on any platform.
    windll = None
    SetConsoleTextAttribute = lambda *_: None
    winapi_test = lambda *_: None
else:
    from ctypes import byref, Structure, c_char, POINTER
    COORD = wintypes._COORD
    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct in wincon.h."""
        _fields_ = [
            ("dwSize", COORD),
            ("dwCursorPosition", COORD),
            ("wAttributes", wintypes.WORD),
            ("srWindow", wintypes.SMALL_RECT),
            ("dwMaximumWindowSize", COORD),
        ]
        def __str__(self):
            # Compact debug rendering of every field of the struct.
            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
                self.dwSize.Y, self.dwSize.X
                , self.dwCursorPosition.Y, self.dwCursorPosition.X
                , self.wAttributes
                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
            )
    # Raw kernel32 bindings.  argtypes/restype are declared explicitly so
    # ctypes validates calls instead of guessing the C signature.
    _GetStdHandle = windll.kernel32.GetStdHandle
    _GetStdHandle.argtypes = [
        wintypes.DWORD,
    ]
    _GetStdHandle.restype = wintypes.HANDLE
    _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
    _GetConsoleScreenBufferInfo.argtypes = [
        wintypes.HANDLE,
        POINTER(CONSOLE_SCREEN_BUFFER_INFO),
    ]
    _GetConsoleScreenBufferInfo.restype = wintypes.BOOL
    _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
    _SetConsoleTextAttribute.argtypes = [
        wintypes.HANDLE,
        wintypes.WORD,
    ]
    _SetConsoleTextAttribute.restype = wintypes.BOOL
    _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
    _SetConsoleCursorPosition.argtypes = [
        wintypes.HANDLE,
        COORD,
    ]
    _SetConsoleCursorPosition.restype = wintypes.BOOL
    _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
    _FillConsoleOutputCharacterA.argtypes = [
        wintypes.HANDLE,
        c_char,
        wintypes.DWORD,
        COORD,
        POINTER(wintypes.DWORD),
    ]
    _FillConsoleOutputCharacterA.restype = wintypes.BOOL
    _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
    _FillConsoleOutputAttribute.argtypes = [
        wintypes.HANDLE,
        wintypes.WORD,
        wintypes.DWORD,
        COORD,
        POINTER(wintypes.DWORD),
    ]
    _FillConsoleOutputAttribute.restype = wintypes.BOOL
    # NOTE(review): despite the "W" in the Python name, this binds the ANSI
    # SetConsoleTitleA with an LPCSTR argument, so titles presumably must be
    # byte strings — confirm whether the wide (W) variant was intended.
    _SetConsoleTitleW = windll.kernel32.SetConsoleTitleA
    _SetConsoleTitleW.argtypes = [
        wintypes.LPCSTR
    ]
    _SetConsoleTitleW.restype = wintypes.BOOL
    # Resolve the two standard console handles once at import time.
    handles = {
        STDOUT: _GetStdHandle(STDOUT),
        STDERR: _GetStdHandle(STDERR),
    }
    def winapi_test():
        # Returns True when console queries succeed, i.e. stdout is a real
        # Windows console rather than a pipe or redirected file.
        handle = handles[STDOUT]
        csbi = CONSOLE_SCREEN_BUFFER_INFO()
        success = _GetConsoleScreenBufferInfo(
            handle, byref(csbi))
        return bool(success)
    def GetConsoleScreenBufferInfo(stream_id=STDOUT):
        # Returns the populated CONSOLE_SCREEN_BUFFER_INFO struct; note the
        # success flag of the underlying call is not checked here.
        handle = handles[stream_id]
        csbi = CONSOLE_SCREEN_BUFFER_INFO()
        success = _GetConsoleScreenBufferInfo(
            handle, byref(csbi))
        return csbi
    def SetConsoleTextAttribute(stream_id, attrs):
        # Sets the current text attributes (colors) for the given stream.
        handle = handles[stream_id]
        return _SetConsoleTextAttribute(handle, attrs)
    def SetConsoleCursorPosition(stream_id, position, adjust=True):
        position = COORD(*position)
        # If the position is out of range, do nothing.
        if position.Y <= 0 or position.X <= 0:
            return
        # Adjust for Windows' SetConsoleCursorPosition:
        # 1. being 0-based, while ANSI is 1-based.
        # 2. expecting (x,y), while ANSI uses (y,x).
        adjusted_position = COORD(position.Y - 1, position.X - 1)
        if adjust:
            # Adjust for viewport's scroll position
            sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
            adjusted_position.Y += sr.Top
            adjusted_position.X += sr.Left
        # Resume normal processing
        handle = handles[stream_id]
        return _SetConsoleCursorPosition(handle, adjusted_position)
    def FillConsoleOutputCharacter(stream_id, char, length, start):
        # Writes `length` copies of `char` starting at COORD `start`;
        # returns the number of cells actually written.
        handle = handles[stream_id]
        char = c_char(char.encode())
        length = wintypes.DWORD(length)
        num_written = wintypes.DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        success = _FillConsoleOutputCharacterA(
            handle, char, length, start, byref(num_written))
        return num_written.value
    def FillConsoleOutputAttribute(stream_id, attr, length, start):
        ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
        handle = handles[stream_id]
        attribute = wintypes.WORD(attr)
        length = wintypes.DWORD(length)
        num_written = wintypes.DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        return _FillConsoleOutputAttribute(
            handle, attribute, length, start, byref(num_written))
    def SetConsoleTitle(title):
        # NOTE(review): the bound function is the ANSI variant, so `title`
        # presumably needs to be a byte string here — verify against callers.
        return _SetConsoleTitleW(title)
| mit |
czgu/metaHack | env/lib/python2.7/site-packages/django/db/migrations/executor.py | 34 | 7361 | from __future__ import unicode_literals
from django.db import migrations
from django.apps.registry import apps as global_apps
from .loader import MigrationLoader
from .recorder import MigrationRecorder
class MigrationExecutor(object):
    """
    End-to-end migration execution - loads migrations, and runs them
    up or down to a specified set of targets.
    """
    def __init__(self, connection, progress_callback=None):
        # connection: the DB connection migrations are applied to.
        # progress_callback: optional callable invoked as
        # callback(action, migration, fake) to report progress.
        self.connection = connection
        self.loader = MigrationLoader(self.connection)
        self.recorder = MigrationRecorder(self.connection)
        self.progress_callback = progress_callback
    def migration_plan(self, targets):
        """
        Given a set of targets, returns a list of (Migration instance, backwards?).

        targets is an iterable of (app_label, migration_name) pairs;
        (app_label, None) means "unmigrate the whole app".
        """
        plan = []
        # Work on a copy so the loader's applied set is not mutated while
        # the plan is simulated.
        applied = set(self.loader.applied_migrations)
        for target in targets:
            # If the target is (app_label, None), that means unmigrate everything
            if target[1] is None:
                for root in self.loader.graph.root_nodes():
                    if root[0] == target[0]:
                        for migration in self.loader.graph.backwards_plan(root):
                            if migration in applied:
                                plan.append((self.loader.graph.nodes[migration], True))
                                applied.remove(migration)
            # If the migration is already applied, do backwards mode,
            # otherwise do forwards mode.
            elif target in applied:
                # Don't migrate backwards all the way to the target node (that
                # may roll back dependencies in other apps that don't need to
                # be rolled back); instead roll back through target's immediate
                # child(ren) in the same app, and no further.
                next_in_app = sorted(
                    n for n in
                    self.loader.graph.dependents.get(target, set())
                    if n[0] == target[0]
                )
                for node in next_in_app:
                    for migration in self.loader.graph.backwards_plan(node):
                        if migration in applied:
                            plan.append((self.loader.graph.nodes[migration], True))
                            applied.remove(migration)
            else:
                for migration in self.loader.graph.forwards_plan(target):
                    if migration not in applied:
                        plan.append((self.loader.graph.nodes[migration], False))
                        applied.add(migration)
        return plan
    def migrate(self, targets, plan=None, fake=False):
        """
        Migrates the database up to the given targets.

        When fake is True, migrations are only recorded as (un)applied
        without actually running their operations.
        """
        if plan is None:
            plan = self.migration_plan(targets)
        for migration, backwards in plan:
            if not backwards:
                self.apply_migration(migration, fake=fake)
            else:
                self.unapply_migration(migration, fake=fake)
    def collect_sql(self, plan):
        """
        Takes a migration plan and returns a list of collected SQL
        statements that represent the best-efforts version of that plan.
        """
        statements = []
        for migration, backwards in plan:
            # collect_sql=True makes the schema editor record SQL instead of
            # executing it.
            with self.connection.schema_editor(collect_sql=True) as schema_editor:
                project_state = self.loader.project_state((migration.app_label, migration.name), at_end=False)
                if not backwards:
                    migration.apply(project_state, schema_editor, collect_sql=True)
                else:
                    migration.unapply(project_state, schema_editor, collect_sql=True)
            statements.extend(schema_editor.collected_sql)
        return statements
    def apply_migration(self, migration, fake=False):
        """
        Runs a migration forwards.
        """
        if self.progress_callback:
            self.progress_callback("apply_start", migration, fake)
        if not fake:
            # Test to see if this is an already-applied initial migration
            if self.detect_soft_applied(migration):
                fake = True
            else:
                # Alright, do it normally
                with self.connection.schema_editor() as schema_editor:
                    project_state = self.loader.project_state((migration.app_label, migration.name), at_end=False)
                    migration.apply(project_state, schema_editor)
        # For replacement migrations, record individual statuses
        if migration.replaces:
            for app_label, name in migration.replaces:
                self.recorder.record_applied(app_label, name)
        else:
            self.recorder.record_applied(migration.app_label, migration.name)
        # Report progress
        if self.progress_callback:
            self.progress_callback("apply_success", migration, fake)
    def unapply_migration(self, migration, fake=False):
        """
        Runs a migration backwards.
        """
        if self.progress_callback:
            self.progress_callback("unapply_start", migration, fake)
        if not fake:
            with self.connection.schema_editor() as schema_editor:
                project_state = self.loader.project_state((migration.app_label, migration.name), at_end=False)
                migration.unapply(project_state, schema_editor)
        # For replacement migrations, record individual statuses
        if migration.replaces:
            for app_label, name in migration.replaces:
                self.recorder.record_unapplied(app_label, name)
        else:
            self.recorder.record_unapplied(migration.app_label, migration.name)
        # Report progress
        if self.progress_callback:
            self.progress_callback("unapply_success", migration, fake)
    def detect_soft_applied(self, migration):
        """
        Tests whether a migration has been implicitly applied - that the
        tables it would create exist. This is intended only for use
        on initial migrations (as it only looks for CreateModel).
        """
        project_state = self.loader.project_state((migration.app_label, migration.name), at_end=True)
        apps = project_state.render()
        found_create_migration = False
        # Bail if the migration isn't the first one in its app
        if [name for app, name in migration.dependencies if app == migration.app_label]:
            return False
        # Make sure all create model are done
        for operation in migration.operations:
            if isinstance(operation, migrations.CreateModel):
                model = apps.get_model(migration.app_label, operation.name)
                if model._meta.swapped:
                    # We have to fetch the model to test with from the
                    # main app cache, as it's not a direct dependency.
                    model = global_apps.get_model(model._meta.swapped)
                if model._meta.db_table not in self.connection.introspection.get_table_list(self.connection.cursor()):
                    return False
                found_create_migration = True
        # If we get this far and we found at least one CreateModel migration,
        # the migration is considered implicitly applied.
        return found_create_migration
| apache-2.0 |
zdw/xos | xos/tosca/flavorselect.py | 4 | 1259 | import os
import sys
from core.models import Slice,Instance,User,Flavor,Node,Image
class XOSFlavorSelector(object):
    """Pick the smallest flavor satisfying the requested resources.

    mem_size and disk_size accept either plain integers (as int or string)
    or strings carrying a "MB"/"GB" suffix, e.g. "512 MB" or "2GB".
    """

    def __init__(self, user, mem_size=None, num_cpus=None, disk_size=None):
        self.user = user
        # BUG FIX: the declared defaults (None) used to crash on int(None) /
        # `"GB" in None`.  Treat an omitted value as "no requirement" (0) so
        # the documented defaults are actually usable.
        self.mem_size = self.get_mb(mem_size) if mem_size is not None else 0
        self.num_cpus = int(num_cpus) if num_cpus is not None else 0
        self.disk_size = self.get_gb(disk_size) if disk_size is not None else 0

    def get_gb(self, s):
        """Parse s into whole gigabytes (MB values floor-divide to GB)."""
        if "GB" in s:
            return int(s.split("GB")[0].strip())
        if "MB" in s:
            # Floor division keeps the historical (Python 2) integer result.
            return int(s.split("MB")[0].strip()) // 1024
        return int(s)

    def get_mb(self, s):
        """Parse s into megabytes."""
        if "GB" in s:
            return int(s.split("GB")[0].strip()) * 1024
        if "MB" in s:
            return int(s.split("MB")[0].strip())
        return int(s)

    def get_flavor(self):
        """Return the smallest Flavor whose limits cover the request."""
        flavor = "m1.tiny"
        if (self.mem_size > 512) or (self.disk_size > 1):
            flavor = "m1.small"
        if (self.mem_size > 2048) or (self.disk_size > 20) or (self.num_cpus > 1):
            flavor = "m1.medium"
        if (self.mem_size > 4096) or (self.disk_size > 40) or (self.num_cpus > 2):
            flavor = "m1.large"
        if (self.mem_size > 8192) or (self.disk_size > 80) or (self.num_cpus > 4):
            flavor = "m1.xlarge"
        return Flavor.objects.get(name=flavor)
| apache-2.0 |
dslomov/intellij-community | python/lib/Lib/site-packages/django/contrib/staticfiles/views.py | 71 | 6101 | """
Views and functions for serving static files. These are only to be used during
development, and SHOULD NOT be used in a production setting.
"""
import mimetypes
import os
import posixpath
import re
import stat
import urllib
from email.Utils import parsedate_tz, mktime_tz
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseNotModified
from django.template import loader, Template, Context, TemplateDoesNotExist
from django.utils.http import http_date
from django.contrib.staticfiles import finders, utils
def serve(request, path, document_root=None, show_indexes=False, insecure=False):
    """
    Serve static files below a given point in the directory structure or
    from locations inferred from the static files finders.

    To use, put a URL pattern such as::

        (r'^(?P<path>.*)$', 'django.contrib.staticfiles.views.serve')

    in your URLconf.

    If you provide the ``document_root`` parameter, the file won't be looked
    up with the staticfiles finders, but in the given filesystem path, e.g.::

        (r'^(?P<path>.*)$', 'django.contrib.staticfiles.views.serve', {'document_root' : '/path/to/my/files/'})

    You may also set ``show_indexes`` to ``True`` if you'd like to serve a
    basic index of the directory. This index view will use the
    template hardcoded below, but if you'd like to override it, you can create
    a template called ``static/directory_index.html``.
    """
    if not settings.DEBUG and not insecure:
        # Refuse to run in production unless explicitly overridden: this view
        # is slow and not hardened for real traffic.
        raise ImproperlyConfigured("The view to serve static files can only "
                                   "be used if the DEBUG setting is True or "
                                   "the --insecure option of 'runserver' is "
                                   "used")
    if not document_root:
        # No explicit root given: locate the file via the staticfiles finders
        # and derive document_root from where it was found.
        absolute_path = finders.find(path)
        if not absolute_path:
            raise Http404('"%s" could not be found' % path)
        document_root, path = os.path.split(absolute_path)
    # Clean up given path to only allow serving files below document_root.
    path = posixpath.normpath(urllib.unquote(path))
    path = path.lstrip('/')
    newpath = ''
    for part in path.split('/'):
        if not part:
            # Strip empty path components.
            continue
        drive, part = os.path.splitdrive(part)
        head, part = os.path.split(part)
        if part in (os.curdir, os.pardir):
            # Strip '.' and '..' in path (directory-traversal defense).
            continue
        newpath = os.path.join(newpath, part).replace('\\', '/')
    if newpath and path != newpath:
        # Redirect so the browser ends up on the sanitized, canonical path.
        return HttpResponseRedirect(newpath)
    fullpath = os.path.join(document_root, newpath)
    if os.path.isdir(fullpath):
        if show_indexes:
            return directory_index(newpath, fullpath)
        raise Http404("Directory indexes are not allowed here.")
    if not os.path.exists(fullpath):
        raise Http404('"%s" does not exist' % fullpath)
    # Respect the If-Modified-Since header.
    statobj = os.stat(fullpath)
    mimetype, encoding = mimetypes.guess_type(fullpath)
    mimetype = mimetype or 'application/octet-stream'
    if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
                              statobj[stat.ST_MTIME], statobj[stat.ST_SIZE]):
        return HttpResponseNotModified(mimetype=mimetype)
    # Whole file is read into memory; acceptable for the dev-only use case.
    contents = open(fullpath, 'rb').read()
    response = HttpResponse(contents, mimetype=mimetype)
    response["Last-Modified"] = http_date(statobj[stat.ST_MTIME])
    response["Content-Length"] = len(contents)
    if encoding:
        response["Content-Encoding"] = encoding
    return response
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>Index of {{ directory }}</title>
</head>
<body>
<h1>Index of {{ directory }}</h1>
<ul>
{% ifnotequal directory "/" %}
<li><a href="../">../</a></li>
{% endifnotequal %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
def directory_index(path, fullpath):
    """Render a basic HTML listing of the directory at ``fullpath``."""
    try:
        template = loader.select_template(['static/directory_index.html',
                                           'static/directory_index'])
    except TemplateDoesNotExist:
        template = Template(DEFAULT_DIRECTORY_INDEX_TEMPLATE,
                            name='Default directory index template')
    entries = []
    for entry in os.listdir(fullpath):
        if entry.startswith('.'):
            # Hidden files/directories are never listed.
            continue
        if os.path.isdir(os.path.join(fullpath, entry)):
            entry += '/'
        entries.append(entry)
    context = Context({
        'directory': path + '/',
        'file_list': entries,
    })
    return HttpResponse(template.render(context))
def was_modified_since(header=None, mtime=0, size=0):
    """
    Was something modified since the user last downloaded it?

    header
        The value of the If-Modified-Since header; None is treated as
        "unknown" and therefore modified.

    mtime
        The modification time (epoch seconds) of the item in question.

    size
        The size in bytes of the item in question.
    """
    try:
        if header is None:
            raise ValueError
        match = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
                         re.IGNORECASE)
        parsed = parsedate_tz(match.group(1))
        if parsed is None:
            # Date portion did not parse: assume modified.
            raise ValueError
        header_mtime = mktime_tz(parsed)
        stated_length = match.group(3)
        if stated_length and int(stated_length) != size:
            # Client's recorded length disagrees with ours: re-send.
            raise ValueError
        if mtime > header_mtime:
            raise ValueError
    except (AttributeError, ValueError, OverflowError):
        # AttributeError covers a non-matching header (match is None).
        return True
    return False
| apache-2.0 |
lvduit/islab-portfolio-by-ghost | node_modules/testem/node_modules/tap/node_modules/yamlish/yamlish-py/yamlish.py | 132 | 7602 | # -*- coding: utf-8 -*-
#Copyright (C) 2012 Red Hat, Inc.
#
#Permission is hereby granted, free of charge, to any person obtaining
#a copy of this software and associated documentation files (the
#"Software"), to deal in the Software without restriction, including
#without limitation the rights to use, copy, modify, merge, publish,
#distribute, sublicense, and/or sell copies of the Software, and to
#permit persons to whom the Software is furnished to do so, subject to
#the following conditions:
#
#The above copyright notice and this permission notice shall be included
#in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
#OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
#CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
#TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Easy YAML serialisation compatible with TAP format.
Port of `Data::YAML Perl module <https://github.com/AndyA/Data--YAML>`,
satisfying all its tests, intended to be used for support of
`TAP <http://testanything.org/>` data format. Port of the original
documentation follows.
The syntax accepted by this module is a subset of `YAML <http://yaml.org>`.
===========
YAML syntax
===========
Although YAML appears to be a simple language, the entire YAML
specification is huge. This module implements a small subset of the
complete syntax trading completeness for compactness and simplicity.
This restricted syntax is known (to me at least) as 'YAMLish'.
These examples demonstrates the full range of supported syntax.
All YAML documents must begin with '---' and end with a line
containing '...'.
::
--- Simple scalar
...
Unprintable characters are represented using standard escapes in double
quoted strings.
::
--- "\\t\\x01\\x02\\n"
...
Array and hashes are represented thusly
::
---
- "This"
- "is"
- "an"
- "array"
...
---
This: is
a: hash
...
Structures may nest arbitrarily
::
---
-
name: 'Hash one'
value: 1
-
name: 'Hash two'
value: 2
...
Undef is a tilde
::
--- ~
...
====
Uses
====
This module may be used any time you need to freeze and thaw Python
data structures into a human readable format. The output from
`yamlish.dump()` should be readable by any YAML parser.
The original Perl module was originally written to allow machine-readable
diagnostic information to be passed from test scripts to
the Perl module `TAP::Harness`. That means that if you're writing
a testing system that needs to output TAP version 13 or later
syntax you might find this module useful.
Read more about TAP and YAMLish on `<http://testanything.org/wiki>`
"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
import yaml
import sys
class NullHandler(logging.Handler):
    """Logging handler that silently discards every record.

    Local stand-in for logging.NullHandler so library users without
    logging configured do not see "no handler" warnings.
    """

    def emit(self, record):
        # Deliberately a no-op.
        pass
# Module-level logger; a no-op handler is attached so importing applications
# without logging configured do not emit "no handler" warnings.
log = logging.getLogger("yamlish")
log.addHandler(NullHandler())
#log.setLevel(logging.DEBUG)
__docformat__ = 'reStructuredText'
__version__ = "0.10"
__author__ = u"Matěj Cepl <mcepl_at_redhat_dot_com>"
# True when running under Python 3.
py3k = sys.version_info[0] > 2
# Python 3 has no `basestring`; provide a tuple usable with isinstance().
try:
    isinstance('a', basestring)
except NameError:
    basestring = (bytes, str)
class _YamlishLoader(yaml.loader.SafeLoader):
    """
    SafeLoader without datetime resolving.

    YAMLish returns timestamps as unchanged strings.
    """

    def __init__(self, stream):
        yaml.loader.SafeLoader.__init__(self, stream)

    @classmethod
    def remove_implicit_resolver(cls, tag):
        """
        Remove an implicit resolver from a Loader class identified by its tag.
        """
        if not 'yaml_implicit_resolvers' in cls.__dict__:
            # Copy so the parent class's resolver table stays untouched.
            cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
        # BUG FIX: the previous code deleted dict keys and list entries while
        # iterating over them, which raises RuntimeError ("dictionary changed
        # size during iteration") on Python 3 and could skip entries or raise
        # IndexError when a tag matched more than once.  Rebuild instead.
        for key in list(cls.yaml_implicit_resolvers):
            remaining = [(resolver_tag, regexp)
                         for resolver_tag, regexp in cls.yaml_implicit_resolvers[key]
                         if resolver_tag != tag]
            if remaining:
                cls.yaml_implicit_resolvers[key] = remaining
            else:
                del cls.yaml_implicit_resolvers[key]


_YamlishLoader.remove_implicit_resolver(u'tag:yaml.org,2002:timestamp')
class _YamlishDumper(yaml.dumper.SafeDumper):
    """SafeDumper subclass serving as a registration target for the YAMLish
    representers, so the stock SafeDumper is left untouched."""
def str_representer_compact_multiline(dumper, data):
    """Represent a string, using literal block style ('|') when multi-line."""
    if not py3k and isinstance(data, str):
        # assumes all your strings are UTF-8 encoded
        data = data.decode('utf-8')
    block_style = '|' if '\n' in data else None
    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, block_style)
# Register the multiline-aware string representer for every string type of
# the running interpreter (bytes/str on Python 3, str/unicode on Python 2).
if py3k:
    yaml.add_representer(bytes, str_representer_compact_multiline,
                         Dumper=_YamlishDumper)
    yaml.add_representer(str, str_representer_compact_multiline,
                         Dumper=_YamlishDumper)
else:
    yaml.add_representer(str, str_representer_compact_multiline,
                         Dumper=_YamlishDumper)
    yaml.add_representer(unicode, str_representer_compact_multiline,
                         Dumper=_YamlishDumper)
def load(source, ignore_wrong_characters=False):
    """
    Return object loaded from a YAML document in source.

    Source is either a representation of the YAML document itself
    or any document providing an iterator (that includes file, list, and
    many others).

    ignore_wrong_characters: when True, undecodable bytes in iterated
    lines are dropped instead of raising UnicodeDecodeError.
    """
    out = None
    log.debug("inobj: (%s)\n%s", type(source), source)
    log.debug('before ignore_wrong_characters = %s', ignore_wrong_characters)
    if isinstance(source, basestring):
        # Whole document given as one string: parse directly with the
        # timestamp-free YAMLish loader.
        out = yaml.load(source, Loader=_YamlishLoader)
        log.debug("out (string) = %s", out)
    elif hasattr(source, "__iter__"):
        # Line-iterable input (file, list, ...): join the lines into a
        # single unicode document, then re-enter with the string above.
        inobj = u""
        for line in source:
            try:
                if not py3k or isinstance(line, bytes):
                    # assumes byte input is UTF-8 encoded
                    line = line.decode('utf8')
                logging.debug('inobj, line ... %s, %s',
                              type(inobj), type(line))
                inobj += line + u'\n'
            except UnicodeDecodeError:
                log.debug('in ignore_wrong_characters = %s',
                          ignore_wrong_characters)
                if ignore_wrong_characters:
                    inobj += line.decode('utf8', 'ignore') + '\n'
                else:
                    raise
        log.debug('restarting load with inobj as string')
        out = load(inobj, ignore_wrong_characters)
        log.debug("out (iter) = %s", out)
        log.debug("out (iter) = type %s", type(out))
    return out
def dump(source, destination):
    """
    Store source in destination file.

    Destination is either a file object or a string with a filename.
    """
    if isinstance(destination, basestring):
        # Given a filename: open it ourselves and recurse with the handle.
        with open(destination, "w") as outfile:
            dump(source, outfile)
    elif hasattr(destination, "fileno"):
        yaml.dump(source, destination, encoding="utf-8",
                  default_flow_style=False, canonical=False,
                  Dumper=_YamlishDumper)
    else:
        raise NameError
def dumps(source):
    """
    Return YAMLish string from given source.
    """
    options = dict(encoding=None,
                   explicit_start=True, explicit_end=True,
                   default_flow_style=False, default_style=False,
                   canonical=False, Dumper=_YamlishDumper)
    return yaml.dump(source, **options)
| mit |
koehlermichael/olympia | apps/users/tests/test_forms.py | 12 | 26892 | import hashlib
from datetime import datetime
from django.contrib.auth.tokens import default_token_generator
from django.core import mail
from django.utils.http import urlsafe_base64_encode
from django.conf import settings
from mock import Mock, patch
from nose.tools import eq_
from pyquery import PyQuery as pq
import amo
import amo.tests
from amo.helpers import urlparams
from amo.urlresolvers import reverse
from amo.tests.test_helpers import get_uploaded_file
from users.models import BlacklistedPassword, UserProfile
from users.forms import AuthenticationForm, UserEditForm
class UserFormBase(amo.tests.TestCase):
    """Shared fixtures and setup for the user-form tests below."""
    fixtures = ['users/test_backends']
    def setUp(self):
        super(UserFormBase, self).setUp()
        # Canonical test user, plus the uid/token pair required to build
        # password-reset URLs.
        self.user = self.user_profile = UserProfile.objects.get(id='4043307')
        self.uidb64 = urlsafe_base64_encode(str(self.user.id))
        self.token = default_token_generator.make_token(self.user)
class TestSetPasswordForm(UserFormBase):
    """Tests for the set-new-password step of the password-reset flow."""
    def _get_reset_url(self):
        # Builds the pwreset URL from the uid/token prepared in setUp().
        return "/en-US/firefox/users/pwreset/%s/%s" % (self.uidb64, self.token)
    def test_url_fail(self):
        r = self.client.get('/users/pwreset/junk/', follow=True)
        eq_(r.status_code, 404)
        # A syntactically valid but wrong token must not reset anything.
        r = self.client.get('/en-US/firefox/users/pwreset/%s/12-345' %
                            self.uidb64)
        self.assertContains(r, "Password reset unsuccessful")
    def test_set_fail(self):
        url = self._get_reset_url()
        r = self.client.post(url, {'new_password1': '', 'new_password2': ''})
        self.assertFormError(r, 'form', 'new_password1',
                             "This field is required.")
        self.assertFormError(r, 'form', 'new_password2',
                             "This field is required.")
        r = self.client.post(url, {'new_password1': 'onelonger',
                                   'new_password2': 'twolonger'})
        self.assertFormError(r, 'form', 'new_password2',
                             "The two password fields didn't match.")
    def test_set_blacklisted(self):
        # Passwords on the blacklist must be rejected.
        BlacklistedPassword.objects.create(password='password')
        url = self._get_reset_url()
        r = self.client.post(url, {'new_password1': 'password',
                                   'new_password2': 'password'})
        self.assertFormError(r, 'form', 'new_password1',
                             'That password is not allowed.')
    def test_set_short(self):
        url = self._get_reset_url()
        r = self.client.post(url, {'new_password1': 'short',
                                   'new_password2': 'short'})
        self.assertFormError(r, 'form', 'new_password1',
                             'Must be 8 characters or more.')
    def test_set_success(self):
        url = self._get_reset_url()
        assert self.user_profile.check_password('testlonger') is False
        self.client.post(url, {'new_password1': 'testlonger',
                               'new_password2': 'testlonger'})
        # Re-fetch the profile: the change happened via the view.
        self.user_profile = UserProfile.objects.get(id='4043307')
        assert self.user_profile.check_password('testlonger')
        # Exactly one CHANGE_PASSWORD entry must be logged.
        eq_(self.user_profile.userlog_set
            .filter(activity_log__action=amo.LOG.CHANGE_PASSWORD.id)
            .count(), 1)
class TestPasswordResetForm(UserFormBase):
    """Tests for the password-reset request form (reset email dispatch)."""

    def test_request_with_unkown_email(self):
        # Unknown addresses must not leak account existence: no mail goes
        # out, but the user is still redirected to the "done" page.
        r = self.client.post(
            reverse('password_reset_form'),
            {'email': 'someemail@somedomain.com'}
        )
        eq_(len(mail.outbox), 0)
        self.assertRedirects(r, reverse('password_reset_done'))

    def test_request_success(self):
        self.client.post(
            reverse('password_reset_form'),
            {'email': self.user.email}
        )
        eq_(len(mail.outbox), 1)
        assert mail.outbox[0].subject.find('Password reset') == 0
        assert mail.outbox[0].body.find('pwreset/%s' % self.uidb64) > 0

    def test_request_success_getpersona_password(self):
        """Email is sent even if the user has no password and the profile has
        an "unusable" password according to django's AbstractBaseUser."""
        bytes_ = '\xb1\x98og\x88\x87\x08q'
        md5 = hashlib.md5('password').hexdigest()
        hsh = hashlib.sha512(bytes_ + md5).hexdigest()
        # BUG FIX: the salt is the local `bytes_`; the old code interpolated
        # the built-in `bytes` type, storing a bogus salt in the hash.
        self.user.password = 'sha512+MD5$%s$%s' % (bytes_, hsh)
        self.user.save()
        self.client.post(
            reverse('password_reset_form'),
            {'email': self.user.email}
        )
        eq_(len(mail.outbox), 1)
        assert mail.outbox[0].subject.find('Password reset') == 0
        assert mail.outbox[0].body.find('pwreset/%s' % self.uidb64) > 0

    def test_required_attrs(self):
        res = self.client.get(reverse('password_reset_form'))
        email_input = pq(res.content.decode('utf-8'))('#id_email')
        eq_(email_input.attr('required'), 'required')
        eq_(email_input.attr('aria-required'), 'true')
class TestUserDeleteForm(UserFormBase):
    """Tests for the account self-deletion form."""
    def test_bad_password(self):
        self.client.login(username='jbalogh@mozilla.com', password='password')
        data = {'password': 'wrongpassword', 'confirm': True, }
        r = self.client.post('/en-US/firefox/users/delete', data)
        msg = "Wrong password entered!"
        self.assertFormError(r, 'form', 'password', msg)
    def test_not_confirmed(self):
        # The confirm checkbox is mandatory even with a correct password.
        self.client.login(username='jbalogh@mozilla.com', password='password')
        data = {'password': 'password'}
        r = self.client.post('/en-US/firefox/users/delete', data)
        self.assertFormError(r, 'form', 'confirm', 'This field is required.')
    def test_success(self):
        self.client.login(username='jbalogh@mozilla.com', password='password')
        data = {'password': 'password', 'confirm': True, }
        self.client.post('/en-US/firefox/users/delete', data, follow=True)
        # TODO XXX: Bug 593055
        #self.assertContains(r, "Profile Deleted")
        # Deletion anonymizes: the flag is set and the email is wiped.
        u = UserProfile.objects.get(id=4043307)
        eq_(u.deleted, True)
        eq_(u.email, None)
    @patch('users.models.UserProfile.is_developer')
    def test_developer_attempt(self, f):
        """A developer's attempt to delete one's self must be thwarted."""
        f.return_value = True
        self.client.login(username='jbalogh@mozilla.com', password='password')
        data = {'password': 'password', 'confirm': True, }
        r = self.client.post('/en-US/firefox/users/delete', data, follow=True)
        self.assertContains(r, 'You cannot delete your account')
class TestUserEditForm(UserFormBase):
def setUp(self):
super(TestUserEditForm, self).setUp()
self.client.login(username='jbalogh@mozilla.com', password='password')
self.url = reverse('users.edit')
def test_no_names(self):
data = {'username': '',
'email': 'jbalogh@mozilla.com', }
r = self.client.post(self.url, data)
self.assertFormError(r, 'form', 'username', 'This field is required.')
def test_no_real_name(self):
data = {'username': 'blah',
'email': 'jbalogh@mozilla.com',
'lang': 'en-US'}
r = self.client.post(self.url, data, follow=True)
self.assertContains(r, 'Profile Updated')
def test_set_wrong_password(self):
data = {'email': 'jbalogh@mozilla.com',
'oldpassword': 'wrong',
'password': 'new',
'password2': 'new', }
r = self.client.post(self.url, data)
self.assertFormError(r, 'form', 'oldpassword',
'Wrong password entered!')
def test_set_unmatched_passwords(self):
data = {'email': 'jbalogh@mozilla.com',
'oldpassword': 'password',
'password': 'longer123',
'password2': 'longer1234', }
r = self.client.post(self.url, data)
self.assertFormError(r, 'form', 'password2',
'The passwords did not match.')
def test_set_new_passwords(self):
data = {'username': 'jbalogh',
'email': 'jbalogh@mozilla.com',
'oldpassword': 'password',
'password': 'longer123',
'password2': 'longer123',
'lang': 'en-US'}
r = self.client.post(self.url, data, follow=True)
self.assertContains(r, 'Profile Updated')
def test_long_data(self):
data = {'username': 'jbalogh',
'email': 'jbalogh@mozilla.com',
'oldpassword': 'password',
'password': 'new',
'password2': 'new',
'lang': 'en-US'}
for field, length in (('username', 50), ('display_name', 50),
('location', 100), ('occupation', 100)):
data[field] = 'x' * (length + 1)
r = self.client.post(self.url, data, follow=True)
err = u'Ensure this value has at most %s characters (it has %s).'
self.assertFormError(r, 'form', field, err % (length, length + 1))
@patch('amo.models.ModelBase.update')
def test_photo_modified(self, update_mock):
dummy = Mock()
dummy.user = self.user
data = {'username': self.user_profile.username,
'email': self.user_profile.email,
'lang': 'en-US'}
files = {'photo': get_uploaded_file('transparent.png')}
form = UserEditForm(data, files=files, instance=self.user_profile,
request=dummy)
assert form.is_valid()
form.save()
assert update_mock.called
def test_lang_initial(self):
"""If no lang is set on the user, initial value is current locale."""
# Lang is already set: don't change it.
res = self.client.get(self.url)
form = res.context['form']
eq_(form.initial['lang'], 'en-US')
with self.activate('fr'):
res = self.client.get(reverse('users.edit'))
form = res.context['form']
eq_(form.initial['lang'], 'en-US')
# Lang isn't set yet: initial value is set to the current locale.
user = UserProfile.objects.get(email='jbalogh@mozilla.com')
user.lang = None
user.save()
res = self.client.get(self.url)
form = res.context['form']
eq_(form.initial['lang'], 'en-US')
with self.activate('fr'):
res = self.client.get(reverse('users.edit'))
form = res.context['form']
eq_(form.initial['lang'], 'fr')
    def test_required_attrs(self):
        """The username input renders required and aria-required attributes."""
        res = self.client.get(self.url)
        username_input = pq(res.content.decode('utf-8'))('#id_username')
        eq_(username_input.attr('required'), 'required')
        eq_(username_input.attr('aria-required'), 'true')
    def test_existing_email(self):
        """Changing email to one that's already taken is a form error."""
        data = {'email': 'testo@example.com'}
        r = self.client.post(self.url, data)
        self.assertFormError(r, 'form', 'email',
                             [u'User profile with this Email already exists.'])
class TestAdminUserEditForm(UserFormBase):
    """Tests for the admin variant of the user edit page."""
    fixtures = ['base/users']
    def setUp(self):
        super(TestAdminUserEditForm, self).setUp()
        # Act as an admin, editing a regular user's profile.
        self.client.login(username='admin@mozilla.com', password='password')
        self.url = reverse('users.admin_edit', args=[self.user.id])
    def test_delete_link(self):
        """The admin edit page links to the Django admin delete view."""
        r = self.client.get(self.url)
        eq_(r.status_code, 200)
        eq_(pq(r.content)('a.delete').attr('href'),
            reverse('admin:users_userprofile_delete', args=[self.user.id]))
class TestUserLoginForm(UserFormBase):
    """Tests for the login form: validation, redirects and login logging."""
    def _get_login_url(self):
        # All tests post to the locale/app-prefixed login view.
        return "/en-US/firefox/users/login"
    def test_credential_fail(self):
        """Empty credentials produce required-field errors on both fields."""
        r = self.client.post(self._get_login_url(),
                             {'username': '', 'password': ''})
        self.assertFormError(r, 'form', 'username', "This field is required.")
        self.assertFormError(r, 'form', 'password', "This field is required.")
    def test_credential_fail_wrong_password(self):
        """A wrong password yields the generic non-field auth error."""
        r = self.client.post(self._get_login_url(),
                             {'username': 'jbalogh@mozilla.com',
                              'password': 'wrongpassword'})
        self.assertFormError(r, 'form', '', ("Please enter a correct username "
                                             "and password. Note that both "
                                             "fields may be case-sensitive."))
    def test_credential_fail_short_password(self):
        """Passwords under 8 characters are rejected with a reset prompt."""
        r = self.client.post(self._get_login_url(),
                             {'username': 'jbalogh@mozilla.com',
                              'password': 'shortpw'})
        error_msg = (u'As part of our new password policy, your password must '
                     u'be 8 characters or more. Please update your password '
                     u'by <a href="/en-US/firefox/users/pwreset">issuing a '
                     u'password reset</a>.')
        self.assertFormError(r, 'form', 'password', error_msg)
    def test_credential_success(self):
        """Correct credentials log the user in, with or without rememberme."""
        user = UserProfile.objects.get(email='jbalogh@mozilla.com')
        url = self._get_login_url()
        r = self.client.post(url, {'username': user.email,
                                   'password': 'password'}, follow=True)
        eq_(pq(r.content.decode('utf-8'))('.account .user').text(),
            user.display_name)
        eq_(pq(r.content)('.account .user').attr('title'), user.email)
        # With rememberme the session should outlive the default expiry.
        r = self.client.post(url, {'username': user.email,
                                   'password': 'password',
                                   'rememberme': 1}, follow=True)
        eq_(pq(r.content.decode('utf-8'))('.account .user').text(),
            user.display_name)
        eq_(pq(r.content)('.account .user').attr('title'), user.email)
        # Subtract 100 to give some breathing room
        age = settings.SESSION_COOKIE_AGE - 100
        assert self.client.session.get_expiry_age() > age
    def test_redirect_after_login(self):
        """The ?to= parameter redirects after login (path or allowed domain)."""
        url = urlparams(self._get_login_url(), to="/en-US/firefox/about")
        r = self.client.post(url, {'username': 'jbalogh@mozilla.com',
                                   'password': 'password'}, follow=True)
        self.assertRedirects(r, '/en-US/about')
        # Test a valid domain. Note that assertRedirects doesn't work on
        # external domains, so inspect the redirect chain manually.
        url = urlparams(self._get_login_url(), to="/addon/new",
                        domain="builder")
        r = self.client.post(url, {'username': 'jbalogh@mozilla.com',
                                   'password': 'password'}, follow=True)
        to, code = r.redirect_chain[0]
        self.assertEqual(to, 'https://builder.addons.mozilla.org/addon/new')
        self.assertEqual(code, 302)
    def test_redirect_after_login_evil(self):
        """An absolute external ?to= URL is ignored (open-redirect guard)."""
        url = urlparams(self._get_login_url(), to='http://foo.com')
        r = self.client.post(url, {'username': 'jbalogh@mozilla.com',
                                   'password': 'password'}, follow=True)
        self.assertRedirects(r, '/en-US/firefox/')
    def test_redirect_after_login_domain(self):
        """A non-whitelisted ?domain= is ignored (open-redirect guard)."""
        url = urlparams(self._get_login_url(), to='/en-US/firefox',
                        domain='http://evil.com')
        r = self.client.post(url, {'username': 'jbalogh@mozilla.com',
                                   'password': 'password'}, follow=True)
        self.assertRedirects(r, '/en-US/firefox/')
    def test_unconfirmed_account(self):
        """Logging in before email confirmation prompts for activation."""
        url = self._get_login_url()
        self.user_profile.confirmationcode = 'blah'
        self.user_profile.save()
        r = self.client.post(url, {'username': 'jbalogh@mozilla.com',
                                   'password': 'password'}, follow=True)
        self.assertNotContains(r, "Welcome, Jeff")
        self.assertContains(r, "A link to activate your user account")
        self.assertContains(r, "If you did not receive the confirmation")
    def test_yes_register(self):
        """The login page links to account registration."""
        res = self.client.get(self._get_login_url())
        self.assertContains(res, 'Create an Add-ons Account')
    def test_required_attrs(self):
        """The username input renders required/aria-required attributes."""
        res = self.client.get(self._get_login_url())
        username_input = pq(res.content.decode('utf-8'))('#id_username')
        eq_(username_input.attr('required'), 'required')
        eq_(username_input.attr('aria-required'), 'true')
    def test_disabled_account(self):
        """A deleted account cannot log in and gets a generic error."""
        url = self._get_login_url()
        self.user_profile.deleted = True
        self.user_profile.save()
        r = self.client.post(url, {'username': 'jbalogh@mozilla.com',
                                   'password': 'password'}, follow=True)
        self.assertNotContains(r, "Welcome, Jeff")
        self.assertContains(r, 'Wrong email address or password')
    def test_successful_login_logging(self):
        """A successful login resets failure counters and records IP/time."""
        t = datetime.now()
        # microsecond is not saved in the db
        t = datetime(t.year, t.month, t.day, t.hour, t.minute, t.second)
        url = self._get_login_url()
        self.client.post(url, {'username': 'jbalogh@mozilla.com',
                               'password': 'password'}, follow=True)
        u = UserProfile.objects.get(email='jbalogh@mozilla.com')
        eq_(u.failed_login_attempts, 0)
        eq_(u.last_login_attempt_ip, '127.0.0.1')
        eq_(u.last_login_ip, '127.0.0.1')
        assert u.last_login_attempt == t or u.last_login_attempt > t
    def test_failed_login_logging(self):
        """A failed login bumps the failure count but not last_login_ip."""
        t = datetime.now()
        # microsecond is not saved in the db
        t = datetime(t.year, t.month, t.day, t.hour, t.minute, t.second)
        url = self._get_login_url()
        self.client.post(url, {'username': 'jbalogh@mozilla.com',
                               'password': 'wrongpassword'})
        u = UserProfile.objects.get(email='jbalogh@mozilla.com')
        # NOTE(review): 4 presumably includes prior attempts stored in the
        # fixture plus this one -- confirm against the fixture data.
        eq_(u.failed_login_attempts, 4)
        eq_(u.last_login_attempt_ip, '127.0.0.1')
        assert u.last_login_ip != '127.0.0.1'
        assert u.last_login_attempt == t or u.last_login_attempt > t
    @patch.object(settings, 'RECAPTCHA_PRIVATE_KEY', 'something')
    def test_recaptcha_errors_only(self):
        """Only recaptcha errors should be returned if validation fails.
        We don't want any information on the username/password returned if the
        captcha is incorrect.
        """
        form = AuthenticationForm(data={'username': 'foo',
                                        'password': 'barpassword',
                                        'recaptcha': ''},
                                  use_recaptcha=True)
        form.is_valid()
        assert len(form.errors) == 1
        assert 'recaptcha' in form.errors
class TestUserRegisterForm(UserFormBase):
    """Tests for the registration form's validation rules."""
    def test_no_info(self):
        """Empty submission reports required-field errors."""
        data = {'email': '',
                'password': '',
                'password2': '',
                'username': '', }
        r = self.client.post('/en-US/firefox/users/register', data)
        msg = "This field is required."
        self.assertFormError(r, 'form', 'email', msg)
        self.assertFormError(r, 'form', 'username', msg)
    def test_register_existing_account(self):
        """Registering an already-used email errors and sends no mail."""
        data = {'email': 'jbalogh@mozilla.com',
                'password': 'xxxlonger',
                'password2': 'xxxlonger',
                'username': 'xxx', }
        r = self.client.post('/en-US/firefox/users/register', data)
        self.assertFormError(r, 'form', 'email',
                             'User profile with this Email already exists.')
        eq_(len(mail.outbox), 0)
    def test_set_unmatched_passwords(self):
        """Mismatched passwords error on password2 and send no mail."""
        data = {'email': 'john.connor@sky.net',
                'password': 'new1longer',
                'password2': 'new2longer', }
        r = self.client.post('/en-US/firefox/users/register', data)
        self.assertFormError(r, 'form', 'password2',
                             'The passwords did not match.')
        eq_(len(mail.outbox), 0)
    def test_invalid_username(self):
        """Usernames are limited to letters, numbers, underscores, hyphens."""
        data = {'email': 'testo@example.com',
                'password': 'xxxlonger',
                'password2': 'xxxlonger',
                'username': 'Todd/Rochelle', }
        r = self.client.post('/en-US/firefox/users/register', data)
        self.assertFormError(
            r, 'form', 'username',
            'Enter a valid username consisting of letters, numbers, '
            'underscores or hyphens.')
    def test_blacklisted_username(self):
        """A blacklisted username is rejected."""
        data = {'email': 'testo@example.com',
                'password': 'xxxlonger',
                'password2': 'xxxlonger',
                'username': 'IE6Fan', }
        r = self.client.post('/en-US/firefox/users/register', data)
        self.assertFormError(r, 'form', 'username',
                             'This username cannot be used.')
    def test_blacklisted_display_name(self):
        """A blacklisted display name is rejected even with a valid username."""
        data = {'email': 'testo@example.com',
                'password': 'xxxlonger',
                'password2': 'xxxlonger',
                'username': 'valid',
                'display_name': 'IE6Fan', }
        r = self.client.post('/en-US/firefox/users/register', data)
        self.assertFormError(r, 'form', 'display_name',
                             'This display name cannot be used.')
    def test_alldigit_username(self):
        """Usernames consisting only of digits are rejected."""
        data = {'email': 'testo@example.com',
                'password': 'xxxlonger',
                'password2': 'xxxlonger',
                'username': '8675309', }
        r = self.client.post('/en-US/firefox/users/register', data)
        self.assertFormError(r, 'form', 'username',
                             'Usernames cannot contain only digits.')
    def test_blacklisted_password(self):
        """A password on the blacklist is rejected."""
        BlacklistedPassword.objects.create(password='password')
        data = {'email': 'testo@example.com',
                'password': 'password',
                'password2': 'password',
                'username': 'IE6Fan', }
        r = self.client.post('/en-US/firefox/users/register', data)
        self.assertFormError(r, 'form', 'password',
                             'That password is not allowed.')
    def test_password_length(self):
        """Passwords shorter than 8 characters are rejected."""
        BlacklistedPassword.objects.create(password='password')
        data = {'email': 'testo@example.com',
                'password': 'short',
                'password2': 'short',
                'username': 'IE6Fan', }
        r = self.client.post('/en-US/firefox/users/register', data)
        self.assertFormError(r, 'form', 'password',
                             'Must be 8 characters or more.')
    def test_invalid_email_domain(self):
        """Email addresses from blacklisted domains are rejected."""
        data = {'email': 'fake@mailinator.com',
                'password': 'xxxlonger',
                'password2': 'xxxlonger',
                'username': 'trulyfake', }
        r = self.client.post('/en-US/firefox/users/register', data)
        self.assertFormError(r, 'form', 'email',
                             'Please use an email address from a different '
                             'provider to complete your registration.')
    def test_invalid_homepage(self):
        """A malformed homepage URL is rejected with a format hint."""
        data = {'homepage': 'example.com:alert(String.fromCharCode(88,83,83)',
                'email': ''}
        m = 'This URL has an invalid format. '
        m += 'Valid URLs look like http://example.com/my_page.'
        r = self.client.post('/en-US/firefox/users/register', data)
        self.assertFormError(r, 'form', 'homepage', m)
    def test_already_logged_in(self):
        """A logged-in user sees a notice instead of the register form."""
        self.client.login(username='jbalogh@mozilla.com', password='password')
        r = self.client.get('/users/register', follow=True)
        self.assertContains(r, "You are already logged in")
        self.assertNotContains(r, '<button type="submit">Register</button>')
    def good_data(self):
        # Helper: a registration payload that passes all validation.
        return {
            'email': 'john.connor@sky.net',
            'password': 'carebears',
            'password2': 'carebears',
            'username': 'BigJC',
            'homepage': ''
        }
    @patch('captcha.fields.ReCaptchaField.clean')
    def test_success(self, clean):
        """A valid registration creates the profile and mails a confirmation."""
        clean.return_value = ''
        r = self.client.post('/en-US/firefox/users/register', self.good_data(),
                             follow=True)
        self.assertContains(r, "Congratulations!")
        u = UserProfile.objects.get(email='john.connor@sky.net')
        assert u.confirmationcode
        eq_(len(mail.outbox), 1)
        assert mail.outbox[0].subject.find('Please confirm your email') == 0
        assert mail.outbox[0].body.find('%s/confirm/%s' %
                                        (u.id, u.confirmationcode)) > 0
    def test_long_data(self):
        """Over-length username/display_name values trigger max_length errors."""
        data = {'username': 'jbalogh',
                'email': 'jbalogh@mozilla.com',
                'oldpassword': 'password',
                'password': 'new',
                'password2': 'new', }
        for field, length in (('username', 50), ('display_name', 50)):
            data[field] = 'x' * (length + 1)
            r = self.client.post(reverse('users.register'), data, follow=True)
            err = u'Ensure this value has at most %s characters (it has %s).'
            self.assertFormError(r, 'form', field, err % (length, length + 1))
class TestBlacklistedNameAdminAddForm(UserFormBase):
    """Tests for the admin form that bulk-adds blacklisted names."""
    def test_no_usernames(self):
        """Submitting only blank lines is an error."""
        self.client.login(username='testo@example.com', password='password')
        url = reverse('admin:users_blacklistedname_add')
        data = {'names': "\n\n", }
        r = self.client.post(url, data)
        msg = 'Please enter at least one name to blacklist.'
        self.assertFormError(r, 'form', 'names', msg)
    def test_add(self):
        """New names are added; duplicates of existing entries are skipped."""
        self.client.login(username='testo@example.com', password='password')
        url = reverse('admin:users_blacklistedname_add')
        # 'IE6Fan' presumably already exists in the fixtures, so only
        # 'fubar' counts as new -- hence 1 added / 1 ignored below.
        data = {'names': "IE6Fan\nfubar\n\n", }
        r = self.client.post(url, data)
        msg = '1 new values added to the blacklist. '
        msg += '1 duplicates were ignored.'
        self.assertContains(r, msg)
        self.assertNotContains(r, 'fubar')
class TestBlacklistedEmailDomainAdminAddForm(UserFormBase):
    """Tests for the admin form that bulk-adds blacklisted email domains."""
    def test_no_domains(self):
        """Submitting only blank lines is an error."""
        self.client.login(username='testo@example.com', password='password')
        url = reverse('admin:users_blacklistedemaildomain_add')
        data = {'domains': "\n\n", }
        r = self.client.post(url, data)
        msg = 'Please enter at least one e-mail domain to blacklist.'
        self.assertFormError(r, 'form', 'domains', msg)
    def test_add(self):
        """New domains are added; duplicates of existing entries are skipped."""
        self.client.login(username='testo@example.com', password='password')
        url = reverse('admin:users_blacklistedemaildomain_add')
        # 'mailinator.com' presumably already exists, so only one domain is
        # new -- hence 1 added / 1 ignored below.
        data = {'domains': "mailinator.com\ntrash-mail.de\n\n", }
        r = self.client.post(url, data)
        msg = '1 new values added to the blacklist. '
        msg += '1 duplicates were ignored.'
        self.assertContains(r, msg)
        self.assertNotContains(r, 'fubar')
| bsd-3-clause |
tchellomello/home-assistant | tests/components/braviatv/test_config_flow.py | 13 | 10401 | """Define tests for the Bravia TV config flow."""
from bravia_tv.braviarc import NoIPControl
from homeassistant import data_entry_flow
from homeassistant.components.braviatv.const import CONF_IGNORED_SOURCES, DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_USER
from homeassistant.const import CONF_HOST, CONF_MAC, CONF_PIN
from tests.async_mock import patch
from tests.common import MockConfigEntry
# Canned payload returned by the patched BraviaRC.get_system_info(); the
# config flow derives the unique_id from "cid", the title from "model" and
# the stored MAC from "macAddr".
BRAVIA_SYSTEM_INFO = {
    "product": "TV",
    "region": "XEU",
    "language": "pol",
    "model": "TV-Model",
    "serial": "serial_number",
    "macAddr": "AA:BB:CC:DD:EE:FF",
    "name": "BRAVIA",
    "generation": "5.2.0",
    "area": "POL",
    "cid": "very_unique_string",
}
# Canned payload returned by the patched BraviaRC.load_source_list(),
# used by the options flow test.
BRAVIA_SOURCE_LIST = {
    "HDMI 1": "extInput:hdmi?port=1",
    "HDMI 2": "extInput:hdmi?port=2",
    "HDMI 3/ARC": "extInput:hdmi?port=3",
    "HDMI 4": "extInput:hdmi?port=4",
    "AV/Component": "extInput:component?port=1",
}
# Data handed to the import flow: one config with a hostname, one with an IP.
IMPORT_CONFIG_HOSTNAME = {CONF_HOST: "bravia-host", CONF_PIN: "1234"}
IMPORT_CONFIG_IP = {CONF_HOST: "10.10.10.12", CONF_PIN: "1234"}
async def test_show_form(hass):
    """Starting a user flow with no input serves the initial form."""
    flow_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    # The flow must stop at the user step and present a form.
    assert flow_result["step_id"] == SOURCE_USER
    assert flow_result["type"] == data_entry_flow.RESULT_TYPE_FORM
async def test_import(hass):
    """Test that the import works."""
    # Patch the bravia_tv client end-to-end so no real TV is contacted and
    # entry setup is a no-op.
    with patch("bravia_tv.BraviaRC.connect", return_value=True), patch(
        "bravia_tv.BraviaRC.is_connected", return_value=True
    ), patch(
        "bravia_tv.BraviaRC.get_system_info", return_value=BRAVIA_SYSTEM_INFO
    ), patch(
        "homeassistant.components.braviatv.async_setup_entry", return_value=True
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_IMPORT}, data=IMPORT_CONFIG_HOSTNAME
        )
        # unique_id comes from the device "cid"; title from the model name.
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["result"].unique_id == "very_unique_string"
        assert result["title"] == "TV-Model"
        assert result["data"] == {
            CONF_HOST: "bravia-host",
            CONF_PIN: "1234",
            CONF_MAC: "AA:BB:CC:DD:EE:FF",
        }
async def test_import_cannot_connect(hass):
    """Test that errors are shown when cannot connect to the host during import."""
    # connect succeeds but is_connected stays False -> flow aborts.
    with patch("bravia_tv.BraviaRC.connect", return_value=True), patch(
        "bravia_tv.BraviaRC.is_connected", return_value=False
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_IMPORT}, data=IMPORT_CONFIG_HOSTNAME
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "cannot_connect"
async def test_import_model_unsupported(hass):
    """Test that errors are shown when the TV is not supported during import."""
    # An empty system-info dict marks the model as unsupported.
    with patch("bravia_tv.BraviaRC.connect", return_value=True), patch(
        "bravia_tv.BraviaRC.is_connected", return_value=True
    ), patch("bravia_tv.BraviaRC.get_system_info", return_value={}):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_IMPORT}, data=IMPORT_CONFIG_IP
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "unsupported_model"
async def test_import_no_ip_control(hass):
    """Test that errors are shown when IP Control is disabled on the TV during import."""
    # connect raising NoIPControl aborts the flow with reason no_ip_control.
    with patch("bravia_tv.BraviaRC.connect", side_effect=NoIPControl("No IP Control")):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_IMPORT}, data=IMPORT_CONFIG_IP
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "no_ip_control"
async def test_import_duplicate_error(hass):
    """Test that errors are shown when duplicates are added during import."""
    # Pre-register an entry with the same unique_id the flow will derive.
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        unique_id="very_unique_string",
        data={
            CONF_HOST: "bravia-host",
            CONF_PIN: "1234",
            CONF_MAC: "AA:BB:CC:DD:EE:FF",
        },
        title="TV-Model",
    )
    config_entry.add_to_hass(hass)
    with patch("bravia_tv.BraviaRC.connect", return_value=True), patch(
        "bravia_tv.BraviaRC.is_connected", return_value=True
    ), patch("bravia_tv.BraviaRC.get_system_info", return_value=BRAVIA_SYSTEM_INFO):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_IMPORT}, data=IMPORT_CONFIG_HOSTNAME
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "already_configured"
async def test_user_invalid_host(hass):
    """An invalid hostname is rejected with an invalid_host field error."""
    bad_input = {CONF_HOST: "invalid/host"}
    flow_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=bad_input
    )
    assert flow_result["errors"] == {CONF_HOST: "invalid_host"}
async def test_authorize_cannot_connect(hass):
    """Test that errors are shown when cannot connect to host at the authorize step."""
    # connect succeeds, but is_connected is NOT patched, so the PIN step
    # fails with a cannot_connect form error.
    with patch("bravia_tv.BraviaRC.connect", return_value=True):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_USER}, data={CONF_HOST: "bravia-host"}
        )
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input={CONF_PIN: "1234"}
        )
        assert result["errors"] == {"base": "cannot_connect"}
async def test_authorize_model_unsupported(hass):
    """Test that errors are shown when the TV is not supported at the authorize step."""
    # An empty system-info dict at the PIN step yields an unsupported_model error.
    with patch("bravia_tv.BraviaRC.connect", return_value=True), patch(
        "bravia_tv.BraviaRC.is_connected", return_value=True
    ), patch("bravia_tv.BraviaRC.get_system_info", return_value={}):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_USER}, data={CONF_HOST: "10.10.10.12"}
        )
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input={CONF_PIN: "1234"}
        )
        assert result["errors"] == {"base": "unsupported_model"}
async def test_authorize_no_ip_control(hass):
    """Test that errors are shown when IP Control is disabled on the TV."""
    # NoIPControl during the initial user step aborts the whole flow.
    with patch("bravia_tv.BraviaRC.connect", side_effect=NoIPControl("No IP Control")):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_USER}, data={CONF_HOST: "bravia-host"}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "no_ip_control"
async def test_duplicate_error(hass):
    """Test that errors are shown when duplicates are added."""
    # Pre-register an entry with the same unique_id the user flow will derive.
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        unique_id="very_unique_string",
        data={
            CONF_HOST: "bravia-host",
            CONF_PIN: "1234",
            CONF_MAC: "AA:BB:CC:DD:EE:FF",
        },
        title="TV-Model",
    )
    config_entry.add_to_hass(hass)
    with patch("bravia_tv.BraviaRC.connect", return_value=True), patch(
        "bravia_tv.BraviaRC.is_connected", return_value=True
    ), patch("bravia_tv.BraviaRC.get_system_info", return_value=BRAVIA_SYSTEM_INFO):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_USER}, data={CONF_HOST: "bravia-host"}
        )
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input={CONF_PIN: "1234"}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "already_configured"
async def test_create_entry(hass):
    """Test that the user step works."""
    with patch("bravia_tv.BraviaRC.connect", return_value=True), patch(
        "bravia_tv.BraviaRC.is_connected", return_value=True
    ), patch(
        "bravia_tv.BraviaRC.get_system_info", return_value=BRAVIA_SYSTEM_INFO
    ), patch(
        "homeassistant.components.braviatv.async_setup_entry", return_value=True
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_USER}, data={CONF_HOST: "bravia-host"}
        )
        # After the host is accepted the flow asks for the PIN.
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "authorize"
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input={CONF_PIN: "1234"}
        )
        # Entry fields mirror BRAVIA_SYSTEM_INFO (cid/model/macAddr).
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["result"].unique_id == "very_unique_string"
        assert result["title"] == "TV-Model"
        assert result["data"] == {
            CONF_HOST: "bravia-host",
            CONF_PIN: "1234",
            CONF_MAC: "AA:BB:CC:DD:EE:FF",
        }
async def test_options_flow(hass):
    """Test config flow options."""
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        unique_id="very_unique_string",
        data={
            CONF_HOST: "bravia-host",
            CONF_PIN: "1234",
            CONF_MAC: "AA:BB:CC:DD:EE:FF",
        },
        title="TV-Model",
    )
    config_entry.add_to_hass(hass)
    # Set the entry up for real (client calls patched) so options can load.
    with patch("bravia_tv.BraviaRC.connect", return_value=True), patch(
        "bravia_tv.BraviaRC.is_connected", return_value=True
    ), patch("bravia_tv.BraviaRC.get_system_info", return_value=BRAVIA_SYSTEM_INFO):
        assert await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
    # The options form offers the TV's sources; ignoring two of them must be
    # persisted to the entry's options.
    with patch("bravia_tv.BraviaRC.load_source_list", return_value=BRAVIA_SOURCE_LIST):
        result = await hass.config_entries.options.async_init(config_entry.entry_id)
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "user"
        result = await hass.config_entries.options.async_configure(
            result["flow_id"], user_input={CONF_IGNORED_SOURCES: ["HDMI 1", "HDMI 2"]}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert config_entry.options == {CONF_IGNORED_SOURCES: ["HDMI 1", "HDMI 2"]}
| apache-2.0 |
prmakerspace/led-tunnel | examples/python/stars.py | 1 | 4839 | #!/usr/bin/env python3
"""Stars lights up random LEDs
Try running with optional config name.
ex. ./stars.py blackberry
"""
__author__ = 'plong0 <plong00@gmail.com>'
import sys
import time
import random
from pprint import pprint
from LED_Tunnel import Tunnel
# fun settings - try adding your own config and playing with the options
# Each config controls launch cadence, per-star timing (seconds) and the
# colour palette stars are randomly drawn from.  fadeIn/lifespan/fadeOut are
# [min, max] ranges sampled uniformly per star.
configs = {
    'basic': {
        'launchDelay': 0.1,
        'launchCount': 1,
        'maximum': int(Tunnel.LED_COUNT*2.0/3.0),
        'fadeIn': [0.2, 0.3],
        'lifespan': [1.0, 5.0],
        'fadeOut': [0.25, 0.5],
        'colours': [
            (255, 255, 0), # yellow
            (255, 192, 0), # orange
            (255, 255, 255) # white
        ]
    },
    'blackberry': {
        'launchDelay': 0.0,
        'launchCount': 1,
        'maximum': int(Tunnel.LED_COUNT*0.75),
        'fadeIn': [0.2, 0.3],
        'lifespan': [1.0, 5.0],
        'fadeOut': [0.25, 0.5],
        'colours': [
            (192, 0, 255), # purple
            (255, 128, 255), # light purple
            (255, 192, 255) # white
        ]
    },
    'rainbow-sky': {
        'launchDelay': 0.0,
        'launchCount': 5,
        'maximum': int(Tunnel.LED_COUNT),
        'fadeIn': [0.5, 1.0],
        'lifespan': [5.0, 10.0],
        'fadeOut': [0.75, 1.5],
        'colours': [
            (128, 0, 0), # dark red
            (255, 0, 0), # red
            (255, 165, 0), # orange
            (255, 192, 0), # orange
            (255, 255, 0), # yellow
            (0, 128, 0), # green
            (0, 255, 0), # green
            (0, 0, 128), # blue
            (0, 0, 255), # blue
            (192, 0, 192), # purple
            (192, 0, 255), # purple
            (255, 255, 255), # white
        ]
    }
};
# Pick the config named on the command line, defaulting to the first one.
config_name = next(iter(configs))
if len(sys.argv) > 1:
    config_name = sys.argv[1]
    if (config_name not in configs):
        sys.exit('Invalid config name: "{}"'.format(config_name))
config = configs[config_name]
# system settings
inverse = True # if True, start at the back of the tunnel
frameDelay = 0.01 # delay between frames - controls animation speed (increase to slow down)
print('RUNNING CONFIG: "{}"'.format(config_name))
pprint(config)
# Mutable animation state shared by should_launch()/do_launch() and the
# main loop: LED index -> per-star dict, plus the last launch timestamp.
activeStars = {}
lastLaunch = None
def should_launch():
    """Return True when a new star may be launched: below the configured
    maximum, and the launch delay has elapsed since the last launch."""
    global activeStars, lastLaunch
    if len(activeStars) >= config['maximum']:
        return False
    if lastLaunch is None:
        return True
    return time.time() - lastLaunch >= config['launchDelay']
def do_launch():
    # Spawn a star at a random LED index (no-op if that LED is occupied),
    # then restart the launch-delay timer either way.
    global activeStars, lastLaunch
    # NOTE(review): random.seed() with no argument reseeds on every launch;
    # presumably unnecessary -- seeding once at startup would suffice.
    random.seed()
    index = random.randint(0, Tunnel.LED_COUNT-1)
    if index not in activeStars:
        activeStars[index] = {
            'state': 0,  # 0 = fading in, 1 = holding, 2 = fading out
            'stateStart': time.time(),
            'colourIndex': random.randint(0, len(config['colours'])-1),
            'fadeIn': random.uniform(config['fadeIn'][0], config['fadeIn'][1]),
            'lifespan': random.uniform(config['lifespan'][0], config['lifespan'][1]),
            'fadeOut': random.uniform(config['fadeOut'][0], config['fadeOut'][1])
        }
    lastLaunch = time.time()
# Main animation loop: launch stars, rebuild the frame, push it, sleep.
while True:
    if should_launch():
        for i in range(config['launchCount']):
            do_launch()
    # Rebuild the whole frame from black every iteration.
    pixels = [ (0,0,0) ] * Tunnel.LED_COUNT
    killStars = []
    for index in activeStars:
        activeStar = activeStars[index]
        stateTime = time.time() - activeStar['stateStart']
        # Each state interpolates between colour1 and colour2 by stateProg;
        # a zero-length phase jumps straight to stateProg = 1.0.
        if activeStar['state'] == 0:
            # Fading in: black -> star colour.
            if activeStar['fadeIn']:
                stateProg = stateTime / activeStar['fadeIn']
            else:
                stateProg = 1.0
            colour1 = (0,0,0)
            colour2 = config['colours'][activeStar['colourIndex']]
        elif activeStar['state'] == 1:
            # Holding: constant colour for the star's lifespan.
            if activeStar['lifespan']:
                stateProg = stateTime / activeStar['lifespan']
            else:
                stateProg = 1.0
            colour1 = config['colours'][activeStar['colourIndex']]
            colour2 = config['colours'][activeStar['colourIndex']]
        elif activeStar['state'] == 2:
            # Fading out: star colour -> black; a finished fade-out marks
            # the star for removal below.
            if activeStar['fadeOut']:
                stateProg = stateTime / activeStar['fadeOut']
            else:
                stateProg = 1.0
            colour1 = config['colours'][activeStar['colourIndex']]
            colour2 = (0,0,0)
            if stateProg >= 1.0:
                killStars.append(index)
        if index not in killStars:
            if stateProg >= 1.0:
                # Current phase finished: advance to the next one and clamp.
                activeStar['state'] += 1
                activeStar['stateStart'] = time.time()
                stateProg = 1.0
            # Linear interpolation between colour1 and colour2 by stateProg.
            pixels[index] = ( colour1[0]+(colour2[0]-colour1[0])*stateProg, colour1[1]+(colour2[1]-colour1[1])*stateProg, colour1[2]+(colour2[2]-colour1[2])*stateProg )
    # kill the expired stars
    for index in killStars:
        activeStars.pop(index, None)
    Tunnel.Client.put_pixels(pixels)
    time.sleep(frameDelay)
| mit |
V155/qutebrowser | scripts/dev/get_coredumpctl_traces.py | 5 | 5259 | #!/usr/bin/env python3
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Get qutebrowser crash information and stacktraces from coredumpctl."""
import os
import os.path
import sys
import argparse
import subprocess
import tempfile
import attr
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir,
os.pardir))
from scripts import utils
@attr.s
class Line:
    """A line in "coredumpctl list"."""
    time = attr.ib()     # timestamp string, e.g. "Mon 2015-09-28 23:22:24 CEST"
    pid = attr.ib()      # process ID (int)
    uid = attr.ib()      # user ID (int)
    gid = attr.ib()      # group ID (int)
    sig = attr.ib()      # signal number that caused the dump (int)
    present = attr.ib()  # True if a coredump file is available ('*' column)
    exe = attr.ib()      # path of the crashed executable
def _convert_present(data):
"""Convert " "/"*" to True/False for parse_coredumpctl_line."""
if data == '*':
return True
elif data == ' ':
return False
else:
raise ValueError(data)
def parse_coredumpctl_line(line):
    """Parse a given string coming from coredumpctl and return a Line object.
    Example input:
    Mon 2015-09-28 23:22:24 CEST  10606  1000  1000  11 /usr/bin/python3.4
    """
    # Fixed column layout of "coredumpctl list" output:
    # name -> (start, end, converter); end=None means "to end of line".
    fields = {
        'time': (0, 28, str),
        'pid': (29, 35, int),
        'uid': (36, 41, int),
        'gid': (42, 47, int),
        'sig': (48, 51, int),
        'present': (52, 53, _convert_present),
        'exe': (54, None, str),
    }
    data = {}
    for name, (start, end, converter) in fields.items():
        data[name] = converter(line[start:end])
    return Line(**data)
def get_info(pid):
    """Get and parse "coredumpctl info" output for the given PID.

    Returns a dict of the "Key: Value" pairs in the output; lines without
    a colon (e.g. the embedded stack trace) are skipped.
    """
    data = {}
    output = subprocess.run(['coredumpctl', 'info', str(pid)], check=True,
                            stdout=subprocess.PIPE).stdout
    output = output.decode('utf-8')
    for line in output.split('\n'):
        if not line.strip():
            continue
        try:
            key, value = line.split(':', maxsplit=1)
        except ValueError:
            # systemd stack output
            continue
        data[key.strip()] = value.strip()
    return data
def is_qutebrowser_dump(parsed):
    """Check if the given Line is a qutebrowser dump.

    A dump counts when the executable is named "qutebrowser", or when it is
    a python/python3* interpreter whose command line ran "-m qutebrowser".
    """
    basename = os.path.basename(parsed.exe)
    if basename == 'python' or basename.startswith('python3'):
        info = get_info(parsed.pid)
        try:
            cmdline = info['Command Line']
        except KeyError:
            # No command line recorded: assume it could be qutebrowser.
            return True
        else:
            return '-m qutebrowser' in cmdline
    else:
        return basename == 'qutebrowser'
def dump_infos_gdb(parsed):
    """Dump all needed infos for the given crash using gdb."""
    # Extract the coredump into a temp dir, then have gdb print all thread
    # backtraces in batch mode before quitting.
    with tempfile.TemporaryDirectory() as tempdir:
        coredump = os.path.join(tempdir, 'dump')
        subprocess.run(['coredumpctl', 'dump', '-o', coredump,
                        str(parsed.pid)], check=True)
        subprocess.run(['gdb', parsed.exe, coredump,
                        '-ex', 'info threads',
                        '-ex', 'thread apply all bt full',
                        '-ex', 'quit'], check=True)
def dump_infos(parsed):
    """Dump all possible infos for the given crash."""
    if not parsed.present:
        # No coredump file available: only print a one-line summary.
        info = get_info(parsed.pid)
        print("{}: Signal {} with no coredump: {}".format(
            parsed.time, info.get('Signal', None),
            info.get('Command Line', None)))
    else:
        print('\n\n\n')
        utils.print_title('{} - {}'.format(parsed.time, parsed.pid))
        # Flush so our header appears before gdb's subprocess output.
        sys.stdout.flush()
        dump_infos_gdb(parsed)
def check_prerequisites():
    """Check if coredumpctl/gdb are installed.

    Exits with status 1 if either binary cannot be executed.
    """
    for binary in ['coredumpctl', 'gdb']:
        try:
            subprocess.run([binary, '--version'], check=True)
        except FileNotFoundError:
            print("{} is needed to run this script!".format(binary),
                  file=sys.stderr)
            sys.exit(1)
def main():
    """List coredumpctl crashes and print details for qutebrowser ones."""
    check_prerequisites()
    parser = argparse.ArgumentParser()
    parser.add_argument('--all', help="Also list crashes without coredumps.",
                        action='store_true')
    args = parser.parse_args()
    coredumps = subprocess.run(['coredumpctl', 'list'], check=True,
                               stdout=subprocess.PIPE).stdout
    lines = coredumps.decode('utf-8').split('\n')
    # Skip the header line, then keep qutebrowser crashes (and, with --all,
    # also those without a coredump file).
    for line in lines[1:]:
        if not line.strip():
            continue
        parsed = parse_coredumpctl_line(line)
        if not parsed.present and not args.all:
            continue
        if is_qutebrowser_dump(parsed):
            dump_infos(parsed)
if __name__ == '__main__':
main()
| gpl-3.0 |
seap-udea/tQuakes | db/tmp2/TEMPLATE/quake-data.py | 1 | 1753 | from tquakes import *
# ##################################################
# ARGUMENTS
# ##################################################
quakeid=argv[1]
print "\tRunning ETERNA for quake '%s'..."%quakeid
# ##################################################
# CONFIGURATION
# ##################################################
conf=loadConf("configuration")
# ##################################################
# RUN ETERNA
# ##################################################
# GENERATE ETERNA.INI FILES PER COMPONENT
# Convert each component's .prd output to a whitespace-delimited .plain file.
lquakeid=quakeid.lower()
for component in COMPONENTS:
    print "\t\tRunning component %d..."%component
    system("bash prd2plain.sh %s%d.prd > %s%d.plain"%(lquakeid,component,
                                                      lquakeid,component))
# GENERATE DATAFILES
# Stack the third column of every component's plain file side by side.
print "\tGenerating plain data file..."
ic=0
for component in COMPONENTS:
    fileplain="%s%d.plain"%(lquakeid,component)
    try:
        datacomp=numpy.loadtxt(fileplain)
    # NOTE(review): bare except; also 'System' (capital S) differs from the
    # 'system' call above -- presumably both come from the tquakes star
    # import. On load failure a '.fail' marker is dropped and we exit.
    except:
        System("touch .fail")
        exit(1)
    System("rm "+fileplain)
    if ic:data=numpy.column_stack((data,datacomp[:,2]))
    else:data=datacomp[:,2]
    ic+=1
# CREATE ADDITIONAL COLUMNS
# MAGNITUDE OF THE HORIZONTAL STRAIN
hsm=numpy.sqrt(data[:,4]**2+data[:,5]**2)
# ANGLE OF THE HORIZONTAL STRAIN (0 IS EAST, 90 NORTH, 180 WEST)
hst=numpy.arctan2(data[:,4],data[:,5])*RAD
data=numpy.column_stack((data,hsm,hst))
# CALCULATE DATE
# Columns 0/1 of the component file hold YYYYMMDD and HHMMSS; convert each
# pair to a Julian date and prepend it as the first output column.
times=[]
for i in xrange(len(datacomp[:,0])):
    timestr="%d %06d"%(int(datacomp[i,0]),int(datacomp[i,1]))
    timedate=datetime.datetime.strptime(timestr,"%Y%m%d %H%M%S")
    timejd=date2jd(timedate)
    times+=[timejd]
data=numpy.column_stack((times,data))
numpy.savetxt("%s.data"%(quakeid),data)
print "\tQuake done."
| gpl-2.0 |
tnndwc/ChinaDNS-Python | test.py | 82 | 1134 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Integration test: launch the chinadns relay as a subprocess, wait until it
# logs that the DNS server has started, then run the dig command read from
# the file given on the command line and propagate its exit status.
import sys
sys.path.insert(0, 'shadowsocks')
import os
import signal
import select
from subprocess import Popen, PIPE
with open(sys.argv[-1]) as f:
    dig_cmd = f.read()
# The relay needs root privileges (binds a privileged DNS port).
p1 = Popen(['sudo', sys.executable, 'chinadns/dnsrelay.py'], shell=False,
           bufsize=0, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
p2 = None
try:
    local_ready = False
    server_ready = False
    fdset = [p1.stdout, p1.stderr]
    while True:
        # Echo relay output until we see the readiness message.
        r, w, e = select.select(fdset, [], fdset)
        if e:
            break
        for fd in r:
            line = fd.readline()
            sys.stdout.write(line)
            if line.find('starting dns') >= 0:
                local_ready = True
        if local_ready and p2 is None:
            p2 = Popen(dig_cmd.split(), shell=False, bufsize=0, close_fds=True)
            break
    if p2 is not None:
        r = p2.wait()
        if r == 0:
            print 'test passed'
        sys.exit(r)
finally:
    # NOTE(review): sys.exit(-1) inside finally replaces the SystemExit
    # raised by sys.exit(r) above, so the recorded exit code appears to
    # always be -1 -- confirm this is intended.  Also, if p2 is still None,
    # p.pid raises AttributeError (not OSError) -- confirm.
    for p in [p1, p2]:
        try:
            os.kill(p.pid, signal.SIGTERM)
        except OSError:
            pass
    sys.exit(-1)
| mit |
CamelBackNotation/CarnotKE | jyhton/lib-python/2.7/multiprocessing/dummy/__init__.py | 59 | 4481 | #
# Support for the API of the multiprocessing package using threads
#
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
]
#
# Imports
#
import threading
import sys
import weakref
import array
import itertools
from multiprocessing import TimeoutError, cpu_count
from multiprocessing.dummy.connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event
from Queue import Queue
#
#
#
class DummyProcess(threading.Thread):
    """A thread dressed up with the multiprocessing.Process API."""

    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
        threading.Thread.__init__(self, group, target, name, args, kwargs)
        # Remember who created us so start() can register with the parent.
        self._parent = current_process()
        self._pid = None
        self._children = weakref.WeakKeyDictionary()
        self._start_called = False

    def start(self):
        # Only the creating "process" may start this one.
        assert self._parent is current_process()
        self._start_called = True
        parent = self._parent
        if hasattr(parent, '_children'):
            parent._children[self] = None
        threading.Thread.start(self)

    @property
    def exitcode(self):
        # Threads have no real exit code: 0 once finished, else None.
        return 0 if (self._start_called and not self.is_alive()) else None
#
#
#
class Condition(threading._Condition):
    # Python 2 only: pull the plain function out of the unbound method via
    # im_func so notify_all is also an attribute of this subclass.
    notify_all = threading._Condition.notify_all.im_func
#
#
#
# The public Process API is backed by threads; the "current process" is just
# the current thread.
Process = DummyProcess
current_process = threading.current_thread
# Give the main thread a child registry so active_children() works on it.
current_process()._children = weakref.WeakKeyDictionary()
def active_children():
    """Prune finished dummy processes and return the ones still running."""
    children = current_process()._children
    finished = [proc for proc in children if not proc.is_alive()]
    for proc in finished:
        children.pop(proc, None)
    return list(children)
def freeze_support():
    # No-op: freeze support only matters for real OS processes.
    pass
#
#
#
class Namespace(object):
    """Attribute bag: Namespace(a=1).a == 1; repr hides _private names."""

    def __init__(self, **kwds):
        self.__dict__.update(kwds)

    def __repr__(self):
        public = sorted('%s=%r' % (name, value)
                        for name, value in self.__dict__.items()
                        if not name.startswith('_'))
        return 'Namespace(%s)' % ', '.join(public)
# Expose the builtin container types so this module can stand in for a
# multiprocessing manager (Manager() returns this module object).
dict = dict
list = list
def Array(typecode, sequence, lock=True):
    # `lock` is accepted for API compatibility but ignored: threads already
    # share memory, so a plain array.array suffices.
    return array.array(typecode, sequence)
class Value(object):
    """Thread-backed stand-in for multiprocessing.Value.

    `lock` is accepted for API compatibility and ignored: threads share
    memory, so no synchronization wrapper is needed here.
    """

    def __init__(self, typecode, value, lock=True):
        self._typecode = typecode
        self._value = value

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        self._value = value

    def __repr__(self):
        return '<%r(%r, %r)>' % (type(self).__name__, self._typecode, self._value)
def Manager():
    # The module itself plays the manager role (see the dict/list aliases).
    return sys.modules[__name__]
def shutdown():
    # Nothing to tear down for the thread-backed manager.
    pass
def Pool(processes=None, initializer=None, initargs=()):
    # Deferred import: multiprocessing.pool imports this package, so a
    # top-level import would be circular.
    from multiprocessing.pool import ThreadPool
    return ThreadPool(processes, initializer, initargs)
JoinableQueue = Queue
| apache-2.0 |
google-research/nested-transformer | augment/augment_utils.py | 1 | 4137 | # coding=utf-8
# Copyright 2020 The Nested-Transformer Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific Nested-Transformer governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Helper code which creates augmentations."""
import functools
from absl import logging
import tensorflow as tf
from augment import augment_ops
from augment.rand_augment import RandAugment
def _get_augmenter_type_and_args(**kwargs):
"""Extracts augmenter type and args from **kwargs dict."""
augment_type = kwargs['type'].lower()
augment_args = {}
for k, v in kwargs.items():
if k.startswith(augment_type + '_'):
augment_args[k[len(augment_type) + 1:]] = v
if k == 'size':
augment_args[k] = v
logging.info('Using augmentation %s with parameters %s', augment_type,
augment_args)
return augment_type, augment_args
def create_random_erasing(erase_prob):
  """Creates random erasing function.

  Args:
    erase_prob: probability of applying random erasing, bound into the
      returned callable.

  Returns:
    A callable wrapping augment_ops.random_erasing with erase_prob fixed.
  """
  return functools.partial(augment_ops.random_erasing, erase_prob=erase_prob)
def create_augmenter(**kwargs):
  """Creates augmenter for supervised task based on hyperparameters dict.

  Args:
    **kwargs: dictionary with the augmenter type (key 'type') and
      type-prefixed augmenter arguments.

  Returns:
    For 'randaugment': a RandAugment instance.  For 'colorjitter': a
    stateless callable (rng, image) -> {'image': ...}.

  Raises:
    ValueError: if the augmentation type is not recognized.
  """
  augment_type, augment_args = _get_augmenter_type_and_args(**kwargs)
  if augment_type == 'randaugment':
    augmenter = RandAugment(**augment_args)
    return augmenter
  elif augment_type == 'colorjitter':
    # Stateless augmenter: the rng argument is accepted but unused.
    def base_augmenter(rng, x):
      # TODO(zizhaoz): Take care of rng.
      del rng
      return {'image': augment_ops.color_map_fn(x, **augment_args)}
    return base_augmenter
  else:
    raise ValueError('Invalid augmentation type {0}'.format(augment_type))
def create_mix_augment(num_classes,
                       smoothing=0.,
                       mixup_alpha=0.8,
                       cutmix_alpha=1.0,
                       prob_to_apply=1.0):
  """Creates mix style augmentations (mixup / cutmix).

  Args:
    num_classes: number of classes used for the one-hot label encoding.
    smoothing: label smoothing factor forwarded to mixup/cutmix.
    mixup_alpha: mixup strength; falsy disables the mixup branch.
    cutmix_alpha: cutmix strength; falsy disables the cutmix branch.
    prob_to_apply: probability of applying the augmentation per batch.

  Returns:
    A callable mapping features {'image', 'label', 'rng'} to
    {'image', 'label'} where labels become one-hot (possibly soft) labels.
  """
  def augment_fn(features):
    images, labels = features['image'], features['label']
    assert len(images.shape) == 4, 'Input must be batched'
    oh_labels = tf.cast(tf.one_hot(labels, num_classes), tf.float32)
    # Split the rng so each stochastic decision is independent.
    rng = features.pop('rng')
    cutmix_rng, mixup_rng, branch_rng, apply_rng = tf.unstack(
        tf.random.experimental.stateless_split(rng, 4))
    branch_fns = []
    # Add mixup function
    if mixup_alpha:
      def _mixup():
        return augment_ops.batch_mixup(mixup_rng, images, oh_labels,
                                       mixup_alpha, smoothing)
      branch_fns.append(_mixup)
    # Add cutmix function
    if cutmix_alpha:
      def _cutmix():
        return augment_ops.batch_cutmix(cutmix_rng, images, oh_labels,
                                        cutmix_alpha, smoothing)
      branch_fns.append(_cutmix)
    # Pick one of the enabled branches uniformly at random per batch.
    branch_index = tf.random.stateless_uniform(
        shape=[], seed=branch_rng, maxval=len(branch_fns), dtype=tf.int32)
    aug_image, aug_labels = tf.switch_case(branch_index, branch_fns)
    augmented_outputs = {'image': aug_image, 'label': aug_labels}
    origin_outputs = {'image': images, 'label': oh_labels}
    if prob_to_apply == 0:
      return origin_outputs
    elif prob_to_apply < 1.0:
      # Apply the augmented batch with probability prob_to_apply.
      return tf.cond(
          tf.random.stateless_uniform(
              shape=[], seed=apply_rng, dtype=tf.float32) < prob_to_apply,
          lambda: augmented_outputs, lambda: origin_outputs)
    else:
      return augmented_outputs
  return augment_fn
| apache-2.0 |
lixt/lily2-gem5 | src/python/m5/util/orderdict.py | 88 | 2718 | # Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
__all__ = [ 'orderdict' ]
from UserDict import DictMixin
class orderdict(dict, DictMixin):
    """A dict that preserves key insertion order.

    Predates collections.OrderedDict; DictMixin derives the remaining
    mapping API (update, get, setdefault, ...) from the methods below.
    """

    def __init__(self, *args, **kwargs):
        if len(args) > 1:
            raise TypeError("expected at most one argument, got %d" % \
                            len(args))
        self._keys = []
        # update() routes through __setitem__, keeping _keys in sync.
        self.update(*args, **kwargs)

    def __setitem__(self, key, item):
        # Only record a key the first time it is inserted.
        if key not in self:
            self._keys.append(key)
        super(orderdict, self).__setitem__(key, item)

    def __delitem__(self, key):
        super(orderdict, self).__delitem__(key)
        self._keys.remove(key)

    def clear(self):
        super(orderdict, self).clear()
        self._keys = []

    def iterkeys(self):
        for key in self._keys:
            yield key

    def itervalues(self):
        for key in self._keys:
            yield self[key]

    def iteritems(self):
        for key in self._keys:
            yield key, self[key]

    def keys(self):
        return self._keys[:]

    def values(self):
        return [ self[key] for key in self._keys ]

    def items(self):
        # Bug fix: the previous version returned (value, key) tuples, which
        # contradicts iteritems() and the standard mapping contract.
        return [ (key, self[key]) for key in self._keys ]
| bsd-3-clause |
rdeits/director | src/python/ddapp/teleoppanel.py | 6 | 52107 | import PythonQt
from PythonQt import QtCore, QtGui, QtUiTools
import ddapp.applogic as app
import ddapp.objectmodel as om
from ddapp.timercallback import TimerCallback
from ddapp import robotstate
from ddapp import visualization as vis
from ddapp import transformUtils
from ddapp import ikplanner
from ddapp import footstepsdriver
from ddapp import vtkAll as vtk
from ddapp import drcargs
from ddapp import affordanceurdf
import ddapp.applogic as app
import functools
import math
import numpy as np
import types
def addWidgetsToDict(widgets, d):
    """Recursively index widgets (and all their descendants) by objectName.

    Widgets with an empty objectName are not added, but their children are
    still visited.
    """
    for child in widgets:
        name = child.objectName
        if name:
            d[str(name)] = child
        addWidgetsToDict(child.children(), d)
class WidgetDict(object):
    # Exposes every named descendant of the given widgets as an attribute
    # of this object (attribute name == widget objectName).
    def __init__(self, widgets):
        addWidgetsToDict(widgets, self.__dict__)
def clearLayout(w):
    # Destroy every child QWidget of w; PythonQt's delete() destroys the
    # underlying C++ object, not just the Python wrapper.
    children = w.findChildren(QtGui.QWidget)
    for child in children:
        child.delete()
class ConstraintItem(om.ObjectModelItem):
    """Object-model wrapper that exposes an IK constraint's fields as
    editable properties in the UI."""

    def __init__(self, constraint):
        linkStr = '(%s)' % constraint.linkName if hasattr(constraint, 'linkName') else ''
        name = '%s %s' % (type(constraint).__name__, linkStr)
        om.ObjectModelItem.__init__(self, name)
        self.constraint = constraint
        # Mirror each constraint field as an object-model property; numpy
        # arrays and vtk transforms are flattened into plain lists so the
        # property editor can handle them.
        for propertyName, propertyValue in constraint:
            if isinstance(propertyValue, np.ndarray):
                propertyValue = propertyValue.tolist()
            if isinstance(propertyValue, vtk.vtkTransform):
                propertyValue = list(propertyValue.GetPosition()) + list(propertyValue.GetOrientation())
            self.addProperty(propertyName, propertyValue, attributes=om.PropertyAttributes(decimals=3, minimum=-100, maximum=100))

    def _onPropertyChanged(self, propertySet, propertyName):
        # Push edits made in the UI back onto the wrapped constraint object.
        om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName)
        self.constraint.__setattr__(propertyName, propertySet.getProperty(propertyName))
class EndEffectorTeleopPanel(object):
def __init__(self, panel):
    """Wire the end-effector teleop widgets (buttons, constraint combos,
    support checkboxes) to their handler slots."""
    self.panel = panel
    self.ui = panel.ui
    self.ui.eeTeleopButton.connect('clicked()', self.teleopButtonClicked)
    self.ui.planButton.connect('clicked()', self.planClicked)
    self.ui.updateIkButton.connect('clicked()', self.onUpdateIkClicked)
    # Per-limb constraint selection combos.
    self.ui.baseCombo.connect('currentIndexChanged(const QString&)', self.baseComboChanged)
    self.ui.backCombo.connect('currentIndexChanged(const QString&)', self.backComboChanged)
    self.ui.lhandCombo.connect('currentIndexChanged(const QString&)', self.lhandComboChanged)
    self.ui.rhandCombo.connect('currentIndexChanged(const QString&)', self.rhandComboChanged)
    self.ui.lfootCombo.connect('currentIndexChanged(const QString&)', self.lfootComboChanged)
    self.ui.rfootCombo.connect('currentIndexChanged(const QString&)', self.rfootComboChanged)
    # Planning-side support contact checkboxes.
    self.ui.leftFootPlanningSupportCheckbox.connect('toggled(bool)', self.leftFootPlanningSupportCheckboxChanged)
    self.ui.rightFootPlanningSupportCheckbox.connect('toggled(bool)', self.rightFootPlanningSupportCheckboxChanged)
    self.ui.leftHandPlanningSupportCheckbox.connect('toggled(bool)', self.leftHandPlanningSupportCheckboxChanged)
    self.ui.rightHandPlanningSupportCheckbox.connect('toggled(bool)', self.rightHandPlanningSupportCheckboxChanged)
    self.ui.pelvisPlanningSupportCheckbox.connect('toggled(bool)', self.pelvisPlanningSupportCheckboxChanged)
    # Execution-side support contact checkboxes.
    self.ui.leftFootExecutionSupportCheckbox.connect('toggled(bool)', self.leftFootExecutionSupportCheckboxChanged)
    self.ui.rightFootExecutionSupportCheckbox.connect('toggled(bool)', self.rightFootExecutionSupportCheckboxChanged)
    self.ui.leftHandExecutionSupportCheckbox.connect('toggled(bool)', self.leftHandExecutionSupportCheckboxChanged)
    self.ui.rightHandExecutionSupportCheckbox.connect('toggled(bool)', self.rightHandExecutionSupportCheckboxChanged)
    self.ui.pelvisExecutionSupportCheckbox.connect('toggled(bool)', self.pelvisExecutionSupportCheckboxChanged)
    self.ui.executionSupportCheckbox.connect('toggled(bool)', self.executionSupportCheckboxChanged)
    # Offset from the hand link to the palm grasp frame (presumably meters
    # -- confirm against ikplanner.newPalmOffsetGraspToHandFrame).
    self.palmOffsetDistance = 0.0
    self.palmGazeAxis = [0.0, 1.0, 0.0]
    self.constraintSet = None
    #self.ui.interactiveCheckbox.visible = False
    #self.ui.updateIkButton.visible = False
    if 'kneeJointLimits' in drcargs.getDirectorConfig():
        self.kneeJointLimits = drcargs.getDirectorConfig()['kneeJointLimits']
def setComboText(self, combo, text):
    """Select the combo-box entry whose text equals `text` (must exist)."""
    idx = combo.findText(text)
    assert idx >= 0
    combo.setCurrentIndex(idx)
def getComboText(self, combo):
    # Return the current combo selection as a plain Python string.
    return str(combo.currentText)
def setCheckboxState(self, checkbox, state):
    """Set a checkbox's checked state; `state` must be a bool.

    Raises AssertionError for non-bool input (same contract as before).
    """
    # isinstance(state, bool) is equivalent to the old
    # `type(state) is types.BooleanType` check (bool cannot be subclassed)
    # but does not rely on the Python-2-only types.BooleanType alias.
    assert isinstance(state, bool)
    checkbox.checked = state
def getCheckboxState(self, checkbox):
    # Plain boolean read of the checkbox's checked property.
    return checkbox.checked
# --- Constraint combo accessors: get/set the current text of each
# --- end-effector constraint combo box (base, back, hands, feet).
def getBaseConstraint(self):
    return self.getComboText(self.ui.baseCombo)

def setBaseConstraint(self, value):
    return self.setComboText(self.ui.baseCombo, value)

def getBackConstraint(self):
    return self.getComboText(self.ui.backCombo)

def setBackConstraint(self, value):
    return self.setComboText(self.ui.backCombo, value)

def getLHandConstraint(self):
    return self.getComboText(self.ui.lhandCombo)

def setLHandConstraint(self, value):
    return self.setComboText(self.ui.lhandCombo, value)

def getRHandConstraint(self):
    return self.getComboText(self.ui.rhandCombo)

def setRHandConstraint(self, value):
    return self.setComboText(self.ui.rhandCombo, value)

def getLFootConstraint(self):
    return self.getComboText(self.ui.lfootCombo)

def setLFootConstraint(self, value):
    return self.setComboText(self.ui.lfootCombo, value)

def getRFootConstraint(self):
    return self.getComboText(self.ui.rfootCombo)

def setRFootConstraint(self, value):
    return self.setComboText(self.ui.rfootCombo, value)
# --- Support checkbox accessors: planning-side supports first, then
# --- execution-side supports, then the master execution-support toggle.
def getLFootPlanningSupportEnabled(self):
    return self.getCheckboxState(self.ui.leftFootPlanningSupportCheckbox)

def setLFootPlanningSupportEnabled(self, value):
    self.setCheckboxState(self.ui.leftFootPlanningSupportCheckbox, value)

def getRFootPlanningSupportEnabled(self):
    return self.getCheckboxState(self.ui.rightFootPlanningSupportCheckbox)

def setRFootPlanningSupportEnabled(self, value):
    self.setCheckboxState(self.ui.rightFootPlanningSupportCheckbox, value)

def getLHandPlanningSupportEnabled(self):
    return self.getCheckboxState(self.ui.leftHandPlanningSupportCheckbox)

def setLHandPlanningSupportEnabled(self, value):
    self.setCheckboxState(self.ui.leftHandPlanningSupportCheckbox, value)

def getRHandPlanningSupportEnabled(self):
    return self.getCheckboxState(self.ui.rightHandPlanningSupportCheckbox)

def setRHandPlanningSupportEnabled(self, value):
    self.setCheckboxState(self.ui.rightHandPlanningSupportCheckbox, value)

def getPelvisPlanningSupportEnabled(self):
    return self.getCheckboxState(self.ui.pelvisPlanningSupportCheckbox)

def setPelvisPlanningSupportEnabled(self, value):
    self.setCheckboxState(self.ui.pelvisPlanningSupportCheckbox, value)

def getLFootExecutionSupportEnabled(self):
    return self.getCheckboxState(self.ui.leftFootExecutionSupportCheckbox)

def setLFootExecutionSupportEnabled(self, value):
    self.setCheckboxState(self.ui.leftFootExecutionSupportCheckbox, value)

def getRFootExecutionSupportEnabled(self):
    return self.getCheckboxState(self.ui.rightFootExecutionSupportCheckbox)

def setRFootExecutionSupportEnabled(self, value):
    self.setCheckboxState(self.ui.rightFootExecutionSupportCheckbox, value)

def getLHandExecutionSupportEnabled(self):
    return self.getCheckboxState(self.ui.leftHandExecutionSupportCheckbox)

def setLHandExecutionSupportEnabled(self, value):
    self.setCheckboxState(self.ui.leftHandExecutionSupportCheckbox, value)

def getRHandExecutionSupportEnabled(self):
    return self.getCheckboxState(self.ui.rightHandExecutionSupportCheckbox)

def setRHandExecutionSupportEnabled(self, value):
    self.setCheckboxState(self.ui.rightHandExecutionSupportCheckbox, value)

def getPelvisExecutionSupportEnabled(self):
    return self.getCheckboxState(self.ui.pelvisExecutionSupportCheckbox)

def setPelvisExecutionSupportEnabled(self, value):
    self.setCheckboxState(self.ui.pelvisExecutionSupportCheckbox, value)

def getExecutionSupportEnabled(self):
    return self.getCheckboxState(self.ui.executionSupportCheckbox)
# --- Combo-change slots: any constraint selection change rebuilds the
# --- full IK constraint set.
def baseComboChanged(self):
    self.updateConstraints()

def backComboChanged(self):
    self.updateConstraints()

def lhandComboChanged(self):
    self.updateConstraints()

def rhandComboChanged(self):
    self.updateConstraints()

def lfootComboChanged(self):
    self.updateConstraints()

def rfootComboChanged(self):
    self.updateConstraints()
# --- Execution-support slots: clearing an execution support also clears
# --- the matching planning support; the state is mirrored onto the manip
# --- planner and the quasistatic flag recomputed.
def leftFootExecutionSupportCheckboxChanged(self):
    if not self.getLFootExecutionSupportEnabled():
        self.setLFootPlanningSupportEnabled(False)
    self.panel.manipPlanner.leftFootSupportEnabled = self.getLFootExecutionSupportEnabled()
    self.updateQuasistaticFlag()

def rightFootExecutionSupportCheckboxChanged(self):
    if not self.getRFootExecutionSupportEnabled():
        self.setRFootPlanningSupportEnabled(False)
    self.panel.manipPlanner.rightFootSupportEnabled = self.getRFootExecutionSupportEnabled()
    self.updateQuasistaticFlag()

def leftHandExecutionSupportCheckboxChanged(self):
    if not self.getLHandExecutionSupportEnabled():
        self.setLHandPlanningSupportEnabled(False)
    self.panel.manipPlanner.leftHandSupportEnabled = self.getLHandExecutionSupportEnabled()
    self.updateQuasistaticFlag()

def rightHandExecutionSupportCheckboxChanged(self):
    if not self.getRHandExecutionSupportEnabled():
        self.setRHandPlanningSupportEnabled(False)
    self.panel.manipPlanner.rightHandSupportEnabled = self.getRHandExecutionSupportEnabled()
    self.updateQuasistaticFlag()

def pelvisExecutionSupportCheckboxChanged(self):
    if not self.getPelvisExecutionSupportEnabled():
        self.setPelvisPlanningSupportEnabled(False)
    self.panel.manipPlanner.pelvisSupportEnabled = self.getPelvisExecutionSupportEnabled()
    self.updateQuasistaticFlag()

def executionSupportCheckboxChanged(self):
    self.updateQuasistaticFlag()
    self.panel.manipPlanner.setPublishPlansWithSupports(self.getExecutionSupportEnabled())

# --- Planning-support slots: enabling a planning support forces the
# --- matching execution support on, then rebuilds constraints.
def leftFootPlanningSupportCheckboxChanged(self):
    if self.getLFootPlanningSupportEnabled():
        self.setLFootExecutionSupportEnabled(True)
    self.updatePlanningSupports()
    self.updateConstraints()

def rightFootPlanningSupportCheckboxChanged(self):
    if self.getRFootPlanningSupportEnabled():
        self.setRFootExecutionSupportEnabled(True)
    self.updatePlanningSupports()
    self.updateConstraints()

def leftHandPlanningSupportCheckboxChanged(self):
    if self.getLHandPlanningSupportEnabled():
        self.setLHandExecutionSupportEnabled(True)
    self.updatePlanningSupports()
    self.updateConstraints()

def rightHandPlanningSupportCheckboxChanged(self):
    if self.getRHandPlanningSupportEnabled():
        self.setRHandExecutionSupportEnabled(True)
    self.updatePlanningSupports()
    self.updateConstraints()

def pelvisPlanningSupportCheckboxChanged(self):
    if self.getPelvisPlanningSupportEnabled():
        self.setPelvisExecutionSupportEnabled(True)
    self.updatePlanningSupports()
    self.updateConstraints()
def updateQuasistaticFlag(self):
    """Mark plans quasistatic when a hand/pelvis support is active or when
    both feet are supports."""
    handOrPelvisSupport = (self.getLHandExecutionSupportEnabled() or
                           self.getRHandExecutionSupportEnabled() or
                           self.getPelvisExecutionSupportEnabled())
    bothFeetSupport = (self.getLFootExecutionSupportEnabled() and
                       self.getRFootExecutionSupportEnabled())
    self.panel.manipPlanner.plansWithSupportsAreQuasistatic = bool(
        handOrPelvisSupport or bothFeetSupport)
def onGoalFrameModified(self, frame):
    # Live re-solve while dragging goal frames when interactive mode is on.
    if self.constraintSet and self.ui.interactiveCheckbox.checked:
        self.updateIk()

def onUpdateIkClicked(self):
    self.updateIk()

def updateIk(self):
    """Run IK on the current constraint set and display the result pose."""
    endPose, info = self.constraintSet.runIk()
    self.panel.showPose(self.constraintSet.endPose)
    app.displaySnoptInfo(info)

def updateCollisionEnvironment(self):
    # Export collision affordances as URDF for the IK server's world model;
    # clear the environment when there are none.
    affs = self.panel.affordanceManager.getCollisionAffordances()
    if not affs:
        self.panel.ikPlanner.ikServer.clearEnvironment()
    else:
        urdfStr = affordanceurdf.urdfStringFromAffordances(affs)
        self.panel.ikPlanner.ikServer.setEnvironment(urdfStr)

def planClicked(self):
    # Only plan while end-effector teleop mode is active.
    if not self.ui.eeTeleopButton.checked:
        return
    self.updateCollisionEnvironment()
    self.generatePlan()

def generatePlan(self):
    """Plan a trajectory or end-pose goal from the current goal frames."""
    self.updateConstraints()
    if not self.ui.interactiveCheckbox.checked:
        self.updateIk()

    # todo- need an option here
    goalMode = ikplanner.getIkOptions().getProperty('Goal planning mode')
    if goalMode == 1 or ikplanner.getIkOptions().getProperty('Use collision'):
        plan = self.constraintSet.runIkTraj()
    else:
        plan = self.constraintSet.planEndPoseGoal()

    self.panel.showPlan(plan)
def teleopButtonClicked(self):
    if self.ui.eeTeleopButton.checked:
        self.activate()
    else:
        self.deactivate()

def activate(self):
    """Enter teleop mode: check the button without re-triggering the slot,
    then create goal frames and build the initial constraint set."""
    self.ui.eeTeleopButton.blockSignals(True)
    self.ui.eeTeleopButton.checked = True
    self.ui.eeTeleopButton.blockSignals(False)
    self.panel.endEffectorTeleopActivated()

    self.createGoalFrames()
    self.updateConstraints()

def deactivate(self):
    """Leave teleop mode and remove the teleop plan objects."""
    self.ui.eeTeleopButton.blockSignals(True)
    self.ui.eeTeleopButton.checked = False
    self.ui.eeTeleopButton.blockSignals(False)

    self.removePlanFolder()
    self.panel.endEffectorTeleopDeactivated()
@staticmethod
def getGoalFrame(linkName):
    # Goal frames are named '<linkName> constraint frame' (see createGoalFrames).
    return om.findObjectByName('%s constraint frame' % linkName)

def updateGoalFrame(self, linkName, transform):
    """Copy `transform` into the link's goal frame.

    Returns the frame object, or None if the link has no goal frame.
    """
    goalFrame = self.getGoalFrame(linkName)
    if not goalFrame:
        return

    goalFrame.copyFrame(transform)
    return goalFrame
def updatePlanningSupports(self):
    # Mirror the planning-side support checkbox states into the IK planner.
    self.panel.ikPlanner.leftFootSupportEnabled = self.getLFootPlanningSupportEnabled()
    self.panel.ikPlanner.rightFootSupportEnabled = self.getRFootPlanningSupportEnabled()
    self.panel.ikPlanner.leftHandSupportEnabled = self.getLHandPlanningSupportEnabled()
    self.panel.ikPlanner.rightHandSupportEnabled = self.getRHandPlanningSupportEnabled()
    self.panel.ikPlanner.pelvisSupportEnabled = self.getPelvisPlanningSupportEnabled()
def updateConstraints(self):
    """Rebuild the IK constraint set from the current UI selections.

    Reads every constraint combo and support checkbox, creates the matching
    IK constraints for base, back, feet and hands, wires the on-screen goal
    frames into the constraints, and publishes the constraint items under
    the 'ik constraints' object-model folder.
    """
    if not self.ui.eeTeleopButton.checked:
        return

    self.updatePlanningSupports()
    ikPlanner = self.panel.ikPlanner

    startPoseName = 'reach_start'
    startPose = np.array(self.panel.robotStateJointController.q)
    ikPlanner.addPose(startPose, startPoseName)

    # Full humanoid: quasi-static + neck + per-limb constraints.
    if (ikPlanner.fixedBaseArm==False):
        constraints = []
        constraints.append(ikPlanner.createQuasiStaticConstraint())
        constraints.append(ikPlanner.createLockedNeckPostureConstraint(startPoseName))

        if self.getLFootConstraint() == 'fixed':
            constraints.append(ikPlanner.createFixedLinkConstraints(startPoseName, ikPlanner.leftFootLink, tspan=[0.0, 1.0], lowerBound=-0.0001*np.ones(3), upperBound=0.0001*np.ones(3), angleToleranceInDegrees=0.1))
        elif self.getLFootConstraint() == 'constrained':
            constraints.extend(ikPlanner.createSixDofLinkConstraints(startPoseName, ikPlanner.leftFootLink, tspan=[1.0, 1.0]))
        elif self.getLFootConstraint() == 'sliding':
            # Sliding constraints: first two entries are for the left foot.
            constraints.extend(ikPlanner.createSlidingFootConstraints(startPoseName)[:2])

        if self.getRFootConstraint() == 'fixed':
            constraints.append(ikPlanner.createFixedLinkConstraints(startPoseName, ikPlanner.rightFootLink, tspan=[0.0, 1.0], lowerBound=-0.0001*np.ones(3), upperBound=0.0001*np.ones(3), angleToleranceInDegrees=0.1))
        elif self.getRFootConstraint() == 'constrained':
            constraints.extend(ikPlanner.createSixDofLinkConstraints(startPoseName, ikPlanner.rightFootLink, tspan=[1.0, 1.0]))
        elif self.getRFootConstraint() == 'sliding':
            constraints.extend(ikPlanner.createSlidingFootConstraints(startPoseName)[2:])

        if self.getBackConstraint() == 'fixed':
            constraints.append(ikPlanner.createLockedBackPostureConstraint(startPoseName))
            ikPlanner.setBackLocked(True)
        elif self.getBackConstraint() == 'limited':
            constraints.append(ikPlanner.createMovingBackLimitedPostureConstraint())
            ikPlanner.setBackLocked(False)
        elif self.getBackConstraint() == 'free':
            constraints.append(ikPlanner.createMovingBackPostureConstraint())
            ikPlanner.setBackLocked(False)

        if self.getBaseConstraint() == 'fixed':
            constraints.append(ikPlanner.createLockedBasePostureConstraint(startPoseName, lockLegs=False))
            ikPlanner.setBaseLocked(True)
        # NOTE(review): 'constrained' is tested with `if`, not `elif` --
        # preserved as found; confirm whether an elif was intended.
        if self.getBaseConstraint() == 'constrained':
            constraints.extend(ikPlanner.createSixDofLinkConstraints(startPoseName, ikPlanner.pelvisLink, tspan=[1.0, 1.0]))
            ikPlanner.setBaseLocked(False)
        elif self.getBaseConstraint() == 'xyz only':
            constraints.append(ikPlanner.createXYZMovingBasePostureConstraint(startPoseName))
            constraints.append(ikPlanner.createKneePostureConstraint(self.kneeJointLimits))
            ikPlanner.setBaseLocked(False)
        elif self.getBaseConstraint() == 'z only':
            constraints.append(ikPlanner.createZMovingBasePostureConstraint(startPoseName))
            constraints.append(ikPlanner.createKneePostureConstraint(self.kneeJointLimits))
            ikPlanner.setBaseLocked(False)
        elif self.getBaseConstraint() == 'limited':
            constraints.append(ikPlanner.createMovingBaseSafeLimitsConstraint())
            constraints.append(ikPlanner.createKneePostureConstraint(self.kneeJointLimits))
            ikPlanner.setBaseLocked(False)
        elif self.getBaseConstraint() == 'free':
            constraints.append(ikPlanner.createKneePostureConstraint(self.kneeJointLimits))
            ikPlanner.setBaseLocked(False)

    # Remove all except the fixed base constraint if you only have an arm:
    else:
        constraints = []
        constraints.append(ikPlanner.createLockedBasePostureConstraint(startPoseName, lockLegs=False))

    # Footless robot: locked base plus back constraints only.
    if ikPlanner.robotNoFeet == True:
        constraints = []
        constraints.append(ikPlanner.createLockedBasePostureConstraint(startPoseName))
        if self.getBackConstraint() == 'fixed':
            constraints.append(ikPlanner.createLockedBackPostureConstraint(startPoseName))
            ikPlanner.setBackLocked(True)
        elif self.getBackConstraint() == 'limited':
            constraints.append(ikPlanner.createMovingBackLimitedPostureConstraint())
            ikPlanner.setBackLocked(False)
        elif self.getBackConstraint() == 'free':
            constraints.append(ikPlanner.createMovingBackPostureConstraint())
            ikPlanner.setBackLocked(False)

    # Per-hand constraints based on each hand's combo selection.
    for handModel in ikPlanner.handModels:
        side = handModel.side
        if (side == "left"):
            thisHandConstraint = self.getLHandConstraint()
        elif (side == "right"):
            thisHandConstraint = self.getRHandConstraint()

        linkName = ikPlanner.getHandLink(side)
        graspToHand = ikPlanner.newPalmOffsetGraspToHandFrame(side, self.palmOffsetDistance)
        graspToWorld = self.getGoalFrame(linkName)

        p, q = ikPlanner.createPositionOrientationGraspConstraints(side, graspToWorld, graspToHand)
        g = ikPlanner.createGazeGraspConstraint(side, graspToWorld, graspToHand, targetAxis=list(self.palmGazeAxis), bodyAxis=list(self.palmGazeAxis))

        # All hand goal constraints apply at the end of the motion.
        p.tspan = [1.0, 1.0]
        q.tspan = [1.0, 1.0]
        g.tspan = [1.0, 1.0]

        if thisHandConstraint == 'arm fixed':
            if (side == "left"):
                constraints.append(ikPlanner.createLockedLeftArmPostureConstraint(startPoseName))
            elif (side == "right"):
                constraints.append(ikPlanner.createLockedRightArmPostureConstraint(startPoseName))
            ikPlanner.setArmLocked(side,True)
        elif thisHandConstraint == 'ee fixed':
            constraints.extend([p, q])
            ikPlanner.setArmLocked(side,False)
        elif thisHandConstraint == 'position':
            constraints.extend([p])
            ikPlanner.setArmLocked(side,False)
        elif thisHandConstraint == 'gaze':
            constraints.extend([p, g])
            ikPlanner.setArmLocked(side,False)
        elif thisHandConstraint == 'orbit':
            graspToHand = ikPlanner.newPalmOffsetGraspToHandFrame(side, distance=0.07)
            constraints.extend(ikPlanner.createGraspOrbitConstraints(side, graspToWorld, graspToHand))
            constraints[-3].tspan = [1.0, 1.0]
            # With collision checking the last two orbit constraints are
            # active over the second half of the motion.
            if ikPlanner.defaultIkParameters.useCollision:
                constraints[-2].tspan = [0.5, 1.0]
                constraints[-1].tspan = [0.5, 1.0]
            else:
                constraints[-2].tspan = [1.0, 1.0]
                constraints[-1].tspan = [1.0, 1.0]
            ikPlanner.setArmLocked(side,False)
        elif thisHandConstraint == 'free':
            ikPlanner.setArmLocked(side,False)

    # Optional single-arm reach mode set by newReachTeleop.
    if hasattr(self,'reachSide'):
        if self.reachSide == 'left':
            endEffectorName = ikPlanner.handModels[0].handLinkName # 'l_hand'
        else:
            endEffectorName = ikPlanner.handModels[1].handLinkName # 'r_hand'
        constraints.append(ikPlanner.createActiveEndEffectorConstraint(endEffectorName,ikPlanner.getPalmPoint(self.reachSide)))

    self.constraintSet = ikplanner.ConstraintSet(ikPlanner, constraints, 'reach_end', startPoseName)

    handLinks = []
    for handModel in ikPlanner.handModels: handLinks.append(handModel.handLinkName)

    # Wire each non-hand constraint's target onto its goal frame so frame
    # edits update the constraint targets.
    for constraint in constraints:
        if hasattr(constraint, 'linkName') and constraint.linkName in handLinks:
            continue

        if isinstance(constraint, ikplanner.ik.PositionConstraint):
            frameObj = self.getGoalFrame(constraint.linkName)
            if frameObj:
                constraint.referenceFrame = frameObj.transform

        elif isinstance(constraint, ikplanner.ik.QuatConstraint):
            frameObj = self.getGoalFrame(constraint.linkName)
            if frameObj:
                constraint.quaternion = frameObj.transform

        elif isinstance(constraint, ikplanner.ik.WorldGazeDirConstraint):
            frameObj = self.getGoalFrame(constraint.linkName)
            if frameObj:
                constraint.targetFrame = frameObj.transform

    self.onGoalFrameModified(None)

    # Re-publish the constraint items in the object model.
    om.removeFromObjectModel(self.getConstraintFolder())
    folder = self.getConstraintFolder()

    for i, pc in enumerate(constraints):
        constraintItem = ConstraintItem(pc)
        om.addToObjectModel(constraintItem, parentObj=folder)
def addHandMesh(self, handModel, goalFrame):
    """Attach a hand mesh to *goalFrame* and keep the two frames synchronized.

    The mesh is created as a child of the goal frame and a FrameSync is
    stored on the goal frame so that moving either frame moves the other.
    """
    view = self.panel.teleopRobotModel.views[0]
    meshObj = handModel.newPolyData('reach goal left hand', view, parent=goalFrame)
    meshFrame = meshObj.children()[0]
    # Start the mesh frame at the goal frame's current pose.
    meshFrame.copyFrame(goalFrame.transform)
    sync = vis.FrameSync()
    sync.addFrame(goalFrame)
    sync.addFrame(meshFrame)
    # Keep a reference on the goal frame so the sync object stays alive.
    goalFrame.sync = sync
@staticmethod
def removePlanFolder():
    """Delete the 'teleop plan' container (and everything inside it) from the object model."""
    planFolder = om.findObjectByName('teleop plan')
    om.removeFromObjectModel(planFolder)
@staticmethod
def getConstraintFrameFolder():
    """Return (creating it if needed) planning/teleop plan/constraint frames."""
    planningObj = om.findObjectByName('planning')
    planFolder = om.getOrCreateContainer('teleop plan', parentObj=planningObj)
    return om.getOrCreateContainer('constraint frames', parentObj=planFolder)
@staticmethod
def getConstraintFolder():
    """Return (creating it if needed) planning/teleop plan/ik constraints."""
    planningObj = om.findObjectByName('planning')
    planFolder = om.getOrCreateContainer('teleop plan', parentObj=planningObj)
    return om.getOrCreateContainer('ik constraints', parentObj=planFolder)
def createGoalFrames(self):
    """Create editable goal frames at the robot's current pose.

    One frame is made per hand (at the palm-offset grasp pose); robots that
    are neither fixed-base nor footless also get frames for both feet and
    the pelvis. Any previous plan folder is removed first, and every frame
    notifies onGoalFrameModified when edited.
    """
    ikPlanner = self.panel.ikPlanner
    startPose = np.array(self.panel.robotStateJointController.q)
    self.removePlanFolder()
    folder = self.getConstraintFrameFolder()
    for handModel in ikPlanner.handModels:
        side = handModel.side
        linkName = ikPlanner.getHandLink(side)
        frameName = '%s constraint frame' % linkName
        # Grasp frame: palm offset in the hand frame, mapped to world at startPose.
        graspToHand = ikPlanner.newPalmOffsetGraspToHandFrame(side, self.palmOffsetDistance)
        graspToWorld = ikPlanner.newGraspToWorldFrame(startPose, side, graspToHand)
        # Replace any stale frame left over from a previous session.
        om.removeFromObjectModel(om.findObjectByName(frameName))
        frame = vis.showFrame(graspToWorld, frameName, parent=folder, scale=0.2)
        #frame.setProperty('Edit', True)
        frame.connectFrameModified(self.onGoalFrameModified)
        #addHandMesh(handModels[side], frame)
    if not ikPlanner.fixedBaseArm and not ikPlanner.robotNoFeet:
        # Legged robots also get foot and pelvis frames at their current link poses.
        for linkName in [ikPlanner.leftFootLink, ikPlanner.rightFootLink, ikPlanner.pelvisLink]:
            frameName = linkName + ' constraint frame'
            om.removeFromObjectModel(om.findObjectByName(frameName))
            frame = vis.showFrame(ikPlanner.getLinkFrameAtPose(linkName, startPose), frameName, parent=folder, scale=0.2)
            frame.connectFrameModified(self.onGoalFrameModified)
def newReachTeleop(self, frame, side, reachTargetObject=None):
    '''
    Start a reach teleop toward *frame* with the given hand ('left'/'right').

    reachTarget is the object we are reaching to. For some types of plans
    this object may be treated in a special way, for example, when doing
    planning with collision avoidance.

    Returns the result of updateGoalFrame for the reaching hand's link.
    '''
    self.deactivate()
    self.panel.jointTeleop.deactivate()
    # Default constraint set: base may translate, back limited,
    # both feet fixed, both arms locked...
    self.setBaseConstraint('xyz only')
    self.setBackConstraint('limited')
    self.setLFootConstraint('fixed')
    self.setRFootConstraint('fixed')
    self.setLHandConstraint('arm fixed')
    self.setRHandConstraint('arm fixed')
    # ...then free the reaching hand's end effector.
    # NOTE(review): both branches of each useCollision test below set the
    # same value ('ee fixed'); this looks like leftover code from when the
    # collision mode used a different constraint -- confirm before simplifying.
    if side == 'left':
        if self.panel.ikPlanner.defaultIkParameters.useCollision:
            self.setLHandConstraint('ee fixed')
        else:
            self.setLHandConstraint('ee fixed')
    elif side == 'right':
        if self.panel.ikPlanner.defaultIkParameters.useCollision:
            self.setRHandConstraint('ee fixed')
        else:
            self.setRHandConstraint('ee fixed')
    self.reachTargetObject = reachTargetObject
    self.reachSide = side
    self.activate()
    return self.updateGoalFrame(self.panel.ikPlanner.getHandLink(side), frame)
class PosturePlanShortcuts(object):
    """Registers keyboard shortcuts for common posture plans."""

    def __init__(self, jointController, ikPlanner, widget=None):
        self.jointController = jointController
        self.ikPlanner = ikPlanner
        target = widget or app.getMainWindow()
        # Shortcut table: key sequence -> plan callback.
        shortcuts = [
            ('Ctrl+Shift+S', self.planStand),
            ('Ctrl+Shift+N', self.planNominal),
            ('Ctrl+Shift+L', functools.partial(self.planPreGrasp, 'left')),
            ('Ctrl+Shift+R', functools.partial(self.planPreGrasp, 'right')),
        ]
        for keySequence, callback in shortcuts:
            app.addShortcut(target, keySequence, callback)

    def planStand(self):
        """Plan a move to the standing posture from the current joint state."""
        self.ikPlanner.computeStandPlan(self.jointController.q)

    def planNominal(self):
        """Plan a move to the nominal posture from the current joint state."""
        self.ikPlanner.computeNominalPlan(self.jointController.q)

    def planPreGrasp(self, side):
        """Plan to the stored 'arm up pregrasp' posture for the given side."""
        currentPose = self.jointController.q
        goalPose = self.ikPlanner.getMergedPostureFromDatabase(currentPose, 'General', 'arm up pregrasp', side=side)
        self.ikPlanner.computePostureGoal(currentPose, goalPose)
class JointLimitChecker(object):
    """Monitors sensed joint positions against the robot model's joint limits.

    When started, polls the sensed pose about once per second and surfaces
    violations via a status-bar warning button and an optional dialog.
    """
    def __init__(self, robotModel, sensorJointController):
        self.robotModel = robotModel
        self.sensorJointController = sensorJointController
        # Limit arrays indexed consistently with robotstate.getDrakePoseJointNames().
        self.jointLimitsMin = np.array([self.robotModel.model.getJointLimits(jointName)[0] for jointName in robotstate.getDrakePoseJointNames()])
        self.jointLimitsMax = np.array([self.robotModel.model.getJointLimits(jointName)[1] for jointName in robotstate.getDrakePoseJointNames()])
        self.joints = robotstate.matchJoints('^(?!base_)') # all but base joints
        # Margin added to a violated limit when the user accepts an update.
        self.inflationAmount = np.radians(0.3)
        self.timer = TimerCallback(targetFps=1)
        self.timer.callback = self.update
        # Status-bar button, created lazily on the first violation.
        self.warningButton = None
        # Menu action, created by setupMenuAction(); None until then.
        self.action = None
def update(self):
    """Timer callback: refresh the status-bar warning from the current limit check."""
    violations = self.checkJointLimits()
    if not violations:
        self.clearStatusBarWarning()
    else:
        self.notifyUserStatusBar(violations)
def start(self):
    """Begin periodic limit checking, syncing the menu action if it exists.

    Bug fix: setupMenuAction() may not have been called yet, in which case
    self.action is still None and the original code raised AttributeError.
    """
    if self.action is not None:
        self.action.checked = True
    self.timer.start()
def stop(self):
    """Stop periodic limit checking, syncing the menu action if it exists.

    Bug fix: guard against self.action being None when setupMenuAction()
    was never called (previously an AttributeError).
    """
    if self.action is not None:
        self.action.checked = False
    self.timer.stop()
def setupMenuAction(self):
    """Install a checkable 'Joint Limit Checker' entry in the Tools menu."""
    self.action = app.addMenuAction('Tools', 'Joint Limit Checker')
    self.action.setCheckable(True)
    # Reflect whether the checker is already running.
    self.action.checked = self.timer.isActive()
    self.action.connect('triggered()', self.onActionChanged)

def onActionChanged(self):
    """Start or stop the checker to match the menu action's checked state."""
    if self.action.checked:
        self.start()
    else:
        self.stop()
def clearStatusBarWarning(self):
    """Remove the status-bar warning button, if one is currently showing."""
    button = self.warningButton
    if button:
        button.deleteLater()
        self.warningButton = None
def notifyUserStatusBar(self, limitData):
    """Show a red 'Joint Limit Warning' button in the status bar (at most one).

    Clicking the button re-checks the limits, shows the detail dialog if
    violations remain, and removes the button.
    """
    if self.warningButton:
        return  # warning already visible
    def showDialog():
        # Re-check at click time; the violation may have cleared since.
        limitData = self.checkJointLimits()
        if limitData:
            self.notifyUserDialog(limitData)
        self.clearStatusBarWarning()
    self.warningButton = QtGui.QPushButton('Joint Limit Warning')
    self.warningButton.setStyleSheet("background-color:red")
    self.warningButton.connect('clicked()', showDialog)
    app.getMainWindow().statusBar().insertPermanentWidget(0, self.warningButton)
def notifyUserDialog(self, limitData):
    """Pop up a warning dialog listing joint-limit violations.

    If the user accepts, the violated limits (inflated by a small margin)
    are pushed to the IK server and mirrored in the local limit arrays;
    if the user declines, the checker is stopped.

    Bug fix: corrected typos in the user-facing text
    ('limts' -> 'limits', 'Would to like' -> 'Would you like').
    """
    message = '\n'.join(['%s by %.2f degrees' % (name, np.degrees(epsilon)) for name, epsilon in limitData])
    message = 'The following joints have been detected to exceed joint limits specified by the model:\n\n' + message + '\n\n'
    message += 'Would you like to update the joint limits used by the planning robot model? If you select no '\
               'then the joint limit checker will be disabled (use the Tools menu to re-enable).'
    choice = QtGui.QMessageBox.warning(app.getMainWindow(), 'Joint Limit Exceeded', message,
                                       QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
                                       QtGui.QMessageBox.Yes)
    if choice == QtGui.QMessageBox.No:
        self.stop()
    else:
        # Inflate each epsilon slightly so the updated limit gains some margin.
        limitData = [(jointName, epsilon+np.sign(epsilon)*self.inflationAmount) for jointName, epsilon in limitData]
        # Update limits on the IK server...
        panel.ikPlanner.ikServer.updateJointLimits(limitData)
        # ...and mirror them in the local arrays used by checkJointLimits.
        for jointName, epsilon in limitData:
            limitsArray = self.jointLimitsMin if epsilon < 0 else self.jointLimitsMax
            limitsArray[self.toJointIndex(jointName)] += epsilon
def checkJointLimits(self):
    """Return [(jointName, epsilon), ...] for joints outside their limits.

    epsilon is the signed overshoot in radians: negative when below the
    minimum, positive when above the maximum. An empty list means every
    checked joint is within range.
    """
    violations = []
    for jointName in self.joints:
        idx = self.toJointIndex(jointName)
        position = self.sensorJointController.q[idx]
        lower, upper = self.jointLimitsMin[idx], self.jointLimitsMax[idx]
        if not (lower <= position <= upper):
            overshoot = position - np.clip(position, lower, upper)
            violations.append((jointName, overshoot))
    return violations
def toJointIndex(self, jointName):
    """Map a joint name to its index in the Drake pose joint ordering."""
    jointNames = robotstate.getDrakePoseJointNames()
    return jointNames.index(jointName)
class GeneralEndEffectorTeleopPanel(object):
    """Minimal end-effector IK teleop widget driven by the director config.

    Offers an end-effector selector plus start/end/plan buttons; link names,
    grasp offset and fixed-joint patterns come from the 'endEffectorConfig'
    section of the director configuration.
    """
    def __init__(self, ikPlanner, teleopPanel, robotStateModel, robotStateJointController):
        self.ikPlanner = ikPlanner
        self.teleopPanel = teleopPanel
        self.robotStateModel = robotStateModel
        self.robotStateJointController = robotStateJointController
        self.widget = QtGui.QWidget()
        l = QtGui.QVBoxLayout(self.widget)
        h = QtGui.QHBoxLayout()
        l.addLayout(h)
        h.addWidget(QtGui.QLabel('End effector:'))
        self.endEffectorCombo = QtGui.QComboBox()
        h.addWidget(self.endEffectorCombo)
        def addButton(name, func):
            # Helper: append a push button wired to *func* to the vertical layout.
            b = QtGui.QPushButton(name)
            b.connect('clicked()', func)
            l.addWidget(b)
        addButton('start ik', self.startIk)
        addButton('end ik', self.endIk)
        addButton('plan', self.planIk)
        config = drcargs.getDirectorConfig()['endEffectorConfig']
        self.endEffectorLinkNames = config['endEffectorLinkNames']
        # np.degrees here implies the config stores RPY in radians while
        # frameFromPositionAndRPY takes degrees -- confirm against config files.
        self.graspOffsetFrame = transformUtils.frameFromPositionAndRPY(config['graspOffsetFrame'][0], np.degrees(config['graspOffsetFrame'][1]))
        self.fixedJoints = config['fixedJoints']
        for linkName in self.endEffectorLinkNames:
            self.endEffectorCombo.addItem(linkName)
def planIk(self):
    """Plan a trajectory from the current robot state to the active IK goal
    and display it.

    Requires that startIk() has already created self.constraintSet.
    (Removed the unused local 'endPoseName'; the constraint set already
    carries its end-pose name.)
    """
    startPoseName = 'reach_start'
    startPose = np.array(self.robotStateJointController.q)
    self.ikPlanner.addPose(startPose, startPoseName)
    plan = self.constraintSet.runIkTraj()
    self.teleopPanel.showPlan(plan)
def endIk(self):
    """Exit IK teleop: hide the teleop model and clear the plan folder."""
    self.teleopPanel.hideTeleopModel()
    EndEffectorTeleopPanel.removePlanFolder()
def startIk(self, reachGoal=None):
    """Begin interactive IK for the selected end effector.

    Creates an editable 'reach goal' frame (defaulting to the end effector's
    current grasp pose), builds posture and pose constraints, and re-solves
    IK every time the goal frame is edited, displaying the resulting pose.

    Bug fix: the local callback onGoalFrameModified required a 'frame'
    argument but was also invoked with no arguments for the initial solve,
    which raised TypeError; the parameter now defaults to None.
    """
    EndEffectorTeleopPanel.removePlanFolder()
    ikPlanner = self.ikPlanner
    startPoseName = 'reach_start'
    endPoseName = 'reach_end'
    startPose = np.array(self.robotStateJointController.q)
    ikPlanner.addPose(startPose, startPoseName)
    endEffectorLinkName = str(self.endEffectorCombo.currentText)
    if reachGoal is None:
        # Default goal: current end-effector pose composed with the grasp offset.
        endEffectorLinkFrame = self.robotStateModel.getLinkFrame(endEffectorLinkName)
        assert endEffectorLinkFrame is not None
        graspToWorld = vtk.vtkTransform()
        graspToWorld.PostMultiply()
        graspToWorld.Concatenate(self.graspOffsetFrame)
        graspToWorld.Concatenate(endEffectorLinkFrame)
        reachGoal = graspToWorld
    om.removeFromObjectModel('reach goal')
    goalFrame = vis.showFrame(reachGoal, 'reach goal', scale=0.1, parent=EndEffectorTeleopPanel.getConstraintFrameFolder())
    goalFrame.setProperty('Edit', True)
    constraints = []
    # Hold every configured fixed-joint group at the start posture.
    for pattern in self.fixedJoints:
        constraints.append(ikPlanner.createPostureConstraint(startPoseName, robotstate.matchJoints(pattern)))
    # Exact position + orientation constraint on the end effector at the goal,
    # enforced only at the end of the trajectory (tspan [1, 1]).
    constraints.extend(ikPlanner.createPositionOrientationConstraint(endEffectorLinkName, goalFrame, self.graspOffsetFrame, positionTolerance=0.0, angleToleranceInDegrees=0.0))
    constraints[-1].tspan = [1.0, 1.0]
    constraints[-2].tspan = [1.0, 1.0]
    self.constraintSet = ikplanner.ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
    def onGoalFrameModified(frame=None):
        # Re-solve IK whenever the user drags the goal frame (frame unused).
        endPose, info = self.constraintSet.runIk()
        self.teleopPanel.showPose(self.constraintSet.endPose)
        app.displaySnoptInfo(info)
    goalFrame.connectFrameModified(onGoalFrameModified)
    onGoalFrameModified()
    # Expose the constraints in the object model for inspection.
    folder = EndEffectorTeleopPanel.getConstraintFolder()
    for constraint in constraints:
        constraintItem = ConstraintItem(constraint)
        om.addToObjectModel(constraintItem, parentObj=folder)
class JointTeleopPanel(object):
    """Slider-based joint teleop.

    Builds one tab of sliders per joint group and tracks user edits in
    self.userJoints (joint index -> absolute joint value). Base sliders are
    artificially limited to a small range around the current stance.
    """
    def __init__(self, panel, jointGroups=None):
        self.panel = panel
        self.ui = panel.ui
        self.ui.jointTeleopButton.connect('clicked()', self.teleopButtonClicked)
        self.ui.resetJointsButton.connect('clicked()', self.resetButtonClicked)
        self.ui.planButton.connect('clicked()', self.planClicked)
        self.timerCallback = TimerCallback()
        self.timerCallback.callback = self.onTimerCallback
        # Limit arrays indexed like robotstate.getDrakePoseJointNames().
        self.jointLimitsMin = np.array([self.panel.teleopRobotModel.model.getJointLimits(jointName)[0] for jointName in robotstate.getDrakePoseJointNames()])
        self.jointLimitsMax = np.array([self.panel.teleopRobotModel.model.getJointLimits(jointName)[1] for jointName in robotstate.getDrakePoseJointNames()])
        # this need to be generalized
        if 'baseZJointLimits' in drcargs.getDirectorConfig():
            baseZLimits = drcargs.getDirectorConfig()['baseZJointLimits']
        else: # TODO generalise so the base sliders are deactivated
            baseZLimits = [-0.1, 0.1]
        # Clamp the six floating-base DOFs (x, y, z, roll, pitch, yaw) to a
        # small range so the sliders stay usable.
        self.jointLimitsMin[0:6] = [-0.25, -0.25, baseZLimits[0], -math.radians(20), -math.radians(20), -math.radians(20)]
        self.jointLimitsMax[0:6] = [ 0.25 , 0.25, baseZLimits[1], math.radians(20), math.radians(20), math.radians(20)]
        if jointGroups is None:
            # Add only these joint groups:
            telopJointGroupNames = ['Back', 'Base', 'Left Arm', 'Right Arm', 'Neck']
            allJointGroups = drcargs.getDirectorConfig()['teleopJointGroups']
            jointGroups = []
            for jointGroup in allJointGroups:
                if jointGroup['name'] in telopJointGroupNames:
                    jointGroups.append( jointGroup )
        self.jointGroups = jointGroups
        self.buildTabWidget(jointGroups)
        self.startPose = None
        self.endPose = None
        # joint index -> user-requested absolute joint value
        self.userJoints = {}
        self.updateWidgetState()
def buildTabWidget(self, jointGroups):
self.slidersMap = {}
self.labelMap = {}
for group in jointGroups:
groupName = group['name']
joints = group['joints']
labels = group['labels']
if len(labels) != len(joints):
print 'error, joints/labels mismatch for joint group:', name
continue
jointGroupWidget = QtGui.QWidget()
gridLayout = QtGui.QGridLayout(jointGroupWidget)
gridLayout.setColumnStretch(0, 1)
for jointName, labelText in zip(joints, labels):
label = QtGui.QLabel(labelText)
numericLabel = QtGui.QLabel('0.0')
slider = QtGui.QSlider(QtCore.Qt.Vertical)
column = gridLayout.columnCount()
gridLayout.addWidget(label, 0, column)
gridLayout.addWidget(slider, 1, column)
gridLayout.addWidget(numericLabel, 2, column)
self.slidersMap[jointName] = slider
self.labelMap[slider] = numericLabel
gridLayout.setColumnStretch(gridLayout.columnCount(), 1)
self.ui.tabWidget.addTab(jointGroupWidget, groupName)
self.signalMapper = QtCore.QSignalMapper()
self.sliderMax = 1000.0
for jointName, slider in self.slidersMap.iteritems():
slider.connect('valueChanged(int)', self.signalMapper, 'map()')
self.signalMapper.setMapping(slider, jointName)
slider.setMaximum(self.sliderMax)
self.signalMapper.connect('mapped(const QString&)', self.sliderChanged)
def planClicked(self):
    """'Plan' button: recompute the end pose and generate a posture plan."""
    if not self.ui.jointTeleopButton.checked:
        return  # joint teleop is not active
    self.computeEndPose()
    self.generatePlan()

def generatePlan(self):
    """Compute and display a posture plan from startPose to endPose.

    If the user edited any base_* joint, the plan is asked to keep the
    feet on the ground (feetOnGround=True).
    """
    hasBase = False
    for jointIndex, jointValue in self.userJoints.iteritems():
        if self.toJointName(jointIndex).startswith('base_'):
            hasBase = True
    plan = self.panel.ikPlanner.computePostureGoal(self.startPose, self.endPose, feetOnGround=hasBase)
    self.panel.showPlan(plan)

def teleopButtonClicked(self):
    """Toggle handler for the joint teleop button."""
    if self.ui.jointTeleopButton.checked:
        self.activate()
    else:
        self.deactivate()
def activate(self):
    """Enter joint-teleop mode: stop the refresh timer and reset to the current pose."""
    self.timerCallback.stop()
    self.panel.jointTeleopActivated()
    self.resetPose()
    self.updateWidgetState()

def deactivate(self):
    """Leave joint-teleop mode, unchecking the button without re-triggering its handler."""
    # Block signals so setting 'checked' does not re-enter teleopButtonClicked.
    self.ui.jointTeleopButton.blockSignals(True)
    self.ui.jointTeleopButton.checked = False
    self.ui.jointTeleopButton.blockSignals(False)
    self.timerCallback.stop()
    self.panel.jointTeleopDeactivated()
    self.updateWidgetState()

def updateWidgetState(self):
    """Enable/disable the sliders to match the teleop button state.

    While teleop is inactive, the timer keeps the sliders tracking the
    robot's actual pose.
    """
    enabled = self.ui.jointTeleopButton.checked
    for slider in self.slidersMap.values():
        slider.setEnabled(enabled)
    self.ui.resetJointsButton.setEnabled(enabled)
    if not enabled:
        self.timerCallback.start()

def resetButtonClicked(self):
    """'Reset' button: discard user edits and show the resulting pose."""
    self.resetPose()
    self.panel.showPose(self.endPose)

def resetPose(self):
    """Discard user joint edits and re-sync the sliders with the current robot pose."""
    self.userJoints = {}
    self.computeEndPose()
    self.updateSliders()

def onTimerCallback(self):
    """Periodic refresh while teleop is inactive; skipped when the tab is hidden."""
    if not self.ui.tabWidget.visible:
        return
    self.resetPose()
def toJointIndex(self, jointName):
    """Map a joint name to its index in the Drake pose joint ordering."""
    jointNames = robotstate.getDrakePoseJointNames()
    return jointNames.index(jointName)

def toJointName(self, jointIndex):
    """Map a Drake pose index back to its joint name."""
    jointNames = robotstate.getDrakePoseJointNames()
    return jointNames[jointIndex]
def toJointValue(self, jointIndex, sliderValue):
    """Map a normalized slider value in [0, 1] onto the joint's limit range."""
    assert 0.0 <= sliderValue <= 1.0
    lower = self.jointLimitsMin[jointIndex]
    upper = self.jointLimitsMax[jointIndex]
    return lower + (upper - lower)*sliderValue

def toSliderValue(self, jointIndex, jointValue):
    """Map a joint value onto [0, 1] within its limit range.

    Values outside the limits produce results outside [0, 1].
    """
    lower = self.jointLimitsMin[jointIndex]
    upper = self.jointLimitsMax[jointIndex]
    return (jointValue - lower) / (upper - lower)
def getSlider(self, joint):
    """Return the slider widget for *joint*, given either a name or an index."""
    if isinstance(joint, int):
        joint = self.toJointName(joint)
    return self.slidersMap[joint]
def computeBaseJointOffsets(self):
    """Store base-joint offsets (feet midpoint position and yaw) on self.

    Slider values for base_* joints are expressed relative to this frame.
    Note: the result is stored in self.baseJointOffsets; the method
    itself returns None.
    """
    baseReferenceFrame = footstepsdriver.FootstepsDriver.getFeetMidPoint(self.panel.ikPlanner.getRobotModelAtPose(self.startPose))
    baseReferenceWorldPos = np.array(baseReferenceFrame.GetPosition())
    baseReferenceWorldYaw = math.radians(baseReferenceFrame.GetOrientation()[2])
    self.baseJointOffsets = {
        'base_x'   : baseReferenceWorldPos[0],
        'base_y'   : baseReferenceWorldPos[1],
        'base_z'   : baseReferenceWorldPos[2],
        'base_yaw' : baseReferenceWorldYaw,
        }
def computeEndPose(self):
    """Compute self.endPose: the current robot pose overlaid with user edits.

    If any base joint was edited, the pose is additionally solved through
    the IK server with fixed-feet and quasi-static constraints so the
    result is a feasible, balanced configuration.
    """
    self.startPose = np.array(self.panel.robotStateJointController.q)
    self.endPose = self.startPose.copy()
    hasBase = False
    for jointIndex, jointValue in self.userJoints.iteritems():
        jointName = self.toJointName(jointIndex)
        self.endPose[jointIndex] = jointValue
        if jointName.startswith('base_'):
            hasBase = True
    if hasBase:
        ikPlanner = self.panel.ikPlanner
        startPoseName = 'posture_goal_start'
        ikPlanner.addPose(self.startPose, startPoseName)
        endPoseName = 'posture_goal_end'
        ikPlanner.addPose(self.endPose, endPoseName)
        jointNamesAll = self.slidersMap.keys()
        # remove leg joints
        jointNames = []
        for name in jointNamesAll:
            if not 'leg' in name:
                jointNames.append(name)
        # uncomment to constraint only joints adjusted by user
        #jointNames = [self.toJointName(jointIndex) for jointIndex in sorted(self.userJoints.keys())]
        p = ikPlanner.createPostureConstraint(endPoseName, jointNames)
        constraints = [p]
        constraints.extend(ikPlanner.createFixedFootConstraints(startPoseName))
        constraints.append(ikPlanner.createQuasiStaticConstraint())
        # NOTE(review): seedPostureName='q_end' is not a pose added here --
        # presumably a name maintained by the IK server; confirm.
        self.endPose, info = ikPlanner.ikServer.runIk(constraints, ikPlanner.defaultIkParameters, nominalPostureName=startPoseName, seedPostureName='q_end')
        app.displaySnoptInfo(info)
def getJointValue(self, jointIndex):
    """Return the value of a joint in the currently computed end pose."""
    return self.endPose[jointIndex]

def sliderChanged(self, jointName):
    """Slider handler: record the user's value and recompute/display the pose.

    Base-joint sliders are relative to the feet-midpoint frame, so the
    corresponding offset is added back before storing the absolute value.
    """
    slider = self.slidersMap[jointName]
    jointIndex = self.toJointIndex(jointName)
    jointValue = self.toJointValue(jointIndex, slider.value / float(self.sliderMax))
    self.userJoints[jointIndex] = jointValue
    if jointName.startswith('base_'):
        self.computeBaseJointOffsets()
        self.userJoints[jointIndex] += self.baseJointOffsets.get(jointName, 0.0)
    self.computeEndPose()
    self.panel.showPose(self.endPose)
    self.updateLabel(jointName, jointValue)

def updateLabel(self, jointName, jointValue):
    """Show the joint value next to its slider: meters for base x/y/z, degrees otherwise."""
    slider = self.slidersMap[jointName]
    label = self.labelMap[slider]
    if jointName in ['base_x', 'base_y', 'base_z']:
        label.text = str('%.3f' % jointValue).center(5, ' ')
    else:
        label.text = str('%.1f' % math.degrees(jointValue)).center(5, ' ')
def updateSliders(self):
    """Refresh every slider and numeric label from the current end pose.

    Base-joint values are displayed relative to the feet-midpoint frame,
    so base offsets are subtracted before converting to slider units.

    Bug fix: computeBaseJointOffsets() returns None (it stores its result
    on self), so the memoizing local 'baseJointOffsets' never bound and
    the offsets were recomputed for every base joint. Compute them once
    and reuse the stored dict.
    """
    baseJointOffsets = None
    for jointName, slider in self.slidersMap.iteritems():
        jointIndex = self.toJointIndex(jointName)
        jointValue = self.getJointValue(jointIndex)
        if (self.panel.ikPlanner.fixedBaseArm==False):
            if jointName.startswith('base_'):
                if baseJointOffsets is None:
                    self.computeBaseJointOffsets()
                    baseJointOffsets = self.baseJointOffsets
                jointValue -= baseJointOffsets.get(jointName, 0.0)
        # Block signals so programmatic updates do not trigger sliderChanged.
        slider.blockSignals(True)
        slider.setValue(self.toSliderValue(jointIndex, jointValue)*self.sliderMax)
        slider.blockSignals(False)
        self.updateLabel(jointName, jointValue)
class TeleopPanel(object):
    """Top-level teleop panel combining end-effector and joint teleop sub-panels."""
    def __init__(self, robotStateModel, robotStateJointController, teleopRobotModel, teleopJointController, ikPlanner, manipPlanner, affordanceManager, showPlanFunction, hidePlanFunction):
        """Load the Qt UI and wire the sub-panels.

        When the director config declares an 'endEffectorConfig', the stock
        end-effector frame is hidden and replaced by the generic
        end-effector teleop widget.
        """
        self.robotStateModel = robotStateModel
        self.robotStateJointController = robotStateJointController
        self.teleopRobotModel = teleopRobotModel
        self.teleopJointController = teleopJointController
        self.ikPlanner = ikPlanner
        self.manipPlanner = manipPlanner
        self.affordanceManager = affordanceManager
        self.showPlanFunction = showPlanFunction
        self.hidePlanFunction = hidePlanFunction
        # Hide the teleop preview once a plan has been committed for execution.
        manipPlanner.connectPlanCommitted(self.onPlanCommitted)
        loader = QtUiTools.QUiLoader()
        uifile = QtCore.QFile(':/ui/ddTeleopPanel.ui')
        assert uifile.open(uifile.ReadOnly)
        self.widget = loader.load(uifile)
        uifile.close()
        self.ui = WidgetDict(self.widget.children())
        self.ui.postureDatabaseButton.connect('clicked()', self.onPostureDatabaseClicked)
        self.endEffectorTeleop = EndEffectorTeleopPanel(self)
        self.jointTeleop = JointTeleopPanel(self)
        if 'endEffectorConfig' in drcargs.getDirectorConfig():
            self.ui.endEffectorTeleopFrame.setVisible(False)
            self.generalEndEffectorTeleopPanel = GeneralEndEffectorTeleopPanel(ikPlanner, self, robotStateModel, robotStateJointController)
            self.widget.layout().addWidget(self.generalEndEffectorTeleopPanel.widget, 0, 0, 1, 2)
        PythonQt.dd.ddGroupBoxHider(self.ui.paramsContainer)
def onPostureDatabaseClicked(self):
    """Open the robot pose GUI wired to the current state/teleop controllers."""
    ikplanner.RobotPoseGUIWrapper.initCaptureMethods(self.robotStateJointController, self.teleopJointController)
    ikplanner.RobotPoseGUIWrapper.show()

def disableJointTeleop(self):
    """Grey out the joint-teleop frame."""
    self.ui.jointTeleopFrame.setEnabled(False)

def disableEndEffectorTeleop(self):
    """Grey out the end-effector-teleop frame."""
    self.ui.endEffectorTeleopFrame.setEnabled(False)

def jointTeleopActivated(self):
    """Joint teleop took over: disable the competing end-effector panel."""
    self.disableEndEffectorTeleop()

def endEffectorTeleopActivated(self):
    """End-effector teleop took over: disable the competing joint panel."""
    self.disableJointTeleop()

def endEffectorTeleopDeactivated(self):
    """End-effector teleop finished: hide the preview and re-enable both panels."""
    self.hideTeleopModel()
    self.enablePanels()

def jointTeleopDeactivated(self):
    """Joint teleop finished: hide the preview and re-enable both panels."""
    self.hideTeleopModel()
    self.enablePanels()

def enablePanels(self):
    """Re-enable both teleop frames."""
    self.ui.endEffectorTeleopFrame.setEnabled(True)
    self.ui.jointTeleopFrame.setEnabled(True)
def onPlanCommitted(self, plan):
    """A plan was committed for execution: stop previewing the teleop model."""
    self.hideTeleopModel()

def hideTeleopModel(self):
    """Show only the real robot state model, fully opaque."""
    self.teleopRobotModel.setProperty('Visible', False)
    self.robotStateModel.setProperty('Visible', True)
    self.robotStateModel.setProperty('Alpha', 1.0)

def showTeleopModel(self):
    """Show the teleop preview model with the real state ghosted behind it."""
    self.teleopRobotModel.setProperty('Visible', True)
    self.robotStateModel.setProperty('Visible', True)
    self.robotStateModel.setProperty('Alpha', 0.1)

def showPose(self, pose):
    """Preview *pose* on the teleop model, hiding any displayed plan."""
    self.teleopJointController.setPose('teleop_pose', pose)
    self.hidePlanFunction()
    self.showTeleopModel()

def showPlan(self, plan):
    """Display *plan* via the plan playback function, hiding the teleop preview."""
    self.hideTeleopModel()
    self.showPlanFunction(plan)
def extendJointLimitsForTesting(teleopPanel, jointLimitChecker):
    """Widen joint limits for testing.

    Adds +/-3 degrees of travel to the joint-teleop sliders and pushes
    +/-4 degree limit extensions for every checked joint to the IK server.
    """
    jointTeleop = teleopPanel.jointTeleop
    sliderPad = np.deg2rad(3.0)
    # In-place so any existing references to the limit arrays see the change.
    jointTeleop.jointLimitsMin -= sliderPad
    jointTeleop.jointLimitsMax += sliderPad
    plannerPad = np.deg2rad(4.0)
    for delta in (-plannerPad, plannerPad):
        limitData = [(name, delta) for name in jointLimitChecker.joints]
        teleopPanel.ikPlanner.ikServer.updateJointLimits(limitData)
def _getAction():
    """Return the toolbar action that toggles the teleop panel dock."""
    actions = app.getToolBarActions()
    return actions['ActionTeleopPanel']
def init(robotStateModel, robotStateJointController, teleopRobotModel, teleopJointController, debrisPlanner, manipPlanner, affordanceManager, showPlanFunction, hidePlanFunction):
    """Create the module-global TeleopPanel and add it as a hidden dock.

    Note: the 'debrisPlanner' argument is forwarded to TeleopPanel's
    'ikPlanner' parameter (the names differ but the positions line up).
    """
    global panel
    global dock
    panel = TeleopPanel(robotStateModel, robotStateJointController, teleopRobotModel, teleopJointController, debrisPlanner, manipPlanner, affordanceManager, showPlanFunction, hidePlanFunction)
    dock = app.addWidgetToDock(panel.widget, action=_getAction())
    dock.hide()
    return panel
| bsd-3-clause |
srimai/odoo | addons/stock/wizard/stock_return_picking.py | 218 | 10043 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class stock_return_picking_line(osv.osv_memory):
    """One product line of the return-picking wizard.

    Each line proposes a product/quantity to send back, linked to the stock
    move of the original picking that it reverses.
    """
    _name = "stock.return.picking.line"
    _rec_name = 'product_id'
    _columns = {
        'product_id': fields.many2one('product.product', string="Product", required=True),
        'quantity': fields.float("Quantity", digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
        # Back-reference to the parent wizard record.
        'wizard_id': fields.many2one('stock.return.picking', string="Wizard"),
        # The original move being (partially) returned.
        'move_id': fields.many2one('stock.move', "Move"),
        'lot_id': fields.many2one('stock.production.lot', 'Serial Number', help="Used to choose the lot/serial number of the product returned"),
    }
class stock_return_picking(osv.osv_memory):
    """Wizard that creates a reverse (return) picking for a done picking."""
    _name = 'stock.return.picking'
    _description = 'Return Picking'
    _columns = {
        # One editable line per original move that can still be returned.
        'product_return_moves': fields.one2many('stock.return.picking.line', 'wizard_id', 'Moves'),
        'move_dest_exists': fields.boolean('Chained Move Exists', readonly=True, help="Technical field used to hide help tooltip if not needed"),
    }
def default_get(self, cr, uid, fields, context=None):
    """
    To get default values for the object.

    Pre-fills one wizard line per move of the picking being returned, with
    the quantity still available for return (quants moved by the original
    picking, minus anything already reserved by a previous return).

    @param self: The object pointer.
    @param cr: A database cursor
    @param uid: ID of the user currently logged in
    @param fields: List of fields for which we want default values
    @param context: A standard dictionary
    @return: A dictionary with default values for all field in ``fields``
    """
    result1 = []
    if context is None:
        context = {}
    if context and context.get('active_ids', False):
        # The wizard only supports returning a single picking at a time.
        if len(context.get('active_ids')) > 1:
            raise osv.except_osv(_('Warning!'), _("You may only return one picking at a time!"))
    res = super(stock_return_picking, self).default_get(cr, uid, fields, context=context)
    record_id = context and context.get('active_id', False) or False
    uom_obj = self.pool.get('product.uom')
    pick_obj = self.pool.get('stock.picking')
    pick = pick_obj.browse(cr, uid, record_id, context=context)
    quant_obj = self.pool.get("stock.quant")
    chained_move_exist = False
    if pick:
        if pick.state != 'done':
            raise osv.except_osv(_('Warning!'), _("You may only return pickings that are Done!"))
        for move in pick.move_lines:
            if move.move_dest_id:
                chained_move_exist = True
            #Sum the quants in that location that can be returned (they should have been moved by the moves that were included in the returned picking)
            qty = 0
            quant_search = quant_obj.search(cr, uid, [('history_ids', 'in', move.id), ('qty', '>', 0.0), ('location_id', 'child_of', move.location_dest_id.id)], context=context)
            for quant in quant_obj.browse(cr, uid, quant_search, context=context):
                # Skip quants already reserved by a return of this same move.
                if not quant.reservation_id or quant.reservation_id.origin_returned_move_id.id != move.id:
                    qty += quant.qty
            # Convert from the product's reference UoM to the move's UoM.
            qty = uom_obj._compute_qty(cr, uid, move.product_id.uom_id.id, qty, move.product_uom.id)
            result1.append({'product_id': move.product_id.id, 'quantity': qty, 'move_id': move.id})
    if len(result1) == 0:
        raise osv.except_osv(_('Warning!'), _("No products to return (only lines in Done state and not fully returned yet can be returned)!"))
    if 'product_return_moves' in fields:
        res.update({'product_return_moves': result1})
    if 'move_dest_exists' in fields:
        res.update({'move_dest_exists': chained_move_exist})
    return res
def _create_returns(self, cr, uid, ids, context=None):
    """Create the reverse picking and its moves from the wizard lines.

    Unreserves any chained destination moves of the original picking,
    copies the picking (using the return picking type when configured) and
    copies each original move with the source/destination locations swapped
    and the wizard quantity applied. The new picking is then confirmed and
    assigned.

    @return: tuple (new_picking_id, picking_type_id) of the created return.
    """
    if context is None:
        context = {}
    record_id = context and context.get('active_id', False) or False
    move_obj = self.pool.get('stock.move')
    pick_obj = self.pool.get('stock.picking')
    uom_obj = self.pool.get('product.uom')
    data_obj = self.pool.get('stock.return.picking.line')
    pick = pick_obj.browse(cr, uid, record_id, context=context)
    data = self.read(cr, uid, ids[0], context=context)
    returned_lines = 0

    # Cancel assignment of existing chained assigned moves
    moves_to_unreserve = []
    for move in pick.move_lines:
        to_check_moves = [move.move_dest_id] if move.move_dest_id.id else []
        while to_check_moves:
            current_move = to_check_moves.pop()
            if current_move.state not in ('done', 'cancel') and current_move.reserved_quant_ids:
                moves_to_unreserve.append(current_move.id)
            # Also walk the moves split off from the chained move.
            split_move_ids = move_obj.search(cr, uid, [('split_from', '=', current_move.id)], context=context)
            if split_move_ids:
                to_check_moves += move_obj.browse(cr, uid, split_move_ids, context=context)
    if moves_to_unreserve:
        move_obj.do_unreserve(cr, uid, moves_to_unreserve, context=context)
        #break the link between moves in order to be able to fix them later if needed
        move_obj.write(cr, uid, moves_to_unreserve, {'move_orig_ids': False}, context=context)

    #Create new picking for returned products
    pick_type_id = pick.picking_type_id.return_picking_type_id and pick.picking_type_id.return_picking_type_id.id or pick.picking_type_id.id
    new_picking = pick_obj.copy(cr, uid, pick.id, {
        'move_lines': [],
        'picking_type_id': pick_type_id,
        'state': 'draft',
        'origin': pick.name,
    }, context=context)

    for data_get in data_obj.browse(cr, uid, data['product_return_moves'], context=context):
        move = data_get.move_id
        if not move:
            raise osv.except_osv(_('Warning !'), _("You have manually created product lines, please delete them to proceed"))
        new_qty = data_get.quantity
        if new_qty:
            # The return of a return should be linked with the original's destination move if it was not cancelled
            if move.origin_returned_move_id.move_dest_id.id and move.origin_returned_move_id.move_dest_id.state != 'cancel':
                move_dest_id = move.origin_returned_move_id.move_dest_id.id
            else:
                move_dest_id = False
            returned_lines += 1
            # Reverse the move: swap the locations and use the (possibly
            # partial) quantity chosen on the wizard line.
            move_obj.copy(cr, uid, move.id, {
                'product_id': data_get.product_id.id,
                'product_uom_qty': new_qty,
                'product_uos_qty': new_qty * move.product_uos_qty / move.product_uom_qty,
                'picking_id': new_picking,
                'state': 'draft',
                'location_id': move.location_dest_id.id,
                'location_dest_id': move.location_id.id,
                'picking_type_id': pick_type_id,
                'warehouse_id': pick.picking_type_id.warehouse_id.id,
                'origin_returned_move_id': move.id,
                'procure_method': 'make_to_stock',
                'restrict_lot_id': data_get.lot_id.id,
                'move_dest_id': move_dest_id,
            })

    if not returned_lines:
        raise osv.except_osv(_('Warning!'), _("Please specify at least one non-zero quantity."))

    pick_obj.action_confirm(cr, uid, [new_picking], context=context)
    pick_obj.action_assign(cr, uid, [new_picking], context)
    return new_picking, pick_type_id
def create_returns(self, cr, uid, ids, context=None):
    """
    Creates the return picking and opens it in a new window.

    @param self: The object pointer.
    @param cr: A database cursor
    @param uid: ID of the user currently logged in
    @param ids: List of ids selected
    @param context: A standard dictionary
    @return: An ir.actions.act_window dictionary that displays the newly
        created return picking in tree/form view.
    """
    new_picking_id, pick_type_id = self._create_returns(cr, uid, ids, context=context)
    # Override the context to disable all the potential filters that could have been set previously
    ctx = {
        'search_default_picking_type_id': pick_type_id,
        'search_default_draft': False,
        'search_default_assigned': False,
        'search_default_confirmed': False,
        'search_default_ready': False,
        'search_default_late': False,
        'search_default_available': False,
    }
    return {
        # Restrict the action to the single picking just created.
        'domain': "[('id', 'in', [" + str(new_picking_id) + "])]",
        'name': _('Returned Picking'),
        'view_type': 'form',
        'view_mode': 'tree,form',
        'res_model': 'stock.picking',
        'type': 'ir.actions.act_window',
        'context': ctx,
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lcnodc/codes | 01_variaveis_tipos_de_dados/variables_data_types.py | 1 | 1073 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Variáveis e Tipos de Dados: Notas de Estudo.
Variáveis são utilizadas para armazenar todos os dados manipulados
por um sistema. Uma linguagem de programação pode oferecer suporte a
inúmeros tipos de dados.
"""
"""Declarando uma variável do tipo string."""
my_var = " hello "
""" Um dado do tipo string oferece alguns métodos que permitem alterar
seu conteúdo, concatenar com outras string, formatá-lo para impressão,
etc.
"""
print(my_var)
"""title(): retorna a string com a primeira maiúscula."""
print(my_var.title())
"""Concatenando strings."""
my_var += "world "
print(my_var.title())
"""strip(): elimina espaços na string."""
print(my_var.title().strip())
""" Tipos númericos.
Inteiros e Floats, são tipos númericos que podem ser utilizados para
as mais diversas operações matemáticas.
"""
print("Soma: 1 + 1 = ", 1 + 1)
print("Subtração: 1 - 1 = ", 1 - 1)
print("Multiplicação: 2 * 2 = ", 2 * 2)
print("Divisão: 2.4 / 2 = ", 2.4 / 2)
print("Exponenciação: 2 ** 3 = ", 2 ** 3)
| mit |
kviktor/djangosaml2-py3 | djangosaml2/cache.py | 42 | 3438 | # Copyright (C) 2011-2012 Yaco Sistemas (http://www.yaco.es)
# Copyright (C) 2010 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from saml2.cache import Cache
from saml2.ident import code, decode
class DjangoSessionCacheAdapter(dict):
    """Dict whose contents are persisted under one key of the Django session."""

    key_prefix = '_saml2'

    def __init__(self, django_session, key_suffix):
        self.session = django_session
        self.key = self.key_prefix + key_suffix
        # seed the in-memory dict from whatever the session already holds
        super(DjangoSessionCacheAdapter, self).__init__(self._get_objects())

    def _get_objects(self):
        # missing key -> empty mapping
        return self.session.get(self.key, {})

    def _set_objects(self, objects):
        self.session[self.key] = objects

    def sync(self):
        """Push the current in-memory contents back into the session store."""
        self._set_objects(dict(self))
class OutstandingQueriesCache(object):
    """Tracks authn requests sent to the IdP that still await a reply."""

    def __init__(self, django_session):
        self._db = DjangoSessionCacheAdapter(django_session,
                                             '_outstanding_queries')

    def outstanding_queries(self):
        """Return the mapping of saml2 session id -> 'came from' URL."""
        return self._db._get_objects()

    def set(self, saml2_session_id, came_from):
        """Record *came_from* for *saml2_session_id* and persist it."""
        self._db[saml2_session_id] = came_from
        self._db.sync()

    def delete(self, saml2_session_id):
        """Forget *saml2_session_id*; persists only if it was present."""
        if saml2_session_id not in self._db:
            return
        del self._db[saml2_session_id]
        self._db.sync()
class IdentityCache(Cache):
    """Session-backed record of users successfully logged in via SAML.

    This information is needed at logout time so the originating IdP/AA
    can be notified.  The ``info['name_id']`` entry is stored in its
    string-coded form and decoded again on the way out.
    """

    def __init__(self, django_session):
        self._db = DjangoSessionCacheAdapter(django_session, '_identities')
        self._sync = True

    def get(self, name_id, entity_id, *args, **kwargs):
        info = super(IdentityCache, self).get(name_id, entity_id, *args, **kwargs)
        if 'name_id' in info:
            # work on a copy so the stored (coded) entry stays untouched
            info = dict(info)
            info['name_id'] = decode(info['name_id'])
        return info

    def set(self, name_id, entity_id, info, *args, **kwargs):
        if 'name_id' in info:
            # key the underlying cache on the NameID taken from *info*
            # (mirroring the original lookup order) and store it coded
            name_id = info['name_id']
            info = dict(info)
            info['name_id'] = code(name_id)
        return super(IdentityCache, self).set(name_id, entity_id, info, *args, **kwargs)
class StateCache(DjangoSessionCacheAdapter):
    """Session-backed state used to pair a logout request with its response."""

    def __init__(self, django_session):
        super(StateCache, self).__init__(django_session, '_state')
| apache-2.0 |
from subprocess import call
import sys
import numpy as np
import matplotlib.pyplot as plt
from magnetic_dipole import dipole

plt.ion()
plt.figure(figsize=(10.24, 7.68), dpi=100)

# Grid dimensions are taken from the command line: lic.py XSIZE YSIZE.
xsize, ysize = int(sys.argv[1]), int(sys.argv[2])
xmax, ymax = 200, 200
X = np.linspace(0, xmax, xsize)
Y = np.linspace(0, ymax, ysize)
x, y = np.meshgrid(X, Y)

### magnetic dipole ###
# Vector field of a dipole slightly off the grid centre.
dx, dy = dipole(m=[5., 5.], r=np.meshgrid(X, Y),
                r0=[xmax/2. + 0.1, ymax/2. + 0.3]).astype('float32')
vectors = np.array([dx, dy])
# White-noise texture to be smeared along the field lines.
white = np.random.rand(xsize, ysize)


def _write_matrix(path, matrix):
    """Write *matrix* to *path*, one whitespace-separated row per line."""
    # Bug fix: the original used the Python-2-only builtin ``file()``,
    # which does not exist in Python 3; ``open()`` works in both.
    with open(path, 'w') as outfile:
        for row in matrix:
            np.savetxt(outfile, row, newline=" ")
            outfile.write('\n')


# Dump the texture and the field components for the external LIC binary.
_write_matrix('texture.dat', white)
_write_matrix('dx.dat', dx)
_write_matrix('dy.dat', dy)

# Run the line-integral-convolution executable, then display its output.
command = ["./lic", str(xsize), str(ysize)]
call(command)

lic = np.loadtxt("./lic.dat")
plt.imshow(lic, cmap="viridis", interpolation="sinc")
plt.tight_layout()
| gpl-3.0 |
'''
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.
'''
from datetime import datetime
from csv import DictReader
from math import exp, log, sqrt
# TL; DR, the main training process starts on line: 250,
# you may want to start reading the code from there
##############################################################################
# parameters #################################################################
##############################################################################
# A, paths (Windows-style; note the doubled backslashes)
train ="C:\\Users\\mxh109420\\Desktop\\KaggleCometition\\train\\train.csv"
test = "C:\\Users\\mxh109420\\Desktop\\KaggleCometition\\test\\test.csv"
submission = "C:\\Users\\mxh109420\\Desktop\\KaggleCometition\\submission1234ActualFeatureRemovedNoise.csv"  # path of the output submission file

# B, model hyper-parameters
alpha = .1   # learning rate
beta = 1.    # smoothing parameter for the adaptive per-coordinate rate
L1 = 1.      # L1 regularization, larger value means more regularized
L2 = 1.      # L2 regularization, larger value means more regularized

# C, feature/hash trick
D = 2 ** 12          # number of weights (NOTE: redefined later with the true feature count)
interaction = False  # whether to enable poly2 feature interactions

# D, training/validation
epoch = 1        # learn training data for N passes
holdafter = 9    # data after date N (exclusive) would be validation (branch currently disabled)
holdout = None   # use every N-th training instance for holdout validation (currently unused)
##############################################################################
# class, function, generator definitions #####################################
##############################################################################
class ftrl_proximal(object):
    """Follow-the-regularized-leader proximal (FTRL-proximal) learner.

    An adaptive-learning-rate sparse logistic regression with efficient
    L1/L2 regularization, after McMahan et al.:
    http://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf

    ``x`` is treated as a DENSE vector of length ``D`` holding feature
    counts.  Index 0 is nominally reserved for a bias term, but as
    written ``predict``/``update`` iterate from index 1 and never use it.
    """

    def __init__(self, alpha, beta, L1, L2, D, interaction):
        # learning-rate / regularization hyper-parameters
        self.alpha = alpha
        self.beta = beta
        self.L1 = L1
        self.L2 = L2

        # feature-space parameters
        self.D = D
        self.interaction = interaction

        # model state:
        #   n: per-coordinate sum of squared past gradients
        #   z: FTRL weights
        #   w: lazily materialized weights (rebuilt by predict())
        self.n = [0.] * D
        self.z = [0.] * D
        self.w = {}

    def _indices(self, x):
        """Yield the weight indices touched by *x* (sparse-index form).

        NOTE: kept for the optional poly2 interaction machinery; the
        dense-vector predict()/update() below no longer call it.
        """
        # first yield the index of the bias term
        yield 0

        # then the plain feature indices
        for index in x:
            yield index

        # hashed pairwise interactions, if enabled
        if self.interaction:
            D = self.D
            L = len(x)
            x = sorted(x)
            # Bug fix: was ``xrange`` (Python 2 only); ``range`` iterates
            # identically and also works on Python 3.
            for i in range(L):
                for j in range(i + 1, L):
                    # one-hot encode interactions with the hash trick
                    yield abs(hash(str(x[i]) + '_' + str(x[j]))) % D

    def predict(self, x):
        """Return the probability estimate p(y = 1 | x; w).

        Also caches the materialized weight vector in ``self.w`` for the
        matching ``update`` call.
        """
        alpha = self.alpha
        beta = self.beta
        L1 = self.L1
        L2 = self.L2
        n = self.n
        z = self.z
        # Bug fix: this used the module-level global ``D`` instead of the
        # instance's own dimension, silently coupling the model to script
        # globals.  Behavior is unchanged when they agree (as in the
        # original script), and the class is now self-contained.
        w = [0] * self.D

        # wTx is the inner product of w and x
        wTx = 0.
        for i in range(1, self.D):
            if x[i] != 0:
                sign = -1. if z[i] < 0 else 1.  # sign of z[i]
                # build w on the fly from z and n ("lazy weights") so the
                # full weight vector never needs storing between calls
                if sign * z[i] <= L1:
                    # w[i] vanishes due to L1 regularization
                    w[i] = 0.
                else:
                    # apply prediction-time L1/L2 regularization
                    w[i] = (sign * L1 - z[i]) / ((beta + sqrt(n[i])) / alpha + L2)
                wTx += w[i] * x[i]

        # cache the current w for the update stage
        self.w = w

        # bounded sigmoid: the probability estimate
        return 1. / (1. + exp(-max(min(wTx, 35.), -35.)))

    def update(self, x, p, y):
        """Fold one observation into the model.

        INPUT:
            x: dense feature-count vector
            p: click probability predicted by the immediately
               preceding ``predict(x)`` (whose cached ``self.w`` is used)
            y: true label (0. or 1.)
        MODIFIES:
            self.n: increased by the squared gradient
            self.z: FTRL weights
        """
        alpha = self.alpha
        n = self.n
        z = self.z
        w = self.w

        # Bug fix: loop bound was the module-level global ``D`` (see
        # predict()); it now uses self.D.  The unused debug accumulator
        # sumXi was removed.
        for i in range(1, self.D):
            if x[i] != 0:
                # gradient of the log-loss w.r.t. w[i]
                g = (p - y) * x[i]
                sigma = (sqrt(n[i] + g * g) - sqrt(n[i])) / alpha
                z[i] += g - sigma * w[i]
                n[i] += g * g
def logloss(p, y):
    """Bounded logarithmic loss of prediction *p* against label *y*."""
    # clamp p away from 0 and 1 so the logarithm stays finite
    p = min(max(p, 10e-15), 1. - 10e-15)
    if y == 1.:
        return -log(p)
    return -log(1. - p)
def data(path, D):
    """GENERATOR over the CSV at *path*, one tuple per row.

    YIELDS:
        t:    row counter
        date: day-of-month extracted from the 'hour' column (YYMMDDHH)
        ID:   the row's 'id' value
        x:    dense count vector of length D, indexed through the
              module-level ``dictionaryFeature`` lookup table
        y:    1. on a click, else 0.
    """
    for t, row in enumerate(DictReader(open(path))):
        ID = row.pop('id')

        # click label (column absent in the test file)
        y = 1. if row.pop('click', None) == '1' else 0.

        # extract the date, then reduce 'hour' from YYMMDDHH to HH
        date = int(row['hour'][4:6])
        row['hour'] = row['hour'][6:]

        # build the dense feature-count vector; (column, value) pairs not
        # present in dictionaryFeature are dropped
        x = [0] * D
        for key, value in row.items():
            lookup = key + value
            if lookup in dictionaryFeature:
                x[dictionaryFeature[lookup]] += 1

        yield t, date, ID, x, y
##############################################################################
# Function to create the feature vector #############################################################
##############################################################################
##############################################################################
# class, function, generator definitions #####################################
##############################################################################
def dataFeatureCreation(path, D, basicStat):
    """GENERATOR: scan the CSV at *path*, accumulating value frequencies.

    For every column except device_id/device_ip, ``basicStat`` collects a
    {value: count} mapping (mutated in place).  *D* is accepted for
    signature compatibility with ``data`` but is not used.

    YIELDS per row:
        t:         row counter
        date:      day-of-month from the 'hour' column (YYMMDDHH)
        ID:        the row's 'id' value
        y:         1. on a click, else 0.
        basicStat: the (shared) running statistics dict
    """
    for t, row in enumerate(DictReader(open(path))):
        ID = row.pop('id')

        # click label (column absent in the test file)
        y = 1. if row.pop('click', None) == '1' else 0.

        # extract the date, then reduce 'hour' from YYMMDDHH to HH
        date = int(row['hour'][4:6])
        row['hour'] = row['hour'][6:]

        # tally every (column, value) occurrence except the device columns
        for key, value in row.items():
            if key == "device_id" or key == "device_ip":
                continue
            counts = basicStat.setdefault(key, {})
            counts[value] = counts.get(value, 0) + 1

        yield t, date, ID, y, basicStat
##############################################################################
# Create a feature vector #############################################################
##############################################################################
start = datetime.now()

# ------------------------------------------------------------------
# Pass 1 over the TRAINING file: accumulate per-column value counts in
# basicStat ({column: {value: count}}).  No learning happens here.
# NOTE: ``xrange`` makes this script Python-2 only.
# ------------------------------------------------------------------
numberOfObsTest = 1e5
counterTest = 0    # rows seen so far
yCounterTest = 0   # rows with a positive (click) label
basicStat = {}
for e in xrange(epoch):
    loss = 0.
    count = 0
    for t, date, ID, y, basicStat in dataFeatureCreation(train, D,basicStat):
        # t: row counter, date: day-of-month, ID: row id, y: click label
        counterTest = counterTest + 1
        # progress heartbeat every 5000 rows (the float comparison is a
        # divisibility test under Python-2 integer division)
        if (float(counterTest)/5000==counterTest/5000):
            print('create feature vector')
            print(counterTest)
        if (y == 1):
            yCounterTest = yCounterTest + 1
trainingObsCounter = counterTest
##############################################################################
# start testing, and build Kaggle's submission file ##########################
##############################################################################
# Pass 1 over the TEST file: the same frequency scan, into basicStatTest.
basicStatTest = {}
counterTest = 0
for t, date, ID, y, basicStatTest in dataFeatureCreation(test, D,basicStatTest):
    counterTest = counterTest + 1
    # progress heartbeat every 5000 rows (Python-2 divisibility trick)
    if (float(counterTest)/5000==counterTest/5000):
        print('test')
        print(counterTest)
# ------------------------------------------------------------------
# Build the dense feature index.  A (column, value) pair receives an
# integer id only when it occurs in BOTH the train and the test scans,
# so no weight is ever allocated for a value unseen at prediction time.
# D is redefined here to the true feature count.
# ------------------------------------------------------------------
D = 0
dictionaryFeature = {}   # maps column_name + value -> dense feature index
for i in basicStat:
    if (i in basicStatTest):
        Elements = basicStat[i]
        ElementsTest = basicStatTest[i]
        for j in Elements:
            if (j in ElementsTest):
                dictionaryFeature[i+j]=D
                D = D + 1
print('congratualtions the number of features is:')
print (D)
##############################################################################
# start training #############################################################
##############################################################################
start = datetime.now()

# ------------------------------------------------------------------
# Pass 2: online FTRL-proximal training over the full training file.
# Each instance is predicted first, then immediately used to update the
# model.  The holdafter/holdout validation branches from the original
# template are disabled: every row trains.
# ------------------------------------------------------------------
learner = ftrl_proximal(alpha, beta, L1, L2, D, interaction)
numberOfObsTest = 1e5
counterTest = 0
yCounterTest = 0
for e in xrange(epoch):
    loss = 0.
    count = 0
    for t, date, ID, x, y in data(train, D):
        # t: row counter, date: day-of-month, ID: row id,
        # x: dense feature-count vector, y: click label
        counterTest = counterTest + 1
        # progress heartbeat every 5000 rows (Python-2 divisibility trick)
        if (float(counterTest)/5000==counterTest/5000):
            print('train')
            print(counterTest)
        p = learner.predict(x)
        if (y == 1):
            yCounterTest = yCounterTest + 1
        # predict-then-update; update() relies on the w cached by predict()
        learner.update(x, p, y)
trainingObsCounter = counterTest
##############################################################################
# start testing, and build Kaggle's submission file ##########################
##############################################################################
# ------------------------------------------------------------------
# Score the test file and write Kaggle's submission CSV, then print a
# small run summary.
# Bug fix: stray dataset residue ("| gpl-3.0 |") fused onto the final
# print line made it a syntax error; removed.
# ------------------------------------------------------------------
counterTest = 0
with open(submission, 'w') as outfile:
    outfile.write('id,click\n')
    for t, date, ID, x, y in data(test, D):
        counterTest = counterTest + 1
        # progress heartbeat every 5000 rows (Python-2 divisibility trick)
        if (float(counterTest)/5000==counterTest/5000):
            print('test')
            print(counterTest)
        p = learner.predict(x)
        outfile.write('%s,%s\n' % (ID, str(p)))

print('number of training observation is:\n')
print('===============================\n')
print(trainingObsCounter)
print('number of test observation is:\n')
print('===============================\n')
print(counterTest)
print('number of positive examples:\n')
print('===============================\n')
print(yCounterTest)
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# GB2312 most frequently used character table
#
# Char to FreqOrder table , from hz6763
# 512 --> 0.79 -- 0.79
# 1024 --> 0.92 -- 0.13
# 2048 --> 0.98 -- 0.06
# 6768 --> 1.00 -- 0.02
#
# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79
# Random Distribution Ration = 512 / (3755 - 512) = 0.157
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher that RDR
# Expected character-distribution ratio for typical GB2312 text, per the
# derivation in the comment block above (ideal 3.79; typical is about 25%
# of ideal) — presumably consumed by chardet's distribution analyser.
GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
# Size of the char-to-frequency-order table below — presumably the number
# of ranked characters the prober consults; confirm against chardet.
GB2312_TABLE_SIZE = 3760
GB2312CharToFreqOrder = (
1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409,
249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670,
1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820,
1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585,
152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566,
1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575,
2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853,
3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061,
544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155,
1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406,
927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816,
2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606,
360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023,
2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414,
1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513,
3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052,
198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570,
1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575,
253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250,
2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506,
1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26,
3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835,
1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686,
2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054,
1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894,
585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105,
3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403,
3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694,
252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873,
3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940,
836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121,
1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648,
3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992,
2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233,
1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157,
755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807,
1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094,
4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258,
887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478,
3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152,
3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909,
509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272,
1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221,
2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252,
1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301,
1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254,
389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070,
3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461,
3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360,
4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124,
296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535,
3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243,
1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713,
1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071,
4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442,
215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946,
814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257,
3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180,
1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427,
602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781,
1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724,
2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937,
930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943,
432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789,
396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552,
3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246,
4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451,
3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310,
750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860,
2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297,
2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780,
2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745,
776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936,
2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032,
968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657,
163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414,
220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976,
3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436,
2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254,
2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536,
1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238,
18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059,
2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741,
90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447,
286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601,
1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269,
1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894,
915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173,
681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994,
1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956,
2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437,
3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154,
2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240,
2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143,
2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634,
3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472,
1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541,
1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143,
2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312,
1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414,
3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754,
1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424,
1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302,
3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739,
795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004,
2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484,
1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739,
4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535,
1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641,
1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307,
3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573,
1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533,
47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965,
504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99,
1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280,
160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505,
1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012,
1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039,
744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982,
3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530,
4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392,
3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656,
2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220,
2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766,
1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535,
3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728,
2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338,
1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627,
1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885,
125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411,
2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671,
2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162,
3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774,
4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524,
3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346,
180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040,
3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188,
2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280,
1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131,
259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947,
774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970,
3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814,
4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557,
2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997,
1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972,
1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369,
766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376,
1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480,
3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610,
955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128,
642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769,
1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207,
57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392,
1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623,
193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782,
2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650,
158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478,
2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773,
2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007,
1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323,
1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598,
2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961,
819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302,
1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409,
1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683,
2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191,
2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616,
3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302,
1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774,
4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147,
571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731,
845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464,
3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377,
1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315,
470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557,
3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903,
1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060,
4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261,
1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092,
2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810,
1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708,
498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658,
1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871,
3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503,
448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229,
2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112,
136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504,
1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389,
1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27,
1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542,
3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861,
2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845,
3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700,
3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469,
3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582,
996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999,
2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274,
786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020,
2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601,
12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628,
1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31,
475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668,
233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778,
1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169,
3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667,
3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881,
1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276,
1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320,
3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751,
2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432,
2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772,
1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843,
3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116,
451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904,
4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652,
1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664,
2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770,
3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283,
3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626,
1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713,
768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333,
391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062,
2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555,
931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014,
1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510,
386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015,
1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459,
1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390,
1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238,
1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232,
1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624,
381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, # last 512
#Everything below is of no interest for detection purpose
5508,6484,3900,3414,3974,4441,4024,3537,4037,5628,5099,3633,6485,3148,6486,3636,
5509,3257,5510,5973,5445,5872,4941,4403,3174,4627,5873,6276,2286,4230,5446,5874,
5122,6102,6103,4162,5447,5123,5323,4849,6277,3980,3851,5066,4246,5774,5067,6278,
3001,2807,5695,3346,5775,5974,5158,5448,6487,5975,5976,5776,3598,6279,5696,4806,
4211,4154,6280,6488,6489,6490,6281,4212,5037,3374,4171,6491,4562,4807,4722,4827,
5977,6104,4532,4079,5159,5324,5160,4404,3858,5359,5875,3975,4288,4610,3486,4512,
5325,3893,5360,6282,6283,5560,2522,4231,5978,5186,5449,2569,3878,6284,5401,3578,
4415,6285,4656,5124,5979,2506,4247,4449,3219,3417,4334,4969,4329,6492,4576,4828,
4172,4416,4829,5402,6286,3927,3852,5361,4369,4830,4477,4867,5876,4173,6493,6105,
4657,6287,6106,5877,5450,6494,4155,4868,5451,3700,5629,4384,6288,6289,5878,3189,
4881,6107,6290,6495,4513,6496,4692,4515,4723,5100,3356,6497,6291,3810,4080,5561,
3570,4430,5980,6498,4355,5697,6499,4724,6108,6109,3764,4050,5038,5879,4093,3226,
6292,5068,5217,4693,3342,5630,3504,4831,4377,4466,4309,5698,4431,5777,6293,5778,
4272,3706,6110,5326,3752,4676,5327,4273,5403,4767,5631,6500,5699,5880,3475,5039,
6294,5562,5125,4348,4301,4482,4068,5126,4593,5700,3380,3462,5981,5563,3824,5404,
4970,5511,3825,4738,6295,6501,5452,4516,6111,5881,5564,6502,6296,5982,6503,4213,
4163,3454,6504,6112,4009,4450,6113,4658,6297,6114,3035,6505,6115,3995,4904,4739,
4563,4942,4110,5040,3661,3928,5362,3674,6506,5292,3612,4791,5565,4149,5983,5328,
5259,5021,4725,4577,4564,4517,4364,6298,5405,4578,5260,4594,4156,4157,5453,3592,
3491,6507,5127,5512,4709,4922,5984,5701,4726,4289,6508,4015,6116,5128,4628,3424,
4241,5779,6299,4905,6509,6510,5454,5702,5780,6300,4365,4923,3971,6511,5161,3270,
3158,5985,4100, 867,5129,5703,6117,5363,3695,3301,5513,4467,6118,6512,5455,4232,
4242,4629,6513,3959,4478,6514,5514,5329,5986,4850,5162,5566,3846,4694,6119,5456,
4869,5781,3779,6301,5704,5987,5515,4710,6302,5882,6120,4392,5364,5705,6515,6121,
6516,6517,3736,5988,5457,5989,4695,2457,5883,4551,5782,6303,6304,6305,5130,4971,
6122,5163,6123,4870,3263,5365,3150,4871,6518,6306,5783,5069,5706,3513,3498,4409,
5330,5632,5366,5458,5459,3991,5990,4502,3324,5991,5784,3696,4518,5633,4119,6519,
4630,5634,4417,5707,4832,5992,3418,6124,5993,5567,4768,5218,6520,4595,3458,5367,
6125,5635,6126,4202,6521,4740,4924,6307,3981,4069,4385,6308,3883,2675,4051,3834,
4302,4483,5568,5994,4972,4101,5368,6309,5164,5884,3922,6127,6522,6523,5261,5460,
5187,4164,5219,3538,5516,4111,3524,5995,6310,6311,5369,3181,3386,2484,5188,3464,
5569,3627,5708,6524,5406,5165,4677,4492,6312,4872,4851,5885,4468,5996,6313,5709,
5710,6128,2470,5886,6314,5293,4882,5785,3325,5461,5101,6129,5711,5786,6525,4906,
6526,6527,4418,5887,5712,4808,2907,3701,5713,5888,6528,3765,5636,5331,6529,6530,
3593,5889,3637,4943,3692,5714,5787,4925,6315,6130,5462,4405,6131,6132,6316,5262,
6531,6532,5715,3859,5716,5070,4696,5102,3929,5788,3987,4792,5997,6533,6534,3920,
4809,5000,5998,6535,2974,5370,6317,5189,5263,5717,3826,6536,3953,5001,4883,3190,
5463,5890,4973,5999,4741,6133,6134,3607,5570,6000,4711,3362,3630,4552,5041,6318,
6001,2950,2953,5637,4646,5371,4944,6002,2044,4120,3429,6319,6537,5103,4833,6538,
6539,4884,4647,3884,6003,6004,4758,3835,5220,5789,4565,5407,6540,6135,5294,4697,
4852,6320,6321,3206,4907,6541,6322,4945,6542,6136,6543,6323,6005,4631,3519,6544,
5891,6545,5464,3784,5221,6546,5571,4659,6547,6324,6137,5190,6548,3853,6549,4016,
4834,3954,6138,5332,3827,4017,3210,3546,4469,5408,5718,3505,4648,5790,5131,5638,
5791,5465,4727,4318,6325,6326,5792,4553,4010,4698,3439,4974,3638,4335,3085,6006,
5104,5042,5166,5892,5572,6327,4356,4519,5222,5573,5333,5793,5043,6550,5639,5071,
4503,6328,6139,6551,6140,3914,3901,5372,6007,5640,4728,4793,3976,3836,4885,6552,
4127,6553,4451,4102,5002,6554,3686,5105,6555,5191,5072,5295,4611,5794,5296,6556,
5893,5264,5894,4975,5466,5265,4699,4976,4370,4056,3492,5044,4886,6557,5795,4432,
4769,4357,5467,3940,4660,4290,6141,4484,4770,4661,3992,6329,4025,4662,5022,4632,
4835,4070,5297,4663,4596,5574,5132,5409,5895,6142,4504,5192,4664,5796,5896,3885,
5575,5797,5023,4810,5798,3732,5223,4712,5298,4084,5334,5468,6143,4052,4053,4336,
4977,4794,6558,5335,4908,5576,5224,4233,5024,4128,5469,5225,4873,6008,5045,4729,
4742,4633,3675,4597,6559,5897,5133,5577,5003,5641,5719,6330,6560,3017,2382,3854,
4406,4811,6331,4393,3964,4946,6561,2420,3722,6562,4926,4378,3247,1736,4442,6332,
5134,6333,5226,3996,2918,5470,4319,4003,4598,4743,4744,4485,3785,3902,5167,5004,
5373,4394,5898,6144,4874,1793,3997,6334,4085,4214,5106,5642,4909,5799,6009,4419,
4189,3330,5899,4165,4420,5299,5720,5227,3347,6145,4081,6335,2876,3930,6146,3293,
3786,3910,3998,5900,5300,5578,2840,6563,5901,5579,6147,3531,5374,6564,6565,5580,
4759,5375,6566,6148,3559,5643,6336,6010,5517,6337,6338,5721,5902,3873,6011,6339,
6567,5518,3868,3649,5722,6568,4771,4947,6569,6149,4812,6570,2853,5471,6340,6341,
5644,4795,6342,6012,5723,6343,5724,6013,4349,6344,3160,6150,5193,4599,4514,4493,
5168,4320,6345,4927,3666,4745,5169,5903,5005,4928,6346,5725,6014,4730,4203,5046,
4948,3395,5170,6015,4150,6016,5726,5519,6347,5047,3550,6151,6348,4197,4310,5904,
6571,5581,2965,6152,4978,3960,4291,5135,6572,5301,5727,4129,4026,5905,4853,5728,
5472,6153,6349,4533,2700,4505,5336,4678,3583,5073,2994,4486,3043,4554,5520,6350,
6017,5800,4487,6351,3931,4103,5376,6352,4011,4321,4311,4190,5136,6018,3988,3233,
4350,5906,5645,4198,6573,5107,3432,4191,3435,5582,6574,4139,5410,6353,5411,3944,
5583,5074,3198,6575,6354,4358,6576,5302,4600,5584,5194,5412,6577,6578,5585,5413,
5303,4248,5414,3879,4433,6579,4479,5025,4854,5415,6355,4760,4772,3683,2978,4700,
3797,4452,3965,3932,3721,4910,5801,6580,5195,3551,5907,3221,3471,3029,6019,3999,
5908,5909,5266,5267,3444,3023,3828,3170,4796,5646,4979,4259,6356,5647,5337,3694,
6357,5648,5338,4520,4322,5802,3031,3759,4071,6020,5586,4836,4386,5048,6581,3571,
4679,4174,4949,6154,4813,3787,3402,3822,3958,3215,3552,5268,4387,3933,4950,4359,
6021,5910,5075,3579,6358,4234,4566,5521,6359,3613,5049,6022,5911,3375,3702,3178,
4911,5339,4521,6582,6583,4395,3087,3811,5377,6023,6360,6155,4027,5171,5649,4421,
4249,2804,6584,2270,6585,4000,4235,3045,6156,5137,5729,4140,4312,3886,6361,4330,
6157,4215,6158,3500,3676,4929,4331,3713,4930,5912,4265,3776,3368,5587,4470,4855,
3038,4980,3631,6159,6160,4132,4680,6161,6362,3923,4379,5588,4255,6586,4121,6587,
6363,4649,6364,3288,4773,4774,6162,6024,6365,3543,6588,4274,3107,3737,5050,5803,
4797,4522,5589,5051,5730,3714,4887,5378,4001,4523,6163,5026,5522,4701,4175,2791,
3760,6589,5473,4224,4133,3847,4814,4815,4775,3259,5416,6590,2738,6164,6025,5304,
3733,5076,5650,4816,5590,6591,6165,6592,3934,5269,6593,3396,5340,6594,5804,3445,
3602,4042,4488,5731,5732,3525,5591,4601,5196,6166,6026,5172,3642,4612,3202,4506,
4798,6366,3818,5108,4303,5138,5139,4776,3332,4304,2915,3415,4434,5077,5109,4856,
2879,5305,4817,6595,5913,3104,3144,3903,4634,5341,3133,5110,5651,5805,6167,4057,
5592,2945,4371,5593,6596,3474,4182,6367,6597,6168,4507,4279,6598,2822,6599,4777,
4713,5594,3829,6169,3887,5417,6170,3653,5474,6368,4216,2971,5228,3790,4579,6369,
5733,6600,6601,4951,4746,4555,6602,5418,5475,6027,3400,4665,5806,6171,4799,6028,
5052,6172,3343,4800,4747,5006,6370,4556,4217,5476,4396,5229,5379,5477,3839,5914,
5652,5807,4714,3068,4635,5808,6173,5342,4192,5078,5419,5523,5734,6174,4557,6175,
4602,6371,6176,6603,5809,6372,5735,4260,3869,5111,5230,6029,5112,6177,3126,4681,
5524,5915,2706,3563,4748,3130,6178,4018,5525,6604,6605,5478,4012,4837,6606,4534,
4193,5810,4857,3615,5479,6030,4082,3697,3539,4086,5270,3662,4508,4931,5916,4912,
5811,5027,3888,6607,4397,3527,3302,3798,2775,2921,2637,3966,4122,4388,4028,4054,
1633,4858,5079,3024,5007,3982,3412,5736,6608,3426,3236,5595,3030,6179,3427,3336,
3279,3110,6373,3874,3039,5080,5917,5140,4489,3119,6374,5812,3405,4494,6031,4666,
4141,6180,4166,6032,5813,4981,6609,5081,4422,4982,4112,3915,5653,3296,3983,6375,
4266,4410,5654,6610,6181,3436,5082,6611,5380,6033,3819,5596,4535,5231,5306,5113,
6612,4952,5918,4275,3113,6613,6376,6182,6183,5814,3073,4731,4838,5008,3831,6614,
4888,3090,3848,4280,5526,5232,3014,5655,5009,5737,5420,5527,6615,5815,5343,5173,
5381,4818,6616,3151,4953,6617,5738,2796,3204,4360,2989,4281,5739,5174,5421,5197,
3132,5141,3849,5142,5528,5083,3799,3904,4839,5480,2880,4495,3448,6377,6184,5271,
5919,3771,3193,6034,6035,5920,5010,6036,5597,6037,6378,6038,3106,5422,6618,5423,
5424,4142,6619,4889,5084,4890,4313,5740,6620,3437,5175,5307,5816,4199,5198,5529,
5817,5199,5656,4913,5028,5344,3850,6185,2955,5272,5011,5818,4567,4580,5029,5921,
3616,5233,6621,6622,6186,4176,6039,6379,6380,3352,5200,5273,2908,5598,5234,3837,
5308,6623,6624,5819,4496,4323,5309,5201,6625,6626,4983,3194,3838,4167,5530,5922,
5274,6381,6382,3860,3861,5599,3333,4292,4509,6383,3553,5481,5820,5531,4778,6187,
3955,3956,4324,4389,4218,3945,4325,3397,2681,5923,4779,5085,4019,5482,4891,5382,
5383,6040,4682,3425,5275,4094,6627,5310,3015,5483,5657,4398,5924,3168,4819,6628,
5925,6629,5532,4932,4613,6041,6630,4636,6384,4780,4204,5658,4423,5821,3989,4683,
5822,6385,4954,6631,5345,6188,5425,5012,5384,3894,6386,4490,4104,6632,5741,5053,
6633,5823,5926,5659,5660,5927,6634,5235,5742,5824,4840,4933,4820,6387,4859,5928,
4955,6388,4143,3584,5825,5346,5013,6635,5661,6389,5014,5484,5743,4337,5176,5662,
6390,2836,6391,3268,6392,6636,6042,5236,6637,4158,6638,5744,5663,4471,5347,3663,
4123,5143,4293,3895,6639,6640,5311,5929,5826,3800,6189,6393,6190,5664,5348,3554,
3594,4749,4603,6641,5385,4801,6043,5827,4183,6642,5312,5426,4761,6394,5665,6191,
4715,2669,6643,6644,5533,3185,5427,5086,5930,5931,5386,6192,6044,6645,4781,4013,
5745,4282,4435,5534,4390,4267,6045,5746,4984,6046,2743,6193,3501,4087,5485,5932,
5428,4184,4095,5747,4061,5054,3058,3862,5933,5600,6646,5144,3618,6395,3131,5055,
5313,6396,4650,4956,3855,6194,3896,5202,4985,4029,4225,6195,6647,5828,5486,5829,
3589,3002,6648,6397,4782,5276,6649,6196,6650,4105,3803,4043,5237,5830,6398,4096,
3643,6399,3528,6651,4453,3315,4637,6652,3984,6197,5535,3182,3339,6653,3096,2660,
6400,6654,3449,5934,4250,4236,6047,6401,5831,6655,5487,3753,4062,5832,6198,6199,
6656,3766,6657,3403,4667,6048,6658,4338,2897,5833,3880,2797,3780,4326,6659,5748,
5015,6660,5387,4351,5601,4411,6661,3654,4424,5935,4339,4072,5277,4568,5536,6402,
6662,5238,6663,5349,5203,6200,5204,6201,5145,4536,5016,5056,4762,5834,4399,4957,
6202,6403,5666,5749,6664,4340,6665,5936,5177,5667,6666,6667,3459,4668,6404,6668,
6669,4543,6203,6670,4276,6405,4480,5537,6671,4614,5205,5668,6672,3348,2193,4763,
6406,6204,5937,5602,4177,5669,3419,6673,4020,6205,4443,4569,5388,3715,3639,6407,
6049,4058,6206,6674,5938,4544,6050,4185,4294,4841,4651,4615,5488,6207,6408,6051,
5178,3241,3509,5835,6208,4958,5836,4341,5489,5278,6209,2823,5538,5350,5206,5429,
6675,4638,4875,4073,3516,4684,4914,4860,5939,5603,5389,6052,5057,3237,5490,3791,
6676,6409,6677,4821,4915,4106,5351,5058,4243,5539,4244,5604,4842,4916,5239,3028,
3716,5837,5114,5605,5390,5940,5430,6210,4332,6678,5540,4732,3667,3840,6053,4305,
3408,5670,5541,6410,2744,5240,5750,6679,3234,5606,6680,5607,5671,3608,4283,4159,
4400,5352,4783,6681,6411,6682,4491,4802,6211,6412,5941,6413,6414,5542,5751,6683,
4669,3734,5942,6684,6415,5943,5059,3328,4670,4144,4268,6685,6686,6687,6688,4372,
3603,6689,5944,5491,4373,3440,6416,5543,4784,4822,5608,3792,4616,5838,5672,3514,
5391,6417,4892,6690,4639,6691,6054,5673,5839,6055,6692,6056,5392,6212,4038,5544,
5674,4497,6057,6693,5840,4284,5675,4021,4545,5609,6418,4454,6419,6213,4113,4472,
5314,3738,5087,5279,4074,5610,4959,4063,3179,4750,6058,6420,6214,3476,4498,4716,
5431,4960,4685,6215,5241,6694,6421,6216,6695,5841,5945,6422,3748,5946,5179,3905,
5752,5545,5947,4374,6217,4455,6423,4412,6218,4803,5353,6696,3832,5280,6219,4327,
4702,6220,6221,6059,4652,5432,6424,3749,4751,6425,5753,4986,5393,4917,5948,5030,
5754,4861,4733,6426,4703,6697,6222,4671,5949,4546,4961,5180,6223,5031,3316,5281,
6698,4862,4295,4934,5207,3644,6427,5842,5950,6428,6429,4570,5843,5282,6430,6224,
5088,3239,6060,6699,5844,5755,6061,6431,2701,5546,6432,5115,5676,4039,3993,3327,
4752,4425,5315,6433,3941,6434,5677,4617,4604,3074,4581,6225,5433,6435,6226,6062,
4823,5756,5116,6227,3717,5678,4717,5845,6436,5679,5846,6063,5847,6064,3977,3354,
6437,3863,5117,6228,5547,5394,4499,4524,6229,4605,6230,4306,4500,6700,5951,6065,
3693,5952,5089,4366,4918,6701,6231,5548,6232,6702,6438,4704,5434,6703,6704,5953,
4168,6705,5680,3420,6706,5242,4407,6066,3812,5757,5090,5954,4672,4525,3481,5681,
4618,5395,5354,5316,5955,6439,4962,6707,4526,6440,3465,4673,6067,6441,5682,6708,
5435,5492,5758,5683,4619,4571,4674,4804,4893,4686,5493,4753,6233,6068,4269,6442,
6234,5032,4705,5146,5243,5208,5848,6235,6443,4963,5033,4640,4226,6236,5849,3387,
6444,6445,4436,4437,5850,4843,5494,4785,4894,6709,4361,6710,5091,5956,3331,6237,
4987,5549,6069,6711,4342,3517,4473,5317,6070,6712,6071,4706,6446,5017,5355,6713,
6714,4988,5436,6447,4734,5759,6715,4735,4547,4456,4754,6448,5851,6449,6450,3547,
5852,5318,6451,6452,5092,4205,6716,6238,4620,4219,5611,6239,6072,4481,5760,5957,
5958,4059,6240,6453,4227,4537,6241,5761,4030,4186,5244,5209,3761,4457,4876,3337,
5495,5181,6242,5959,5319,5612,5684,5853,3493,5854,6073,4169,5613,5147,4895,6074,
5210,6717,5182,6718,3830,6243,2798,3841,6075,6244,5855,5614,3604,4606,5496,5685,
5118,5356,6719,6454,5960,5357,5961,6720,4145,3935,4621,5119,5962,4261,6721,6455,
4786,5963,4375,4582,6245,6246,6247,6076,5437,4877,5856,3376,4380,6248,4160,6722,
5148,6456,5211,6457,6723,4718,6458,6724,6249,5358,4044,3297,6459,6250,5857,5615,
5497,5245,6460,5498,6725,6251,6252,5550,3793,5499,2959,5396,6461,6462,4572,5093,
5500,5964,3806,4146,6463,4426,5762,5858,6077,6253,4755,3967,4220,5965,6254,4989,
5501,6464,4352,6726,6078,4764,2290,5246,3906,5438,5283,3767,4964,2861,5763,5094,
6255,6256,4622,5616,5859,5860,4707,6727,4285,4708,4824,5617,6257,5551,4787,5212,
4965,4935,4687,6465,6728,6466,5686,6079,3494,4413,2995,5247,5966,5618,6729,5967,
5764,5765,5687,5502,6730,6731,6080,5397,6467,4990,6258,6732,4538,5060,5619,6733,
4719,5688,5439,5018,5149,5284,5503,6734,6081,4607,6259,5120,3645,5861,4583,6260,
4584,4675,5620,4098,5440,6261,4863,2379,3306,4585,5552,5689,4586,5285,6735,4864,
6736,5286,6082,6737,4623,3010,4788,4381,4558,5621,4587,4896,3698,3161,5248,4353,
4045,6262,3754,5183,4588,6738,6263,6739,6740,5622,3936,6741,6468,6742,6264,5095,
6469,4991,5968,6743,4992,6744,6083,4897,6745,4256,5766,4307,3108,3968,4444,5287,
3889,4343,6084,4510,6085,4559,6086,4898,5969,6746,5623,5061,4919,5249,5250,5504,
5441,6265,5320,4878,3242,5862,5251,3428,6087,6747,4237,5624,5442,6266,5553,4539,
6748,2585,3533,5398,4262,6088,5150,4736,4438,6089,6267,5505,4966,6749,6268,6750,
6269,5288,5554,3650,6090,6091,4624,6092,5690,6751,5863,4270,5691,4277,5555,5864,
6752,5692,4720,4865,6470,5151,4688,4825,6753,3094,6754,6471,3235,4653,6755,5213,
5399,6756,3201,4589,5865,4967,6472,5866,6473,5019,3016,6757,5321,4756,3957,4573,
6093,4993,5767,4721,6474,6758,5625,6759,4458,6475,6270,6760,5556,4994,5214,5252,
6271,3875,5768,6094,5034,5506,4376,5769,6761,2120,6476,5253,5770,6762,5771,5970,
3990,5971,5557,5558,5772,6477,6095,2787,4641,5972,5121,6096,6097,6272,6763,3703,
5867,5507,6273,4206,6274,4789,6098,6764,3619,3646,3833,3804,2394,3788,4936,3978,
4866,4899,6099,6100,5559,6478,6765,3599,5868,6101,5869,5870,6275,6766,4527,6767)
# flake8: noqa
| mit |
kata198/usrsvc | usrsvcmod/MainConfig.py | 1 | 3041 | '''
Copyright (c) 2016 Tim Savannah All Rights Reserved.
This software is licensed under the terms of the GPLv3.
This may change at my discretion, retroactively, and without notice.
You should have received a copy of this with the source distribution as a file titled, LICENSE.
The most current license can be found at:
https://github.com/kata198/usrsvc/LICENSE
This location may need to be changed at some point in the future, in which case
you may email Tim Savannah <kata198 at gmail dot com>, or find them on the
current website intended for distribution of usrsvc.
'MainConfig' is the main configuration file
'''
# vim:set ts=4 shiftwidth=4 softtabstop=4 expandtab :
import os
from .util import findProgramPath
__all__ = ('MainConfig', )
class MainConfig(object):
    """Top-level [Main] configuration object for usrsvc.

    Validates and normalizes the options of the Main config section and
    exposes them as attributes. Re-creating this object provides a
    refreshable view of the runtime configuration; the main op iterations
    should fetch the relevant sections fresh on each loop.
    """

    def __init__(self, config_dir=None, pidfile=None, usrsvcd_stdout=None,
                 usrsvcd_stderr=None, sendmail_path='auto', **kwargs):
        # Any leftover keyword is an option we do not understand.
        if kwargs:
            raise ValueError('Unknown config options in Main section: %s\n' % (str(list(kwargs.keys())),))

        if config_dir:
            if not config_dir.startswith('/'):
                raise ValueError('config_dir in [Main], if defined, must be an absolute path.')
            # Normalize away a single trailing slash.
            if config_dir.endswith('/'):
                config_dir = config_dir[:-1]
        self.config_dir = config_dir

        # Default pidfile lives under $HOME (falling back to /tmp) and
        # embeds the uid so multiple users do not collide.
        if not pidfile:
            pidfile = os.environ.get('HOME', '/tmp') + '/%d_usrsvcd.pid' % (os.getuid())
        self.pidfile = pidfile

        if usrsvcd_stdout and not usrsvcd_stdout.startswith('/'):
            raise ValueError('usrsvcd_stdout in [Main], if defined, must be an absolute path.')
        self.usrsvcd_stdout = usrsvcd_stdout

        # stderr may either be redirected to stdout or to an absolute path.
        if usrsvcd_stderr and usrsvcd_stderr != 'stdout' and not usrsvcd_stderr.startswith('/'):
            raise ValueError('usrsvcd_stderr in [Main], if defined, must be "stdout" or an absolute path.')
        self.usrsvcd_stderr = usrsvcd_stderr

        if not sendmail_path or sendmail_path == 'auto':
            # Autodetect: prefer the conventional locations, then $PATH.
            for candidate in ('/usr/sbin/sendmail', '/usr/bin/sendmail'):
                if os.path.exists(candidate):
                    sendmail_path = candidate
                    break
            else:
                sendmail_path = findProgramPath('sendmail')
        elif not os.path.exists(sendmail_path):
            raise ValueError('sendmail_path "%s" does not exist.' % (sendmail_path,))
        self.sendmail_path = sendmail_path

    def getProgramConfigDir(self):
        """Return the directory holding the per-program config files."""
        return self.config_dir

    def __str__(self):
        return str(self.__dict__)
# vim:set ts=4 shiftwidth=4 softtabstop=4 expandtab :
| lgpl-2.1 |
rice-apps/petition-app | controllers/dashboards.py | 1 | 3776 | """
Elections page controller.
"""
import json
import logging
import pages
import webapp2
import datetime
# from mail import sendConfirmation
from authentication import auth
from config import *
import models.organization
import models.user
import models.election
import models.petition
class DashboardHandler(webapp2.RequestHandler):
    """Renders the admin dashboard for a single organization."""

    def get(self):
        """Serve the dashboard page for the organization named by the
        'id' query parameter.

        Requires a logged-in user who is an admin of the organization.
        Non-admins now receive a 403: the previous ``assert`` is unsafe
        for authorization because asserts are stripped when Python runs
        with ``-O``, silently disabling the access check.
        """
        user = auth.require_login(self)
        if not user:
            return self.redirect(ERROR_URI)

        # Get the organization
        organization_id = self.request.get('id')
        organization = models.organization.get_organization(organization_id).to_json()
        logging.info("Organization: %s", organization)

        # Authorization: only admins of this organization may view its
        # dashboard. Explicit check instead of assert (see docstring).
        if user.get_id() not in organization['admins']:
            return self.abort(403)

        # Map each admin id to whether the current user may manage that
        # entry (themselves, or the site-wide admin account).
        organization['admins'] = {
            admin: (user.get_id() == admin or ADMIN_ID == admin)
            for admin in organization['admins']
        }

        # Bucket the organization's elections relative to today:
        # upcoming, ongoing, or expired.
        elections = models.election.get_organization_elections(organization['id'])
        ongoing_elections = []
        upcoming_elections = []
        expired_elections = []
        for election in elections:
            positions = {}
            for position in election['positions']:
                petitions = models.petition.get_petitions_for_position(election['id'], position)
                for petition in petitions:
                    # Remaining signatures needed, clamped at zero once
                    # the threshold has been met.
                    petition['signatures_left'] = max(
                        election['threshold'] - petition['signature_num'], 0)
                positions[position] = petitions
            election['positions'] = positions

            if election['start_date'] > datetime.date.today():
                upcoming_elections.append(election)
            elif election['end_date'] < datetime.date.today():
                expired_elections.append(election)
            else:
                ongoing_elections.append(election)
        logging.info("Elections: %s", elections)

        view = pages.render_view(DASHBOARD_URI, {'organization': organization,
                                                 'ongoing_elections': ongoing_elections,
                                                 'upcoming_elections': upcoming_elections,
                                                 'expired_elections': expired_elections})
        pages.render_page(self, view)
class SaveAdminsHandler(webapp2.RequestHandler):
    """Persists an updated admin list for an organization."""

    def post(self):
        """Handle the save-admins form post; acknowledges with 'Success!'."""
        user = auth.require_login(self)
        if not user:
            return self.redirect(ERROR_URI)

        payload = json.loads(self.request.get('data'))
        logging.info('Save Admins Post: %s', payload)
        models.organization.update_admins(payload['organization_id'],
                                          payload['admins'])
        self.response.out.write('Success!')
class ElectionHandler(webapp2.RequestHandler):
    """Create and delete elections for an organization."""

    def post(self):
        """Create a new election from the posted JSON payload."""
        user = auth.require_login(self)
        if not user:
            return self.redirect(ERROR_URI)

        payload = json.loads(self.request.get('data'))
        logging.info('Add Election Post: %s', payload)

        election = models.election.create_election(user, payload)
        # create_election returns a falsy value when an identical
        # election already exists.
        if election:
            self.response.out.write('Success')
        else:
            self.response.out.write('Duplicate Election')

    def delete(self):
        """Delete the election identified by the 'id' query parameter."""
        user = auth.require_login(self)
        if not user:
            return self.redirect(ERROR_URI)

        election = models.election.get_election(self.request.get('id'))
        models.election.delete_election(election)
        self.response.out.write('Success!')
| mit |
alexandrucoman/vbox-neutron-agent | neutron/plugins/hyperv/agent/l2_agent.py | 17 | 4513 | # Copyright 2015 Cloudbase Solutions Srl
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import platform
from hyperv.neutron import hyperv_neutron_agent
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import constants as n_const
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron import context
from neutron.i18n import _LE
from neutron.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Topic for tunnel notifications between the plugin and agent
TUNNEL = 'tunnel'
class HyperVSecurityAgent(sg_rpc.SecurityGroupAgentRpc):
    """Security-group agent for Hyper-V.

    Only subscribes to security-group RPC notifications when a firewall
    driver is actually enabled in the configuration.
    """

    def __init__(self, context, plugin_rpc):
        super(HyperVSecurityAgent, self).__init__(context, plugin_rpc)
        # Skip RPC wiring entirely when no firewall driver is configured.
        if sg_rpc.is_firewall_enabled():
            self._setup_rpc()

    @property
    def use_enhanced_rpc(self):
        # This agent only speaks the legacy security-group RPC interface.
        return False

    def _setup_rpc(self):
        """Subscribe to security-group update notifications."""
        self.topic = topics.AGENT
        self.endpoints = [HyperVSecurityCallbackMixin(self)]
        subscriptions = [[topics.SECURITY_GROUP, topics.UPDATE]]
        self.connection = agent_rpc.create_consumers(
            self.endpoints, self.topic, subscriptions)
class HyperVSecurityCallbackMixin(sg_rpc.SecurityGroupAgentRpcCallbackMixin):
    """RPC endpoint that receives security-group callbacks and forwards
    them to the owning agent (via the inherited mixin methods)."""

    # RPC API version this endpoint implements.
    target = oslo_messaging.Target(version='1.1')

    def __init__(self, sg_agent):
        super(HyperVSecurityCallbackMixin, self).__init__()
        # Agent the inherited callback methods delegate to.
        self.sg_agent = sg_agent
class HyperVNeutronAgent(hyperv_neutron_agent.HyperVNeutronAgentMixin):
    """Hyper-V L2 agent: combines the Hyper-V specific mixin with the
    Neutron RPC plumbing (state reporting, topic consumers, security
    groups)."""

    # Set RPC API version to 1.1 by default.
    target = oslo_messaging.Target(version='1.1')

    def __init__(self):
        super(HyperVNeutronAgent, self).__init__(conf=CONF)
        self._set_agent_state()
        self._setup_rpc()

    def _set_agent_state(self):
        # Static description of this agent that gets reported to the
        # Neutron server in every state report.
        configurations = self.get_agent_configurations()
        self.agent_state = {
            'binary': 'neutron-hyperv-agent',
            'host': CONF.host,
            'topic': n_const.L2_AGENT_TOPIC,
            'configurations': configurations,
            'agent_type': n_const.AGENT_TYPE_HYPERV,
            # 'start_flag' marks a fresh start; it is removed after the
            # first successful report (see _report_state).
            'start_flag': True}

    def _report_state(self):
        """Send one heartbeat/state report to the Neutron server."""
        try:
            self.state_rpc.report_state(self.context,
                                        self.agent_state)
            # Only the very first report should carry 'start_flag'.
            self.agent_state.pop('start_flag', None)
        except Exception:
            # Keep the agent alive on RPC failures; the next heartbeat
            # from the looping call below will retry.
            LOG.exception(_LE("Failed reporting state!"))

    def _setup_rpc(self):
        """Wire up all RPC clients and topic consumers used by the agent."""
        self.agent_id = 'hyperv_%s' % platform.node()
        self.topic = topics.AGENT
        self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
        self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        # RPC network init
        self.context = context.get_admin_context_without_session()
        # Handle updates from service
        self.endpoints = [self]
        # Define the listening consumers for the agent
        consumers = [[topics.PORT, topics.UPDATE],
                     [topics.NETWORK, topics.DELETE],
                     [topics.PORT, topics.DELETE],
                     [TUNNEL, topics.UPDATE]]
        self.connection = agent_rpc.create_consumers(self.endpoints,
                                                     self.topic,
                                                     consumers)
        self.client = n_rpc.get_client(self.target)
        self.sec_groups_agent = HyperVSecurityAgent(
            self.context, self.sg_plugin_rpc)
        # Periodic state reports; the interval comes from the agent
        # config (a falsy value disables reporting).
        report_interval = CONF.AGENT.report_interval
        if report_interval:
            heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            heartbeat.start(interval=report_interval)
| apache-2.0 |
ChanderG/docker | vendor/src/github.com/hashicorp/go-msgpack/codec/msgpack_test.py | 1232 | 3478 | #!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
import msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
    """Return the golden test values: all primitive types first, then the
    primitive list itself as one nested element, then composite containers."""
    primitives = [
        -8,
        -1616,
        -32323232,
        -6464646464646464,
        192,
        1616,
        32323232,
        6464646464646464,
        192,
        -3232.0,
        -6464646464.0,
        3232.0,
        6464646464.0,
        False,
        True,
        None,
        "someday",
        "",
        "bytestring",
        1328176922000002000,
        -2206187877999998000,
        0,
        -6795364578871345152,
    ]
    composites = [
        {"true": True,
         "false": False},
        {"true": "True",
         "false": False,
         "uint16(1616)": 1616},
        {"list": [1616, 32323232, True, -3232.0,
                  {"TRUE": True, "FALSE": False}, [True, False]],
         "int32": 32323232,
         "bool": True,
         "LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
         "SHORT STRING": "1234567890"},
        {True: "true", 8: False, "false": 0},
    ]
    # primitives, then the nested copy, then the composite values.
    return primitives + [primitives] + composites
def build_test_data(destdir):
    """Serialize each test value to ``<index>.golden`` inside *destdir*.

    The Go test suite later reads these golden files back to verify its
    msgpack decoder against this reference Python implementation.
    """
    # enumerate() replaces the range(len(...)) anti-pattern, and 'with'
    # guarantees the file handle is closed even if write() fails (the
    # original open/close pair leaked the handle on error).
    for i, value in enumerate(get_test_data_list()):
        packer = msgpack.Packer()
        serialized = packer.pack(value)
        with open(os.path.join(destdir, str(i) + '.golden'), 'wb') as f:
            f.write(serialized)
def doRpcServer(port, stopTimeSec):
    """Run a msgpack-rpc echo server on localhost:*port*.

    When *stopTimeSec* > 0 a timer thread stops the server after that many
    seconds (so tests do not hang); otherwise it serves until interrupted.
    """
    class EchoHandler(object):
        def Echo123(self, msg1, msg2, msg3):
            return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))

        def EchoStruct(self, msg):
            return ("%s" % msg)

    server = msgpackrpc.Server(EchoHandler())
    server.listen(msgpackrpc.Address('localhost', port))
    if stopTimeSec > 0:
        # One-shot timer that shuts the server down after the deadline.
        threading.Timer(stopTimeSec, server.stop).start()
    server.start()
def doRpcClientToPythonSvc(port):
    """Call the Python echo service on localhost:`port` and print the replies.

    NOTE(review): uses Python 2 ``print`` statements; this script is not
    Python 3 compatible.
    """
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print client.call("Echo123", "A1", "B2", "C3")
    print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
    """Call the Go echo service on localhost:`port` and print the replies.

    Unlike the Python service, the Go side expects the positional args as a
    single list and method names qualified with the receiver ("TestRpcInt.").
    NOTE(review): Python 2 ``print`` statements; not Python 3 compatible.
    """
    # print ">>>> port: ", port, " <<<<<"
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
    print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
    """Dispatch on the first CLI argument to the matching test action."""
    command = args[0] if args else None
    rest = args[1:]
    if command == "testdata" and len(rest) == 1:
        build_test_data(rest[0])
    elif command == "rpc-server" and len(rest) == 2:
        doRpcServer(int(rest[0]), int(rest[1]))
    elif command == "rpc-client-python-service" and len(rest) == 1:
        doRpcClientToPythonSvc(int(rest[0]))
    elif command == "rpc-client-go-service" and len(rest) == 1:
        doRpcClientToGoSvc(int(rest[0]))
    else:
        print("Usage: msgpack_test.py "
              "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
| apache-2.0 |
psf/black | tests/data/fmtonoff.py | 1 | 9645 | #!/usr/bin/env python3
import asyncio
import sys
from third_party import X, Y, Z
from library import some_connection, \
some_decorator
# fmt: off
from third_party import (X,
Y, Z)
# fmt: on
f'trigger 3.6 mode'
# Comment 1
# Comment 2
# fmt: off
def func_no_args():
a; b; c
if True: raise RuntimeError
if False: ...
for i in range(10):
print(i)
continue
exec('new-style exec', {}, {})
return None
async def coroutine(arg, exec=False):
'Single-line docstring. Multiline is harder to reformat.'
async with some_connection() as conn:
await conn.do_what_i_mean('SELECT bobby, tables FROM xkcd', timeout=2)
await asyncio.sleep(1)
@asyncio.coroutine
@some_decorator(
with_args=True,
many_args=[1,2,3]
)
def function_signature_stress_test(number:int,no_annotation=None,text:str='default',* ,debug:bool=False,**kwargs) -> str:
return text[number:-1]
# fmt: on
def spaces(a=1, b=(), c=[], d={}, e=True, f=-1, g=1 if False else 2, h="", i=r''):
offset = attr.ib(default=attr.Factory( lambda: _r.uniform(1, 2)))
assert task._cancel_stack[:len(old_stack)] == old_stack
def spaces_types(a: int = 1, b: tuple = (), c: list = [], d: dict = {}, e: bool = True, f: int = -1, g: int = 1 if False else 2, h: str = "", i: str = r''): ...
def spaces2(result= _core.Value(None)):
...
something = {
# fmt: off
key: 'value',
}
def subscriptlist():
atom[
# fmt: off
'some big and',
'complex subscript',
# fmt: on
goes + here, andhere,
]
def import_as_names():
# fmt: off
from hello import a, b
'unformatted'
# fmt: on
def testlist_star_expr():
# fmt: off
a , b = *hello
'unformatted'
# fmt: on
def yield_expr():
# fmt: off
yield hello
'unformatted'
# fmt: on
'formatted'
# fmt: off
( yield hello )
'unformatted'
# fmt: on
def example(session):
# fmt: off
result = session\
.query(models.Customer.id)\
.filter(models.Customer.account_id == account_id,
models.Customer.email == email_address)\
.order_by(models.Customer.id.asc())\
.all()
# fmt: on
def off_and_on_without_data():
"""All comments here are technically on the same prefix.
The comments between will be formatted. This is a known limitation.
"""
# fmt: off
#hey, that won't work
# fmt: on
pass
def on_and_off_broken():
"""Another known limitation."""
# fmt: on
# fmt: off
this=should.not_be.formatted()
and_=indeed . it is not formatted
because . the . handling . inside . generate_ignored_nodes()
now . considers . multiple . fmt . directives . within . one . prefix
# fmt: on
# fmt: off
# ...but comments still get reformatted even though they should not be
# fmt: on
def long_lines():
if True:
typedargslist.extend(
gen_annotated_params(ast_args.kwonlyargs, ast_args.kw_defaults, parameters, implicit_default=True)
)
# fmt: off
a = (
unnecessary_bracket()
)
# fmt: on
_type_comment_re = re.compile(
r"""
^
[\t ]*
\#[ ]type:[ ]*
(?P<type>
[^#\t\n]+?
)
(?<!ignore) # note: this will force the non-greedy + in <type> to match
# a trailing space which is why we need the silliness below
(?<!ignore[ ]{1})(?<!ignore[ ]{2})(?<!ignore[ ]{3})(?<!ignore[ ]{4})
(?<!ignore[ ]{5})(?<!ignore[ ]{6})(?<!ignore[ ]{7})(?<!ignore[ ]{8})
(?<!ignore[ ]{9})(?<!ignore[ ]{10})
[\t ]*
(?P<nl>
(?:\#[^\n]*)?
\n?
)
$
""",
# fmt: off
re.MULTILINE|re.VERBOSE
# fmt: on
)
def single_literal_yapf_disable():
"""Black does not support this."""
BAZ = {
(1, 2, 3, 4),
(5, 6, 7, 8),
(9, 10, 11, 12)
} # yapf: disable
cfg.rule(
"Default", "address",
xxxx_xxxx=["xxx-xxxxxx-xxxxxxxxxx"],
xxxxxx="xx_xxxxx", xxxxxxx="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
xxxxxxxxx_xxxx=True, xxxxxxxx_xxxxxxxxxx=False,
xxxxxx_xxxxxx=2, xxxxxx_xxxxx_xxxxxxxx=70, xxxxxx_xxxxxx_xxxxx=True,
# fmt: off
xxxxxxx_xxxxxxxxxxxx={
"xxxxxxxx": {
"xxxxxx": False,
"xxxxxxx": False,
"xxxx_xxxxxx": "xxxxx",
},
"xxxxxxxx-xxxxx": {
"xxxxxx": False,
"xxxxxxx": True,
"xxxx_xxxxxx": "xxxxxx",
},
},
# fmt: on
xxxxxxxxxx_xxxxxxxxxxx_xxxxxxx_xxxxxxxxx=5
)
# fmt: off
yield 'hello'
# No formatting to the end of the file
l=[1,2,3]
d={'a':1,
'b':2}
# output
#!/usr/bin/env python3
import asyncio
import sys
from third_party import X, Y, Z
from library import some_connection, some_decorator
# fmt: off
from third_party import (X,
Y, Z)
# fmt: on
f"trigger 3.6 mode"
# Comment 1
# Comment 2
# fmt: off
def func_no_args():
a; b; c
if True: raise RuntimeError
if False: ...
for i in range(10):
print(i)
continue
exec('new-style exec', {}, {})
return None
async def coroutine(arg, exec=False):
'Single-line docstring. Multiline is harder to reformat.'
async with some_connection() as conn:
await conn.do_what_i_mean('SELECT bobby, tables FROM xkcd', timeout=2)
await asyncio.sleep(1)
@asyncio.coroutine
@some_decorator(
with_args=True,
many_args=[1,2,3]
)
def function_signature_stress_test(number:int,no_annotation=None,text:str='default',* ,debug:bool=False,**kwargs) -> str:
return text[number:-1]
# fmt: on
def spaces(a=1, b=(), c=[], d={}, e=True, f=-1, g=1 if False else 2, h="", i=r""):
offset = attr.ib(default=attr.Factory(lambda: _r.uniform(1, 2)))
assert task._cancel_stack[: len(old_stack)] == old_stack
def spaces_types(
a: int = 1,
b: tuple = (),
c: list = [],
d: dict = {},
e: bool = True,
f: int = -1,
g: int = 1 if False else 2,
h: str = "",
i: str = r"",
):
...
def spaces2(result=_core.Value(None)):
...
something = {
# fmt: off
key: 'value',
}
def subscriptlist():
atom[
# fmt: off
'some big and',
'complex subscript',
# fmt: on
goes + here,
andhere,
]
def import_as_names():
# fmt: off
from hello import a, b
'unformatted'
# fmt: on
def testlist_star_expr():
# fmt: off
a , b = *hello
'unformatted'
# fmt: on
def yield_expr():
# fmt: off
yield hello
'unformatted'
# fmt: on
"formatted"
# fmt: off
( yield hello )
'unformatted'
# fmt: on
def example(session):
# fmt: off
result = session\
.query(models.Customer.id)\
.filter(models.Customer.account_id == account_id,
models.Customer.email == email_address)\
.order_by(models.Customer.id.asc())\
.all()
# fmt: on
def off_and_on_without_data():
"""All comments here are technically on the same prefix.
The comments between will be formatted. This is a known limitation.
"""
# fmt: off
# hey, that won't work
# fmt: on
pass
def on_and_off_broken():
"""Another known limitation."""
# fmt: on
# fmt: off
this=should.not_be.formatted()
and_=indeed . it is not formatted
because . the . handling . inside . generate_ignored_nodes()
now . considers . multiple . fmt . directives . within . one . prefix
# fmt: on
# fmt: off
# ...but comments still get reformatted even though they should not be
# fmt: on
def long_lines():
if True:
typedargslist.extend(
gen_annotated_params(
ast_args.kwonlyargs,
ast_args.kw_defaults,
parameters,
implicit_default=True,
)
)
# fmt: off
a = (
unnecessary_bracket()
)
# fmt: on
_type_comment_re = re.compile(
r"""
^
[\t ]*
\#[ ]type:[ ]*
(?P<type>
[^#\t\n]+?
)
(?<!ignore) # note: this will force the non-greedy + in <type> to match
# a trailing space which is why we need the silliness below
(?<!ignore[ ]{1})(?<!ignore[ ]{2})(?<!ignore[ ]{3})(?<!ignore[ ]{4})
(?<!ignore[ ]{5})(?<!ignore[ ]{6})(?<!ignore[ ]{7})(?<!ignore[ ]{8})
(?<!ignore[ ]{9})(?<!ignore[ ]{10})
[\t ]*
(?P<nl>
(?:\#[^\n]*)?
\n?
)
$
""",
# fmt: off
re.MULTILINE|re.VERBOSE
# fmt: on
)
def single_literal_yapf_disable():
"""Black does not support this."""
BAZ = {(1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12)} # yapf: disable
cfg.rule(
"Default",
"address",
xxxx_xxxx=["xxx-xxxxxx-xxxxxxxxxx"],
xxxxxx="xx_xxxxx",
xxxxxxx="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
xxxxxxxxx_xxxx=True,
xxxxxxxx_xxxxxxxxxx=False,
xxxxxx_xxxxxx=2,
xxxxxx_xxxxx_xxxxxxxx=70,
xxxxxx_xxxxxx_xxxxx=True,
# fmt: off
xxxxxxx_xxxxxxxxxxxx={
"xxxxxxxx": {
"xxxxxx": False,
"xxxxxxx": False,
"xxxx_xxxxxx": "xxxxx",
},
"xxxxxxxx-xxxxx": {
"xxxxxx": False,
"xxxxxxx": True,
"xxxx_xxxxxx": "xxxxxx",
},
},
# fmt: on
xxxxxxxxxx_xxxxxxxxxxx_xxxxxxx_xxxxxxxxx=5,
)
# fmt: off
yield 'hello'
# No formatting to the end of the file
l=[1,2,3]
d={'a':1,
'b':2}
| mit |
nttks/edx-platform | openedx/core/djangoapps/course_groups/tests/test_views.py | 30 | 51310 | """
Tests for course group views
"""
# pylint: disable=attribute-defined-outside-init
# pylint: disable=no-member
import json
from collections import namedtuple
from datetime import datetime
from unittest import skipUnless
from django.conf import settings
from django.contrib.auth.models import User
from django.http import Http404
from django.test.client import RequestFactory
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.tests.factories import ItemFactory
from ..models import CourseUserGroup, CourseCohort
from ..views import (
course_cohort_settings_handler, cohort_handler, users_in_cohort, add_users_to_cohort, remove_user_from_cohort,
link_cohort_to_partition_group, cohort_discussion_topics
)
from ..cohorts import (
get_cohort, get_cohort_by_name, get_cohort_by_id,
DEFAULT_COHORT_NAME, get_group_info_for_cohort
)
from .helpers import (
config_course_cohorts, config_course_cohorts_legacy, CohortFactory, CourseCohortFactory, topic_name_to_id
)
class CohortViewsTestCase(ModuleStoreTestCase):
    """
    Base class which sets up a course and staff/non-staff users.

    Also provides helpers for creating cohorts and cohorted discussions, and
    for invoking the cohort view handlers via GET/PUT/PATCH as the staff user.
    NOTE(review): uses Python 2 builtins (``unicode``, ``basestring``); this
    module is not Python 3 compatible.
    """
    def setUp(self):
        super(CohortViewsTestCase, self).setUp()
        self.course = CourseFactory.create()
        self.staff_user = UserFactory(is_staff=True, username="staff")
        self.non_staff_user = UserFactory(username="nonstaff")
    def _enroll_users(self, users, course_key):
        """Enroll each user in the specified course"""
        for user in users:
            CourseEnrollment.enroll(user, course_key)
    def _create_cohorts(self):
        """Creates cohorts for testing"""
        # Cohorts 1-3 get MANUAL assignment, cohort4 RANDOM (set below via
        # CourseCohortFactory).  "cohortless" users are enrolled but in no
        # cohort; "unenrolled" users are deliberately not enrolled at all.
        self.cohort1_users = [UserFactory() for _ in range(3)]
        self.cohort2_users = [UserFactory() for _ in range(2)]
        self.cohort3_users = [UserFactory() for _ in range(2)]
        self.cohort4_users = [UserFactory() for _ in range(2)]
        self.cohortless_users = [UserFactory() for _ in range(3)]
        self.unenrolled_users = [UserFactory() for _ in range(3)]
        self._enroll_users(
            self.cohort1_users + self.cohort2_users + self.cohort3_users + self.cohortless_users + self.cohort4_users,
            self.course.id
        )
        self.cohort1 = CohortFactory(course_id=self.course.id, users=self.cohort1_users)
        self.cohort2 = CohortFactory(course_id=self.course.id, users=self.cohort2_users)
        self.cohort3 = CohortFactory(course_id=self.course.id, users=self.cohort3_users)
        self.cohort4 = CohortFactory(course_id=self.course.id, users=self.cohort4_users)
        CourseCohortFactory(course_user_group=self.cohort1)
        CourseCohortFactory(course_user_group=self.cohort2)
        CourseCohortFactory(course_user_group=self.cohort3)
        CourseCohortFactory(course_user_group=self.cohort4, assignment_type=CourseCohort.RANDOM)
    def _user_in_cohort(self, username, cohort):
        """
        Return true iff a user with `username` exists in `cohort`.
        """
        return username in [user.username for user in cohort.users.all()]
    def _verify_non_staff_cannot_access(self, view, request_method, view_args):
        """
        Verify that a non-staff user cannot access a given view.
        `view` is the view to test.
        `view_args` is a list of arguments (not including the request) to pass
        to the view.
        """
        if request_method == "GET":
            request = RequestFactory().get("dummy_url")
        elif request_method == "POST":
            request = RequestFactory().post("dummy_url")
        else:
            # Any other method (e.g. "PUT"/"PATCH") gets a bare request.
            request = RequestFactory().request()
        request.user = self.non_staff_user
        # NOTE(review): mutates the caller's `view_args` list in place.
        view_args.insert(0, request)
        self.assertRaises(Http404, view, *view_args)
    def create_cohorted_discussions(self):
        """
        Set up a cohorted discussion in the system, complete with all the fixings

        Returns a tuple of (cohorted inline topic names,
        cohorted course-wide topic names).
        """
        cohorted_inline_discussions = ['Topic A']
        cohorted_course_wide_discussions = ["Topic B"]
        cohorted_discussions = cohorted_inline_discussions + cohorted_course_wide_discussions
        # inline discussion
        ItemFactory.create(
            parent_location=self.course.location,
            category="discussion",
            discussion_id=topic_name_to_id(self.course, "Topic A"),
            discussion_category="Chapter",
            discussion_target="Discussion",
            start=datetime.now()
        )
        # course-wide discussion
        discussion_topics = {
            "Topic B": {"id": "Topic B"},
        }
        config_course_cohorts(
            self.course,
            is_cohorted=True,
            discussion_topics=discussion_topics,
            cohorted_discussions=cohorted_discussions
        )
        return cohorted_inline_discussions, cohorted_course_wide_discussions
    def get_handler(self, course, cohort=None, expected_response_code=200, handler=cohort_handler):
        """
        Call a GET on `handler` for a given `course` and return its response as a dict.
        Raise an exception if response status code is not as expected.
        """
        request = RequestFactory().get("dummy_url")
        request.user = self.staff_user
        if cohort:
            response = handler(request, unicode(course.id), cohort.id)
        else:
            response = handler(request, unicode(course.id))
        self.assertEqual(response.status_code, expected_response_code)
        return json.loads(response.content)
    def put_handler(self, course, cohort=None, data=None, expected_response_code=200, handler=cohort_handler):
        """
        Call a PUT on `handler` for a given `course` and return its response as a dict.
        Raise an exception if response status code is not as expected.
        """
        # A string is passed through untouched; anything else (dict/None) is
        # JSON-encoded, so tests can send deliberately malformed payloads.
        if not isinstance(data, basestring):
            data = json.dumps(data or {})
        request = RequestFactory().put(path="dummy path", data=data, content_type="application/json")
        request.user = self.staff_user
        if cohort:
            response = handler(request, unicode(course.id), cohort.id)
        else:
            response = handler(request, unicode(course.id))
        self.assertEqual(response.status_code, expected_response_code)
        return json.loads(response.content)
    def patch_handler(self, course, cohort=None, data=None, expected_response_code=200, handler=cohort_handler):
        """
        Call a PATCH on `handler` for a given `course` and return its response as a dict.
        Raise an exception if response status code is not as expected.
        """
        if not isinstance(data, basestring):
            data = json.dumps(data or {})
        request = RequestFactory().patch(path="dummy path", data=data, content_type="application/json")
        request.user = self.staff_user
        if cohort:
            response = handler(request, unicode(course.id), cohort.id)
        else:
            response = handler(request, unicode(course.id))
        self.assertEqual(response.status_code, expected_response_code)
        return json.loads(response.content)
class CourseCohortSettingsHandlerTestCase(CohortViewsTestCase):
    """
    Tests the `course_cohort_settings_handler` view.
    """
    def get_expected_response(self):
        """
        Returns the static response dict.

        This is the settings payload for a freshly-cohorted course with no
        cohorted discussions; tests copy and tweak it per scenario.
        """
        return {
            'is_cohorted': True,
            'always_cohort_inline_discussions': True,
            'cohorted_inline_discussions': [],
            'cohorted_course_wide_discussions': [],
            'id': 1
        }
    def test_non_staff(self):
        """
        Verify that we cannot access course_cohort_settings_handler if we're a non-staff user.
        """
        self._verify_non_staff_cannot_access(course_cohort_settings_handler, "GET", [unicode(self.course.id)])
        self._verify_non_staff_cannot_access(course_cohort_settings_handler, "PATCH", [unicode(self.course.id)])
    def test_get_settings(self):
        """
        Verify that course_cohort_settings_handler is working for HTTP GET.
        """
        cohorted_inline_discussions, cohorted_course_wide_discussions = self.create_cohorted_discussions()
        response = self.get_handler(self.course, handler=course_cohort_settings_handler)
        expected_response = self.get_expected_response()
        # The handler returns topic ids, not display names, so translate.
        expected_response['cohorted_inline_discussions'] = [topic_name_to_id(self.course, name)
                                                           for name in cohorted_inline_discussions]
        expected_response['cohorted_course_wide_discussions'] = [topic_name_to_id(self.course, name)
                                                                 for name in cohorted_course_wide_discussions]
        self.assertEqual(response, expected_response)
    def test_update_is_cohorted_settings(self):
        """
        Verify that course_cohort_settings_handler is working for is_cohorted via HTTP PATCH.
        """
        config_course_cohorts(self.course, is_cohorted=True)
        response = self.get_handler(self.course, handler=course_cohort_settings_handler)
        expected_response = self.get_expected_response()
        self.assertEqual(response, expected_response)
        expected_response['is_cohorted'] = False
        response = self.patch_handler(self.course, data=expected_response, handler=course_cohort_settings_handler)
        self.assertEqual(response, expected_response)
    def test_update_always_cohort_inline_discussion_settings(self):
        """
        Verify that course_cohort_settings_handler is working for always_cohort_inline_discussions via HTTP PATCH.
        """
        config_course_cohorts(self.course, is_cohorted=True)
        response = self.get_handler(self.course, handler=course_cohort_settings_handler)
        expected_response = self.get_expected_response()
        self.assertEqual(response, expected_response)
        expected_response['always_cohort_inline_discussions'] = False
        response = self.patch_handler(self.course, data=expected_response, handler=course_cohort_settings_handler)
        self.assertEqual(response, expected_response)
    def test_update_course_wide_discussion_settings(self):
        """
        Verify that course_cohort_settings_handler is working for cohorted_course_wide_discussions via HTTP PATCH.
        """
        # course-wide discussion
        discussion_topics = {
            "Topic B": {"id": "Topic B"},
        }
        config_course_cohorts(self.course, is_cohorted=True, discussion_topics=discussion_topics)
        response = self.get_handler(self.course, handler=course_cohort_settings_handler)
        expected_response = self.get_expected_response()
        self.assertEqual(response, expected_response)
        expected_response['cohorted_course_wide_discussions'] = [topic_name_to_id(self.course, "Topic B")]
        response = self.patch_handler(self.course, data=expected_response, handler=course_cohort_settings_handler)
        self.assertEqual(response, expected_response)
    def test_update_inline_discussion_settings(self):
        """
        Verify that course_cohort_settings_handler is working for cohorted_inline_discussions via HTTP PATCH.
        """
        config_course_cohorts(self.course, is_cohorted=True)
        response = self.get_handler(self.course, handler=course_cohort_settings_handler)
        expected_response = self.get_expected_response()
        self.assertEqual(response, expected_response)
        now = datetime.now()
        # inline discussion
        ItemFactory.create(
            parent_location=self.course.location,
            category="discussion",
            discussion_id="Topic_A",
            discussion_category="Chapter",
            discussion_target="Discussion",
            start=now
        )
        expected_response['cohorted_inline_discussions'] = ["Topic_A"]
        response = self.patch_handler(self.course, data=expected_response, handler=course_cohort_settings_handler)
        self.assertEqual(response, expected_response)
    def test_update_settings_with_missing_field(self):
        """
        Verify that course_cohort_settings_handler return HTTP 400 if required data field is missing from post data.
        """
        config_course_cohorts(self.course, is_cohorted=True)
        # No `data` kwarg -> empty JSON body, which lacks the required fields.
        response = self.patch_handler(self.course, expected_response_code=400, handler=course_cohort_settings_handler)
        self.assertEqual("Bad Request", response.get("error"))
    def test_update_settings_with_invalid_field_data_type(self):
        """
        Verify that course_cohort_settings_handler return HTTP 400 if field data type is incorrect.
        """
        config_course_cohorts(self.course, is_cohorted=True)
        response = self.patch_handler(
            self.course,
            data={'is_cohorted': ''},
            expected_response_code=400,
            handler=course_cohort_settings_handler
        )
        self.assertEqual(
            "Incorrect field type for `{}`. Type must be `{}`".format('is_cohorted', bool.__name__),
            response.get("error")
        )
class CohortHandlerTestCase(CohortViewsTestCase):
    """
    Tests the `cohort_handler` view.

    Covers listing cohorts, fetching a single cohort, creating cohorts (PUT
    without an existing cohort), and updating cohorts (PUT with a cohort).
    """
    def verify_lists_expected_cohorts(self, expected_cohorts, response_dict=None):
        """
        Verify that the server response contains the expected_cohorts.
        If response_dict is None, the list of cohorts is requested from the server.
        """
        if response_dict is None:
            response_dict = self.get_handler(self.course)
        self.assertEqual(
            response_dict.get("cohorts"),
            [
                {
                    "name": cohort.name,
                    "id": cohort.id,
                    "user_count": cohort.user_count,
                    "assignment_type": cohort.assignment_type,
                    "user_partition_id": None,
                    "group_id": None
                }
                for cohort in expected_cohorts
            ]
        )
    @staticmethod
    def create_expected_cohort(cohort, user_count, assignment_type, user_partition_id=None, group_id=None):
        """
        Create a tuple storing the expected cohort information.
        """
        cohort_tuple = namedtuple("Cohort", "name id user_count assignment_type user_partition_id group_id")
        return cohort_tuple(
            name=cohort.name, id=cohort.id, user_count=user_count, assignment_type=assignment_type,
            user_partition_id=user_partition_id, group_id=group_id
        )
    def test_non_staff(self):
        """
        Verify that we cannot access cohort_handler if we're a non-staff user.
        """
        self._verify_non_staff_cannot_access(cohort_handler, "GET", [unicode(self.course.id)])
        self._verify_non_staff_cannot_access(cohort_handler, "POST", [unicode(self.course.id)])
        self._verify_non_staff_cannot_access(cohort_handler, "PUT", [unicode(self.course.id)])
    def test_no_cohorts(self):
        """
        Verify that no cohorts are in response for a course with no cohorts.
        """
        self.verify_lists_expected_cohorts([])
    def test_some_cohorts(self):
        """
        Verify that cohorts are in response for a course with some cohorts.
        """
        self._create_cohorts()
        expected_cohorts = [
            CohortHandlerTestCase.create_expected_cohort(self.cohort1, 3, CourseCohort.MANUAL),
            CohortHandlerTestCase.create_expected_cohort(self.cohort2, 2, CourseCohort.MANUAL),
            CohortHandlerTestCase.create_expected_cohort(self.cohort3, 2, CourseCohort.MANUAL),
            CohortHandlerTestCase.create_expected_cohort(self.cohort4, 2, CourseCohort.RANDOM),
        ]
        self.verify_lists_expected_cohorts(expected_cohorts)
    def test_auto_cohorts(self):
        """
        Verify that auto cohorts are included in the response.
        """
        config_course_cohorts(self.course, is_cohorted=True, auto_cohorts=["AutoGroup1", "AutoGroup2"])
        # Will create manual cohorts cohort1, cohort2, and cohort3.
        self._create_cohorts()
        actual_cohorts = self.get_handler(self.course)
        # Get references to the created auto cohorts.
        auto_cohort_1 = get_cohort_by_name(self.course.id, "AutoGroup1")
        auto_cohort_2 = get_cohort_by_name(self.course.id, "AutoGroup2")
        expected_cohorts = [
            CohortHandlerTestCase.create_expected_cohort(auto_cohort_1, 0, CourseCohort.RANDOM),
            CohortHandlerTestCase.create_expected_cohort(auto_cohort_2, 0, CourseCohort.RANDOM),
            CohortHandlerTestCase.create_expected_cohort(self.cohort1, 3, CourseCohort.MANUAL),
            CohortHandlerTestCase.create_expected_cohort(self.cohort2, 2, CourseCohort.MANUAL),
            CohortHandlerTestCase.create_expected_cohort(self.cohort3, 2, CourseCohort.MANUAL),
            CohortHandlerTestCase.create_expected_cohort(self.cohort4, 2, CourseCohort.RANDOM),
        ]
        self.verify_lists_expected_cohorts(expected_cohorts, actual_cohorts)
    def test_default_cohort(self):
        """
        Verify that the default cohort is not created and included in the response until students are assigned to it.
        """
        # verify the default cohort is not created when the course is not cohorted
        self.verify_lists_expected_cohorts([])
        # create a cohorted course without any auto_cohorts
        config_course_cohorts(self.course, is_cohorted=True)
        # verify the default cohort is not yet created until a user is assigned
        self.verify_lists_expected_cohorts([])
        # create enrolled users
        users = [UserFactory() for _ in range(3)]
        self._enroll_users(users, self.course.id)
        # mimic users accessing the discussion forum
        # Default Cohort will be created here
        for user in users:
            get_cohort(user, self.course.id)
        # verify the default cohort is automatically created
        default_cohort = get_cohort_by_name(self.course.id, DEFAULT_COHORT_NAME)
        actual_cohorts = self.get_handler(self.course)
        self.verify_lists_expected_cohorts(
            [CohortHandlerTestCase.create_expected_cohort(default_cohort, len(users), CourseCohort.RANDOM)],
            actual_cohorts,
        )
        # set auto_cohort_groups
        # these cohort config will have not effect on lms side as we are already done with migrations
        config_course_cohorts_legacy(self.course, [], cohorted=True, auto_cohort_groups=["AutoGroup"])
        # We should expect the DoesNotExist exception because above cohort config have
        # no effect on lms side so as a result there will be no AutoGroup cohort present
        with self.assertRaises(CourseUserGroup.DoesNotExist):
            get_cohort_by_name(self.course.id, "AutoGroup")
    def test_get_single_cohort(self):
        """
        Tests that information for just a single cohort can be requested.
        """
        self._create_cohorts()
        response_dict = self.get_handler(self.course, self.cohort2)
        self.assertEqual(
            response_dict,
            {
                "name": self.cohort2.name,
                "id": self.cohort2.id,
                "user_count": 2,
                "assignment_type": CourseCohort.MANUAL,
                "user_partition_id": None,
                "group_id": None
            }
        )
    ############### Tests of adding a new cohort ###############
    def verify_contains_added_cohort(
            self, response_dict, cohort_name, assignment_type=CourseCohort.MANUAL,
            expected_user_partition_id=None, expected_group_id=None
    ):
        """
        Verifies that the cohort was created properly and the correct response was returned.
        """
        created_cohort = get_cohort_by_name(self.course.id, cohort_name)
        self.assertIsNotNone(created_cohort)
        self.assertEqual(
            response_dict,
            {
                "name": cohort_name,
                "id": created_cohort.id,
                "user_count": 0,
                "assignment_type": assignment_type,
                "user_partition_id": expected_user_partition_id,
                "group_id": expected_group_id
            }
        )
        self.assertEqual((expected_group_id, expected_user_partition_id), get_group_info_for_cohort(created_cohort))
    def test_create_new_cohort(self):
        """
        Verify that a new cohort can be created, with and without user_partition_id/group_id information.
        """
        new_cohort_name = "New cohort unassociated to content groups"
        request_data = {'name': new_cohort_name, 'assignment_type': CourseCohort.RANDOM}
        response_dict = self.put_handler(self.course, data=request_data)
        self.verify_contains_added_cohort(response_dict, new_cohort_name, assignment_type=CourseCohort.RANDOM)
        new_cohort_name = "New cohort linked to group"
        data = {
            'name': new_cohort_name,
            'assignment_type': CourseCohort.MANUAL,
            'user_partition_id': 1,
            'group_id': 2
        }
        response_dict = self.put_handler(self.course, data=data)
        self.verify_contains_added_cohort(
            response_dict,
            new_cohort_name,
            expected_user_partition_id=1,
            expected_group_id=2
        )
    def test_create_new_cohort_missing_name(self):
        """
        Verify that we cannot create a cohort without specifying a name.
        """
        response_dict = self.put_handler(self.course, expected_response_code=400)
        self.assertEqual("Cohort name must be specified.", response_dict.get("error"))
    def test_create_new_cohort_missing_assignment_type(self):
        """
        Verify that we cannot create a cohort without specifying an assignment type.
        """
        response_dict = self.put_handler(self.course, data={'name': 'COHORT NAME'}, expected_response_code=400)
        self.assertEqual("Assignment type must be specified.", response_dict.get("error"))
    def test_create_new_cohort_existing_name(self):
        """
        Verify that we cannot add a cohort with the same name as an existing cohort.
        """
        self._create_cohorts()
        response_dict = self.put_handler(
            self.course, data={'name': self.cohort1.name, 'assignment_type': CourseCohort.MANUAL},
            expected_response_code=400
        )
        self.assertEqual("You cannot create two cohorts with the same name", response_dict.get("error"))
    def test_create_new_cohort_missing_user_partition_id(self):
        """
        Verify that we cannot create a cohort with a group_id if the user_partition_id is not also specified.
        """
        data = {'name': "Cohort missing user_partition_id", 'assignment_type': CourseCohort.MANUAL, 'group_id': 2}
        response_dict = self.put_handler(self.course, data=data, expected_response_code=400)
        self.assertEqual(
            "If group_id is specified, user_partition_id must also be specified.", response_dict.get("error")
        )
    ############### Tests of updating an existing cohort ###############
    def test_update_manual_cohort_name(self):
        """
        Test that it is possible to update the name of an existing manual cohort.
        """
        self._create_cohorts()
        updated_name = self.cohort1.name + "_updated"
        data = {'name': updated_name, 'assignment_type': CourseCohort.MANUAL}
        response_dict = self.put_handler(self.course, self.cohort1, data=data)
        self.assertEqual(updated_name, get_cohort_by_id(self.course.id, self.cohort1.id).name)
        self.assertEqual(updated_name, response_dict.get("name"))
        self.assertEqual(CourseCohort.MANUAL, response_dict.get("assignment_type"))
    def test_update_random_cohort_name(self):
        """
        Test that it is possible to update the name of an existing random cohort.
        """
        # Create a new cohort with random assignment
        cohort_name = 'I AM A RANDOM COHORT'
        data = {'name': cohort_name, 'assignment_type': CourseCohort.RANDOM}
        response_dict = self.put_handler(self.course, data=data)
        self.assertEqual(cohort_name, response_dict.get("name"))
        self.assertEqual(CourseCohort.RANDOM, response_dict.get("assignment_type"))
        # Update the newly created random cohort
        newly_created_cohort = get_cohort_by_name(self.course.id, cohort_name)
        cohort_name = 'I AM AN UPDATED RANDOM COHORT'
        data = {'name': cohort_name, 'assignment_type': CourseCohort.RANDOM}
        response_dict = self.put_handler(self.course, newly_created_cohort, data=data)
        self.assertEqual(cohort_name, get_cohort_by_id(self.course.id, newly_created_cohort.id).name)
        self.assertEqual(cohort_name, response_dict.get("name"))
        self.assertEqual(CourseCohort.RANDOM, response_dict.get("assignment_type"))
    def test_cannot_update_assignment_type_of_single_random_cohort(self):
        """
        Test that it is not possible to update the assignment type of a single random cohort.
        """
        # Create a new cohort with random assignment
        cohort_name = 'I AM A RANDOM COHORT'
        data = {'name': cohort_name, 'assignment_type': CourseCohort.RANDOM}
        response_dict = self.put_handler(self.course, data=data)
        self.assertEqual(cohort_name, response_dict.get("name"))
        self.assertEqual(CourseCohort.RANDOM, response_dict.get("assignment_type"))
        # Try to update the assignment type of newly created random cohort
        cohort = get_cohort_by_name(self.course.id, cohort_name)
        data = {'name': cohort_name, 'assignment_type': CourseCohort.MANUAL}
        response_dict = self.put_handler(self.course, cohort, data=data, expected_response_code=400)
        self.assertEqual(
            'There must be one cohort to which students can automatically be assigned.', response_dict.get("error")
        )
    def test_update_cohort_group_id(self):
        """
        Test that it is possible to update the user_partition_id/group_id of an existing cohort.
        """
        self._create_cohorts()
        self.assertEqual((None, None), get_group_info_for_cohort(self.cohort1))
        data = {
            'name': self.cohort1.name,
            'assignment_type': CourseCohort.MANUAL,
            'group_id': 2,
            'user_partition_id': 3
        }
        response_dict = self.put_handler(self.course, self.cohort1, data=data)
        # get_group_info_for_cohort returns (group_id, user_partition_id).
        self.assertEqual((2, 3), get_group_info_for_cohort(self.cohort1))
        self.assertEqual(2, response_dict.get("group_id"))
        self.assertEqual(3, response_dict.get("user_partition_id"))
        # Check that the name didn't change.
        self.assertEqual(self.cohort1.name, response_dict.get("name"))
    def test_update_cohort_remove_group_id(self):
        """
        Test that it is possible to remove the user_partition_id/group_id linking of an existing cohort.
        """
        self._create_cohorts()
        link_cohort_to_partition_group(self.cohort1, 5, 0)
        self.assertEqual((0, 5), get_group_info_for_cohort(self.cohort1))
        # Sending group_id=None (with no user_partition_id) unlinks the cohort.
        data = {'name': self.cohort1.name, 'assignment_type': CourseCohort.RANDOM, 'group_id': None}
        response_dict = self.put_handler(self.course, self.cohort1, data=data)
        self.assertEqual((None, None), get_group_info_for_cohort(self.cohort1))
        self.assertIsNone(response_dict.get("group_id"))
        self.assertIsNone(response_dict.get("user_partition_id"))
    def test_change_cohort_group_id(self):
        """
        Test that it is possible to change the user_partition_id/group_id of an existing cohort to a
        different group_id.
        """
        self._create_cohorts()
        self.assertEqual((None, None), get_group_info_for_cohort(self.cohort4))
        data = {
            'name': self.cohort4.name,
            'assignment_type': CourseCohort.RANDOM,
            'group_id': 2,
            'user_partition_id': 3
        }
        self.put_handler(self.course, self.cohort4, data=data)
        self.assertEqual((2, 3), get_group_info_for_cohort(self.cohort4))
        data = {
            'name': self.cohort4.name,
            'assignment_type': CourseCohort.RANDOM,
            'group_id': 1,
            'user_partition_id': 3
        }
        self.put_handler(self.course, self.cohort4, data=data)
        self.assertEqual((1, 3), get_group_info_for_cohort(self.cohort4))
    def test_update_cohort_missing_user_partition_id(self):
        """
        Verify that we cannot update a cohort with a group_id if the user_partition_id is not also specified.
        """
        self._create_cohorts()
        data = {'name': self.cohort1.name, 'assignment_type': CourseCohort.RANDOM, 'group_id': 2}
        response_dict = self.put_handler(self.course, self.cohort1, data=data, expected_response_code=400)
        self.assertEqual(
            "If group_id is specified, user_partition_id must also be specified.", response_dict.get("error")
        )
class UsersInCohortTestCase(CohortViewsTestCase):
    """
    Tests the `users_in_cohort` view.
    """
    def request_users_in_cohort(self, cohort, course, requested_page, should_return_bad_request=False):
        """
        Call `users_in_cohort` for a given cohort/requested page, and return
        its response as a dict. When `should_return_bad_request` is True,
        verify that the response indicates a bad request.
        """
        request = RequestFactory().get("dummy_url", {"page": requested_page})
        # The view is staff-only, so authenticate as the staff user.
        request.user = self.staff_user
        response = users_in_cohort(request, unicode(course.id), cohort.id)
        if should_return_bad_request:
            self.assertEqual(response.status_code, 400)
            return
        self.assertEqual(response.status_code, 200)
        return json.loads(response.content)
    def verify_users_in_cohort_and_response(self, cohort, response_dict, expected_users, expected_page,
                                            expected_num_pages):
        """
        Check that the `users_in_cohort` response contains the expected list of
        users, page number, and total number of pages for a given cohort. Also
        verify that those users are actually in the given cohort.
        """
        self.assertTrue(response_dict.get("success"))
        self.assertEqual(response_dict.get("page"), expected_page)
        self.assertEqual(response_dict.get("num_pages"), expected_num_pages)
        # Resolve the returned usernames back to User objects so they can be
        # compared against the expected users as sets (order-insensitive).
        returned_users = User.objects.filter(username__in=[user.get("username") for user in response_dict.get("users")])
        self.assertEqual(len(returned_users), len(expected_users))
        self.assertEqual(set(returned_users), set(expected_users))
        self.assertTrue(set(returned_users).issubset(cohort.users.all()))
    def test_non_staff(self):
        """
        Verify that non-staff users cannot access `check_users_in_cohort`.
        """
        cohort = CohortFactory(course_id=self.course.id, users=[])
        self._verify_non_staff_cannot_access(users_in_cohort, "GET", [unicode(self.course.id), cohort.id])
    def test_no_users(self):
        """
        Verify that we don't get back any users for a cohort with no users.
        """
        cohort = CohortFactory(course_id=self.course.id, users=[])
        response_dict = self.request_users_in_cohort(cohort, self.course, 1)
        self.verify_users_in_cohort_and_response(
            cohort,
            response_dict,
            expected_users=[],
            expected_page=1,
            expected_num_pages=1
        )
    def test_few_users(self):
        """
        Verify that we get back all users for a cohort when the cohort has
        <=100 users.
        """
        users = [UserFactory() for _ in range(5)]
        cohort = CohortFactory(course_id=self.course.id, users=users)
        response_dict = self.request_users_in_cohort(cohort, self.course, 1)
        self.verify_users_in_cohort_and_response(
            cohort,
            response_dict,
            expected_users=users,
            expected_page=1,
            expected_num_pages=1
        )
    def test_many_users(self):
        """
        Verify that pagination works correctly for cohorts with >100 users.
        """
        # 101 users forces exactly two pages at the view's 100-per-page size.
        users = [UserFactory() for _ in range(101)]
        cohort = CohortFactory(course_id=self.course.id, users=users)
        response_dict_1 = self.request_users_in_cohort(cohort, self.course, 1)
        response_dict_2 = self.request_users_in_cohort(cohort, self.course, 2)
        self.verify_users_in_cohort_and_response(
            cohort,
            response_dict_1,
            expected_users=users[:100],
            expected_page=1,
            expected_num_pages=2
        )
        self.verify_users_in_cohort_and_response(
            cohort,
            response_dict_2,
            expected_users=users[100:],
            expected_page=2,
            expected_num_pages=2
        )
    def test_out_of_range(self):
        """
        Verify that we get a blank page of users when requesting page 0 or a
        page greater than the actual number of pages.
        """
        users = [UserFactory() for _ in range(5)]
        cohort = CohortFactory(course_id=self.course.id, users=users)
        # Page 0 is out of range on the low end: success, but no users.
        response = self.request_users_in_cohort(cohort, self.course, 0)
        self.verify_users_in_cohort_and_response(
            cohort,
            response,
            expected_users=[],
            expected_page=0,
            expected_num_pages=1
        )
        # Page 2 is out of range on the high end: same blank-page behavior.
        response = self.request_users_in_cohort(cohort, self.course, 2)
        self.verify_users_in_cohort_and_response(
            cohort,
            response,
            expected_users=[],
            expected_page=2,
            expected_num_pages=1
        )
    def test_non_positive_page(self):
        """
        Verify that we get a `HttpResponseBadRequest` (bad request) when the
        page we request isn't a positive integer.
        """
        users = [UserFactory() for _ in range(5)]
        cohort = CohortFactory(course_id=self.course.id, users=users)
        # Non-integer and negative page values are rejected with a 400.
        self.request_users_in_cohort(cohort, self.course, "invalid", should_return_bad_request=True)
        self.request_users_in_cohort(cohort, self.course, -1, should_return_bad_request=True)
class AddUsersToCohortTestCase(CohortViewsTestCase):
    """
    Tests the `add_users_to_cohort` view.
    """
    def setUp(self):
        super(AddUsersToCohortTestCase, self).setUp()
        # Every test in this case starts from the standard cohort fixture.
        self._create_cohorts()
    def request_add_users_to_cohort(self, users_string, cohort, course, should_raise_404=False):
        """
        Call `add_users_to_cohort` for a given cohort, course, and list of
        users, returning its response as a dict. When `should_raise_404` is
        True, verify that the request raised a Http404.
        """
        request = RequestFactory().post("dummy_url", {"users": users_string})
        # The view is staff-only, so authenticate as the staff user.
        request.user = self.staff_user
        if should_raise_404:
            self.assertRaises(
                Http404,
                lambda: add_users_to_cohort(request, unicode(course.id), cohort.id)
            )
        else:
            response = add_users_to_cohort(request, unicode(course.id), cohort.id)
            self.assertEqual(response.status_code, 200)
            return json.loads(response.content)
    def verify_added_users_to_cohort(self, response_dict, cohort, course, expected_added, expected_changed,
                                     expected_present, expected_unknown):
        """
        Check that add_users_to_cohort returned the expected response and has
        the expected side effects.
        `expected_added` is a list of users
        `expected_changed` is a list of (user, previous_cohort) tuples
        `expected_present` is a list of (user, email/username) tuples where
        email/username corresponds to the input
        `expected_unknown` is a list of strings corresponding to the input
        """
        self.assertTrue(response_dict.get("success"))
        self.assertEqual(
            response_dict.get("added"),
            [
                {"username": user.username, "name": user.profile.name, "email": user.email}
                for user in expected_added
            ]
        )
        self.assertEqual(
            response_dict.get("changed"),
            [
                {
                    "username": user.username,
                    "name": user.profile.name,
                    "email": user.email,
                    "previous_cohort": previous_cohort
                }
                for (user, previous_cohort) in expected_changed
            ]
        )
        # "present" users are echoed back exactly as the caller spelled them
        # (username or email), so compare against the raw input strings.
        self.assertEqual(
            response_dict.get("present"),
            [username_or_email for (_, username_or_email) in expected_present]
        )
        self.assertEqual(response_dict.get("unknown"), expected_unknown)
        # Every user reported as added/changed/present must now actually
        # belong to the target cohort in the database.
        for user in expected_added + [user for (user, _) in expected_changed + expected_present]:
            self.assertEqual(
                CourseUserGroup.objects.get(
                    course_id=course.id,
                    group_type=CourseUserGroup.COHORT,
                    users__id=user.id
                ),
                cohort
            )
    def test_non_staff(self):
        """
        Verify that non-staff users cannot access `check_users_in_cohort`.
        """
        cohort = CohortFactory(course_id=self.course.id, users=[])
        self._verify_non_staff_cannot_access(
            add_users_to_cohort,
            "POST",
            [unicode(self.course.id), cohort.id]
        )
    def test_empty(self):
        """
        Verify that adding an empty list of users to a cohort has no result.
        """
        response_dict = self.request_add_users_to_cohort("", self.cohort1, self.course)
        self.verify_added_users_to_cohort(
            response_dict,
            self.cohort1,
            self.course,
            expected_added=[],
            expected_changed=[],
            expected_present=[],
            expected_unknown=[]
        )
    def test_only_added(self):
        """
        Verify that we can add users to their first cohort.
        """
        response_dict = self.request_add_users_to_cohort(
            ",".join([user.username for user in self.cohortless_users]),
            self.cohort1,
            self.course
        )
        self.verify_added_users_to_cohort(
            response_dict,
            self.cohort1,
            self.course,
            expected_added=self.cohortless_users,
            expected_changed=[],
            expected_present=[],
            expected_unknown=[]
        )
    def test_only_changed(self):
        """
        Verify that we can move users to a different cohort.
        """
        response_dict = self.request_add_users_to_cohort(
            ",".join([user.username for user in self.cohort2_users + self.cohort3_users]),
            self.cohort1,
            self.course
        )
        self.verify_added_users_to_cohort(
            response_dict,
            self.cohort1,
            self.course,
            expected_added=[],
            # The response reports each moved user's previous cohort by name.
            expected_changed=(
                [(user, self.cohort2.name) for user in self.cohort2_users] +
                [(user, self.cohort3.name) for user in self.cohort3_users]
            ),
            expected_present=[],
            expected_unknown=[]
        )
    def test_only_present(self):
        """
        Verify that we can 'add' users to their current cohort.
        """
        usernames = [user.username for user in self.cohort1_users]
        response_dict = self.request_add_users_to_cohort(
            ",".join(usernames),
            self.cohort1,
            self.course
        )
        self.verify_added_users_to_cohort(
            response_dict,
            self.cohort1,
            self.course,
            expected_added=[],
            expected_changed=[],
            expected_present=[(user, user.username) for user in self.cohort1_users],
            expected_unknown=[]
        )
    def test_only_unknown(self):
        """
        Verify that non-existent users are not added.
        """
        usernames = ["unknown_user{}".format(i) for i in range(3)]
        response_dict = self.request_add_users_to_cohort(
            ",".join(usernames),
            self.cohort1,
            self.course
        )
        self.verify_added_users_to_cohort(
            response_dict,
            self.cohort1,
            self.course,
            expected_added=[],
            expected_changed=[],
            expected_present=[],
            expected_unknown=usernames
        )
    def test_all(self):
        """
        Test all adding conditions together.
        """
        # Mix unknown names with users from every cohort state so each
        # response bucket (added/changed/present/unknown) is exercised.
        unknowns = ["unknown_user{}".format(i) for i in range(3)]
        response_dict = self.request_add_users_to_cohort(
            ",".join(
                unknowns +
                [
                    user.username
                    for user in self.cohortless_users + self.cohort1_users + self.cohort2_users + self.cohort3_users
                ]
            ),
            self.cohort1,
            self.course
        )
        self.verify_added_users_to_cohort(
            response_dict,
            self.cohort1,
            self.course,
            expected_added=self.cohortless_users,
            expected_changed=(
                [(user, self.cohort2.name) for user in self.cohort2_users] +
                [(user, self.cohort3.name) for user in self.cohort3_users]
            ),
            expected_present=[(user, user.username) for user in self.cohort1_users],
            expected_unknown=unknowns
        )
    def test_emails(self):
        """
        Verify that we can use emails to identify users.
        """
        unknown = "unknown_user@example.com"
        response_dict = self.request_add_users_to_cohort(
            ",".join([
                self.cohort1_users[0].email,
                self.cohort2_users[0].email,
                self.cohortless_users[0].email,
                unknown
            ]),
            self.cohort1,
            self.course
        )
        self.verify_added_users_to_cohort(
            response_dict,
            self.cohort1,
            self.course,
            expected_added=[self.cohortless_users[0]],
            expected_changed=[(self.cohort2_users[0], self.cohort2.name)],
            expected_present=[(self.cohort1_users[0], self.cohort1_users[0].email)],
            expected_unknown=[unknown]
        )
    def test_delimiters(self):
        """
        Verify that we can use different types of whitespace to delimit
        usernames in the user string.
        """
        unknown = "unknown_user"
        # Mix spaces, a tab, a comma, and CRLF as separators in one input.
        response_dict = self.request_add_users_to_cohort(
            " {} {}\t{}, \r\n{}".format(
                unknown,
                self.cohort1_users[0].username,
                self.cohort2_users[0].username,
                self.cohortless_users[0].username
            ),
            self.cohort1,
            self.course
        )
        self.verify_added_users_to_cohort(
            response_dict,
            self.cohort1,
            self.course,
            expected_added=[self.cohortless_users[0]],
            expected_changed=[(self.cohort2_users[0], self.cohort2.name)],
            expected_present=[(self.cohort1_users[0], self.cohort1_users[0].username)],
            expected_unknown=[unknown]
        )
    def test_can_cohort_unenrolled_users(self):
        """
        Verify that users can be added to a cohort of a course they're not
        enrolled in. This feature is currently used to pre-cohort users that
        are expected to enroll in a course.
        """
        unenrolled_usernames = [user.username for user in self.unenrolled_users]
        response_dict = self.request_add_users_to_cohort(
            ",".join(unenrolled_usernames),
            self.cohort1,
            self.course
        )
        self.verify_added_users_to_cohort(
            response_dict,
            self.cohort1,
            self.course,
            expected_added=self.unenrolled_users,
            expected_changed=[],
            expected_present=[],
            expected_unknown=[]
        )
    def test_non_existent_cohort(self):
        """
        Verify that an error is raised when trying to add users to a cohort
        which does not belong to the given course.
        """
        users = [UserFactory(username="user{0}".format(i)) for i in range(3)]
        usernames = [user.username for user in users]
        # Build a cohort attached to a completely different course; using it
        # against self.course must raise Http404.
        wrong_course_key = SlashSeparatedCourseKey("some", "arbitrary", "course")
        wrong_course_cohort = CohortFactory(name="wrong_cohort", course_id=wrong_course_key, users=[])
        self.request_add_users_to_cohort(
            ",".join(usernames),
            wrong_course_cohort,
            self.course,
            should_raise_404=True
        )
class RemoveUserFromCohortTestCase(CohortViewsTestCase):
    """
    Tests the `remove_user_from_cohort` view.
    """
    def request_remove_user_from_cohort(self, username, cohort):
        """
        Call `remove_user_from_cohort` with the given username and cohort.
        """
        # A username of None omits the parameter entirely, exercising the
        # view's missing-parameter branch.
        if username is not None:
            request = RequestFactory().post("dummy_url", {"username": username})
        else:
            request = RequestFactory().post("dummy_url")
        request.user = self.staff_user
        response = remove_user_from_cohort(request, unicode(self.course.id), cohort.id)
        # Even error cases respond with HTTP 200; failures are reported in
        # the JSON body instead.
        self.assertEqual(response.status_code, 200)
        return json.loads(response.content)
    def verify_removed_user_from_cohort(self, username, response_dict, cohort, expected_error_msg=None):
        """
        Check that `remove_user_from_cohort` properly removes a user from a
        cohort and returns appropriate success. If the removal should fail,
        verify that the returned error message matches the expected one.
        """
        if expected_error_msg is None:
            self.assertTrue(response_dict.get("success"))
            self.assertIsNone(response_dict.get("msg"))
            self.assertFalse(self._user_in_cohort(username, cohort))
        else:
            self.assertFalse(response_dict.get("success"))
            self.assertEqual(response_dict.get("msg"), expected_error_msg)
    def test_non_staff(self):
        """
        Verify that non-staff users cannot access `check_users_in_cohort`.
        """
        cohort = CohortFactory(course_id=self.course.id, users=[])
        self._verify_non_staff_cannot_access(
            remove_user_from_cohort,
            "POST",
            [unicode(self.course.id), cohort.id]
        )
    def test_no_username_given(self):
        """
        Verify that we get an error message when omitting a username.
        """
        cohort = CohortFactory(course_id=self.course.id, users=[])
        response_dict = self.request_remove_user_from_cohort(None, cohort)
        self.verify_removed_user_from_cohort(
            None,
            response_dict,
            cohort,
            expected_error_msg='No username specified'
        )
    def test_user_does_not_exist(self):
        """
        Verify that we get an error message when the requested user to remove
        does not exist.
        """
        username = "bogus"
        cohort = CohortFactory(course_id=self.course.id, users=[])
        response_dict = self.request_remove_user_from_cohort(
            username,
            cohort
        )
        self.verify_removed_user_from_cohort(
            username,
            response_dict,
            cohort,
            expected_error_msg='No user \'{0}\''.format(username)
        )
    def test_can_remove_user_not_in_cohort(self):
        """
        Verify that we can "remove" a user from a cohort even if they are not a
        member of that cohort.
        """
        # Removal is idempotent: removing a non-member still succeeds.
        user = UserFactory()
        cohort = CohortFactory(course_id=self.course.id, users=[])
        response_dict = self.request_remove_user_from_cohort(user.username, cohort)
        self.verify_removed_user_from_cohort(user.username, response_dict, cohort)
    def test_can_remove_user_from_cohort(self):
        """
        Verify that we can remove a user from a cohort.
        """
        user = UserFactory()
        cohort = CohortFactory(course_id=self.course.id, users=[user])
        response_dict = self.request_remove_user_from_cohort(user.username, cohort)
        self.verify_removed_user_from_cohort(user.username, response_dict, cohort)
@skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Tests only valid in LMS')
class CourseCohortDiscussionTopicsTestCase(CohortViewsTestCase):
    """
    Tests the `cohort_discussion_topics` view.
    """
    def test_non_staff(self):
        """
        Verify that we cannot access cohort_discussion_topics if we're a non-staff user.
        """
        self._verify_non_staff_cannot_access(cohort_discussion_topics, "GET", [unicode(self.course.id)])
    def test_get_discussion_topics(self):
        """
        Verify that course_cohort_settings_handler is working for HTTP GET.
        """
        # create inline & course-wide discussion to verify the different map.
        self.create_cohorted_discussions()
        response = self.get_handler(self.course, handler=cohort_discussion_topics)
        # Start dates are generated server-side, so read them back from the
        # actual response rather than hard-coding them in the expected payload.
        start_date = response['inline_discussions']['subcategories']['Chapter']['start_date']
        expected_response = {
            "course_wide_discussions": {
                'children': ['Topic B'],
                'entries': {
                    'Topic B': {
                        'sort_key': 'A',
                        'is_cohorted': True,
                        'id': topic_name_to_id(self.course, "Topic B"),
                        'start_date': response['course_wide_discussions']['entries']['Topic B']['start_date']
                    }
                }
            },
            "inline_discussions": {
                'subcategories': {
                    'Chapter': {
                        'subcategories': {},
                        'children': ['Discussion'],
                        'entries': {
                            'Discussion': {
                                'sort_key': None,
                                'is_cohorted': True,
                                'id': topic_name_to_id(self.course, "Topic A"),
                                'start_date': start_date
                            }
                        },
                        'sort_key': 'Chapter',
                        'start_date': start_date
                    }
                },
                'children': ['Chapter']
            }
        }
        self.assertEqual(response, expected_response)
| agpl-3.0 |
patacrep/patanet | generator/name_paginator.py | 1 | 5677 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 The Patacrep Team
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# adapted from https://djangosnippets.org/snippets/2732/
import string
from django.core.paginator import InvalidPage
from unidecode import unidecode
class NamePaginator(object):
    """Paginate a queryset alphabetically on its default ordering field.

    Objects are bucketed by the (unaccented, upper-cased) first letter of
    their string representation; consecutive letter buckets are then packed
    into pages of roughly ``paginate_by`` objects each.  Digits are grouped
    under a "0-9" page and any other non-alphabetic leading character under
    a "#" page.
    """

    def __init__(self, queryset, paginate_by=25, **kwargs):
        # ``allow_empty_first_page`` and ``orphans`` are accepted via
        # **kwargs only for interface compliance with Django's Paginator;
        # they are ignored.
        self.pages = []
        self.object_list = queryset
        self.count = len(self.object_list)
        # Bucket the objects by first letter up front so we don't need to
        # iterate over the whole list once per letter.
        chunks = {}
        numbers = "0123456789"
        alphabet = string.ascii_uppercase
        # Objects are keyed on the first field of the model's default
        # ordering (or on unicode(obj) for a falsy queryset).
        for obj in self.object_list:
            if queryset:
                obj_str = unicode(getattr(obj, obj._meta.ordering[0]))
            else:
                obj_str = unicode(obj)
            try:
                letter = unicode.upper(obj_str[0])
            except IndexError:
                # BUG FIX: this was a bare ``except`` that silently hid any
                # error; only an empty obj_str (no first character) should
                # be skipped, per the original intent.
                continue
            # Strip accents so e.g. 'É' is filed under 'E'.
            letter = unidecode(letter)
            if letter in numbers:
                letter = "0"
            elif letter not in alphabet:
                letter = "#"
            chunks.setdefault(letter, []).append(obj)
        # Assign the letter buckets to pages.
        current_page = NamePage(self)
        for letter in alphabet + "#0":
            if letter not in chunks:
                # Record empty alphabetic letters on the current page so the
                # page still advertises its full letter range.
                if letter in alphabet:
                    current_page.add([], letter)
                continue
            # The items in object_list starting with this letter.
            sub_list = chunks[letter]
            new_page_count = len(sub_list) + current_page.count
            # Start a new page if assigning this bucket would overflow the
            # current one, an underflow is closer to paginate_by than the
            # overflow would be, and the page isn't empty (which would mean
            # len(sub_list) > paginate_by).  The "#" and "0" buckets always
            # start their own page.
            if (new_page_count > paginate_by and
                    abs(paginate_by - current_page.count) < abs(paginate_by - new_page_count) and
                    current_page.count > 0) or \
                    letter == "#" or \
                    letter == "0":
                self.pages.append(current_page)
                current_page = NamePage(self)
            current_page.add(sub_list, letter)
            if letter == "0":
                # Digits are displayed as the "0-9" range.
                current_page.add([], "9")
                self.pages.append(current_page)
                current_page = NamePage(self)
        # If the loop finished with a non-empty page, keep it.
        if current_page.count > 0:
            self.pages.append(current_page)

    def page(self, num):
        """Return the Page object for the given 1-based page number.

        Returns None when there are no pages at all; raises InvalidPage for
        an out-of-range number.
        """
        if len(self.pages) == 0:
            return None
        elif num > 0 and num <= len(self.pages):
            return self.pages[num - 1]
        else:
            raise InvalidPage

    @property
    def num_pages(self):
        """Total number of pages."""
        return len(self.pages)
class NamePage(object):
    """A single page produced by NamePaginator.

    Holds the objects on the page plus the letters the page covers, and
    exposes the subset of Django's ``Page`` interface used in templates.
    """
    def __init__(self, paginator):
        self.paginator = paginator
        self.object_list = []
        self.letters = []

    @property
    def count(self):
        """Number of objects on this page."""
        return len(self.object_list)

    @property
    def start_letter(self):
        """First letter covered by this page, or None if it has no letters."""
        if len(self.letters) > 0:
            self.letters.sort(key=str.upper)
            return self.letters[0]
        else:
            return None

    @property
    def end_letter(self):
        """Last letter covered by this page, or None if it has no letters."""
        if len(self.letters) > 0:
            self.letters.sort(key=str.upper)
            return self.letters[-1]
        else:
            return None

    @property
    def number(self):
        """1-based position of this page within the paginator."""
        return self.paginator.pages.index(self) + 1

    # just added the methods I needed to use in the templates
    # feel free to add the ones you need too
    def has_other_pages(self):
        # NOTE(review): Django's Page.has_other_pages() means "there is more
        # than one page"; here it reports whether this page itself is
        # non-empty -- confirm template usage before changing.
        return len(self.object_list) > 0

    def has_previous(self):
        # The first page has index 0, which is falsy.
        return self.paginator.pages.index(self)

    def has_next(self):
        # BUG FIX: the previous implementation returned ``index + 2``, which
        # is >= 2 and therefore truthy even for the LAST page.  Compare
        # against the total number of pages instead.
        return self.paginator.pages.index(self) + 1 < len(self.paginator.pages)

    def next_page_number(self):
        # Only meaningful when has_next() is true.
        return self.paginator.pages.index(self) + 2

    def previous_page_number(self):
        # Only meaningful when has_previous() is true.
        return self.paginator.pages.index(self)

    def add(self, new_list, letter=None):
        """Append objects (and optionally the letter they belong to)."""
        if len(new_list) > 0:
            self.object_list = self.object_list + new_list
        if letter:
            self.letters.append(letter)

    def __repr__(self):
        if self.start_letter == self.end_letter:
            return self.start_letter
        else:
            return '%c-%c' % (self.start_letter, self.end_letter)
| agpl-3.0 |
cherylyli/stress-aid | env/lib/python3.5/site-packages/tests/test_wrappers.py | 3 | 1539 | from tests import util
from werkzeug.exceptions import HTTPException
import pymongo
class CollectionTest(util.FlaskPyMongoTest):
    """Tests for the ``find_one_or_404`` helper on PyMongo collections."""
    def test_find_one_or_404(self):
        # Start from an empty collection so the first lookup is guaranteed
        # to miss.
        self.mongo.db.things.remove()
        try:
            self.mongo.db.things.find_one_or_404({'_id': 'thing'})
        except HTTPException as notfound:
            # find_one_or_404 must abort with 404 when no document matches.
            assert notfound.code == 404, "raised wrong exception"
        # insert_one is the PyMongo 3+ API; insert (with write concern w=1)
        # is the PyMongo 2.x equivalent.
        if pymongo.version_tuple[0] > 2:
            self.mongo.db.things.insert_one({'_id': 'thing', 'val': 'foo'})
        else:
            self.mongo.db.things.insert({'_id': 'thing', 'val': 'foo'}, w=1)
        # now it should not raise
        thing = self.mongo.db.things.find_one_or_404({'_id': 'thing'})
        assert thing['val'] == 'foo', 'got wrong thing'
        # also test with dotted-named collections
        self.mongo.db.things.morethings.remove()
        try:
            self.mongo.db.things.morethings.find_one_or_404({'_id': 'thing'})
        except HTTPException as notfound:
            assert notfound.code == 404, "raised wrong exception"
        if pymongo.version_tuple[0] > 2:
            # Write Concern is set to w=1 by default in pymongo > 3.0
            self.mongo.db.things.morethings.insert_one({'_id': 'thing', 'val': 'foo'})
        else:
            self.mongo.db.things.morethings.insert({'_id': 'thing', 'val': 'foo'}, w=1)
        # now it should not raise
        thing = self.mongo.db.things.morethings.find_one_or_404({'_id': 'thing'})
        assert thing['val'] == 'foo', 'got wrong thing'
| mit |
cswiercz/sympy | sympy/functions/special/bsplines.py | 75 | 4949 | from __future__ import print_function, division
from sympy.core import S, sympify
from sympy.core.compatibility import range
from sympy.functions import Piecewise, piecewise_fold
from sympy.sets.sets import Interval
def _add_splines(c, b1, d, b2):
    """Return the expanded linear combination ``c*b1 + d*b2``.

    ``b1`` and ``b2`` are Piecewise B-spline expressions whose interval
    lists overlap with an offset of one, so the combined Piecewise is
    stitched together interval by interval.
    """
    # If either term vanishes, the sum collapses to the other term.
    if b1 == S.Zero or c == S.Zero:
        combined = piecewise_fold(d*b2)
    elif b2 == S.Zero or d == S.Zero:
        combined = piecewise_fold(c*b1)
    else:
        n_intervals = len(b1.args)
        if n_intervals != len(b2.args):
            raise ValueError("Args of b1 and b2 are not equal")
        # Leading interval comes from b1 alone.
        pieces = [(c*b1.args[0].expr, b1.args[0].cond)]
        # Interior intervals overlap: b1's piece i pairs with b2's piece i-1.
        pieces.extend(
            (c*piece1.expr + d*piece2.expr, piece1.cond)
            for piece1, piece2 in zip(b1.args[1:n_intervals - 1],
                                      b2.args[:n_intervals - 2])
        )
        # Trailing interval comes from b2 alone, plus b2's catch-all piece.
        pieces.append((d*b2.args[-2].expr, b2.args[-2].cond))
        pieces.append(b2.args[-1])
        combined = Piecewise(*pieces)
    return combined.expand()
def bspline_basis(d, knots, n, x, close=True):
    """The `n`-th B-spline at `x` of degree `d` with knots.

    B-Splines are piecewise polynomials of degree `d` [1]_. They are defined on
    a set of knots, which is a sequence of integers or floats.

    The 0th degree splines have a value of one on a single interval:

    >>> from sympy import bspline_basis
    >>> from sympy.abc import x
    >>> d = 0
    >>> knots = range(5)
    >>> bspline_basis(d, knots, 0, x)
    Piecewise((1, And(x <= 1, x >= 0)), (0, True))

    For a given ``(d, knots)`` there are ``len(knots)-d-1`` B-splines defined, that
    are indexed by ``n`` (starting at 0).

    Here is an example of a cubic B-spline:

    >>> bspline_basis(3, range(5), 0, x)
    Piecewise((x**3/6, And(x < 1, x >= 0)),
              (-x**3/2 + 2*x**2 - 2*x + 2/3, And(x < 2, x >= 1)),
              (x**3/2 - 4*x**2 + 10*x - 22/3, And(x < 3, x >= 2)),
              (-x**3/6 + 2*x**2 - 8*x + 32/3, And(x <= 4, x >= 3)),
              (0, True))

    By repeating knot points, you can introduce discontinuities in the
    B-splines and their derivatives:

    >>> d = 1
    >>> knots = [0,0,2,3,4]
    >>> bspline_basis(d, knots, 0, x)
    Piecewise((-x/2 + 1, And(x <= 2, x >= 0)), (0, True))

    It is quite time consuming to construct and evaluate B-splines. If you
    need to evaluate a B-splines many times, it is best to lambdify them
    first:

    >>> from sympy import lambdify
    >>> d = 3
    >>> knots = range(10)
    >>> b0 = bspline_basis(d, knots, 0, x)
    >>> f = lambdify(x, b0)
    >>> y = f(0.5)

    See Also
    ========

    bsplines_basis_set

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/B-spline
    """
    knots = [sympify(k) for k in knots]
    d = int(d)
    n = int(n)
    n_knots = len(knots)
    n_intervals = n_knots - 1
    if n + d + 1 > n_intervals:
        raise ValueError('n + d + 1 must not exceed len(knots) - 1')
    if d == 0:
        # Degree-0 spline: the indicator function of a single knot interval.
        # ``close`` controls whether the right endpoint is included.
        result = Piecewise(
            (S.One, Interval(knots[n], knots[n + 1], False,
                             not close).contains(x)),
            (0, True)
        )
    elif d > 0:
        # Cox-de Boor recursion: combine two splines of degree d - 1.
        denom = knots[n + d + 1] - knots[n + 1]
        if denom != S.Zero:
            B = (knots[n + d + 1] - x)/denom
            b2 = bspline_basis(d - 1, knots, n + 1, x, close)
        else:
            # Repeated knots make this term vanish.
            b2 = B = S.Zero
        denom = knots[n + d] - knots[n]
        if denom != S.Zero:
            A = (x - knots[n])/denom
            b1 = bspline_basis(
                d - 1, knots, n, x, close and (B == S.Zero or b2 == S.Zero))
        else:
            b1 = A = S.Zero
        result = _add_splines(A, b1, B, b2)
    else:
        # BUG FIX: the message previously interpolated ``n`` (the spline
        # index) instead of the offending degree ``d``.
        raise ValueError('degree must be non-negative: %r' % d)
    return result
def bspline_basis_set(d, knots, x):
    """Return the ``len(knots)-d-1`` B-splines at ``x`` of degree ``d`` with ``knots``.

    This function returns a list of Piecewise polynomials that are the
    ``len(knots)-d-1`` B-splines of degree ``d`` for the given knots.  It
    delegates to ``bspline_basis(d, knots, n, x)`` for each index ``n``.

    Examples
    ========

    >>> from sympy import bspline_basis_set
    >>> from sympy.abc import x
    >>> d = 2
    >>> knots = range(5)
    >>> splines = bspline_basis_set(d, knots, x)
    >>> splines
    [Piecewise((x**2/2, And(x < 1, x >= 0)),
               (-x**2 + 3*x - 3/2, And(x < 2, x >= 1)),
               (x**2/2 - 3*x + 9/2, And(x <= 3, x >= 2)),
               (0, True)),
     Piecewise((x**2/2 - x + 1/2, And(x < 2, x >= 1)),
               (-x**2 + 5*x - 11/2, And(x < 3, x >= 2)),
               (x**2/2 - 4*x + 8, And(x <= 4, x >= 3)),
               (0, True))]

    See Also
    ========

    bsplines_basis
    """
    # One basis spline exists for each valid starting index n.
    return [bspline_basis(d, knots, n, x)
            for n in range(len(knots) - d - 1)]
| bsd-3-clause |
kyilmaz80/emailrelay-dlp | tests/create_eml_files.py | 1 | 2993 | #!/bin/env python3
import sys
import os
import mimetypes
from email import encoders
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
"""
Creates email eml messages from given attachment files
"""
if len(sys.argv) > 1:
FILE_DIR = sys.argv[1]
EML_DIR = sys.argv[2]
else:
print('KULLANIM: ./create_eml_files.py <FILE_DIR> <ATTACH_DIR>')
sys.exit(1)
if not os.path.exists(EML_DIR):
print(EML_DIR, ' a ulasilamadi!')
sys.exit(1)
# toplam dosyalar:
cnt = 0
for dirpath, dirnames, files in os.walk(FILE_DIR):
for name in files:
# Create the enclosing (outer) message
outer = MIMEMultipart()
outer['Subject'] = 'MESSAGE ' + str(cnt)
outer['To'] = 'test' + str(cnt) + '@test.local'
outer['From'] = 'korayy@test.local'
outer.preamble = 'MIME mail okuyucuda gozukmez.\n'
attach_file = os.path.join(dirpath, name)
filename, file_extension = os.path.splitext(attach_file)
eml_file = os.path.join(EML_DIR, os.path.basename(filename) + '.eml')
if os.path.isfile(attach_file):
print(name, ' dosyasindan eml dosyasi', EML_DIR,
' dizininde olusturuluyor..')
path = os.path.join(dirpath, name)
ctype, encoding = mimetypes.guess_type(path)
if ctype is None or encoding is not None:
# No guess could be made, or the file is encoded
# (compressed), so
# use a generic bag-of-bits type.
ctype = 'application/octet-stream'
maintype, subtype = ctype.split('/', 1)
if maintype == 'text':
fp = open(path)
# Note: we should handle calculating the charset
msg = MIMEText(fp.read(), _subtype=subtype)
fp.close()
elif maintype == 'image':
fp = open(path, 'rb')
msg = MIMEImage(fp.read(), _subtype=subtype)
fp.close()
elif maintype == 'audio':
fp = open(path, 'rb')
msg = MIMEAudio(fp.read(), _subtype=subtype)
fp.close()
else:
fp = open(path, 'rb')
msg = MIMEBase(maintype, subtype)
msg.set_payload(fp.read())
fp.close()
# Encode the payload using Base64
encoders.encode_base64(msg)
# Set the filename parameter
msg.add_header('Content-Disposition', 'attachment', filename=name)
outer.attach(msg)
with open(eml_file, 'w') as fp:
fp.write(outer.as_string())
# ret = parse_msg.main(path)
# eml_res[name] = ret
cnt = cnt + 1
else:
print(name, "dosya değil!")
print('Toplam dosya sayisi: ', cnt)
| mit |
GauthamGoli/django-organizations | organizations/backends/tokens.py | 2 | 2914 | # -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Ben Lopatin and contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django.conf import settings
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils.crypto import constant_time_compare
from django.utils.http import base36_to_int
REGISTRATION_TIMEOUT_DAYS = getattr(settings, 'REGISTRATION_TIMEOUT_DAYS', 15)
class RegistrationTokenGenerator(PasswordResetTokenGenerator):
    """
    Very similar to the password reset token generator, but should
    allow slightly greater time for timeout, so it only updates one
    method, replacing PASSWORD_RESET_TIMEOUT_DAYS from the global
    settings with REGISTRATION_TIMEOUT_DAYS from application
    settings.
    Has the additional interface method:
    -- make_token(user): Returns a token that can be used once to do a
    password reset for the given user.
    """
    def check_token(self, user, token):
        """
        Check that a registration token is correct for a given user.

        Returns ``True`` when the token parses, matches the user's
        expected token for its timestamp, and is no older than
        ``REGISTRATION_TIMEOUT_DAYS``; ``False`` otherwise.
        """
        # Parse the token: "<base36 timestamp>-<hash>". A malformed
        # token (wrong number of "-" separated parts) is rejected.
        try:
            ts_b36, _hash = token.split("-")  # renamed: don't shadow builtin ``hash``
        except ValueError:
            return False
        try:
            ts = base36_to_int(ts_b36)
        except ValueError:
            return False
        # Check that the timestamp/uid has not been tampered with.
        # constant_time_compare avoids leaking information via timing.
        if not constant_time_compare(self._make_token_with_timestamp(user, ts), token):
            return False
        # Check the timestamp is within limit
        if (self._num_days(self._today()) - ts) > REGISTRATION_TIMEOUT_DAYS:
            return False
        return True
| bsd-2-clause |
nooperpudd/pulsar | pulsar/utils/config.py | 4 | 36485 | '''Configuration utilities which provides pulsar with configuration parameters
which can be parsed from the command line. Parsing is implemented using
the python argparser_ standard library module.
Config
~~~~~~~~~~
.. autoclass:: Config
:members:
:member-order: bysource
Setting
~~~~~~~~~~
.. autoclass:: Setting
:members:
:member-order: bysource
.. _argparser: http://docs.python.org/dev/library/argparse.html
'''
import inspect
import argparse
import os
import textwrap
import logging
import pickle
from pulsar import __version__, SERVER_NAME
from . import system
from .internet import parse_address
from .importer import import_system_file
from .httpurl import HttpParser as PyHttpParser
from .log import configured_logger
from .pep import to_bytes
# Public API of this module.
__all__ = ['Config',
           'Setting',
           'ordered_settings',
           'validate_string',
           'validate_callable',
           'validate_bool',
           'validate_list',
           'validate_dict',
           'validate_pos_int',
           'validate_pos_float',
           'make_optparse_options']

# Module-level logger for configuration-related messages.
LOGGER = logging.getLogger('pulsar.config')

# Per-section documentation strings, filled in further down this module
# and consumed when building the settings documentation.
section_docs = {}
# Global registries of Setting classes, populated by SettingMeta as each
# non-virtual Setting subclass is defined.
KNOWN_SETTINGS = {}
KNOWN_SETTINGS_ORDER = []
def pass_through(arg):
    '''No-op callable taking a single positional parameter.

    Used as the default value for
    :ref:`Application Hooks <setting-section-application-hooks>`;
    it ignores ``arg`` and returns ``None``.
    '''
    return None
def set_if_avail(container, key, value, *skip_values):
    '''Store ``value`` under ``key`` in ``container``.

    The assignment is skipped when ``value`` is ``None`` or equals one
    of the extra ``skip_values``.
    '''
    if value is None:
        return
    if value in skip_values:
        return
    container[key] = value
def wrap_method(func):
    '''Adapt plain function ``func`` so it can be used as a method.

    The returned callable discards the implicit instance argument and
    forwards the remaining positional and keyword arguments to ``func``.
    '''
    def method(_instance, *args, **kwargs):
        return func(*args, **kwargs)
    return method
def ordered_settings():
    '''Iterate over the registered :class:`Setting` classes.

    Classes are yielded in registration order, as recorded in the
    module-level ``KNOWN_SETTINGS_ORDER`` list.
    '''
    return (KNOWN_SETTINGS[key] for key in KNOWN_SETTINGS_ORDER)
def valid_config_value(val):
    '''``True`` when ``val`` survives a pickle round-trip.

    Values that cannot be pickled (lambdas, open files, modules, ...)
    are not valid configuration values because configuration is copied
    across process boundaries.
    '''
    try:
        pickle.loads(pickle.dumps(val))
    except Exception:
        return False
    else:
        return True
class Config(object):
    '''A dictionary-like container of :class:`Setting` parameters for
    fine tuning pulsar servers.
    It provides easy access to :attr:`Setting.value`
    attribute by exposing the :attr:`Setting.name` as attribute.
    :param description: description used when parsing the command line,
        same usage as in the :class:`argparse.ArgumentParser` class.
    :param epilog: epilog used when parsing the command line, same usage
        as in the :class:`argparse.ArgumentParser` class.
    :param version: version used when parsing the command line, same usage
        as in the :class:`argparse.ArgumentParser` class.
    :param apps: list of application namespaces to include in the
        :attr:`settings` dictionary. For example if ``apps`` is set to
        ``['socket', 'test']``, the
        :ref:`socket server <setting-section-socket-servers>` and
        :ref:`task queue <setting-section-test>` settings are
        loaded in addition to the standard
        :ref:`global settings <setting-section-global-server-settings>`,
        :ref:`worker settings <setting-section-worker-processes>` and
        :ref:`hook settings <setting-section-application-hooks>`.
    .. attribute:: settings
        Dictionary of all :class:`Setting` instances available in this
        :class:`Config` container.
        Keys are given by the :attr:`Setting.name` attribute.
    .. attribute:: params
        Dictionary of additional parameters which cannot be parsed on the
        command line
    '''
    script = None
    application = None
    # Names in a config module that are never imported as settings.
    exclude_from_config = set(('config',))

    def __init__(self, description=None, epilog=None,
                 version=None, apps=None, include=None,
                 exclude=None, settings=None, prefix=None,
                 name=None, log_name=None, **params):
        self.settings = {} if settings is None else settings
        self.params = {}
        self.name = name
        self.log_name = log_name
        self.prefix = prefix
        self.include = set(include or ())
        self.exclude = set(exclude or ())
        self.apps = set(apps or ())
        # Only build the settings from the global registry when no
        # pre-built settings dictionary was supplied.
        if settings is None:
            self.update_settings()
        self.description = description or 'Pulsar server'
        self.epilog = epilog or 'Have fun!'
        self.version = version or __version__
        self.update(params, True)

    def __iter__(self):
        return iter(self.settings)

    def __len__(self):
        return len(self.settings)

    def __contains__(self, name):
        return name in self.settings

    def items(self):
        # Yield (name, value) pairs rather than (name, Setting) pairs.
        for k, setting in self.settings.items():
            yield k, setting.value

    def __getstate__(self):
        return self.__dict__.copy()

    def __setstate__(self, state):
        for k, v in state.items():
            self.__dict__[k] = v
        # After unpickling, re-import the config module (if one was set)
        # so non-picklable values from it are restored in this process.
        config = getattr(self, 'config', None)
        if config:
            self.import_from_module(config)

    def __getattr__(self, name):
        # Expose setting values (and params) as attributes.
        try:
            return self._get(name)
        except KeyError:
            raise AttributeError("'%s' object has no attribute '%s'." %
                                 (self.__class__.__name__, name))

    def __setattr__(self, name, value):
        # Settings must be modified via ``set``; direct attribute
        # assignment on a setting name is forbidden.
        if name != "settings" and name in self.settings:
            raise AttributeError("Invalid access!")
        super().__setattr__(name, value)

    def update(self, data, default=False):
        '''Update this :attr:`Config` with ``data``.
        :param data: must be a ``Mapping`` like object exposing the ``item``
            method for iterating through key-value pairs.
        :param default: if ``True`` the updated :attr:`settings` will also
            set their :attr:`~Setting.default` attribute with the
            updating value (provided it is a valid one).
        '''
        for name, value in data.items():
            if value is not None:
                self.set(name, value, default)

    def copy_globals(self, cfg):
        '''Copy global settings from ``cfg`` to this config.
        The settings are copied only if they were not already modified.
        '''
        for name, setting in cfg.settings.items():
            csetting = self.settings.get(name)
            if (setting.is_global and csetting is not None and
                    not csetting.modified):
                csetting.set(setting.get())

    def get(self, name, default=None):
        '''Get the value at ``name`` for this :class:`Config` container
        The returned value is obtained from:
        * the value at ``name`` in the :attr:`settings` dictionary
          if available.
        * the value at ``name`` in the :attr:`params` dictionary if available.
        * the ``default`` value.
        '''
        try:
            return self._get(name, default)
        except KeyError:
            return default

    def set(self, name, value, default=False):
        '''Set the :class:`Setting` at ``name`` with a new ``value``.
        If ``default`` is ``True``, the :attr:`Setting.default` is also set.
        '''
        if name in self.__dict__:
            self.__dict__[name] = value
        elif name not in self.settings:
            # not in settings, check if this is a prefixed name
            if self.prefix:
                prefix_name = '%s_%s' % (self.prefix, name)
                if prefix_name in self.settings:
                    return    # don't set this value
            self.params[name] = value
        else:
            self.settings[name].set(value, default=default)

    def parser(self):
        '''Create the argparser_ for this configuration by adding all
        settings via the :meth:`Setting.add_argument` method.
        :rtype: an instance of :class:`ArgumentParser`.
        '''
        kwargs = {
            "description": self.description,
            "epilog": self.epilog
        }
        parser = argparse.ArgumentParser(**kwargs)
        parser.add_argument('--version',
                            action='version',
                            version=self.version)
        return self.add_to_parser(parser)

    def add_to_parser(self, parser):
        '''Add this container :attr:`settings` to an existing ``parser``.
        '''
        setts = self.settings

        # Sort arguments by (section, registration order) so the help
        # output groups related settings together.
        def sorter(x):
            return (setts[x].section, setts[x].order)

        for k in sorted(setts, key=sorter):
            setts[k].add_argument(parser)
        return parser

    def import_from_module(self, mod=None):
        # Import settings from a python config module; returns the list
        # of (name, value) pairs that did not match any known setting.
        if mod:
            self.set('config', mod)
        try:
            mod = import_system_file(self.config)
        except Exception as e:
            raise RuntimeError('Failed to read config file "%s". %s' %
                               (self.config, e))
        unknowns = []
        if mod:
            for k in dir(mod):
                # Skip private functions and attributes
                kl = k.lower()
                if k.startswith('_') or kl in self.exclude_from_config:
                    continue
                val = getattr(mod, k)
                # add unknown names to list
                if kl not in self.settings:
                    if valid_config_value(val):
                        unknowns.append((k, val))
                else:
                    self.set(kl, val)
        return unknowns

    def parse_command_line(self, argv=None):
        '''Parse the command line
        '''
        parser = self.parser()
        opts = parser.parse_args(argv)
        config = getattr(opts, 'config', None)
        # set the config only if config is part of the settings
        if config is not None and self.config:
            self.set('config', config)
            self.params.update(self.import_from_module())
        # Apply command-line values on top of config-file values.
        for k, v in opts.__dict__.items():
            if v is None:
                continue
            self.set(k.lower(), v)

    def on_start(self):
        '''Invoked by a :class:`.Application` just before starting.
        '''
        for sett in self.settings.values():
            sett.on_start()

    def app(self):
        if self.application:
            return self.application.from_config(self)

    @property
    def workers(self):
        return self.settings['workers'].get()

    @property
    def address(self):
        '''An address to bind to, only available if a
        :ref:`bind <setting-bind>` setting has been added to this
        :class:`Config` container.
        '''
        bind = self.settings.get('bind')
        if bind:
            return parse_address(to_bytes(bind.get()))

    @property
    def uid(self):
        # Numeric user id for the 'user' setting, when configured.
        user = self.settings.get('user')
        if user:
            return system.get_uid(user.get())

    @property
    def gid(self):
        # Numeric group id for the 'group' setting, when configured.
        group = self.settings.get('group')
        if group:
            return system.get_gid(group.get())

    @property
    def proc_name(self):
        # Fall back to 'default_process_name' when no explicit
        # 'process_name' value was configured.
        pn = self.settings.get('process_name')
        if pn:
            pn = pn.get()
        if pn is not None:
            return pn
        else:
            pn = self.settings.get('default_process_name')
            if pn:
                return pn.get()

    def copy(self, name=None, prefix=None):
        '''A copy of this :class:`Config` container.
        If ``prefix`` is given, it prefixes all non
        :ref:`global settings <setting-section-global-server-settings>`
        with it. Used when multiple applications are loaded.
        '''
        cls = self.__class__
        me = cls.__new__(cls)
        me.__dict__.update(self.__dict__)
        if prefix:
            me.prefix = prefix
        # Re-build the settings dictionary with per-setting copies so
        # the new container does not share Setting instances.
        settings = me.settings
        me.settings = {}
        for setting in settings.values():
            setting = setting.copy(name, prefix)
            me.settings[setting.name] = setting
        me.params = me.params.copy()
        return me

    def clone(self):
        # Deep copy via a pickle round-trip.
        return pickle.loads(pickle.dumps(self))

    def configured_logger(self, name=None):
        '''Configured logger.
        '''
        loghandlers = self.loghandlers
        # logname
        if not name:
            # base name is always pulsar
            basename = 'pulsar'
            # the namespace name for this config
            name = self.name
            if name and name != basename:
                name = '%s.%s' % (basename, name)
            else:
                name = basename
        # Map each "namespace.level" entry of the loglevel setting to a
        # {namespace: level} dictionary ('' is the root namespace).
        namespaces = {}
        for loglevel in self.loglevel or ():
            bits = loglevel.split('.')
            namespaces['.'.join(bits[:-1]) or ''] = bits[-1]
        for namespace in sorted(namespaces):
            if self.daemon:    # pragma    nocover
                # When daemonised there is no console; drop the console
                # handler and make sure at least a file handler exists.
                handlers = []
                for hnd in loghandlers:
                    if hnd != 'console':
                        handlers.append(hnd)
                if not handlers:
                    handlers.append('file')
                loghandlers = handlers
            configured_logger(namespace,
                              config=self.logconfig,
                              level=namespaces[namespace],
                              handlers=loghandlers)
        return logging.getLogger(name)

    def __copy__(self):
        return self.copy()

    def __deepcopy__(self, memo):
        return self.copy()

    ########################################################################
    # INTERNALS
    def update_settings(self):
        # Populate self.settings from the global registry, honouring the
        # apps/include/exclude filters; filtered-out settings keep their
        # value in self.params.
        for s in ordered_settings():
            setting = s().copy(name=self.name, prefix=self.prefix)
            if setting.name in self.settings:
                continue
            if setting.app and setting.app not in self.apps:
                continue    # the setting is for an app not in the apps set
            if ((self.include and setting.name not in self.include) or
                    setting.name in self.exclude):
                self.params[setting.name] = setting.get()
            else:
                self.settings[setting.name] = setting

    def _get(self, name, default=None):
        # Lookup order: settings, then params, then the default for any
        # known (but not loaded) setting; unknown names raise KeyError.
        if name not in self.settings:
            if name in self.params:
                return self.params[name]
            if name in KNOWN_SETTINGS:
                return default
            raise KeyError("'%s'" % name)
        return self.settings[name].get()
class SettingMeta(type):
    '''A metaclass which collects all setting classes and put them
    in the global ``KNOWN_SETTINGS`` list.'''
    def __new__(cls, name, bases, attrs):
        super_new = super(SettingMeta, cls).__new__
        # parents = [b for b in bases if isinstance(b, SettingMeta)]
        # Wrap the validator so it can be stored as a method while still
        # being called with the value only (the instance is discarded).
        val = attrs.get("validator")
        attrs["validator"] = wrap_method(val) if val else None
        # Virtual settings are base classes only: they are not
        # registered and get no order.
        if attrs.pop('virtual', False):
            return super_new(cls, name, bases, attrs)
        attrs["order"] = len(KNOWN_SETTINGS) + 1
        new_class = super_new(cls, name, bases, attrs)
        # build one instance to increase count
        new_class()
        new_class.fmt_desc(attrs['desc'] or '')
        if not new_class.name:
            new_class.name = new_class.__name__.lower()
        # Re-defining a setting with the same name replaces the old
        # class but keeps its original position in the ordering.
        if new_class.name in KNOWN_SETTINGS_ORDER:
            old_class = KNOWN_SETTINGS.pop(new_class.name)
            new_class.order = old_class.order
        else:
            KNOWN_SETTINGS_ORDER.append(new_class.name)
        KNOWN_SETTINGS[new_class.name] = new_class
        return new_class

    def fmt_desc(cls, desc):
        # Normalise the description: dedent it and use the first
        # paragraph as the short description.
        desc = textwrap.dedent(desc).strip()
        setattr(cls, "desc", desc)
        lines = desc.split('\n\n')
        setattr(cls, "short", '' if not lines else lines[0])
class Setting(metaclass=SettingMeta):
    '''Class for creating :ref:`pulsar settings <settings>`.
    Most parameters can be specified on the command line,
    all of them on a ``config`` file.
    '''
    # Number of instances created for this class (used for ordering).
    creation_count = 0
    virtual = True
    '''If set to ``True`` the settings won't be loaded.
    It can be only used as base class for other settings.'''
    name = None
    '''The key to access this setting in a :class:`Config` container.'''
    validator = None
    '''A validating function for this setting.
    It provided it must be a function accepting one positional argument,
    the value to validate.'''
    value = None
    '''The actual value for this setting.'''
    default = None
    '''The default value for this setting.'''
    nargs = None
    '''The number of command-line arguments that should be consumed'''
    const = None
    '''A constant value required by some action and nargs selections'''
    app = None
    '''Setting for a specific :class:`Application`.'''
    section = None
    '''Setting section, used for creating the
    :ref:`settings documentation <settings>`.'''
    flags = None
    '''List of options strings, e.g. ``[-f, --foo]``.'''
    choices = None
    '''Restrict the argument to the choices provided.'''
    type = None
    '''The type to which the command-line argument should be converted'''
    meta = None
    '''Same usage as ``metavar`` in the python :mod:`argparse` module. It is
    the name for the argument in usage message.'''
    action = None
    '''The basic type of action to be taken when this argument is encountered
    at the command line'''
    short = None
    '''Optional shot description string'''
    desc = None
    '''Description string'''
    is_global = False
    '''``True`` only for
    :ref:`global settings <setting-section-global-server-settings>`.'''
    # Original (un-prefixed) name when the setting has been prefixed
    # by :meth:`copy`.
    orig_name = None

    def __init__(self, name=None, flags=None, action=None, type=None,
                 default=None, nargs=None, desc=None, validator=None,
                 app=None, meta=None, choices=None, const=None):
        # Extra keyword arguments forwarded to argparse's add_argument.
        self.extra = e = {}
        self.app = app or self.app
        set_if_avail(e, 'choices', choices or self.choices)
        set_if_avail(e, 'const', const or self.const)
        set_if_avail(e, 'type', type or self.type, 'string')
        # Constructor arguments override class-level defaults.
        self.default = default if default is not None else self.default
        self.desc = desc or self.desc
        self.flags = flags or self.flags
        self.action = action or self.action
        self.meta = meta or self.meta
        self.name = name or self.name
        self.nargs = nargs or self.nargs
        self.short = self.short or self.desc
        self.desc = self.desc or self.short
        # Run the default through the validator (via set).
        if self.default is not None:
            self.set(self.default)
        if self.app and not self.section:
            self.section = self.app
        if not self.section:
            self.section = 'unknown'
        self.__class__.creation_count += 1
        # Settings without a registry-assigned order go to the end.
        if not hasattr(self, 'order'):
            self.order = 1000 + self.__class__.creation_count
        self.modified = False

    def __getstate__(self):
        return self.__dict__.copy()

    def __str__(self):
        return '{0} ({1})'.format(self.name, self.value)
    __repr__ = __str__

    def on_start(self):
        '''Called when pulsar server starts.
        It can be used to perform custom initialization for this
        :class:`Setting`.
        '''
        pass

    def get(self):
        '''Returns :attr:`value`'''
        return self.value

    def set(self, val, default=False):
        '''Set ``val`` as the :attr:`value` for this :class:`Setting`.
        If ``default`` is ``True`` set also the :attr:`default` value.
        '''
        # Validate before storing, when a validator is configured.
        if hasattr(self.validator, '__call__'):
            val = self.validator(val)
        self.value = val
        if default:
            self.default = val
        self.modified = True

    def add_argument(self, parser, set_default=False):
        '''Add this :class:`Setting` to the ``parser``.
        The operation is carried out only if :attr:`flags` or
        :attr:`nargs` and :attr:`name` are defined.
        '''
        default = self.default if set_default else None
        kwargs = {'nargs': self.nargs}
        kwargs.update(self.extra)
        if self.flags:
            # Optional argument (e.g. ``--workers``).
            args = tuple(self.flags)
            kwargs.update({'dest': self.name,
                           'action': self.action or "store",
                           'default': default,
                           'help': "%s [%s]" % (self.short, self.default)})
            # Non-store actions (store_true, ...) reject type/nargs.
            if kwargs["action"] != "store":
                kwargs.pop("type", None)
                kwargs.pop("nargs", None)
        elif self.nargs and self.name:
            # Positional argument.
            args = (self.name,)
            kwargs.update({'metavar': self.meta or None,
                           'help': self.short})
        else:
            # Not added to argparser
            return
        if self.meta:
            kwargs['metavar'] = self.meta
        parser.add_argument(*args, **kwargs)

    def copy(self, name=None, prefix=None):
        '''Copy this :class:`SettingBase`'''
        setting = self.__class__.__new__(self.__class__)
        setting.__dict__.update(self.__dict__)
        # Keep the modified flag?
        # setting.modified = False
        # Non-global settings may be renamed with an application prefix
        # so several applications can configure them independently.
        if prefix and not setting.is_global:
            flags = setting.flags
            if flags and flags[-1].startswith('--'):
                # Prefix a setting
                setting.orig_name = setting.name
                setting.name = '%s_%s' % (prefix, setting.name)
                setting.flags = ['--%s-%s' % (prefix, flags[-1][2:])]
        if name and not setting.is_global:
            setting.short = '%s application. %s' % (name, setting.short)
        return setting

    def __copy__(self):
        return self.copy()

    def __deepcopy__(self, memo):
        return self.copy()
class TestOption(Setting):
    # Virtual base class for settings belonging to the 'test' application
    # namespace; it is never registered itself (virtual=True).
    virtual = True
    app = 'test'
    section = "Test"
def validate_bool(val):
    """Coerce ``val`` to a boolean.

    Accepts actual booleans and the case-insensitive strings
    ``"true"``/``"false"`` (surrounding whitespace ignored).

    :raises TypeError: when ``val`` is neither a bool nor a string.
    :raises ValueError: when the string is not a recognised boolean.
    """
    if isinstance(val, bool):
        return val
    if not isinstance(val, str):
        raise TypeError("Invalid type for casting: %s" % val)
    # Normalise once instead of re-computing for every comparison.
    normalized = val.lower().strip()
    if normalized == "true":
        return True
    elif normalized == "false":
        return False
    else:
        raise ValueError("Invalid boolean: %s" % val)
def validate_pos_int(val):
    """Validate ``val`` as a non-negative integer.

    Strings are parsed with base auto-detection (``"0x10"`` -> 16);
    booleans, being ints, are normalised to plain ``int``.
    """
    if isinstance(val, int):
        # Booleans are ints!
        number = int(val)
    else:
        number = int(val, 0)
    if number < 0:
        raise ValueError("Value must be positive: %s" % number)
    return number
def validate_pos_float(val):
    """Validate ``val`` as a non-negative float."""
    number = float(val)
    if number >= 0:
        return number
    raise ValueError("Value must be positive: %s" % number)
def validate_string(val):
    """Validate ``val`` as a string, returning it stripped.

    ``None`` passes through unchanged; any other non-string value
    raises ``TypeError``.
    """
    if val is None:
        return None
    if isinstance(val, str):
        return val.strip()
    raise TypeError("Not a string: %s" % val)
def validate_list(val):
    """Validate ``val`` as a list.

    Lists and tuples are accepted and returned as a new list. Falsy
    values - including ``None`` - yield an empty list; previously
    ``validate_list(None)`` crashed with ``TypeError`` from
    ``list(None)``.
    """
    if val and not isinstance(val, (list, tuple)):
        raise TypeError("Not a list: %s" % val)
    return list(val) if val else []
def validate_dict(val):
    """Validate ``val`` as a dictionary.

    Falsy values (``None``, ``{}``) pass through unchanged; any truthy
    non-dict raises ``TypeError``.
    """
    if not val:
        return val
    if isinstance(val, dict):
        return val
    raise TypeError("Not a dictionary: %s" % val)
def validate_callable(arity):
    """Build a validator checking that a value is callable with ``arity``
    positional arguments.

    The returned validator raises ``TypeError`` when the value is not
    callable or when ``arity`` is not among the possible numbers of
    positional arguments (default values make a range of arities valid).
    """
    def _validate_callable(val):
        if not hasattr(val, '__call__'):
            raise TypeError("Value is not callable: %s" % val)
        if not inspect.isfunction(val):
            # Callable object: inspect its __call__ and discount the
            # implicit bound instance argument.
            cval = val.__call__
            discount = 1
        else:
            discount = 0
            cval = val
        # getfullargspec replaces getargspec, which was deprecated and
        # removed in Python 3.11; the .args/.defaults attributes match.
        result = inspect.getfullargspec(cval)
        nargs = len(result.args) - discount
        if result.defaults:
            group = tuple(range(nargs - len(result.defaults), nargs + 1))
        else:
            group = (nargs,)
        if arity not in group:
            raise TypeError("Value must have an arity of: %s" % arity)
        return val
    return _validate_callable
def make_optparse_options(apps=None, exclude=None, include=None, **override):
    '''Create a tuple of optparse options.

    Builds a throw-away :class:`Config` with the given filters and
    converts every argparse argument it would define into an optparse
    ``Option`` (for frameworks still using optparse).
    '''
    from optparse import make_option

    class AddOptParser(list):
        # Duck-types the argparse parser interface expected by
        # Config.add_to_parser, collecting optparse options instead.
        def add_argument(self, *args, **kwargs):
            if 'const' in kwargs:
                # optparse has no nargs='?' + const; use store_const.
                kwargs['action'] = 'store_const'
                kwargs.pop('type')
            self.append(make_option(*args, **kwargs))

    config = Config(apps=apps, exclude=exclude, include=include, **override)
    parser = AddOptParser()
    config.add_to_parser(parser)
    return tuple(parser)
############################################################################
# Global Server Settings
# Section documentation consumed by the settings docs build.
section_docs['Global Server Settings'] = '''
These settings are global in the sense that they are used by the arbiter
as well as all pulsar workers. They are server configuration parameters.
'''
class Global(Setting):
    # Virtual base class for settings shared by the arbiter and all
    # workers; never registered itself (virtual=True).
    virtual = True
    section = "Global Server Settings"
    is_global = True
class ConfigFile(Global):
    # Path of the optional python config module parsed at startup.
    name = "config"
    flags = ["-c", "--config"]
    meta = "FILE"
    validator = validate_string
    default = 'config.py'
    desc = """\
        The path to a Pulsar config file, where default Settings
        parameters can be specified.
        """
class HttpProxyServer(Global):
    # Proxy used by pulsar's HttpClient; exported to the environment.
    name = "http_proxy"
    flags = ["--http-proxy"]
    default = ''
    desc = """\
        The HTTP proxy server to use with HttpClient.
        """

    def on_start(self):
        # Export the proxy for all supported schemes so client code
        # picks it up from the environment.
        if self.value:    # pragma    nocover
            os.environ['http_proxy'] = self.value
            os.environ['https_proxy'] = self.value
            os.environ['ws_proxy'] = self.value
            os.environ['wss_proxy'] = self.value
class HttpParser(Global):
    # Force the pure-python HTTP parser instead of any C extension.
    name = "http_py_parser"
    flags = ["--http-py-parser"]
    action = "store_true"
    default = False
    desc = '''\
    Set the python parser as default HTTP parser
    '''

    def on_start(self):
        if self.value:    # pragma    nocover
            from pulsar.utils.httpurl import setDefaultHttpParser
            setDefaultHttpParser(PyHttpParser)
class Debug(Global):
    # Debug level: 0 off; bare ``--debug`` enables it via const=1.
    name = "debug"
    flags = ["--debug"]
    nargs = '?'
    type = int
    default = 0
    const = 1
    desc = """\
        Turn on debugging in the server.
        Set the log level to debug, limits the number of worker processes
        to 1, set asyncio debug flag.
        """
class Daemon(Global):
    # Run in the background, detached from the terminal (posix only).
    name = "daemon"
    flags = ["-D", "--daemon"]
    validator = validate_bool
    action = "store_true"
    default = False
    desc = """\
        Daemonize the pulsar process (posix only).
        Detaches the server from the controlling terminal and enters the
        background.
        """
class Reload(Global):
    # Auto-reload on code changes; intended for development.
    name = "reload"
    flags = ["--reload"]
    validator = validate_bool
    action = "store_true"
    default = False
    desc = """\
        Auto reload modules when changes occurs.
        Useful during development.
        """
class Noisy(Global):
    # Log failures immediately rather than when they are retrieved.
    name = "noisy"
    flags = ["--noisy"]
    validator = validate_bool
    action = "store_true"
    default = False
    desc = """\
        Log Failures as soon as they occur.
        This option is really needed during development when hunting
        for hidden bugs
        """
class Pidfile(Global):
    # Optional path where the master process id is written.
    name = "pidfile"
    flags = ["-p", "--pid"]
    meta = "FILE"
    validator = validate_string
    default = None
    desc = """\
        A filename to use for the PID file.
        If not set, no PID file will be written.
        """
class Password(Global):
    # Optional password protecting the server.
    name = "password"
    flags = ["--password"]
    validator = validate_string
    default = None
    desc = """Set a password for the server"""
class User(Global):
    # Unix user worker processes switch to (name or numeric id).
    name = "user"
    flags = ["-u", "--user"]
    meta = "USER"
    validator = validate_string
    default = None
    desc = """\
        Switch worker processes to run as this user.
        A valid user id (as an integer) or the name of a user that can be
        retrieved with a call to pwd.getpwnam(value) or None to not change
        the worker process user.
        """
class Group(Global):
    # Unix group worker processes switch to (name or numeric id).
    name = "group"
    flags = ["-g", "--group"]
    meta = "GROUP"
    validator = validate_string
    default = None
    desc = """\
        Switch worker process to run as this group.
        A valid group id (as an integer) or the name of a user that can be
        retrieved with a call to pwd.getgrnam(value) or None to not change
        the worker processes group.
        """
class Loglevel(Global):
    # One or more "namespace.level" entries controlling log verbosity.
    name = "loglevel"
    flags = ["--log-level"]
    nargs = '+'
    default = ['info']
    validator = validate_list
    desc = '''
        The granularity of log outputs.
        This setting controls loggers with ``pulsar`` namespace
        and the root logger (if not already set).
        Valid level names are:
        * debug
        * info
        * warning
        * error
        * critical
        * none
        '''
class LogHandlers(Global):
    # Names of logging handlers attached to pulsar loggers.
    name = "loghandlers"
    flags = ["--log-handlers"]
    nargs = '+'
    default = ['console']
    validator = validate_list
    desc = '''Log handlers for pulsar server'''
class LogConfig(Global):
    # dictConfig-style logging configuration; config-file only (no flags).
    name = "logconfig"
    default = {}
    validator = validate_dict
    desc = '''
        The logging configuration dictionary.
        This setting can only be specified on a config file and therefore
        no command-line parameter is available.
        '''
class Procname(Global):
    # Base process title used with setproctitle (shows in ps/top).
    name = "process_name"
    flags = ["-n", "--name"]
    meta = "STRING"
    validator = validate_string
    default = None
    desc = """\
        A base to use with setproctitle for process naming.
        This affects things like ``ps`` and ``top``. If you're going to be
        running more than one instance of Pulsar you'll probably want to set a
        name to tell them apart. This requires that you install the
        setproctitle module.
        It defaults to 'pulsar'.
        """
class DefaultProcName(Global):
    # Fallback process name when 'process_name' is not configured.
    name = "default_process_name"
    validator = validate_string
    default = SERVER_NAME
    desc = """\
        Internal setting that is adjusted for each type of application.
        """
class Coverage(Global):
    # Enable code-coverage collection in every spawned actor.
    name = "coverage"
    flags = ["--coverage"]
    validator = validate_bool
    action = "store_true"
    default = False
    desc = """Collect code coverage from all spawn actors."""
class DataStore(Global):
    # Connection string of the default datastore (empty = no datastore).
    name = 'data_store'
    flags = ['--data-store']
    meta = "CONNECTION STRING"
    default = ''
    desc = '''\
        Default data store.
        Use this setting to specify a datastore used by pulsar applications.
        By default no datastore is used.
        '''
class ExecutionId(Global):
    # Identifier for this execution; auto-assigned when left empty.
    name = 'exc_id'
    flags = ['--exc-id']
    default = ''
    desc = '''\
        Execution ID.
        Use this setting to specify an execution ID.
        If not provided, a value will be assigned by pulsar.
        '''
class UseGreenlet(Global):
    # Greenlet usage level: 0 off; bare ``--greenlet`` sets const=100.
    name = 'greenlet'
    flags = ['--greenlet']
    nargs = '?'
    type = int
    default = 0
    const = 100
    desc = '''\
        Use greenlet whenever possible.
        '''
class PyCares(Global):
    # Disable pycares and fall back to the stdlib DNS resolver.
    name = 'no_pycares'
    flags = ['--no-pycares']
    validator = validate_bool
    action = "store_true"
    default = False
    desc = '''\
        Switch off pycares DNS lookup.
        By default, pulsar uses pycares for its DNS lookups (if it is available).
        Use this flag to revert to the standard library dns resolver.
        '''
############################################################################
# Worker Processes
# Section documentation consumed by the settings docs build.
section_docs['Worker Processes'] = '''
This group of configuration parameters control the number of actors
for a given :class:`.Monitor`, the type of concurrency of the server and
other actor-specific parameters.
They are available to all applications and, unlike global settings,
each application can specify different values.
'''
class Workers(Setting):
    # Number of worker actors handling requests.
    name = "workers"
    section = "Worker Processes"
    flags = ["-w", "--workers"]
    validator = validate_pos_int
    type = int
    default = 1
    desc = """\
        The number of workers for handling requests.
        If using a multi-process concurrency, a number in the
        ``2-4 x NUM_CORES`` range should be good. If you are using
        threads this number can be higher.
        """
class Concurrency(Setting):
    # Worker concurrency model: separate processes or threads.
    name = "concurrency"
    section = "Worker Processes"
    choices = ('process', 'thread')
    flags = ["--concurrency"]
    default = "process"
    desc = """The type of concurrency to use."""
class MaxRequests(Setting):
    # Restart a worker after this many requests (0 disables restarts).
    name = "max_requests"
    section = "Worker Processes"
    flags = ["--max-requests"]
    validator = validate_pos_int
    type = int
    default = 0
    desc = """\
        The maximum number of requests a worker will process before restarting.
        Any value greater than zero will limit the number of requests a worker
        will process before automatically restarting. This is a simple method
        to help limit the damage of memory leaks.
        If this is set to zero (the default) then the automatic worker
        restarts are disabled.
        """
class Timeout(Setting):
    # Seconds of worker silence tolerated before a kill/restart.
    name = "timeout"
    section = "Worker Processes"
    flags = ["-t", "--timeout"]
    validator = validate_pos_int
    type = int
    default = 30
    desc = """\
        Workers silent for more than this many seconds are
        killed and restarted."""
class ThreadWorkers(Setting):
    # Size of each actor's event-loop thread-pool executor.
    name = "thread_workers"
    section = "Worker Processes"
    flags = ["--thread-workers"]
    validator = validate_pos_int
    type = int
    default = 5
    desc = """\
        Maximum number of threads used by the actor event loop executor.
        The executor is a thread pool used by the event loop to perform CPU
        intensive operations or when it needs to execute blocking calls.
        It allows the actor main thread to be free to listen
        to events on file descriptors and process them as quick as possible.
        """
############################################################################
# APPLICATION HOOKS
section_docs['Application Hooks'] = '''
Application hooks are functions which can be specified in a
:ref:`config <setting-config>` file to perform custom tasks in a pulsar server.
These tasks can be scheduled when events occurs or at every event loop of
the various components of a pulsar application.
All application hooks are functions which accept one positional
parameter and one key-valued parameter ``exc`` when an exception occurs::
def hook(arg, exc=None):
...
Like worker process settings, each application can specify their own.
'''
class Postfork(Setting):
    # Hook invoked right after a worker process is forked.
    name = "post_fork"
    section = "Application Hooks"
    validator = validate_callable(1)
    type = "callable"
    default = staticmethod(pass_through)
    desc = 'Called just after a worker has been forked'
class WhenReady(Setting):
    # Hook invoked just before a worker enters its event loop.
    name = "when_ready"
    section = "Application Hooks"
    validator = validate_callable(1)
    type = "callable"
    default = staticmethod(pass_through)
    desc = 'Called just before a worker starts running its event loop'
class WhenExit(Setting):
    # Hook invoked just before an actor is destroyed.
    name = "when_exit"
    section = "Application Hooks"
    validator = validate_callable(1)
    type = "callable"
    default = staticmethod(pass_through)
    desc = """\
        Called just before an actor is garbage collected.
        This is a chance to check the actor status if needed.
        """
class ConnectionMade(Setting):
    # Hook invoked whenever a new connection is established.
    name = "connection_made"
    section = "Application Hooks"
    validator = validate_callable(1)
    type = "callable"
    default = staticmethod(pass_through)
    desc = """\
        Called after a new connection is made.
        The callable needs to accept one parameter for the
        connection instance.
        """
class ConnectionLost(Setting):
    # Hook invoked whenever a connection is dropped.
    name = "connection_lost"
    section = "Application Hooks"
    validator = validate_callable(1)
    type = "callable"
    default = staticmethod(pass_through)
    desc = """
        Called after a connection is lost.
        The callable needs to accept one parameter for the
        connection instance.
        """
class PreRequest(Setting):
    # Hook invoked before a request is processed.
    name = "pre_request"
    section = "Application Hooks"
    validator = validate_callable(1)
    type = "callable"
    default = staticmethod(pass_through)
    desc = """\
        Called just before an application server processes a request.
        The callable needs to accept one parameter for the
        consumer.
        """
class PostRequest(Setting):
    # Hook invoked after a request has been processed.
    name = "post_request"
    section = "Application Hooks"
    validator = validate_callable(1)
    type = "callable"
    default = staticmethod(pass_through)
    desc = """\
        Called after an application server processes a request.
        The callable needs to accept one parameter for the
        consumer.
        """
| bsd-3-clause |
xupingmao/xnote | core/xconfig.py | 1 | 10256 | # encoding=utf-8
# @author xupingmao
# @modified 2021/06/27 19:32:54
'''xnote系统配置
# 用户配置
- get_user_config
- get_user_config_dict
# 文件配置
- 约定目录叫 XXX_DIR
- 文件叫 XXX_FILE
# 通知的配置
- add_notice
- get_notice_list
# 别名配置
- set_alias
- get_alias
# 菜单配置
'''
import os
import time
from collections import OrderedDict
from xutils.base import Storage
from xutils.textutil import Properties
__version__ = "1.0"
__author__ = "xupingmao (578749341@qq.com)"
__copyright__ = "(C) 2016-2021 xupingmao. GNU GPL 3."
__contributors__ = []
# 系统错误信息
errors = []
##################################
# 系统配置项
##################################
# 开发者模式,会展示更多的选项和信息,会开启实验性功能
DEV_MODE = False
# 开启调试
DEBUG = False
# 调试盒子模型,针对某些不方便调试的浏览器
DEBUG_HTML_BOX = False
PORT = "1234"
SITE_HOME = None
# 线程数
MIN_THREADS = 20
# 打开浏览器
OPEN_IN_BROWSER = False
# 启用数据库的缓存搜索
USE_CACHE_SEARCH = False
# 文件系统使用urlencode方式,适用于只支持ASCII字符的系统
USE_URLENCODE = False
# 初始化脚本
INIT_SCRIPT = "init.py"
# 是否记录位置信息,可通过脚本配置打开
RECORD_LOCATION = False
# *** 样式设置 ***
# BASE_TEMPLATE = "common/theme/sidebar.html"
BASE_TEMPLATE = "base.html"
# 主题样式
THEME = "standard"
# 选项风格
OPTION_STYLE = "aside"
# 页面打开方式
PAGE_OPEN = "self"
# 页面宽度
PAGE_WIDTH = "1150"
USER_CSS = None
USER_JS = None
# 插件相关 具体的代码参考 handlers/plugins 目录
LOAD_PLUGINS_ON_INIT = True
PLUGINS_DICT = {}
PLUGIN_TEMPLATE = ""
# 菜单配置
MENU_LIST = []
# 导航配置
NAV_LIST = []
# 笔记的扩展配置
NOTE_OPTIONS = []
# 文件管理器的扩展配置
FS_OPTIONS = []
##################################
# 存储目录配置项
##################################
# 处理器目录
HANDLERS_DIR = "handlers"
# 工具目录
TOOLS_DIR = "handlers/tools"
# 语言配置目录
LANG_DIR = "config/lang"
DB_ENGINE = "leveldb"
WORKING_DIR = os.path.dirname(__file__)
WEBDIR = os.path.join(WORKING_DIR, "static")
PLUGINS_DIR = os.path.join(WORKING_DIR, "plugins")
LOG_DIR = os.path.join(WORKING_DIR, "log")
# 日志失效时间
LOG_EXPIRE = 24 * 3600 * 365
# 用户数据的地址
DATA_PATH = os.path.join(WORKING_DIR, "data")
DATA_DIR = DATA_PATH
SCRIPTS_DIR = os.path.join(DATA_DIR, "scripts")
DB_DIR = os.path.join(DATA_DIR, "db")
CONFIG_DIR = os.path.join(DATA_DIR, "config")
BACKUP_DIR = os.path.join(DATA_DIR, "backup")
# 备份失效时间
BACKUP_EXPIRE = 24 * 3600 * 365
# 回收站清理时间
TRASH_EXPIRE = 24 * 3600 * 90
# 临时文件失效时间
TMP_EXPIRE = 24 * 3600 * 90
# 其他标记
# 测试用的flag,开启会拥有admin权限
IS_TEST = False
# 开启性能分析
OPEN_PROFILE = False
PROFILE_PATH_SET = set(["/file/view"])
# 静音停止时间
MUTE_END_TIME = None
# 资料相关
# 分页数量
PAGE_SIZE = 20
# 搜索历史的最大记录数
SEARCH_HISTORY_MAX_SIZE = 1000
SEARCH_PAGE_SIZE = 20
# 搜索摘要长度
SEARCH_SUMMARY_LEN = 100
RECENT_SEARCH_LIMIT = 10
RECENT_SIZE = 6
IP_BLACK_LIST = ["192.168.56.1"] # this is vbox ip
# max file size to sync or backup
MAX_FILE_SIZE = 10 * 1024 ** 2
# 文本编辑器的最大文件限制
MAX_TEXT_SIZE = 100 * 1024
# 文件系统列分隔符,文件名保留符号参考函数 xutils.get_safe_file_name(filename)
FS_COL_SEP = "$"
# 是否隐藏系统文件
FS_HIDE_FILES = True
# 文件管理扩展的选项,类型Storage
FS_LINK = "/fs_list"
# 文件浏览模式 list/grid/sidebar
FS_VIEW_MODE = "list"
# 文本文件后缀
FS_TEXT_EXT_LIST = set()
FS_IMG_EXT_LIST = set()
FS_CODE_EXT_LIST = set()
MIME_TYPES = dict()
# 后面定义的set函数和系统函数冲突了,所以这里创建一个hashset的别名
hashset = set
# 剪切板
FS_CLIP = []
# 通知公告
_notice_list = []
# 搜索历史
search_history = None
# 笔记访问历史
note_history = None
# 配置项
_config = {}
START_TIME = None
# 是否隐藏词典的入口
HIDE_DICT_ENTRY = True
# 默认的用户配置
DEFAULT_USER_CONFIG = {
"HOME_PATH" : "/note/group",
"PROJECT_PATH": "/note/timeline",
"LANG" : "zh",
}
def makedirs(dirname):
    """Create *dirname* (including parents) unless it already exists."""
    if os.path.exists(dirname):
        return
    os.makedirs(dirname)
def init(path = DATA_DIR):
    """Initialize all path-related configuration globals and create the
    data directory tree.  Must be called once at startup, before anything
    reads the DATA_* / *_DIR / *_FILE globals computed here.
    """
    global DATA_PATH
    global DATA_DIR
    global DB_PATH
    global DB_FILE
    global DICT_FILE
    global RECORD_FILE
    global BACKUP_DIR
    global UPLOAD_DIR
    global APP_DIR
    global TMP_DIR
    global DB_DIR
    global SCRIPTS_DIR
    global COMMANDS_DIR
    global PLUGINS_DIR
    global CODE_ZIP
    global DATA_ZIP
    global TRASH_DIR
    global LOG_PATH
    global LOG_DIR
    global LOG_FILE
    global STORAGE_DIR
    global ETC_DIR
    global PLUGIN_TEMPLATE

    DATA_PATH = os.path.abspath(path)
    DATA_DIR = os.path.abspath(path)

    # Database locations.
    DB_DIR = os.path.join(DATA_DIR, "db")
    DB_PATH = os.path.join(DATA_DIR, "data.db")
    DICT_FILE = os.path.join(DATA_DIR, "dictionary.db")
    RECORD_FILE = os.path.join(DATA_DIR, "record.db")
    # Backup location.
    BACKUP_DIR = os.path.join(DATA_DIR, "backup")
    # Uploads, installed apps and temporary files.
    UPLOAD_DIR = os.path.join(DATA_DIR, "files")
    APP_DIR = os.path.join(DATA_DIR, "app")
    TMP_DIR = os.path.join(DATA_DIR, "tmp")
    # Script locations.
    SCRIPTS_DIR = os.path.join(DATA_DIR, "scripts")
    COMMANDS_DIR = os.path.join(SCRIPTS_DIR, "commands")
    PLUGINS_DIR = os.path.join(SCRIPTS_DIR, "plugins")
    CODE_ZIP = os.path.join(DATA_DIR, "code.zip")
    DATA_ZIP = os.path.join(DATA_DIR, "data.zip")
    TRASH_DIR = os.path.join(DATA_DIR, "trash")
    LOG_PATH = os.path.join(DATA_DIR, "xnote.log")
    STORAGE_DIR = os.path.join(DATA_DIR, "storage")
    # NOTE(review): ETC_DIR points at "storage", identical to STORAGE_DIR --
    # possibly intended to be "etc"; confirm before changing.
    ETC_DIR = os.path.join(DATA_DIR, "storage")
    LOG_DIR = os.path.join(DATA_DIR, "log")
    DB_FILE = DB_PATH
    LOG_FILE = LOG_PATH
    # NOTE(review): FILE_EXT_PATH is a local variable (not declared global)
    # and is never read afterwards -- looks like dead code; confirm.
    FILE_EXT_PATH = os.path.join(CONFIG_DIR, "file", "type.properties")

    # First-level directories.
    makedirs(DATA_DIR)
    makedirs(UPLOAD_DIR)
    makedirs(TMP_DIR)
    makedirs(SCRIPTS_DIR)
    makedirs(TRASH_DIR)
    makedirs(STORAGE_DIR)
    makedirs(ETC_DIR)
    makedirs(LOG_DIR)
    makedirs(DB_DIR)

    # Second-level directories.
    makedirs(COMMANDS_DIR)
    makedirs(PLUGINS_DIR)

    # Load file-extension classification config.
    load_file_type_config()

    from xutils import fsutil
    PLUGIN_TEMPLATE = fsutil.readfile("./config/plugin/plugin.tpl")
def mark_started():
    """Record the moment the application finished booting (epoch seconds).

    Several functions (e.g. get_user_config) use START_TIME is None as a
    "not yet started" guard.
    """
    global START_TIME
    START_TIME = time.time()
def load_file_type_config0(fpath):
    """Read a .properties file and return the set of extension keys it defines.

    The values of the parsed dict are discarded; only the keys are kept.
    """
    from xutils import fsutil, textutil
    text = fsutil.readfile(fpath)
    # `hashset` is the module-level alias for the builtin set (the name
    # `set` is shadowed by a function later in this module).
    ext_set = hashset()
    ext_type_dict = textutil.parse_config_text(text, 'dict')
    for ext in ext_type_dict:
        ext_set.add(ext)
    return ext_set
def load_config_as_dict(fpath):
    """Read a .properties file and return its key/value pairs as a dict."""
    from xutils import fsutil, textutil
    text = fsutil.readfile(fpath)
    # Dead local `ext_set = hashset()` removed: it was created and never used.
    return textutil.parse_config_text(text, 'dict')
def load_file_type_config():
    """Load the file-extension classification sets and the MIME type map
    from ./config/file/*.properties into module globals."""
    global FS_TEXT_EXT_LIST
    global FS_IMG_EXT_LIST
    global FS_CODE_EXT_LIST
    global FS_ZIP_EXT_LIST
    global FS_AUDIO_EXT_LIST
    global FS_VIDEO_EXT_LIST
    global MIME_TYPES

    FS_TEXT_EXT_LIST = load_file_type_config0("./config/file/text.properties")
    FS_IMG_EXT_LIST = load_file_type_config0("./config/file/image.properties")
    FS_CODE_EXT_LIST = load_file_type_config0("./config/file/code.properties")
    FS_ZIP_EXT_LIST = load_file_type_config0("./config/file/zip.properties")
    FS_AUDIO_EXT_LIST = load_file_type_config0("./config/file/audio.properties")
    FS_VIDEO_EXT_LIST = load_file_type_config0("./config/file/video.properties")

    MIME_TYPES = load_config_as_dict("./config/file/mime-types.properties")
    # Files with no extension fall back to raw binary.
    MIME_TYPES[""] = "application/octet-stream"
def get(name, default_value=None):
    """Look up a config value: module globals first, then the dynamic
    ``_config`` dict, falling back to *default_value*.

    Note: a value explicitly stored as None is indistinguishable from a
    missing key and yields the default.
    """
    value = globals().get(name)
    if value is not None:
        return value
    value = _config.get(name)
    if value is not None:
        return value
    # The original ended with a redundant `if value is None` test plus an
    # unreachable trailing `return value`; both collapse to this.
    return default_value
def set(name, value):
    """Store a dynamic config value.

    NOTE: this intentionally-kept name shadows the builtin ``set``;
    prefer :func:`put`.  (Original docstring, translated: "conflicts with
    the set function, put() is recommended instead.")
    """
    _config[name] = value
def put(name, value):
    """Store a dynamic config value (preferred alias of :func:`set`)."""
    _config[name] = value
def get_config():
    """Return the raw dynamic config dict (mutable and shared by callers)."""
    return _config
def has_config(key, subkey = None):
    """Return True if *key* is configured and, when *subkey* is given,
    contains that subkey."""
    group_value = get(key)
    if group_value is None:
        return False
    return subkey is None or subkey in group_value
def has(key):
    """Shorthand for :func:`has_config` without a subkey."""
    return has_config(key)
def is_mute():
    """Return True while a mute deadline is set and still in the future."""
    if MUTE_END_TIME is None:
        return False
    return time.time() < MUTE_END_TIME
# 设置别名
_alias_dict = {}
def set_alias(name, value):
    """Register a command alias (used to extend commands)."""
    _alias_dict[name] = value
def get_alias(name, default_value):
    """Look up a command alias, returning *default_value* when absent."""
    return _alias_dict.get(name, default_value)
def get_global_config(key):
    """Return a config value, with the dynamic ``_config`` dict taking
    precedence over module-level globals."""
    try:
        # Single dict lookup instead of the original membership test
        # followed by a second .get() lookup.
        return _config[key]
    except KeyError:
        return globals().get(key)
def get_user_config(user_name, config_key, default_value = None):
    """Return a per-user config value.

    Resolution order: the user's stored config, then DEFAULT_USER_CONFIG,
    then the caller-supplied *default_value*.

    Bug fixed: the original reassigned ``default_value`` from
    DEFAULT_USER_CONFIG unconditionally, silently discarding the
    caller-supplied default even when DEFAULT_USER_CONFIG had no entry.
    """
    # Caller default is used only when DEFAULT_USER_CONFIG lacks the key,
    # which preserves the documented DEFAULT_USER_CONFIG fallback.
    default_value = DEFAULT_USER_CONFIG.get(config_key, default_value)
    # Before startup completes we cannot query the user database.
    if START_TIME is None:
        return default_value
    import xauth
    config = xauth.get_user_config_dict(user_name)
    if config is None:
        return default_value
    return config.get(config_key, default_value)
def get_user_config_dict(user_name):
    """Return the user's config dict, or an empty Storage when absent."""
    import xauth
    value = xauth.get_user_config_dict(user_name)
    return Storage() if value is None else value
def get_current_user_config(key):
    """Return config *key* for the currently logged-in user.

    Default values come from DEFAULT_USER_CONFIG (see get_user_config).
    """
    # The original placed the docstring after the import, where it was a
    # no-op string statement rather than the function's __doc__.
    import xauth
    return get_user_config(xauth.current_name(), key)
| gpl-3.0 |
Jelloeater/NetworkMonitor | keyring/keyring/tests/backends/test_kwallet.py | 8 | 2177 | from ..py30compat import unittest
from keyring.backends import kwallet
from ..test_backend import BackendBasicTests
def is_qt4_supported():
    """Report whether PyQt4's QtGui module can be imported."""
    try:
        __import__('PyQt4.QtGui')
        return True
    except ImportError:
        return False
@unittest.skipUnless(kwallet.Keyring.viable, "Need KWallet")
class KDEKWalletTestCase(BackendBasicTests, unittest.TestCase):
    """Runs the shared backend test suite against the KWallet keyring."""

    def init_keyring(self):
        # Hook used by BackendBasicTests to obtain the backend under test.
        return kwallet.Keyring()
class UnOpenableKWallet(object):
    """A module-like object used to test KDE wallet fall-back."""

    # Mirrors the KWallet module attribute read by open_kwallet.
    Synchronous = None

    def openWallet(self, *args):
        # Simulates the user cancelling/denying the open request.
        return None

    def NetworkWallet(self):
        return None
class FauxQtGui(object):
    """A fake module-like object used in testing the open_kwallet function."""

    class qApp:
        @staticmethod
        def instance():
            # No running application instance.
            pass

    class QApplication(object):
        def __init__(self, *args):
            pass

        def exit(self):
            pass

    class QWidget(object):
        def __init__(self, *args):
            pass

        def winId(self):
            pass
class KDEWalletCanceledTestCase(unittest.TestCase):
    """Verifies open_kwallet() degrades gracefully when the user refuses."""

    def test_user_canceled(self):
        # If the user cancels either the "enter your password to unlock the
        # keyring" dialog or clicks "deny" on the "can this application access
        # the wallet" dialog then openWallet() will return None.  The
        # open_wallet() function should handle that eventuality by returning
        # None to signify that the KWallet backend is not available.
        self.assertEqual(
            kwallet.open_kwallet(UnOpenableKWallet(), FauxQtGui()),
            None)
@unittest.skipUnless(kwallet.Keyring.viable and
                     is_qt4_supported(),
                     "Need KWallet and Qt4")
class KDEKWalletInQApplication(unittest.TestCase):
    """Checks that open_kwallet() works inside a running QApplication."""

    def test_QApplication(self):
        try:
            from PyKDE4.kdeui import KWallet
            from PyQt4.QtGui import QApplication
        except ImportError:
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt and SystemExit.
            return
        app = QApplication([])
        wallet = kwallet.open_kwallet()
        self.assertIsInstance(wallet, KWallet.Wallet)
        app.exit()
| gpl-2.0 |
pkeane/simplerepo | jinja2/_markupsafe/_native.py | 186 | 1148 | # -*- coding: utf-8 -*-
"""
markupsafe._native
~~~~~~~~~~~~~~~~~~
Native Python implementation the C module is not compiled.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from jinja2._markupsafe import Markup
def escape(s):
    """Convert the characters &, <, >, ' and " in string s to HTML-safe
    sequences.  Use this if you need to display text that might contain
    such characters in HTML.  Marks return value as markup string.
    """
    if hasattr(s, '__html__'):
        # The object already knows how to render itself safely.
        return s.__html__()
    # '&' must be escaped first so the ampersands introduced by the later
    # substitutions are not double-escaped.  The replacement texts restore
    # the proper HTML entities; in the reviewed copy they had been
    # corrupted into the bare characters themselves, making every
    # .replace() call a no-op (i.e. no escaping at all).
    return Markup(unicode(s)
        .replace('&', '&amp;')
        .replace('>', '&gt;')
        .replace('<', '&lt;')
        .replace("'", '&#39;')
        .replace('"', '&#34;')
    )
def escape_silent(s):
    """Like :func:`escape` but converts `None` into an empty
    markup string.
    """
    return Markup() if s is None else escape(s)
def soft_unicode(s):
    """Make a string unicode if it isn't already.  That way a markup
    string is not converted back to unicode.
    """
    # Python 2 API: `unicode` is the text type here.
    if not isinstance(s, unicode):
        s = unicode(s)
    return s
| mit |
miptliot/edx-platform | lms/djangoapps/student_profile/views.py | 2 | 4548 | """ Views for a student's profile information. """
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.http import Http404
from django.views.decorators.http import require_http_methods
from django_countries import countries
from badges.utils import badges_enabled
from edxmako.shortcuts import marketing_link, render_to_response
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.user_api.accounts.api import get_account_settings
from openedx.core.djangoapps.user_api.errors import UserNotAuthorized, UserNotFound
from openedx.core.djangoapps.user_api.preferences.api import get_user_preferences
from student.models import User
@login_required
@require_http_methods(['GET'])
def learner_profile(request, username):
    """Render the profile page for the specified username.

    Args:
        request (HttpRequest)
        username (str): username of user whose profile is requested.

    Returns:
        HttpResponse: 200 if the page was sent successfully
        HttpResponse: 302 if not logged in (redirect to login page)
        HttpResponse: 405 if using an unsupported HTTP method

    Raises:
        Http404: 404 if the specified user is not authorized or does not exist

    Example usage:
        GET /account/profile
    """
    try:
        return render_to_response(
            'student_profile/learner_profile.html',
            learner_profile_context(request, username, request.user.is_staff)
        )
    except (UserNotAuthorized, UserNotFound, ObjectDoesNotExist):
        # Authorization failures and missing users are reported uniformly
        # as 404 so the page does not reveal which usernames exist.
        raise Http404
def learner_profile_context(request, profile_username, user_is_staff):
    """Context for the learner profile page.

    Args:
        request (HttpRequest): the incoming request; supplies the logged-in
            user and is passed through to the accounts API.
        profile_username (str): username of user whose profile is requested.
        user_is_staff (bool): logged-in user has staff access.

    Returns:
        dict

    Raises:
        ObjectDoesNotExist: the specified profile_username does not exist.
    """
    profile_user = User.objects.get(username=profile_username)
    logged_in_user = request.user
    own_profile = (logged_in_user.username == profile_username)
    account_settings_data = get_account_settings(request, [profile_username])[0]
    preferences_data = get_user_preferences(profile_user, profile_username)
    context = {
        'data': {
            'profile_user_id': profile_user.id,
            'default_public_account_fields': settings.ACCOUNT_VISIBILITY_CONFIGURATION['public_fields'],
            'default_visibility': settings.ACCOUNT_VISIBILITY_CONFIGURATION['default_visibility'],
            'accounts_api_url': reverse("accounts_api", kwargs={'username': profile_username}),
            'preferences_api_url': reverse('preferences_api', kwargs={'username': profile_username}),
            'preferences_data': preferences_data,
            'account_settings_data': account_settings_data,
            'profile_image_upload_url': reverse('profile_image_upload', kwargs={'username': profile_username}),
            'profile_image_remove_url': reverse('profile_image_remove', kwargs={'username': profile_username}),
            'profile_image_max_bytes': settings.PROFILE_IMAGE_MAX_BYTES,
            'profile_image_min_bytes': settings.PROFILE_IMAGE_MIN_BYTES,
            'account_settings_page_url': reverse('account_settings'),
            # Preferences may be viewed by the owner or by staff.
            'has_preferences_access': (logged_in_user.username == profile_username or user_is_staff),
            'own_profile': own_profile,
            'country_options': list(countries),
            'find_courses_url': marketing_link('COURSES'),
            'language_options': settings.ALL_LANGUAGES,
            'badges_logo': staticfiles_storage.url('certificates/images/backpack-logo.png'),
            'badges_icon': staticfiles_storage.url('certificates/images/ico-mozillaopenbadges.png'),
            'backpack_ui_img': staticfiles_storage.url('certificates/images/backpack-ui.png'),
            'platform_name': configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME),
        },
        'disable_courseware_js': True,
    }
    if badges_enabled():
        context['data']['badges_api_url'] = reverse("badges_api:user_assertions", kwargs={'username': profile_username})
    return context
| agpl-3.0 |
saurabhjn76/sympy | sympy/core/core.py | 87 | 2874 | """ The core's core. """
from __future__ import print_function, division
# used for canonical ordering of symbolic sequences
# via __cmp__ method:
# FIXME this is *so* irrelevant and outdated!
ordering_of_classes = [
# singleton numbers
'Zero', 'One', 'Half', 'Infinity', 'NaN', 'NegativeOne', 'NegativeInfinity',
# numbers
'Integer', 'Rational', 'Float',
# singleton symbols
'Exp1', 'Pi', 'ImaginaryUnit',
# symbols
'Symbol', 'Wild', 'Temporary',
# arithmetic operations
'Pow', 'Mul', 'Add',
# function values
'Derivative', 'Integral',
# defined singleton functions
'Abs', 'Sign', 'Sqrt',
'Floor', 'Ceiling',
'Re', 'Im', 'Arg',
'Conjugate',
'Exp', 'Log',
'Sin', 'Cos', 'Tan', 'Cot', 'ASin', 'ACos', 'ATan', 'ACot',
'Sinh', 'Cosh', 'Tanh', 'Coth', 'ASinh', 'ACosh', 'ATanh', 'ACoth',
'RisingFactorial', 'FallingFactorial',
'factorial', 'binomial',
'Gamma', 'LowerGamma', 'UpperGamma', 'PolyGamma',
'Erf',
# special polynomials
'Chebyshev', 'Chebyshev2',
# undefined functions
'Function', 'WildFunction',
# anonymous functions
'Lambda',
# Landau O symbol
'Order',
# relational operations
'Equality', 'Unequality', 'StrictGreaterThan', 'StrictLessThan',
'GreaterThan', 'LessThan',
]
class Registry(object):
    """
    Base class for registry objects.

    Registries map a name to an object using attribute notation.  Registry
    classes behave singletonically: all their instances share the same state,
    which is stored in the class object.

    All subclasses should set `__slots__ = []`.
    """
    __slots__ = []

    def __setattr__(self, name, obj):
        # Attribute writes land on the class, so every instance sees them.
        setattr(self.__class__, name, obj)

    def __delattr__(self, name):
        delattr(self.__class__, name)
#A set containing all sympy class objects
all_classes = set()
class BasicMeta(type):
    """Metaclass for Basic: registers every class in ``all_classes`` and
    defines a canonical ordering based on ``ordering_of_classes``."""

    def __init__(cls, *args, **kws):
        all_classes.add(cls)

    def __cmp__(cls, other):
        # If the other object is not a Basic subclass, then we are not equal
        # to it.
        if not isinstance(other, BasicMeta):
            return -1
        n1 = cls.__name__
        n2 = other.__name__
        if n1 == n2:
            return 0

        UNKNOWN = len(ordering_of_classes) + 1
        try:
            i1 = ordering_of_classes.index(n1)
        except ValueError:
            i1 = UNKNOWN
        try:
            i2 = ordering_of_classes.index(n2)
        except ValueError:
            i2 = UNKNOWN
        if i1 == UNKNOWN and i2 == UNKNOWN:
            # Neither class is in the table: fall back to name comparison.
            return (n1 > n2) - (n1 < n2)
        return (i1 > i2) - (i1 < i2)

    def __lt__(cls, other):
        # Simplified from `if cmp == -1: return True / return False`.
        return cls.__cmp__(other) == -1

    def __gt__(cls, other):
        return cls.__cmp__(other) == 1
| bsd-3-clause |
sonali0901/zulip | zerver/migrations/0029_realm_subdomain.py | 40 | 1161 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
def set_subdomain_of_default_realm(apps, schema_editor):
    # type: (StateApps, DatabaseSchemaEditor) -> None
    """Backfill: give the development default realm (domain "zulip.com")
    the subdomain "zulip".  A no-op outside DEVELOPMENT or when the
    default realm does not exist."""
    if settings.DEVELOPMENT:
        Realm = apps.get_model('zerver', 'Realm')
        try:
            default_realm = Realm.objects.get(domain="zulip.com")
        except ObjectDoesNotExist:
            default_realm = None

        if default_realm is not None:
            default_realm.subdomain = "zulip"
            default_realm.save()
class Migration(migrations.Migration):
    # Adds the nullable, unique Realm.subdomain column and backfills the
    # development default realm via set_subdomain_of_default_realm.

    dependencies = [
        ('zerver', '0028_userprofile_tos_version'),
    ]

    operations = [
        migrations.AddField(
            model_name='realm',
            name='subdomain',
            field=models.CharField(max_length=40, unique=True, null=True),
        ),
        migrations.RunPython(set_subdomain_of_default_realm)
    ]
| apache-2.0 |
derekjchow/models | research/brain_coder/common/schedules_test.py | 5 | 5186 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""Tests for common.schedules."""
from math import exp
from math import sqrt
import numpy as np
from six.moves import xrange
import tensorflow as tf
from common import config_lib # brain coder
from common import schedules # brain coder
class SchedulesTest(tf.test.TestCase):
  """Unit tests for schedules.make_schedule and each schedule type."""

  def ScheduleTestHelper(self, config, schedule_subtype, io_values):
    """Run common checks for schedules.

    Args:
      config: Config object which is passed into schedules.make_schedule.
      schedule_subtype: The expected schedule type to be instantiated.
      io_values: List of (input, output) pairs. Must be in ascending input
          order. No duplicate inputs.
    """
    # Check that make_schedule makes the correct type.
    f = schedules.make_schedule(config)
    self.assertTrue(isinstance(f, schedule_subtype))

    # Check that multiple instances returned from make_schedule behave the same.
    fns = [schedules.make_schedule(config) for _ in xrange(3)]

    # Check that all the inputs map to the right outputs.
    for i, o in io_values:
      for f in fns:
        f_out = f(i)
        self.assertTrue(
            np.isclose(o, f_out),
            'Wrong value at input %d. Expected %s, got %s' % (i, o, f_out))

    # Check that a subset of the io_values are still correct.
    f = schedules.make_schedule(config)
    # Quadratically-spaced subsequence, always ending at the last pair.
    subseq = [io_values[i**2] for i in xrange(int(sqrt(len(io_values))))]
    if subseq[-1] != io_values[-1]:
      subseq.append(io_values[-1])
    for i, o in subseq:
      f_out = f(i)
      self.assertTrue(
          np.isclose(o, f_out),
          'Wrong value at input %d. Expected %s, got %s' % (i, o, f_out))

    # Check duplicate calls.
    f = schedules.make_schedule(config)
    for i, o in io_values:
      for _ in xrange(3):
        f_out = f(i)
        self.assertTrue(
            np.isclose(o, f_out),
            'Duplicate calls at input %d are not equal. Expected %s, got %s'
            % (i, o, f_out))

  def testConstSchedule(self):
    self.ScheduleTestHelper(
        config_lib.Config(fn='const', const=5),
        schedules.ConstSchedule,
        [(0, 5), (1, 5), (10, 5), (20, 5), (100, 5), (1000000, 5)])

  def testLinearDecaySchedule(self):
    self.ScheduleTestHelper(
        config_lib.Config(fn='linear_decay', initial=2, final=0, start_time=10,
                          end_time=20),
        schedules.LinearDecaySchedule,
        [(0, 2), (1, 2), (10, 2), (11, 1.8), (15, 1), (19, 0.2), (20, 0),
         (100000, 0)])

    # Test step function.
    self.ScheduleTestHelper(
        config_lib.Config(fn='linear_decay', initial=2, final=0, start_time=10,
                          end_time=10),
        schedules.LinearDecaySchedule,
        [(0, 2), (1, 2), (10, 2), (11, 0), (15, 0)])

  def testExponentialDecaySchedule(self):
    self.ScheduleTestHelper(
        config_lib.Config(fn='exp_decay', initial=exp(-1), final=exp(-6),
                          start_time=10, end_time=20),
        schedules.ExponentialDecaySchedule,
        [(0, exp(-1)), (1, exp(-1)), (10, exp(-1)), (11, exp(-1/2. - 1)),
         (15, exp(-5/2. - 1)), (19, exp(-9/2. - 1)), (20, exp(-6)),
         (100000, exp(-6))])

    # Test step function.
    self.ScheduleTestHelper(
        config_lib.Config(fn='exp_decay', initial=exp(-1), final=exp(-6),
                          start_time=10, end_time=10),
        schedules.ExponentialDecaySchedule,
        [(0, exp(-1)), (1, exp(-1)), (10, exp(-1)), (11, exp(-6)),
         (15, exp(-6))])

  def testSmootherstepDecaySchedule(self):
    self.ScheduleTestHelper(
        config_lib.Config(fn='smooth_decay', initial=2, final=0, start_time=10,
                          end_time=20),
        schedules.SmootherstepDecaySchedule,
        [(0, 2), (1, 2), (10, 2), (11, 1.98288), (15, 1), (19, 0.01712),
         (20, 0), (100000, 0)])

    # Test step function.
    self.ScheduleTestHelper(
        config_lib.Config(fn='smooth_decay', initial=2, final=0, start_time=10,
                          end_time=10),
        schedules.SmootherstepDecaySchedule,
        [(0, 2), (1, 2), (10, 2), (11, 0), (15, 0)])

  def testHardOscillatorSchedule(self):
    self.ScheduleTestHelper(
        config_lib.Config(fn='hard_osc', high=2, low=0, start_time=100,
                          period=10, transition_fraction=0.5),
        schedules.HardOscillatorSchedule,
        [(0, 2), (1, 2), (10, 2), (100, 2), (101, 1.2), (102, 0.4), (103, 0),
         (104, 0), (105, 0), (106, 0.8), (107, 1.6), (108, 2), (109, 2),
         (110, 2), (111, 1.2), (112, 0.4), (115, 0), (116, 0.8), (119, 2),
         (120, 2), (100001, 1.2), (100002, 0.4), (100005, 0), (100006, 0.8),
         (100010, 2)])

    # Test instantaneous step.
    self.ScheduleTestHelper(
        config_lib.Config(fn='hard_osc', high=2, low=0, start_time=100,
                          period=10, transition_fraction=0),
        schedules.HardOscillatorSchedule,
        [(0, 2), (1, 2), (10, 2), (99, 2), (100, 0), (104, 0), (105, 2),
         (106, 2), (109, 2), (110, 0)])
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
orgito/ansible | lib/ansible/modules/cloud/google/gce.py | 9 | 27554 | #!/usr/bin/python
# Copyright 2013 Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce
version_added: "1.4"
short_description: create or terminate GCE instances
description:
- Creates or terminates Google Compute Engine (GCE) instances. See
U(https://cloud.google.com/compute) for an overview.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
image:
description:
- image string to use for the instance (default will follow latest
stable debian image)
default: "debian-8"
image_family:
description:
- image family from which to select the image. The most recent
non-deprecated image in the family will be used.
version_added: "2.4"
external_projects:
description:
- A list of other projects (accessible with the provisioning credentials)
to be searched for the image.
version_added: "2.4"
instance_names:
description:
- a comma-separated list of instance names to create or destroy
machine_type:
description:
- machine type to use for the instance, use 'n1-standard-1' by default
default: "n1-standard-1"
metadata:
description:
- a hash/dictionary of custom data for the instance;
'{"key":"value", ...}'
service_account_email:
version_added: "1.5.1"
description:
- service account email
service_account_permissions:
version_added: "2.0"
description:
- service account permissions (see
U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
--scopes section for detailed information)
choices: [
"bigquery", "cloud-platform", "compute-ro", "compute-rw",
"useraccounts-ro", "useraccounts-rw", "datastore", "logging-write",
"monitoring", "sql-admin", "storage-full", "storage-ro",
"storage-rw", "taskqueue", "userinfo-email"
]
pem_file:
version_added: "1.5.1"
description:
- path to the pem file associated with the service account email
This option is deprecated. Use 'credentials_file'.
credentials_file:
version_added: "2.1.0"
description:
- path to the JSON file associated with the service account email
project_id:
version_added: "1.5.1"
description:
- your GCE project ID
name:
description:
- either a name of a single instance or when used with 'num_instances',
the base name of a cluster of nodes
aliases: ['base_name']
num_instances:
description:
- can be used with 'name', specifies
the number of nodes to provision using 'name'
as a base name
version_added: "2.3"
network:
description:
- name of the network, 'default' will be used if not specified
default: "default"
subnetwork:
description:
- name of the subnetwork in which the instance should be created
version_added: "2.2"
persistent_boot_disk:
description:
- if set, create the instance with a persistent boot disk
type: bool
default: 'no'
disks:
description:
- a list of persistent disks to attach to the instance; a string value
gives the name of the disk; alternatively, a dictionary value can
define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
will be the boot disk (which must be READ_WRITE).
version_added: "1.7"
state:
description:
- desired state of the resource
default: "present"
choices: ["active", "present", "absent", "deleted", "started", "stopped", "terminated"]
tags:
description:
- a comma-separated list of tags to associate with the instance
zone:
description:
- the GCE zone to use. The list of available zones is at U(https://cloud.google.com/compute/docs/regions-zones/regions-zones#available).
required: true
default: "us-central1-a"
ip_forward:
version_added: "1.9"
description:
- set to C(yes) if the instance can forward ip packets (useful for
gateways)
type: bool
default: 'no'
external_ip:
version_added: "1.9"
description:
- type of external ip, ephemeral by default; alternatively, a fixed gce ip or ip name can be given. Specify 'none' if no external ip is desired.
default: "ephemeral"
disk_auto_delete:
version_added: "1.9"
description:
- if set boot disk will be removed after instance destruction
type: bool
default: 'yes'
preemptible:
version_added: "2.1"
description:
- if set to C(yes), instances will be preemptible and time-limited.
(requires libcloud >= 0.20.0)
type: bool
default: 'no'
disk_size:
description:
- The size of the boot disk created for this instance (in GB)
default: 10
version_added: "2.3"
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
>= 0.20.0 if using preemptible option"
notes:
- Either I(instance_names) or I(name) is required.
- JSON credentials strongly preferred.
author:
- Eric Johnson (@erjohnso) <erjohnso@google.com>
- Tom Melendez (@supertom) <supertom@google.com>
'''
EXAMPLES = '''
# Basic provisioning example. Create a single Debian 8 instance in the
# us-central1-a Zone of the n1-standard-1 machine type.
# Create multiple instances by specifying multiple names, separated by
# commas in the instance_names field
# (e.g. my-test-instance1,my-test-instance2)
- gce:
instance_names: my-test-instance1
zone: us-central1-a
machine_type: n1-standard-1
image: debian-8
state: present
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
disk_size: 32
# Create a single instance of an image from the "my-base-image" image family
# in the us-central1-a Zone of the n1-standard-1 machine type.
# This image family is in the "my-other-project" GCP project.
- gce:
instance_names: my-test-instance1
zone: us-central1-a
machine_type: n1-standard-1
image_family: my-base-image
external_projects:
- my-other-project
state: present
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
disk_size: 32
# Create a single Debian 8 instance in the us-central1-a Zone
# Use existing disks, custom network/subnetwork, set service account permissions
# add tags and metadata.
- gce:
instance_names: my-test-instance
zone: us-central1-a
machine_type: n1-standard-1
state: present
metadata: '{"db":"postgres", "group":"qa", "id":500}'
tags:
- http-server
- my-other-tag
disks:
- name: disk-2
mode: READ_WRITE
- name: disk-3
mode: READ_ONLY
disk_auto_delete: false
network: foobar-network
subnetwork: foobar-subnetwork-1
preemptible: true
ip_forward: true
service_account_permissions:
- storage-full
- taskqueue
- bigquery
- https://www.googleapis.com/auth/ndev.clouddns.readwrite
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
---
# Example Playbook
- name: Compute Engine Instance Examples
hosts: localhost
vars:
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
tasks:
- name: create multiple instances
# Basic provisioning example. Create multiple Debian 8 instances in the
# us-central1-a Zone of n1-standard-1 machine type.
gce:
instance_names: test1,test2,test3
zone: us-central1-a
machine_type: n1-standard-1
image: debian-8
state: present
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
metadata : '{ "startup-script" : "apt-get update" }'
register: gce
- name: Save host data
add_host:
hostname: "{{ item.public_ip }}"
groupname: gce_instances_ips
with_items: "{{ gce.instance_data }}"
- name: Wait for SSH for instances
wait_for:
delay: 1
host: "{{ item.public_ip }}"
port: 22
state: started
timeout: 30
with_items: "{{ gce.instance_data }}"
- name: Configure Hosts
hosts: gce_instances_ips
become: yes
become_method: sudo
roles:
- my-role-one
- my-role-two
tags:
- config
- name: delete test-instances
# Basic termination of instance.
gce:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
instance_names: "{{ gce.instance_names }}"
zone: us-central1-a
state: absent
tags:
- delete
'''
import socket
import logging
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
try:
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
from libcloud.compute.drivers.gce import GCEAddress
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gce import gce_connect, unexpected_error_msg
from ansible.module_utils.gcp import get_valid_location
from ansible.module_utils.six.moves import reduce
def get_instance_info(inst):
    """Retrieves instance information from an instance object and returns it
    as a dictionary.
    """
    extra = inst.extra
    # Flatten the GCE metadata item list into a plain key/value mapping.
    metadata = {}
    if 'metadata' in extra and 'items' in extra['metadata']:
        metadata = dict((entry['key'], entry['value'])
                        for entry in extra['metadata']['items'])
    try:
        netname = extra['networkInterfaces'][0]['network'].split('/')[-1]
    except Exception:
        netname = None
    try:
        subnetname = extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
    except Exception:
        subnetname = None
    # Report disk names ordered by their attachment index.
    if 'disks' in extra:
        ordered_disks = sorted(extra['disks'], key=lambda d: d['index'])
        disk_names = [d['source'].split('/')[-1] for d in ordered_disks]
    else:
        disk_names = []
    public_ip = inst.public_ips[0] if inst.public_ips else None
    # Image URL reduces to its last path component; an empty result is
    # reported as None, matching the original truthiness-based expression.
    image_name = None
    if inst.image is not None:
        image_name = inst.image.split('/')[-1] or None
    return ({
        'image': image_name,
        'disks': disk_names,
        'machine_type': inst.size,
        'metadata': metadata,
        'name': inst.name,
        'network': netname,
        'subnetwork': subnetname,
        'private_ip': inst.private_ips[0],
        'public_ip': public_ip,
        'status': extra.get('status') or None,
        'tags': extra.get('tags') or [],
        'zone': (extra['zone'].name or None) if 'zone' in extra else None,
    })
def create_instances(module, gce, instance_names, number, lc_zone):
    """Creates new instances. Attributes other than instance_names are picked
    up from 'module'
    module : AnsibleModule object
    gce: authenticated GCE libcloud driver
    instance_names: python list of instance names to create
    number: number of instances to create
    lc_zone: GCEZone object
    Returns:
    A list of dictionaries with instance information
    about the instances that were launched.
    """
    # Pull all per-instance attributes from the module parameters.
    image = module.params.get('image')
    image_family = module.params.get('image_family')
    external_projects = module.params.get('external_projects')
    machine_type = module.params.get('machine_type')
    metadata = module.params.get('metadata')
    network = module.params.get('network')
    subnetwork = module.params.get('subnetwork')
    persistent_boot_disk = module.params.get('persistent_boot_disk')
    disks = module.params.get('disks')
    tags = module.params.get('tags')
    ip_forward = module.params.get('ip_forward')
    external_ip = module.params.get('external_ip')
    disk_auto_delete = module.params.get('disk_auto_delete')
    preemptible = module.params.get('preemptible')
    disk_size = module.params.get('disk_size')
    service_account_permissions = module.params.get('service_account_permissions')
    # Resolve the requested external IP: 'none' -> no external address,
    # 'ephemeral' -> let GCE assign one, anything else -> a static address
    # given either literally (dotted quad) or by its reserved-address name.
    if external_ip == "none":
        instance_external_ip = None
    elif external_ip != "ephemeral":
        instance_external_ip = external_ip
        try:
            # check if instance_external_ip is an ip or a name
            try:
                socket.inet_aton(instance_external_ip)
                instance_external_ip = GCEAddress(id='unknown', name='unknown', address=instance_external_ip, region='unknown', driver=gce)
            except socket.error:
                instance_external_ip = gce.ex_get_address(instance_external_ip)
        except GoogleBaseError as e:
            module.fail_json(msg='Unexpected error attempting to get a static ip %s, error: %s' % (external_ip, e.value))
    else:
        instance_external_ip = external_ip
    new_instances = []
    changed = False
    # Resolve each requested disk to a libcloud volume object; disk entries
    # may be plain names or dicts carrying an explicit attachment mode.
    lc_disks = []
    disk_modes = []
    for i, disk in enumerate(disks or []):
        if isinstance(disk, dict):
            lc_disks.append(gce.ex_get_volume(disk['name'], lc_zone))
            disk_modes.append(disk['mode'])
        else:
            lc_disks.append(gce.ex_get_volume(disk, lc_zone))
            # boot disk is implicitly READ_WRITE
            disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
    lc_network = gce.ex_get_network(network)
    lc_machine_type = gce.ex_get_size(machine_type, lc_zone)
    # Try to convert the user's metadata value into the format expected
    # by GCE. First try to ensure user has proper quoting of a
    # dictionary-like syntax using 'literal_eval', then convert the python
    # dict into a python list of 'key' / 'value' dicts. Should end up
    # with:
    # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
    if metadata:
        if isinstance(metadata, dict):
            md = metadata
        else:
            try:
                md = literal_eval(str(metadata))
                if not isinstance(md, dict):
                    raise ValueError('metadata must be a dict')
            except ValueError as e:
                module.fail_json(msg='bad metadata: %s' % str(e))
            except SyntaxError as e:
                module.fail_json(msg='bad metadata syntax')
        # libcloud < 0.15 expects the raw {'items': [...]} wire format.
        if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
            items = []
            for k, v in md.items():
                items.append({"key": k, "value": v})
            metadata = {'items': items}
        else:
            metadata = md
    # Image lookup is expensive, so defer it until it is actually needed.
    lc_image = LazyDiskImage(module, gce, image, lc_disks, family=image_family, projects=external_projects)
    ex_sa_perms = []
    bad_perms = []
    # Validate service-account scopes: anything not in the scope alias map
    # must be spelled out as a full googleapis auth URL.
    if service_account_permissions:
        for perm in service_account_permissions:
            if perm not in gce.SA_SCOPES_MAP and not perm.startswith('https://www.googleapis.com/auth'):
                bad_perms.append(perm)
        if len(bad_perms) > 0:
            module.fail_json(msg='bad permissions: %s' % str(bad_perms))
        ex_sa_perms.append({'email': "default"})
        ex_sa_perms[0]['scopes'] = service_account_permissions
    # These variables all have default values but check just in case
    if not lc_network or not lc_machine_type or not lc_zone:
        module.fail_json(msg='Missing required create instance variable',
                         changed=False)
    gce_args = dict(
        location=lc_zone,
        ex_network=network, ex_tags=tags, ex_metadata=metadata,
        ex_can_ip_forward=ip_forward,
        external_ip=instance_external_ip, ex_disk_auto_delete=disk_auto_delete,
        ex_service_accounts=ex_sa_perms
    )
    if preemptible is not None:
        gce_args['ex_preemptible'] = preemptible
    if subnetwork is not None:
        gce_args['ex_subnetwork'] = subnetwork
    if isinstance(instance_names, str) and not number:
        instance_names = [instance_names]
    if isinstance(instance_names, str) and number:
        # Bulk path: GCE derives the numbered node names from the base name.
        instance_responses = gce.ex_create_multiple_nodes(instance_names, lc_machine_type,
                                                          lc_image(), number, **gce_args)
        for resp in instance_responses:
            n = resp
            if isinstance(resp, libcloud.compute.drivers.gce.GCEFailedNode):
                # A failed node may still exist (e.g. created earlier); try
                # to fetch it so it can be reported alongside the others.
                try:
                    n = gce.ex_get_node(n.name, lc_zone)
                except ResourceNotFoundError:
                    pass
            else:
                # Assure that at least one node has been created to set changed=True
                changed = True
            new_instances.append(n)
    else:
        # Per-name path: create (or adopt, if it already exists) each node.
        for instance in instance_names:
            pd = None
            if lc_disks:
                pd = lc_disks[0]
            elif persistent_boot_disk:
                try:
                    pd = gce.ex_get_volume("%s" % instance, lc_zone)
                except ResourceNotFoundError:
                    pd = gce.create_volume(disk_size, "%s" % instance, image=lc_image())
            gce_args['ex_boot_disk'] = pd
            inst = None
            try:
                inst = gce.ex_get_node(instance, lc_zone)
            except ResourceNotFoundError:
                inst = gce.create_node(
                    instance, lc_machine_type, lc_image(), **gce_args
                )
                changed = True
            except GoogleBaseError as e:
                module.fail_json(msg='Unexpected error attempting to create ' +
                                 'instance %s, error: %s' % (instance, e.value))
            if inst:
                new_instances.append(inst)
    # Attach any extra disks, verifying that disks already attached at the
    # same index match what was requested (source URL and mode).
    for inst in new_instances:
        for i, lc_disk in enumerate(lc_disks):
            # Check whether the disk is already attached
            if (len(inst.extra['disks']) > i):
                attached_disk = inst.extra['disks'][i]
                if attached_disk['source'] != lc_disk.extra['selfLink']:
                    module.fail_json(
                        msg=("Disk at index %d does not match: requested=%s found=%s" % (
                            i, lc_disk.extra['selfLink'], attached_disk['source'])))
                elif attached_disk['mode'] != disk_modes[i]:
                    module.fail_json(
                        msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
                            i, disk_modes[i], attached_disk['mode'])))
                else:
                    continue
            gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
            # Work around libcloud bug: attached volumes don't get added
            # to the instance metadata. get_instance_info() only cares about
            # source and index.
            if len(inst.extra['disks']) != i + 1:
                inst.extra['disks'].append(
                    {'source': lc_disk.extra['selfLink'], 'index': i})
    instance_names = []
    instance_json_data = []
    for inst in new_instances:
        d = get_instance_info(inst)
        instance_names.append(d['name'])
        instance_json_data.append(d)
    return (changed, instance_json_data, instance_names)
def change_instance_state(module, gce, instance_names, number, zone, state):
    """Changes the state of a list of instances. For example,
    change from started to stopped, or started to absent.
    module: Ansible module object
    gce: authenticated GCE connection object
    instance_names: a list of instance names to terminate
    zone: GCEZone object where the instances reside prior to termination
    state: 'state' parameter passed into module as argument
    Returns a dictionary of instance names that were changed.
    """
    changed = False
    nodes = []
    state_instance_names = []
    # Expand a base name + count into numbered names ('base-000', ...);
    # otherwise use the supplied name(s) directly.
    if isinstance(instance_names, str):
        if number:
            node_names = ['%s-%03d' % (instance_names, idx) for idx in range(number)]
        else:
            node_names = [instance_names]
    else:
        node_names = instance_names
    for node_name in node_names:
        try:
            node = gce.ex_get_node(node_name, zone)
        except ResourceNotFoundError:
            # Already gone -- still report the name in the result list.
            state_instance_names.append(node_name)
        except Exception as e:
            module.fail_json(msg=unexpected_error_msg(e), changed=False)
        else:
            nodes.append(node)
            state_instance_names.append(node_name)
    if state in ['absent', 'deleted'] and number:
        # Bulk destroy; fold the per-node results into a single flag.
        changed_nodes = gce.ex_destroy_multiple_nodes(nodes) or [False]
        changed = reduce(lambda x, y: x or y, changed_nodes)
    else:
        for node in nodes:
            if state in ['absent', 'deleted']:
                gce.destroy_node(node)
                changed = True
            elif state == 'started' and node.state == libcloud.compute.types.NodeState.STOPPED:
                gce.ex_start_node(node)
                changed = True
            elif state in ['stopped', 'terminated'] and node.state == libcloud.compute.types.NodeState.RUNNING:
                gce.ex_stop_node(node)
                changed = True
    return (changed, state_instance_names)
def main():
    """Entry point: parse module arguments and create, delete, start or
    stop GCE instances accordingly."""
    module = AnsibleModule(
        argument_spec=dict(
            image=dict(default='debian-8'),
            image_family=dict(),
            external_projects=dict(type='list'),
            instance_names=dict(),
            machine_type=dict(default='n1-standard-1'),
            metadata=dict(),
            name=dict(aliases=['base_name']),
            num_instances=dict(type='int'),
            network=dict(default='default'),
            subnetwork=dict(),
            persistent_boot_disk=dict(type='bool', default=False),
            disks=dict(type='list'),
            state=dict(choices=['active', 'present', 'absent', 'deleted',
                                'started', 'stopped', 'terminated'],
                       default='present'),
            tags=dict(type='list'),
            zone=dict(default='us-central1-a'),
            service_account_email=dict(),
            service_account_permissions=dict(type='list'),
            pem_file=dict(type='path'),
            credentials_file=dict(type='path'),
            project_id=dict(),
            ip_forward=dict(type='bool', default=False),
            external_ip=dict(default='ephemeral'),
            disk_auto_delete=dict(type='bool', default=True),
            disk_size=dict(type='int', default=10),
            preemptible=dict(type='bool', default=None),
        ),
        mutually_exclusive=[('instance_names', 'name')]
    )
    if not HAS_PYTHON26:
        module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
    if not HAS_LIBCLOUD:
        module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
    gce = gce_connect(module)
    image = module.params.get('image')
    image_family = module.params.get('image_family')
    external_projects = module.params.get('external_projects')
    instance_names = module.params.get('instance_names')
    name = module.params.get('name')
    number = module.params.get('num_instances')
    subnetwork = module.params.get('subnetwork')
    state = module.params.get('state')
    zone = module.params.get('zone')
    preemptible = module.params.get('preemptible')
    changed = False
    # Normalize instance_names (list or comma-separated string) into
    # 'inames'; a bare 'name' (base name, possibly with num_instances)
    # takes precedence and is passed through as a plain string.
    inames = None
    if isinstance(instance_names, list):
        inames = instance_names
    elif isinstance(instance_names, str):
        inames = instance_names.split(',')
    if name:
        inames = name
    if not inames:
        module.fail_json(msg='Must specify a "name" or "instance_names"',
                         changed=False)
    if not zone:
        module.fail_json(msg='Must specify a "zone"', changed=False)
    lc_zone = get_valid_location(module, gce, zone)
    # Feature gates on the installed libcloud version.
    if preemptible is not None and hasattr(libcloud, '__version__') and libcloud.__version__ < '0.20':
        module.fail_json(msg="Apache Libcloud 0.20.0+ is required to use 'preemptible' option",
                         changed=False)
    if subnetwork is not None and not hasattr(gce, 'ex_get_subnetwork'):
        module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'subnetwork' option",
                         changed=False)
    json_output = {'zone': zone}
    if state in ['absent', 'deleted', 'started', 'stopped', 'terminated']:
        json_output['state'] = state
        (changed, state_instance_names) = change_instance_state(
            module, gce, inames, number, lc_zone, state)
        # based on what user specified, return the same variable, although
        # value could be different if an instance could not be destroyed
        # NOTE: the condition parses as instance_names or (name and number)
        # -- 'and' binds tighter than 'or' -- which matches that intent.
        if instance_names or name and number:
            json_output['instance_names'] = state_instance_names
        elif name:
            json_output['name'] = name
    elif state in ['active', 'present']:
        json_output['state'] = 'present'
        (changed, instance_data, instance_name_list) = create_instances(
            module, gce, inames, number, lc_zone)
        json_output['instance_data'] = instance_data
        if instance_names:
            json_output['instance_names'] = instance_name_list
        elif name:
            json_output['name'] = name
    json_output['changed'] = changed
    module.exit_json(**json_output)
class LazyDiskImage:
    """
    Object for lazy instantiation of disk image
    gce.ex_get_image is a very expensive call, so we want to avoid calling it as much as possible.
    """

    def __init__(self, module, gce, name, has_pd, family=None, projects=None):
        self.module = module
        self.gce = gce
        self.name = name
        self.family = family
        self.projects = projects
        self.has_pd = has_pd
        # Cache slots: populated on the first __call__ only.
        self.image = None
        self.was_called = False

    def __call__(self):
        # Serve the cached value after the first lookup.
        if self.was_called:
            return self.image
        self.was_called = True
        if not self.has_pd:
            # No persistent boot disk supplied, so an image is mandatory.
            if self.family:
                self.image = self.gce.ex_get_image_from_family(self.family, ex_project_list=self.projects)
            else:
                self.image = self.gce.ex_get_image(self.name, ex_project_list=self.projects)
            if not self.image:
                self.module.fail_json(msg='image or disks missing for create instance', changed=False)
        return self.image
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
sv-dev1/odoo | addons/lunch/wizard/lunch_order.py | 440 | 1299 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class lunch_order_order(osv.TransientModel):
    """Transient wizard used to confirm ordering a meal."""
    _name = 'lunch.order.order'
    _description = 'Wizard to order a meal'

    def order(self, cr, uid, ids, context=None):
        """Delegate the actual ordering to the lunch.order.line model."""
        order_lines = self.pool.get('lunch.order.line')
        return order_lines.order(cr, uid, ids, context=context)
| agpl-3.0 |
Jionglun/-w16b_test | static/Brython3.1.1-20150328-091302/Lib/posixpath.py | 722 | 14212 | """Common operations on Posix pathnames.
Instead of importing this module directly, import os and refer to
this module as os.path. The "os.path" name is an alias for this
module on Posix systems; on other systems (e.g. Mac, Windows),
os.path provides the same operations in a manner specific to that
platform, and is an alias to another module (e.g. macpath, ntpath).
Some of this can actually be useful on non-Posix systems too, e.g.
for manipulation of the pathname component of URLs.
"""
import os
import sys
import stat
import genericpath
from genericpath import *
# Public names re-exported via "from posixpath import *"; kept in sync
# with the other os.path implementations (e.g. ntpath, macpath).
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
           "basename","dirname","commonprefix","getsize","getmtime",
           "getatime","getctime","islink","exists","lexists","isdir","isfile",
           "ismount", "expanduser","expandvars","normpath","abspath",
           "samefile","sameopenfile","samestat",
           "curdir","pardir","sep","pathsep","defpath","altsep","extsep",
           "devnull","realpath","supports_unicode_filenames","relpath"]
# Strings representing various path-related bits and pieces.
# These are primarily for export; internally, they are hardcoded.
curdir = '.'                  # current-directory marker
pardir = '..'                 # parent-directory marker
extsep = '.'                  # extension separator
sep = '/'                     # POSIX pathname separator
pathsep = ':'                 # separator for $PATH-style lists
defpath = ':/bin:/usr/bin'    # default search path when $PATH is unset
altsep = None                 # POSIX has no alternative separator
devnull = '/dev/null'
def _get_sep(path):
if isinstance(path, bytes):
return b'/'
else:
return '/'
# Normalize the case of a pathname.  Trivial in Posix, string.lower on Mac.
# On MS-DOS this may also turn slashes into backslashes; however, other
# normalizations (such as optimizing '../' away) are not allowed
# (another function should be defined to do that).
def normcase(s):
    """Normalize case of pathname. Has no effect under Posix"""
    # TODO: on Mac OS X, this should really return s.lower().
    if isinstance(s, (bytes, str)):
        return s
    raise TypeError("normcase() argument must be str or bytes, "
                    "not '{}'".format(s.__class__.__name__))
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
def isabs(s):
    """Test whether a path is absolute"""
    # Separator lookup is inlined so the check stays a single expression.
    prefix = b'/' if isinstance(s, bytes) else '/'
    return s.startswith(prefix)
# Join pathnames.
# An absolute component discards everything before it; a separator is
# inserted between components only when one is actually needed.
def join(a, *p):
    """Join two or more pathname components, inserting '/' as needed.
    If any component is an absolute path, all previous path components
    will be discarded. An empty last part will result in a path that
    ends with a separator."""
    sep = b'/' if isinstance(a, bytes) else '/'
    path = a
    try:
        for comp in p:
            if comp.startswith(sep):
                # Absolute component: restart the result from it.
                path = comp
                continue
            if path and not path.endswith(sep):
                path += sep
            path += comp
    except TypeError:
        valid_types = all(isinstance(s, (str, bytes, bytearray))
                          for s in (a, ) + p)
        if valid_types:
            # Must have a mixture of text and binary data
            raise TypeError("Can't mix strings and bytes in path "
                            "components.") from None
        raise
    return path
# Split a path in head (everything up to the last '/') and tail (the
# rest).  If the path ends in '/', tail will be empty.  If there is no
# '/' in the path, head will be empty.
# Trailing '/'es are stripped from head unless it is the root.
def split(p):
    """Split a pathname. Returns tuple "(head, tail)" where "tail" is
    everything after the final slash. Either part may be empty."""
    sep = b'/' if isinstance(p, bytes) else '/'
    cut = p.rfind(sep) + 1
    head, tail = p[:cut], p[cut:]
    # Drop trailing separators from head, but keep an all-slash root intact.
    if head and head != sep * len(head):
        head = head.rstrip(sep)
    return head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
    # Delegate to the shared generic implementation with POSIX separators.
    if isinstance(p, bytes):
        return genericpath._splitext(p, b'/', None, b'.')
    return genericpath._splitext(p, '/', None, '.')
splitext.__doc__ = genericpath._splitext.__doc__
# Split a pathname into a drive specification and the rest of the
# path.  Useful on DOS/Windows/NT; on Unix, the drive is always empty.
def splitdrive(p):
    """Split a pathname into drive and path. On Posix, drive is always
    empty."""
    # p[:0] produces '' or b'' to match the input's type.
    drive = p[:0]
    return drive, p
# Return the tail (basename) part of a path, same as split(path)[1].
def basename(p):
    """Returns the final component of a pathname"""
    sep = b'/' if isinstance(p, bytes) else '/'
    return p[p.rfind(sep) + 1:]
# Return the head (dirname) part of a path, same as split(path)[0].
def dirname(p):
    """Returns the directory component of a pathname"""
    sep = b'/' if isinstance(p, bytes) else '/'
    head = p[:p.rfind(sep) + 1]
    # Strip trailing separators, but keep an all-slash root intact.
    if head and head != sep * len(head):
        head = head.rstrip(sep)
    return head
# Is a path a symbolic link?
# This will always return false on systems where os.lstat doesn't exist.
def islink(path):
    """Test whether a path is a symbolic link"""
    try:
        st_mode = os.lstat(path).st_mode
    except (os.error, AttributeError):
        # Path missing/inaccessible, or platform without lstat support.
        return False
    return stat.S_ISLNK(st_mode)
# Being true for dangling symbolic links is also useful.
def lexists(path):
    """Test whether a path exists. Returns True for broken symbolic links"""
    try:
        # lstat (unlike stat) does not follow symlinks, so a dangling
        # link still counts as existing.
        os.lstat(path)
        return True
    except os.error:
        return False
# Are two filenames really pointing to the same file?
def samefile(f1, f2):
    """Test whether two pathnames reference the same actual file"""
    st1 = os.stat(f1)
    st2 = os.stat(f2)
    # Same inode on the same device <=> same file (samestat inlined).
    return st1.st_ino == st2.st_ino and st1.st_dev == st2.st_dev
# Are two open files really referencing the same file?
# (Not necessarily the same file descriptor!)
def sameopenfile(fp1, fp2):
    """Test whether two open file objects reference the same file"""
    st1 = os.fstat(fp1)
    st2 = os.fstat(fp2)
    # Same inode on the same device <=> same file (samestat inlined).
    return st1.st_ino == st2.st_ino and st1.st_dev == st2.st_dev
# Are two stat buffers (obtained from stat, fstat or lstat)
# describing the same file?
def samestat(s1, s2):
    """Test whether two stat buffers reference the same file"""
    if s1.st_ino != s2.st_ino:
        return False
    return s1.st_dev == s2.st_dev
# Is a path a mount point?
# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?)
def ismount(path):
    """Test whether a path is a mount point"""
    if islink(path):
        # A symlink can never be a mount point
        return False
    parent_name = b'..' if isinstance(path, bytes) else '..'
    try:
        here = os.lstat(path)
        parent = os.lstat(join(path, parent_name))
    except os.error:
        return False  # It doesn't exist -- so not a mount point :-)
    if here.st_dev != parent.st_dev:
        return True  # path/.. on a different device as path
    if here.st_ino == parent.st_ino:
        return True  # path/.. is the same i-node as path
    return False
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
    """Expand ~ and ~user constructions. If user or $HOME is unknown,
    do nothing."""
    is_bytes = isinstance(path, bytes)
    tilde = b'~' if is_bytes else '~'
    if not path.startswith(tilde):
        return path
    sep = b'/' if is_bytes else '/'
    # Locate the end of the '~' or '~user' prefix.
    end = path.find(sep, 1)
    if end < 0:
        end = len(path)
    if end == 1:
        # Bare '~': prefer $HOME, fall back to the password database.
        try:
            userhome = os.environ['HOME']
        except KeyError:
            import pwd
            userhome = pwd.getpwuid(os.getuid()).pw_dir
    else:
        # '~user': always consult the password database.
        import pwd
        name = path[1:end]
        if isinstance(name, bytes):
            name = str(name, 'ASCII')
        try:
            entry = pwd.getpwnam(name)
        except KeyError:
            # Unknown user -- leave the path untouched.
            return path
        userhome = entry.pw_dir
    if is_bytes:
        userhome = os.fsencode(userhome)
        root = b'/'
    else:
        root = '/'
    userhome = userhome.rstrip(root)
    return (userhome + path[end:]) or root
# Expand paths containing shell variable substitutions.
# This expands the forms $variable and ${variable} only.
# Non-existent variables are left unchanged.
_varprog = None    # lazily-compiled $var/${var} pattern for str paths
_varprogb = None   # lazily-compiled $var/${var} pattern for bytes paths
def expandvars(path):
    """Expand shell variables of form $var and ${var}. Unknown variables
    are left unchanged."""
    global _varprog, _varprogb
    # Select the str or bytes machinery; bail out early when the path
    # cannot possibly contain a substitution.
    if isinstance(path, bytes):
        if b'$' not in path:
            return path
        if not _varprogb:
            import re
            _varprogb = re.compile(br'\$(\w+|\{[^}]*\})', re.ASCII)
        search = _varprogb.search
        start = b'{'
        end = b'}'
    else:
        if '$' not in path:
            return path
        if not _varprog:
            import re
            _varprog = re.compile(r'\$(\w+|\{[^}]*\})', re.ASCII)
        search = _varprog.search
        start = '{'
        end = '}'
    # Scan left to right, substituting each known variable in place.
    i = 0
    while True:
        m = search(path, i)
        if not m:
            break
        i, j = m.span(0)
        name = m.group(1)
        # Strip the braces of a ${var} form.
        if name.startswith(start) and name.endswith(end):
            name = name[1:-1]
        if isinstance(name, bytes):
            name = str(name, 'ASCII')
        if name in os.environ:
            # Splice in the value, then resume scanning at the start of the
            # remaining tail (the substituted value itself is not rescanned).
            tail = path[j:]
            value = os.environ[name]
            if isinstance(path, bytes):
                value = value.encode('ASCII')
            path = path[:i] + value
            i = len(path)
            path += tail
        else:
            # Unknown variable: leave it verbatim and continue after it.
            i = j
    return path
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
# It should be understood that this may change the meaning of the path
# if it contains symbolic links!
def normpath(path):
    """Normalize path, eliminating double slashes, etc."""
    if isinstance(path, bytes):
        sep, empty, dot, dotdot = b'/', b'', b'.', b'..'
    else:
        sep, empty, dot, dotdot = '/', '', '.', '..'
    if path == empty:
        return dot
    initial_slashes = int(path.startswith(sep))
    # POSIX allows one or two initial slashes, but treats three or more
    # as single slash.
    if (initial_slashes and
            path.startswith(sep * 2) and not path.startswith(sep * 3)):
        initial_slashes = 2
    resolved = []
    for comp in path.split(sep):
        if comp in (empty, dot):
            continue
        if comp != dotdot:
            resolved.append(comp)
        elif not initial_slashes and not resolved:
            # Relative path climbing above its start: keep the '..'.
            resolved.append(comp)
        elif resolved and resolved[-1] == dotdot:
            # Only '..' entries so far: another one stacks on top.
            resolved.append(comp)
        elif resolved:
            # '..' cancels the previous real component.
            resolved.pop()
        # else: '..' at an absolute root is silently dropped.
    path = sep.join(resolved)
    if initial_slashes:
        path = sep * initial_slashes + path
    return path or dot
def abspath(path):
    """Return an absolute path."""
    if isabs(path):
        return normpath(path)
    # Relative path: anchor it at the current working directory, keeping
    # the bytes/str flavor of the argument.
    cwd = os.getcwdb() if isinstance(path, bytes) else os.getcwd()
    return normpath(join(cwd, path))
# Return a canonical path (i.e. the absolute location of a file on the
# filesystem).
def realpath(filename):
    """Return the canonical path of the specified filename, eliminating any
    symbolic links encountered in the path."""
    # filename[:0] yields '' or b'' as the empty starting path.
    resolved, _ok = _joinrealpath(filename[:0], filename, {})
    return abspath(resolved)
# Join two paths, normalizing and eliminating any symbolic links
# encountered in the second path.
def _joinrealpath(path, rest, seen):
    # 'seen' maps symlink paths to their resolved targets; a value of None
    # marks a symlink whose resolution is still in progress, i.e. a loop.
    # Returns (resolved_path, ok) where ok is False when a symlink loop
    # was detected and resolution was abandoned.
    if isinstance(path, bytes):
        sep = b'/'
        curdir = b'.'
        pardir = b'..'
    else:
        sep = '/'
        curdir = '.'
        pardir = '..'
    if isabs(rest):
        rest = rest[1:]
        path = sep
    # Consume 'rest' one component at a time, resolving as we go.
    while rest:
        name, _, rest = rest.partition(sep)
        if not name or name == curdir:
            # current dir
            continue
        if name == pardir:
            # parent dir
            if path:
                path, name = split(path)
                if name == pardir:
                    # Already above the root of what we can resolve:
                    # accumulate the '..' components instead.
                    path = join(path, pardir, pardir)
            else:
                path = pardir
            continue
        newpath = join(path, name)
        if not islink(newpath):
            path = newpath
            continue
        # Resolve the symbolic link
        if newpath in seen:
            # Already seen this path
            path = seen[newpath]
            if path is not None:
                # use cached value
                continue
            # The symlink is not resolved, so we must have a symlink loop.
            # Return already resolved part + rest of the path unchanged.
            return join(newpath, rest), False
        seen[newpath] = None # not resolved symlink
        path, ok = _joinrealpath(path, os.readlink(newpath), seen)
        if not ok:
            return join(path, rest), False
        seen[newpath] = path # resolved symlink
    return path, True
supports_unicode_filenames = (sys.platform == 'darwin')

def relpath(path, start=None):
    """Return a relative version of a path"""
    if not path:
        raise ValueError("no path specified")
    if isinstance(path, bytes):
        curdir, sep, pardir = b'.', b'/', b'..'
    else:
        curdir, sep, pardir = '.', '/', '..'
    if start is None:
        start = curdir
    # Compare the absolute forms component by component.
    start_parts = [part for part in abspath(start).split(sep) if part]
    path_parts = [part for part in abspath(path).split(sep) if part]
    shared = len(commonprefix([start_parts, path_parts]))
    # Climb out of the unshared tail of 'start', then descend into 'path'.
    rel_parts = [pardir] * (len(start_parts) - shared) + path_parts[shared:]
    if not rel_parts:
        return curdir
    return join(*rel_parts)
| agpl-3.0 |
tectronics/pyresample | setup.py | 3 | 1830 | #pyresample, Resampling of remote sensing image data in python
#
#Copyright (C) 2012 Esben S. Nielsen
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup
import sys
import os
import imp
# Load the version module straight from its file so setup.py does not have
# to import the (possibly not-yet-installed) pyresample package itself.
# NOTE(review): 'imp' is deprecated on Python 3; importlib is the modern
# equivalent.
version = imp.load_source('pyresample.version', 'pyresample/version.py')
# Runtime dependencies; the extras add optional accelerated backends.
requirements = ['pyproj', 'numpy', 'configobj']
extras_require = {'pykdtree': ['pykdtree'], 'numexpr': ['numexpr']}
if sys.version_info < (2, 6):
    # multiprocessing is not in the standard library
    requirements.append('multiprocessing')
setup(name='pyresample',
      version=version.__version__,
      description='Resampling of remote sensing data in Python',
      author='Esben S. Nielsen',
      author_email='esn@dmi.dk',
      package_dir = {'pyresample': 'pyresample'},
      packages = ['pyresample'],
      install_requires=requirements,
      extras_require = extras_require,
      zip_safe = False,
      classifiers=[
      'Development Status :: 5 - Production/Stable',
      'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
      'Programming Language :: Python',
      'Operating System :: OS Independent',
      'Intended Audience :: Science/Research',
      'Topic :: Scientific/Engineering'
      ]
      )
| gpl-3.0 |
amenonsen/ansible | lib/ansible/modules/identity/ipa/ipa_user.py | 30 | 12363 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipa_user
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA users
description:
- Add, modify and delete user within IPA server
options:
displayname:
description: Display name
update_password:
description:
- Set password for a user.
type: str
default: 'always'
choices: [ always, on_create ]
version_added: 2.8
givenname:
description: First name
krbpasswordexpiration:
description:
- Date at which the user password will expire
- In the format YYYYMMddHHmmss
- e.g. 20180121182022 will expire on 21 January 2018 at 18:20:22
version_added: 2.5
loginshell:
description: Login shell
mail:
description:
- List of mail addresses assigned to the user.
- If an empty list is passed all assigned email addresses will be deleted.
- If None is passed email addresses will not be checked or changed.
password:
description:
- Password for a user. Will not be set for an existing user unless C(update_password) is set to C(always), which is the default.
sn:
description: Surname
sshpubkey:
description:
- List of public SSH key.
- If an empty list is passed all assigned public keys will be deleted.
- If None is passed SSH public keys will not be checked or changed.
state:
description: State to ensure
default: "present"
choices: ["present", "absent", "enabled", "disabled"]
telephonenumber:
description:
- List of telephone numbers assigned to the user.
- If an empty list is passed all assigned telephone numbers will be deleted.
- If None is passed telephone numbers will not be checked or changed.
title:
description: Title
uid:
description: uid of the user
required: true
aliases: ["name"]
uidnumber:
description:
- Account Settings UID/Posix User ID number
version_added: 2.5
gidnumber:
description:
- Posix Group ID
version_added: 2.5
extends_documentation_fragment: ipa.documentation
version_added: "2.3"
requirements:
- base64
- hashlib
'''
EXAMPLES = '''
# Ensure pinky is present and always reset password
- ipa_user:
name: pinky
state: present
krbpasswordexpiration: 20200119235959
givenname: Pinky
sn: Acme
mail:
- pinky@acme.com
telephonenumber:
- '+555123456'
sshpubkey:
- ssh-rsa ....
- ssh-dsa ....
uidnumber: 1001
gidnumber: 100
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure brain is absent
- ipa_user:
name: brain
state: absent
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure pinky is present but don't reset password if already exists
- ipa_user:
name: pinky
state: present
givenname: Pinky
sn: Acme
password: zounds
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
update_password: on_create
'''
RETURN = '''
user:
description: User as returned by IPA API
returned: always
type: dict
'''
import base64
import hashlib
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ipa import IPAClient, ipa_argument_spec
from ansible.module_utils._text import to_native
class UserIPAClient(IPAClient):
    """Thin wrapper exposing the FreeIPA user_* JSON-RPC methods."""

    def __init__(self, module, host, port, protocol):
        super(UserIPAClient, self).__init__(module, host, port, protocol)

    def user_find(self, name):
        """Look up a user by uid, requesting all attributes."""
        query = {'all': True, 'uid': name}
        return self._post_json(method='user_find', name=None, item=query)

    def user_add(self, name, item):
        """Create a new user with the given attributes."""
        return self._post_json(method='user_add', name=name, item=item)

    def user_mod(self, name, item):
        """Modify an existing user's attributes."""
        return self._post_json(method='user_mod', name=name, item=item)

    def user_del(self, name):
        """Delete a user."""
        return self._post_json(method='user_del', name=name)

    def user_disable(self, name):
        """Lock a user account."""
        return self._post_json(method='user_disable', name=name)

    def user_enable(self, name):
        """Unlock a user account."""
        return self._post_json(method='user_enable', name=name)
def get_user_dict(displayname=None, givenname=None, krbpasswordexpiration=None, loginshell=None,
                  mail=None, nsaccountlock=False, sn=None, sshpubkey=None, telephonenumber=None,
                  title=None, userpassword=None, gidnumber=None, uidnumber=None):
    """Assemble the attribute dict understood by the FreeIPA user API.

    Attributes whose value is None are omitted, with one exception:
    ``nsaccountlock`` is always included because it encodes the
    enabled/disabled state.  Two values need special treatment:
    ``krbpasswordexpiration`` gets a trailing "Z" (UTC marker) appended,
    and ``sshpubkey`` is stored under the IPA attribute name
    ``ipasshpubkey``.
    """
    user = {'nsaccountlock': nsaccountlock}

    # Special-cased attributes (renamed or transformed on the way in).
    if krbpasswordexpiration is not None:
        user['krbpasswordexpiration'] = krbpasswordexpiration + "Z"
    if sshpubkey is not None:
        user['ipasshpubkey'] = sshpubkey

    # Straight pass-through attributes, copied only when supplied.
    passthrough = {
        'displayname': displayname,
        'givenname': givenname,
        'loginshell': loginshell,
        'mail': mail,
        'sn': sn,
        'telephonenumber': telephonenumber,
        'title': title,
        'userpassword': userpassword,
        'gidnumber': gidnumber,
        'uidnumber': uidnumber,
    }
    user.update((key, value) for key, value in passthrough.items() if value is not None)
    return user
def get_user_diff(client, ipa_user, module_user):
    """
    Return the list of keys whose values differ between the two dicts.

    Unfortunately the IPA API returns everything as a list even if only a
    single value is possible.  client.get_diff() compensates by wrapping
    non-list module values in a list when the IPA side holds a list, so this
    method should not need changing if the returned API dict changes.

    NOTE: module_user is temporarily mutated (ipasshpubkey is swapped for
    computed fingerprints) and restored before returning.

    :param client: IPAClient instance providing get_diff()
    :param ipa_user: user dict as returned by the IPA API
    :param module_user: desired user dict built from module parameters
    :return: list of differing keys
    """
    # sshpubkeyfp is the list of ssh key fingerprints. IPA doesn't return the keys itself but instead the fingerprints.
    # These are used for comparison.
    sshpubkey = None
    if 'ipasshpubkey' in module_user:
        # Match the hash algorithm IPA used for the stored fingerprints:
        # newer servers prefix them with "SHA256:", older ones use MD5.
        hash_algo = 'md5'
        if 'sshpubkeyfp' in ipa_user and ipa_user['sshpubkeyfp'][0][:7].upper() == 'SHA256:':
            hash_algo = 'sha256'
        module_user['sshpubkeyfp'] = [get_ssh_key_fingerprint(pubkey, hash_algo) for pubkey in module_user['ipasshpubkey']]
        # Remove the ipasshpubkey element as it is not returned from IPA but save its value to be used later on
        sshpubkey = module_user['ipasshpubkey']
        del module_user['ipasshpubkey']
    result = client.get_diff(ipa_data=ipa_user, module_data=module_user)
    # If there are public keys, remove the fingerprints and add them back to the dict
    if sshpubkey is not None:
        del module_user['sshpubkeyfp']
        module_user['ipasshpubkey'] = sshpubkey
    return result
def get_ssh_key_fingerprint(ssh_key, hash_algo='sha256'):
    """
    Return the public key fingerprint of a given public SSH key
    in format "[fp] [user@host] (ssh-rsa)" where fp is of the format:
        FB:0C:AC:0A:07:94:5B:CE:75:6E:63:32:13:AD:AD:D7
    for md5 or
        SHA256:[base64]
    for sha256.

    :param ssh_key: one OpenSSH public key line ("<type> <base64> [comment]")
    :param hash_algo: 'md5' or 'sha256'
    :return: formatted fingerprint string, or None for an empty input
    :raises ValueError: if hash_algo is not a supported algorithm
    """
    parts = ssh_key.strip().split()
    if len(parts) == 0:
        return None
    key_type = parts[0]
    key = base64.b64decode(parts[1].encode('ascii'))
    if hash_algo == 'md5':
        fp_plain = hashlib.md5(key).hexdigest()
        # Render as colon-separated uppercase hex byte pairs.
        key_fp = ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])).upper()
    elif hash_algo == 'sha256':
        # OpenSSH style: unpadded base64 of the raw digest, "SHA256:" prefix.
        fp_plain = base64.b64encode(hashlib.sha256(key).digest()).decode('ascii').rstrip('=')
        key_fp = 'SHA256:{fp}'.format(fp=fp_plain)
    else:
        # BUG FIX: an unrecognized algorithm previously fell through and
        # crashed below with UnboundLocalError on key_fp; fail clearly instead.
        raise ValueError("Unsupported hash algorithm: %s" % hash_algo)
    if len(parts) < 3:
        return "%s (%s)" % (key_fp, key_type)
    else:
        user_host = parts[2]
        return "%s %s (%s)" % (key_fp, user_host, key_type)
def ensure(module, client):
    """Reconcile the IPA user with the requested module parameters.

    :param module: AnsibleModule (params, check_mode)
    :param client: logged-in UserIPAClient
    :return: (changed, user) where user is the IPA representation after any
             modification, or the find result when nothing changed.
    """
    state = module.params['state']
    name = module.params['uid']
    # 'disabled' state is modelled through the nsaccountlock attribute.
    nsaccountlock = state == 'disabled'
    module_user = get_user_dict(displayname=module.params.get('displayname'),
                                krbpasswordexpiration=module.params.get('krbpasswordexpiration'),
                                givenname=module.params.get('givenname'),
                                loginshell=module.params['loginshell'],
                                mail=module.params['mail'], sn=module.params['sn'],
                                sshpubkey=module.params['sshpubkey'], nsaccountlock=nsaccountlock,
                                telephonenumber=module.params['telephonenumber'], title=module.params['title'],
                                userpassword=module.params['password'],
                                gidnumber=module.params.get('gidnumber'), uidnumber=module.params.get('uidnumber'))

    update_password = module.params.get('update_password')
    ipa_user = client.user_find(name=name)

    changed = False
    if state in ['present', 'enabled', 'disabled']:
        if not ipa_user:
            changed = True
            if not module.check_mode:
                ipa_user = client.user_add(name=name, item=module_user)
        else:
            # Existing user: optionally leave the password untouched, then
            # only call user_mod when some attribute actually differs.
            if update_password == 'on_create':
                module_user.pop('userpassword', None)
            diff = get_user_diff(client, ipa_user, module_user)
            if len(diff) > 0:
                changed = True
                if not module.check_mode:
                    ipa_user = client.user_mod(name=name, item=module_user)
    else:
        # state == 'absent': remove the user if it exists.
        if ipa_user:
            changed = True
            if not module.check_mode:
                client.user_del(name)

    return changed, ipa_user
def main():
    """Module entry point: build the argument spec, log in and reconcile."""
    argument_spec = ipa_argument_spec()
    argument_spec.update(displayname=dict(type='str'),
                         givenname=dict(type='str'),
                         update_password=dict(type='str', default="always",
                                              choices=['always', 'on_create']),
                         krbpasswordexpiration=dict(type='str'),
                         loginshell=dict(type='str'),
                         mail=dict(type='list'),
                         sn=dict(type='str'),
                         uid=dict(type='str', required=True, aliases=['name']),
                         gidnumber=dict(type='str'),
                         uidnumber=dict(type='str'),
                         password=dict(type='str', no_log=True),
                         sshpubkey=dict(type='list'),
                         state=dict(type='str', default='present',
                                    choices=['present', 'absent', 'enabled', 'disabled']),
                         telephonenumber=dict(type='list'),
                         title=dict(type='str'))

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    client = UserIPAClient(module=module,
                           host=module.params['ipa_host'],
                           port=module.params['ipa_port'],
                           protocol=module.params['ipa_prot'])

    # If sshpubkey is defined as None then module.params['sshpubkey'] is [None]. IPA itself returns None (not a list).
    # Therefore a small check here to replace list(None) by None. Otherwise get_user_diff() would return sshpubkey
    # as different which should be avoided.
    if module.params['sshpubkey'] is not None:
        if len(module.params['sshpubkey']) == 1 and module.params['sshpubkey'][0] == "":
            module.params['sshpubkey'] = None

    try:
        client.login(username=module.params['ipa_user'],
                     password=module.params['ipa_pass'])
        changed, user = ensure(module, client)
        module.exit_json(changed=changed, user=user)
    except Exception as e:
        # Any failure (login, RPC, diff) is surfaced through fail_json.
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
| gpl-3.0 |
gudcjfdldu/volatility | volatility/plugins/gui/eventhooks.py | 58 | 2622 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (C) 2010,2011,2012 Michael Hale Ligh <michael.ligh@mnin.org>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import volatility.plugins.gui.sessions as sessions
class EventHooks(sessions.Sessions):
    """Print details on windows event hooks"""

    def render_text(self, outfd, data):
        """Write one report section per SetWinEventHook handle found.

        :param outfd: output stream to write to
        :param data: iterable of session objects produced by Sessions
        """
        for session in data:
            shared_info = session.find_shared_info()
            # Sessions without a resolvable win32k shared info area are skipped.
            if not shared_info:
                continue

            # Only consider USER handle-table entries of the event-hook type.
            filters = [lambda x : str(x.bType) == "TYPE_WINEVENTHOOK"]

            for handle in shared_info.handles(filters):
                # Handle value, kernel object address and owning session.
                outfd.write("Handle: {0:#x}, Object: {1:#x}, Session: {2}\n".format(
                    handle.phead.h if handle.phead else 0,
                    handle.phead.v(),
                    session.SessionId))
                outfd.write("Type: {0}, Flags: {1}, Thread: {2}, Process: {3}\n".format(
                    handle.bType,
                    handle.bFlags,
                    handle.Thread.Cid.UniqueThread,
                    handle.Process.UniqueProcessId,
                ))
                # Dereference the handle into the tagEVENTHOOK structure.
                event_hook = handle.reference_object()
                # Event range (EVENT_MIN..EVENT_MAX constants) this hook covers.
                outfd.write("eventMin: {0:#x} {1}\neventMax: {2:#x} {3}\n".format(
                    event_hook.eventMin.v(),
                    str(event_hook.eventMin),
                    event_hook.eventMax.v(),
                    str(event_hook.eventMax),
                ))
                # Hook flags, callback offset and the filtered process/thread ids.
                outfd.write("Flags: {0}, offPfn: {1:#x}, idProcess: {2}, idThread: {3}\n".format(
                    event_hook.dwFlags,
                    event_hook.offPfn,
                    event_hook.idProcess,
                    event_hook.idThread,
                ))
                ## Work out the WindowStation\Desktop path by the handle
                ## owner (thread or process)
                outfd.write("ihmod: {0}\n".format(event_hook.ihmod))
                outfd.write("\n")
| gpl-2.0 |
songmonit/CTTMSONLINE | addons/hr_attendance/__init__.py | 434 | 1122 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_attendance
import wizard
import report
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sogelink/ansible | lib/ansible/modules/cloud/amazon/iam_managed_policy.py | 13 | 15764 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: iam_managed_policy
short_description: Manage User Managed IAM policies
description:
- Allows creating and removing managed IAM policies
version_added: "2.4"
options:
policy_name:
description:
- The name of the managed policy.
required: True
policy_description:
description:
- A helpful description of this policy; this value is immutable and is only set when creating a new policy.
default: ''
policy:
description:
- A properly json formatted policy
make_default:
description:
- Make this revision the default revision.
default: True
only_version:
description:
- Remove all other non default revisions, if this is used with C(make_default) it will result in all other versions of this policy being deleted.
required: False
default: False
state:
description:
- Should this managed policy be present or absent. Set to absent to detach all entities from this policy and remove it if found.
required: True
default: null
choices: [ "present", "absent" ]
author: "Dan Kozlowski (@dkhenry)"
requirements:
- boto3
- botocore
'''
EXAMPLES = '''
# Create Policy ex nihilo
- name: Create IAM Managed Policy
iam_managed_policy:
policy_name: "ManagedPolicy"
policy_description: "A Helpful managed policy"
policy: "{{ lookup('template', 'managed_policy.json.j2') }}"
state: present
# Update a policy with a new default version
- name: Create IAM Managed Policy
iam_managed_policy:
policy_name: "ManagedPolicy"
policy: "{{ lookup('file', 'managed_policy_update.json') }}"
state: present
# Update a policy with a new non default version
- name: Create IAM Managed Policy
iam_managed_policy:
policy_name: "ManagedPolicy"
policy: "{{ lookup('file', 'managed_policy_update.json') }}"
make_default: false
state: present
# Update a policy and make it the only version and the default version
- name: Create IAM Managed Policy
iam_managed_policy:
policy_name: "ManagedPolicy"
policy: "{ 'Version': '2012-10-17', 'Statement':[{'Effect': 'Allow','Action': '*','Resource': '*'}]}"
only_version: true
state: present
# Remove a policy
- name: Create IAM Managed Policy
iam_managed_policy:
policy_name: "ManagedPolicy"
state: absent
'''
RETURN = '''
policy:
description: Returns the policy json structure, when state == absent this will return the value of the removed policy.
returned: success
type: string
sample: '{
"arn": "arn:aws:iam::aws:policy/AdministratorAccess "
"attachment_count": 0,
"create_date": "2017-03-01T15:42:55.981000+00:00",
"default_version_id": "v1",
"is_attachable": true,
"path": "/",
"policy_id": "ANPALM4KLDMTFXGOOJIHL",
"policy_name": "AdministratorAccess",
"update_date": "2017-03-01T15:42:55.981000+00:00"
}'
'''
import json
import traceback
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, get_aws_connection_info, ec2_argument_spec, AWSRetry,
sort_json_policy_dict, camel_dict_to_snake_dict, HAS_BOTO3)
from ansible.module_utils._text import to_native
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def list_policies_with_backoff(iam):
    """Return all customer-managed (Scope='Local') policies, fully paginated.

    Wrapped in exponential backoff (5 tries) to ride out IAM API throttling.

    :param iam: boto3 IAM client
    :return: aggregated paginator result dict (contains 'Policies')
    """
    paginator = iam.get_paginator('list_policies')
    return paginator.paginate(Scope='Local').build_full_result()
def get_policy_by_name(module, iam, name):
    """Return the managed policy dict whose PolicyName equals *name*.

    Returns None when no customer-managed policy carries that name; any
    listing failure is reported through module.fail_json.
    """
    try:
        response = list_policies_with_backoff(iam)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Couldn't list policies: %s" % str(e),
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    # First (and only, since names are unique) match wins.
    return next((candidate for candidate in response['Policies']
                 if candidate['PolicyName'] == name), None)
def delete_oldest_non_default_version(module, iam, policy):
    """Delete the single oldest non-default version of the given policy.

    Used to free a slot when the per-policy version limit is hit.

    :param module: AnsibleModule, used for error reporting
    :param iam: boto3 IAM client
    :param policy: policy dict as returned by the IAM API (needs 'Arn')
    """
    try:
        # The default version must never be deleted, so exclude it up front.
        versions = [v for v in iam.list_policy_versions(PolicyArn=policy['Arn'])['Versions']
                    if not v['IsDefaultVersion']]
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Couldn't list policy versions: %s" % str(e),
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    # Newest first, so the last element is the oldest version.
    versions.sort(key=lambda v: v['CreateDate'], reverse=True)
    # versions[-1:] selects only that oldest entry (empty slice when there
    # are no non-default versions, in which case nothing is deleted).
    for v in versions[-1:]:
        try:
            iam.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId'])
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg="Couldn't delete policy version: %s" % str(e),
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
def get_or_create_policy_version(module, iam, policy, policy_document):
    """Return (policy_version, changed) for the given policy document.

    If an existing version already matches policy_document (compared after
    key-order normalization), it is returned with changed=False.  Otherwise
    a new version is created; if the per-policy version limit is hit, the
    oldest non-default version is deleted once and creation is retried.

    :param module: AnsibleModule, used for error reporting
    :param iam: boto3 IAM client
    :param policy: policy dict as returned by the IAM API (needs 'Arn')
    :param policy_document: JSON string of the desired policy document
    """
    try:
        versions = iam.list_policy_versions(PolicyArn=policy['Arn'])['Versions']
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Couldn't list policy versions: %s" % str(e),
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    for v in versions:
        try:
            document = iam.get_policy_version(PolicyArn=policy['Arn'],
                                              VersionId=v['VersionId'])['PolicyVersion']['Document']
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg="Couldn't get policy version %s: %s" % (v['VersionId'], str(e)),
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
        # Key order is irrelevant to IAM, so normalize both sides first.
        if sort_json_policy_dict(document) == sort_json_policy_dict(
                json.loads(policy_document)):
            return v, False

    # No existing version so create one
    # There is a service limit (typically 5) of policy versions.
    #
    # Rather than assume that it is 5, we'll try to create the policy
    # and if that doesn't work, delete the oldest non default policy version
    # and try again.
    try:
        version = iam.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion']
        return version, True
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == 'LimitExceeded':
            delete_oldest_non_default_version(module, iam, policy)
            try:
                version = iam.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion']
                return version, True
            except botocore.exceptions.ClientError as second_e:
                # BUG FIX: the original bound the retry failure to 'e' and
                # relied on it after the except block, but Python 3 unbinds
                # an 'as' name when its except block ends, which crashed the
                # fail_json below with UnboundLocalError.  Rebind explicitly
                # so the most recent failure is the one reported.
                e = second_e
        # Handle both when the exception isn't LimitExceeded or
        # the second attempt still failed
        module.fail_json(msg="Couldn't create policy version: %s" % str(e),
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
def set_if_default(module, iam, policy, policy_version, is_default):
    """Promote policy_version to the policy's default version when requested.

    Returns True when the default was changed, False when no action was
    needed (not requested, or the version is already the default).
    """
    # Guard clause: nothing to do unless promotion is both wanted and needed.
    if not is_default or policy_version['IsDefaultVersion']:
        return False
    try:
        iam.set_default_policy_version(PolicyArn=policy['Arn'], VersionId=policy_version['VersionId'])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Couldn't set default policy version: %s" % str(e),
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    return True
def set_if_only(module, iam, policy, policy_version, is_only):
    """Delete every non-default version of the policy when only_version is set.

    Returns True when at least one version was deleted, False otherwise.
    """
    # Guard clause: leave all versions in place unless pruning was requested.
    if not is_only:
        return False
    try:
        versions = [v for v in iam.list_policy_versions(PolicyArn=policy['Arn'])[
            'Versions'] if not v['IsDefaultVersion']]
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Couldn't list policy versions: %s" % str(e),
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    for stale in versions:
        try:
            iam.delete_policy_version(PolicyArn=policy['Arn'], VersionId=stale['VersionId'])
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg="Couldn't delete policy version: %s" % str(e),
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
    return len(versions) > 0
def detach_all_entities(module, iam, policy, **kwargs):
    """Detach every group, user and role currently attached to the policy.

    Recurses through paginated results until IsTruncated is False.

    :param module: AnsibleModule, used for error reporting
    :param iam: boto3 IAM client
    :param policy: policy dict as returned by the IAM API (needs 'Arn')
    :param kwargs: pagination arguments forwarded to list_entities_for_policy
    """
    try:
        entities = iam.list_entities_for_policy(PolicyArn=policy['Arn'], **kwargs)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Couldn't detach list entities for policy %s: %s" % (policy['PolicyName'], str(e)),
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))

    for g in entities['PolicyGroups']:
        try:
            iam.detach_group_policy(PolicyArn=policy['Arn'], GroupName=g['GroupName'])
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg="Couldn't detach group policy %s: %s" % (g['GroupName'], str(e)),
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
    for u in entities['PolicyUsers']:
        try:
            iam.detach_user_policy(PolicyArn=policy['Arn'], UserName=u['UserName'])
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg="Couldn't detach user policy %s: %s" % (u['UserName'], str(e)),
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
    for r in entities['PolicyRoles']:
        try:
            iam.detach_role_policy(PolicyArn=policy['Arn'], RoleName=r['RoleName'])
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg="Couldn't detach role policy %s: %s" % (r['RoleName'], str(e)),
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
    if entities['IsTruncated']:
        # BUG FIX: the recursive page fetch previously forwarded marker=
        # (lower case), which boto3 rejects with ParamValidationError; the
        # IAM list_entities_for_policy pagination parameter is 'Marker'.
        detach_all_entities(module, iam, policy, Marker=entities['Marker'])
def main():
    """Module entry point: create, update or delete a managed IAM policy."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        policy_name=dict(required=True),
        policy_description=dict(default=''),
        policy=dict(type='json'),
        make_default=dict(type='bool', default=True),
        only_version=dict(type='bool', default=False),
        # NOTE(review): fail_on_delete is accepted but never read below —
        # confirm whether it is intentionally ignored.
        fail_on_delete=dict(type='bool', default=True),
        state=dict(required=True, choices=['present', 'absent']),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=[['state', 'present', ['policy']]]
    )

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module')

    name = module.params.get('policy_name')
    description = module.params.get('policy_description')
    state = module.params.get('state')
    default = module.params.get('make_default')
    only = module.params.get('only_version')

    policy = None

    if module.params.get('policy') is not None:
        # Round-trip through json to normalize whitespace/formatting.
        policy = json.dumps(json.loads(module.params.get('policy')))

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        iam = boto3_conn(module, conn_type='client', resource='iam',
                         region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except (botocore.exceptions.NoCredentialsError, botocore.exceptions.ProfileNotFound) as e:
        module.fail_json(msg="Can't authorize connection. Check your credentials and profile.",
                         exceptions=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))

    p = get_policy_by_name(module, iam, name)
    if state == 'present':
        if p is None:
            # No Policy so just create one
            try:
                rvalue = iam.create_policy(PolicyName=name, Path='/',
                                           PolicyDocument=policy, Description=description)
            except Exception as e:
                # NOTE(review): a non-ClientError exception has no .response
                # attribute, so e.response below would itself raise — confirm.
                module.fail_json(msg="Couldn't create policy %s: %s" % (name, to_native(e)),
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))

            module.exit_json(changed=True, policy=camel_dict_to_snake_dict(rvalue['Policy']))
        else:
            policy_version, changed = get_or_create_policy_version(module, iam, p, policy)
            changed = set_if_default(module, iam, p, policy_version, default) or changed
            changed = set_if_only(module, iam, p, policy_version, only) or changed
            # If anything has changed we need to refresh the policy
            if changed:
                try:
                    p = iam.get_policy(PolicyArn=p['Arn'])['Policy']
                except Exception as e:
                    module.fail_json(msg="Couldn't get policy: %s" % to_native(e),
                                     exception=traceback.format_exc(),
                                     **camel_dict_to_snake_dict(e.response))

            module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(p))
    else:
        # Check for existing policy
        if p:
            # Detach policy
            detach_all_entities(module, iam, p)
            # Delete Versions
            try:
                versions = iam.list_policy_versions(PolicyArn=p['Arn'])['Versions']
            except botocore.exceptions.ClientError as e:
                module.fail_json(msg="Couldn't list policy versions: %s" % to_native(e),
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))
            for v in versions:
                # The default version is removed implicitly by delete_policy.
                if not v['IsDefaultVersion']:
                    try:
                        iam.delete_policy_version(PolicyArn=p['Arn'], VersionId=v['VersionId'])
                    except botocore.exceptions.ClientError as e:
                        module.fail_json(msg="Couldn't delete policy version %s: %s" %
                                         (v['VersionId'], to_native(e)),
                                         exception=traceback.format_exc(),
                                         **camel_dict_to_snake_dict(e.response))
            # Delete policy
            try:
                iam.delete_policy(PolicyArn=p['Arn'])
            except Exception as e:
                module.fail_json(msg="Couldn't delete policy %s: %s" % (p['PolicyName'], to_native(e)),
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))

            # This is the one case where we will return the old policy
            module.exit_json(changed=True, policy=camel_dict_to_snake_dict(p))
        else:
            module.exit_json(changed=False, policy=None)
| gpl-3.0 |
jaggu303619/asylum | openerp/addons/account_asset/wizard/__init__.py | 445 | 1122 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_asset_change_duration
import wizard_asset_compute
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
RandallDW/Aruba_plugin | plugins/org.python.pydev.jython/Lib/fractions.py | 252 | 22390 | # Originally contributed by Sjoerd Mullender.
# Significantly modified by Jeffrey Yasskin <jyasskin at gmail.com>.
"""Rational, infinite-precision, real numbers."""
from __future__ import division
from decimal import Decimal
import math
import numbers
import operator
import re
__all__ = ['Fraction', 'gcd']
Rational = numbers.Rational
def gcd(a, b):
    """Calculate the Greatest Common Divisor of a and b.

    Unless b==0, the result will have the same sign as b (so that when
    b is divided by it, the result comes out positive).
    """
    # Euclid's algorithm.  Python's modulo takes the sign of the divisor,
    # which is exactly what produces the documented sign behaviour.
    x, y = a, b
    while y:
        x, y = y, x % y
    return x
_RATIONAL_FORMAT = re.compile(r"""
\A\s* # optional whitespace at the start, then
(?P<sign>[-+]?) # an optional sign, then
(?=\d|\.\d) # lookahead for digit or .digit
(?P<num>\d*) # numerator (possibly empty)
(?: # followed by
(?:/(?P<denom>\d+))? # an optional denominator
| # or
(?:\.(?P<decimal>\d*))? # an optional fractional part
(?:E(?P<exp>[-+]?\d+))? # and optional exponent
)
\s*\Z # and optional whitespace to finish
""", re.VERBOSE | re.IGNORECASE)
class Fraction(Rational):
"""This class implements rational numbers.
In the two-argument form of the constructor, Fraction(8, 6) will
produce a rational number equivalent to 4/3. Both arguments must
be Rational. The numerator defaults to 0 and the denominator
defaults to 1 so that Fraction(3) == 3 and Fraction() == 0.
Fractions can also be constructed from:
- numeric strings similar to those accepted by the
float constructor (for example, '-2.3' or '1e10')
- strings of the form '123/456'
- float and Decimal instances
- other Rational instances (including integers)
"""
__slots__ = ('_numerator', '_denominator')
    # We're immutable, so use __new__ not __init__
    def __new__(cls, numerator=0, denominator=None):
        """Constructs a Fraction.

        Takes a string like '3/2' or '1.5', another Rational instance, a
        numerator/denominator pair, or a float.

        Examples
        --------

        >>> Fraction(10, -8)
        Fraction(-5, 4)
        >>> Fraction(Fraction(1, 7), 5)
        Fraction(1, 35)
        >>> Fraction(Fraction(1, 7), Fraction(2, 3))
        Fraction(3, 14)
        >>> Fraction('314')
        Fraction(314, 1)
        >>> Fraction('-35/4')
        Fraction(-35, 4)
        >>> Fraction('3.1415') # conversion from numeric string
        Fraction(6283, 2000)
        >>> Fraction('-47e-2') # string may include a decimal exponent
        Fraction(-47, 100)
        >>> Fraction(1.47)  # direct construction from float (exact conversion)
        Fraction(6620291452234629, 4503599627370496)
        >>> Fraction(2.25)
        Fraction(9, 4)
        >>> Fraction(Decimal('1.47'))
        Fraction(147, 100)

        """
        self = super(Fraction, cls).__new__(cls)

        # Single-argument forms: dispatch on the type of 'numerator'.
        if denominator is None:
            if isinstance(numerator, Rational):
                self._numerator = numerator.numerator
                self._denominator = numerator.denominator
                return self

            elif isinstance(numerator, float):
                # Exact conversion from float
                value = Fraction.from_float(numerator)
                self._numerator = value._numerator
                self._denominator = value._denominator
                return self

            elif isinstance(numerator, Decimal):
                value = Fraction.from_decimal(numerator)
                self._numerator = value._numerator
                self._denominator = value._denominator
                return self

            elif isinstance(numerator, basestring):
                # Handle construction from strings.
                m = _RATIONAL_FORMAT.match(numerator)
                if m is None:
                    raise ValueError('Invalid literal for Fraction: %r' %
                                     numerator)
                numerator = int(m.group('num') or '0')
                denom = m.group('denom')
                if denom:
                    # "a/b" form: denominator given explicitly.
                    denominator = int(denom)
                else:
                    # Float-style form: fold the fractional part and the
                    # exponent into an integer numerator/denominator pair.
                    denominator = 1
                    decimal = m.group('decimal')
                    if decimal:
                        scale = 10**len(decimal)
                        numerator = numerator * scale + int(decimal)
                        denominator *= scale
                    exp = m.group('exp')
                    if exp:
                        exp = int(exp)
                        if exp >= 0:
                            numerator *= 10**exp
                        else:
                            denominator *= 10**-exp
                if m.group('sign') == '-':
                    numerator = -numerator

            else:
                raise TypeError("argument should be a string "
                                "or a Rational instance")

        elif (isinstance(numerator, Rational) and
              isinstance(denominator, Rational)):
            # Two-argument form: cross-multiply so both operands become
            # plain integers before normalization below.
            numerator, denominator = (
                numerator.numerator * denominator.denominator,
                denominator.numerator * numerator.denominator
                )
        else:
            raise TypeError("both arguments should be "
                            "Rational instances")

        if denominator == 0:
            raise ZeroDivisionError('Fraction(%s, 0)' % numerator)
        # Store in lowest terms; gcd carries the sign of 'denominator',
        # so the stored denominator always comes out positive.
        g = gcd(numerator, denominator)
        self._numerator = numerator // g
        self._denominator = denominator // g
        return self
    @classmethod
    def from_float(cls, f):
        """Converts a finite float to a rational number, exactly.

        Beware that Fraction.from_float(0.3) != Fraction(3, 10).

        :raises TypeError: for non-float input, or for NaN/infinity
        """
        if isinstance(f, numbers.Integral):
            return cls(f)
        elif not isinstance(f, float):
            raise TypeError("%s.from_float() only takes floats, not %r (%s)" %
                            (cls.__name__, f, type(f).__name__))
        if math.isnan(f) or math.isinf(f):
            raise TypeError("Cannot convert %r to %s." % (f, cls.__name__))
        # float.as_integer_ratio() yields the exact binary value as (num, den).
        return cls(*f.as_integer_ratio())
    @classmethod
    def from_decimal(cls, dec):
        """Converts a finite Decimal instance to a rational number, exactly.

        :raises TypeError: for non-Decimal input, or for NaN/infinity
        """
        from decimal import Decimal
        if isinstance(dec, numbers.Integral):
            dec = Decimal(int(dec))
        elif not isinstance(dec, Decimal):
            raise TypeError(
                "%s.from_decimal() only takes Decimals, not %r (%s)" %
                (cls.__name__, dec, type(dec).__name__))
        if not dec.is_finite():
            # Catches infinities and nans.
            raise TypeError("Cannot convert %s to %s." % (dec, cls.__name__))
        # as_tuple() gives (sign, digit tuple, exponent); rebuild the integer
        # coefficient, then scale by the power of ten from the exponent.
        sign, digits, exp = dec.as_tuple()
        digits = int(''.join(map(str, digits)))
        if sign:
            digits = -digits
        if exp >= 0:
            return cls(digits * 10 ** exp)
        else:
            return cls(digits, 10 ** -exp)
    def limit_denominator(self, max_denominator=1000000):
        """Closest Fraction to self with denominator at most max_denominator.

        >>> Fraction('3.141592653589793').limit_denominator(10)
        Fraction(22, 7)
        >>> Fraction('3.141592653589793').limit_denominator(100)
        Fraction(311, 99)
        >>> Fraction(4321, 8765).limit_denominator(10000)
        Fraction(4321, 8765)

        :param max_denominator: upper bound (>= 1) for the result denominator
        :raises ValueError: if max_denominator < 1
        """
        # Algorithm notes: For any real number x, define a *best upper
        # approximation* to x to be a rational number p/q such that:
        #
        #   (1) p/q >= x, and
        #   (2) if p/q > r/s >= x then s > q, for any rational r/s.
        #
        # Define *best lower approximation* similarly.  Then it can be
        # proved that a rational number is a best upper or lower
        # approximation to x if, and only if, it is a convergent or
        # semiconvergent of the (unique shortest) continued fraction
        # associated to x.
        #
        # To find a best rational approximation with denominator <= M,
        # we find the best upper and lower approximations with
        # denominator <= M and take whichever of these is closer to x.
        # In the event of a tie, the bound with smaller denominator is
        # chosen.  If both denominators are equal (which can happen
        # only when max_denominator == 1 and self is midway between
        # two integers) the lower bound---i.e., the floor of self, is
        # taken.

        if max_denominator < 1:
            raise ValueError("max_denominator should be at least 1")
        if self._denominator <= max_denominator:
            return Fraction(self)

        # Walk the continued-fraction expansion of self, tracking the two
        # most recent convergents p0/q0 and p1/q1, until the next
        # denominator would exceed the limit.
        p0, q0, p1, q1 = 0, 1, 1, 0
        n, d = self._numerator, self._denominator
        while True:
            a = n//d
            q2 = q0+a*q1
            if q2 > max_denominator:
                break
            p0, q0, p1, q1 = p1, q1, p0+a*p1, q2
            n, d = d, n-a*d

        # bound1 is the best semiconvergent still within the limit;
        # bound2 is the last full convergent.  Return whichever is closer
        # (ties go to bound1, the one computed with the larger k).
        k = (max_denominator-q0)//q1
        bound1 = Fraction(p0+k*p1, q0+k*q1)
        bound2 = Fraction(p1, q1)
        if abs(bound2 - self) <= abs(bound1-self):
            return bound2
        else:
            return bound1
    @property
    def numerator(a):
        # Read-only accessor; 'a' (not 'self') follows the stdlib's terse style.
        return a._numerator
    @property
    def denominator(a):
        # Read-only accessor; always positive (normalized in __new__).
        return a._denominator
    def __repr__(self):
        """repr(self): round-trippable 'Fraction(num, den)' form."""
        return ('Fraction(%s, %s)' % (self._numerator, self._denominator))
    def __str__(self):
        """str(self): '7' for whole values, 'num/den' otherwise."""
        if self._denominator == 1:
            return str(self._numerator)
        else:
            return '%s/%s' % (self._numerator, self._denominator)
    def _operator_fallbacks(monomorphic_operator, fallback_operator):
        """Generates forward and reverse operators given a purely-rational
        operator and a function from the operator module.

        Use this like:
            __op__, __rop__ = _operator_fallbacks(just_rational_op, operator.op)

        In general, we want to implement the arithmetic operations so
        that mixed-mode operations either call an implementation whose
        author knew about the types of both arguments, or convert both
        to the nearest built in type and do the operation there. In
        Fraction, that means that we define __add__ and __radd__ as:

            def __add__(self, other):
                # Both types have numerators/denominator attributes,
                # so do the operation directly
                if isinstance(other, (int, long, Fraction)):
                    return Fraction(self.numerator * other.denominator +
                                    other.numerator * self.denominator,
                                    self.denominator * other.denominator)
                # float and complex don't have those operations, but we
                # know about those types, so special case them.
                elif isinstance(other, float):
                    return float(self) + other
                elif isinstance(other, complex):
                    return complex(self) + other
                # Let the other type take over.
                return NotImplemented

            def __radd__(self, other):
                # radd handles more types than add because there's
                # nothing left to fall back to.
                if isinstance(other, Rational):
                    return Fraction(self.numerator * other.denominator +
                                    other.numerator * self.denominator,
                                    self.denominator * other.denominator)
                elif isinstance(other, Real):
                    return float(other) + float(self)
                elif isinstance(other, Complex):
                    return complex(other) + complex(self)
                return NotImplemented

        There are 5 different cases for a mixed-type addition on
        Fraction. I'll refer to all of the above code that doesn't
        refer to Fraction, float, or complex as "boilerplate". 'r'
        will be an instance of Fraction, which is a subtype of
        Rational (r : Fraction <: Rational), and b : B <:
        Complex. The first three involve 'r + b':

            1. If B <: Fraction, int, float, or complex, we handle
               that specially, and all is well.
            2. If Fraction falls back to the boilerplate code, and it
               were to return a value from __add__, we'd miss the
               possibility that B defines a more intelligent __radd__,
               so the boilerplate should return NotImplemented from
               __add__. In particular, we don't handle Rational
               here, even though we could get an exact answer, in case
               the other type wants to do something special.
            3. If B <: Fraction, Python tries B.__radd__ before
               Fraction.__add__. This is ok, because it was
               implemented with knowledge of Fraction, so it can
               handle those instances before delegating to Real or
               Complex.

        The next two situations describe 'b + r'. We assume that b
        didn't know about Fraction in its implementation, and that it
        uses similar boilerplate code:

            4. If B <: Rational, then __radd_ converts both to the
               builtin rational type (hey look, that's us) and
               proceeds.
            5. Otherwise, __radd__ tries to find the nearest common
               base ABC, and fall back to its builtin type. Since this
               class doesn't subclass a concrete type, there's no
               implementation to fall back to, so we need to try as
               hard as possible to return an actual value, or the user
               will get a TypeError.
        """
        # The forward operator: invoked for `Fraction <op> other`.
        def forward(a, b):
            if isinstance(b, (int, long, Fraction)):
                # Exact rational arithmetic (Python 2: int and long).
                return monomorphic_operator(a, b)
            elif isinstance(b, float):
                return fallback_operator(float(a), b)
            elif isinstance(b, complex):
                return fallback_operator(complex(a), b)
            else:
                # Unknown type: give the other operand's reflected op a chance.
                return NotImplemented
        # Masquerade as the real dunder for introspection/help().
        forward.__name__ = '__' + fallback_operator.__name__ + '__'
        forward.__doc__ = monomorphic_operator.__doc__

        # The reverse operator: invoked for `other <op> Fraction`.
        # Note the swapped parameter order: `a` is the *left* operand.
        def reverse(b, a):
            if isinstance(a, Rational):
                # Includes ints.
                return monomorphic_operator(a, b)
            elif isinstance(a, numbers.Real):
                return fallback_operator(float(a), float(b))
            elif isinstance(a, numbers.Complex):
                return fallback_operator(complex(a), complex(b))
            else:
                return NotImplemented
        reverse.__name__ = '__r' + fallback_operator.__name__ + '__'
        reverse.__doc__ = monomorphic_operator.__doc__

        return forward, reverse
def _add(a, b):
"""a + b"""
return Fraction(a.numerator * b.denominator +
b.numerator * a.denominator,
a.denominator * b.denominator)
__add__, __radd__ = _operator_fallbacks(_add, operator.add)
def _sub(a, b):
"""a - b"""
return Fraction(a.numerator * b.denominator -
b.numerator * a.denominator,
a.denominator * b.denominator)
__sub__, __rsub__ = _operator_fallbacks(_sub, operator.sub)
def _mul(a, b):
"""a * b"""
return Fraction(a.numerator * b.numerator, a.denominator * b.denominator)
__mul__, __rmul__ = _operator_fallbacks(_mul, operator.mul)
def _div(a, b):
"""a / b"""
return Fraction(a.numerator * b.denominator,
a.denominator * b.numerator)
__truediv__, __rtruediv__ = _operator_fallbacks(_div, operator.truediv)
__div__, __rdiv__ = _operator_fallbacks(_div, operator.div)
def __floordiv__(a, b):
"""a // b"""
# Will be math.floor(a / b) in 3.0.
div = a / b
if isinstance(div, Rational):
# trunc(math.floor(div)) doesn't work if the rational is
# more precise than a float because the intermediate
# rounding may cross an integer boundary.
return div.numerator // div.denominator
else:
return math.floor(div)
def __rfloordiv__(b, a):
"""a // b"""
# Will be math.floor(a / b) in 3.0.
div = a / b
if isinstance(div, Rational):
# trunc(math.floor(div)) doesn't work if the rational is
# more precise than a float because the intermediate
# rounding may cross an integer boundary.
return div.numerator // div.denominator
else:
return math.floor(div)
def __mod__(a, b):
"""a % b"""
div = a // b
return a - b * div
def __rmod__(b, a):
"""a % b"""
div = a // b
return a - b * div
def __pow__(a, b):
"""a ** b
If b is not an integer, the result will be a float or complex
since roots are generally irrational. If b is an integer, the
result will be rational.
"""
if isinstance(b, Rational):
if b.denominator == 1:
power = b.numerator
if power >= 0:
return Fraction(a._numerator ** power,
a._denominator ** power)
else:
return Fraction(a._denominator ** -power,
a._numerator ** -power)
else:
# A fractional power will generally produce an
# irrational number.
return float(a) ** float(b)
else:
return float(a) ** b
    def __rpow__(b, a):
        """a ** b

        Reflected power: `b` is this Fraction (the exponent), `a` is the
        other operand (the base).
        """
        if b._denominator == 1 and b._numerator >= 0:
            # If a is an int, keep it that way if possible.
            return a ** b._numerator
        if isinstance(a, Rational):
            # Exact rational base: normalize to Fraction and reuse __pow__.
            return Fraction(a.numerator, a.denominator) ** b
        if b._denominator == 1:
            # Integral (necessarily negative here) exponent, non-rational base.
            return a ** b._numerator
        # Fractional exponent: fall back to float arithmetic.
        return a ** float(b)
def __pos__(a):
"""+a: Coerces a subclass instance to Fraction"""
return Fraction(a._numerator, a._denominator)
def __neg__(a):
"""-a"""
return Fraction(-a._numerator, a._denominator)
def __abs__(a):
"""abs(a)"""
return Fraction(abs(a._numerator), a._denominator)
def __trunc__(a):
"""trunc(a)"""
if a._numerator < 0:
return -(-a._numerator // a._denominator)
else:
return a._numerator // a._denominator
    def __hash__(self):
        """hash(self)

        Tricky because values that are exactly representable as a
        float must have the same hash as that float.
        """
        # XXX since this method is expensive, consider caching the result
        if self._denominator == 1:
            # Get integers right: hash(Fraction(n)) must equal hash(n).
            return hash(self._numerator)
        # Expensive check, but definitely correct.
        if self == float(self):
            # Exactly representable as a float, so it must hash like one.
            return hash(float(self))
        else:
            # Use tuple's hash to avoid a high collision rate on
            # simple fractions.
            return hash((self._numerator, self._denominator))
    def __eq__(a, b):
        """a == b"""
        if isinstance(b, Rational):
            # Both sides are exact and stored in lowest terms, so
            # component-wise comparison is sufficient.
            return (a._numerator == b.numerator and
                    a._denominator == b.denominator)
        if isinstance(b, numbers.Complex) and b.imag == 0:
            # A complex number with zero imaginary part compares as its
            # real part.
            b = b.real
        if isinstance(b, float):
            if math.isnan(b) or math.isinf(b):
                # comparisons with an infinity or nan should behave in
                # the same way for any finite a, so treat a as zero.
                return 0.0 == b
            else:
                # Convert the float to an exact rational and compare.
                return a == a.from_float(b)
        else:
            # Since a doesn't know how to compare with b, let's give b
            # a chance to compare itself with a.
            return NotImplemented
    def _richcmp(self, other, op):
        """Helper for comparison operators, for internal use only.

        Implement comparison between a Rational instance `self`, and
        either another Rational instance or a float `other`. If
        `other` is not a Rational instance or a float, return
        NotImplemented. `op` should be one of the six standard
        comparison operators.
        """
        # convert other to a Rational instance where reasonable.
        if isinstance(other, Rational):
            # a/b <op> c/d  <=>  a*d <op> b*c (denominators are positive,
            # so cross-multiplying preserves the ordering).
            return op(self._numerator * other.denominator,
                      self._denominator * other.numerator)
        # comparisons with complex should raise a TypeError, for consistency
        # with int<->complex, float<->complex, and complex<->complex comparisons.
        if isinstance(other, complex):
            raise TypeError("no ordering relation is defined for complex numbers")
        if isinstance(other, float):
            if math.isnan(other) or math.isinf(other):
                # Any finite rational compares with inf/nan as 0.0 does.
                return op(0.0, other)
            else:
                # Exact comparison via the rational equivalent of the float.
                return op(self, self.from_float(other))
        else:
            return NotImplemented
def __lt__(a, b):
"""a < b"""
return a._richcmp(b, operator.lt)
def __gt__(a, b):
"""a > b"""
return a._richcmp(b, operator.gt)
def __le__(a, b):
"""a <= b"""
return a._richcmp(b, operator.le)
def __ge__(a, b):
"""a >= b"""
return a._richcmp(b, operator.ge)
def __nonzero__(a):
"""a != 0"""
return a._numerator != 0
# support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
def __copy__(self):
if type(self) == Fraction:
return self # I'm immutable; therefore I am my own clone
return self.__class__(self._numerator, self._denominator)
def __deepcopy__(self, memo):
if type(self) == Fraction:
return self # My components are also immutable
return self.__class__(self._numerator, self._denominator)
| epl-1.0 |
labcodes/django | tests/annotations/models.py | 90 | 2591 | from django.db import models
class Author(models.Model):
    # Author with a self-referential many-to-many for friendships.
    name = models.CharField(max_length=100)
    age = models.IntegerField()
    friends = models.ManyToManyField('self', blank=True)

    def __str__(self):
        return self.name
class Publisher(models.Model):
    # Publisher with an award count used by aggregation/annotation tests.
    name = models.CharField(max_length=255)
    num_awards = models.IntegerField()

    def __str__(self):
        return self.name
class Book(models.Model):
    # Book linked to many authors, one contact author, and one publisher.
    isbn = models.CharField(max_length=9)
    name = models.CharField(max_length=255)
    pages = models.IntegerField()
    rating = models.FloatField()
    price = models.DecimalField(decimal_places=2, max_digits=6)
    authors = models.ManyToManyField(Author)
    # 'contact' is a second FK-like relation to Author, so it needs its own
    # reverse accessor name to avoid clashing with 'authors'.
    contact = models.ForeignKey(Author, models.CASCADE, related_name='book_contact_set')
    publisher = models.ForeignKey(Publisher, models.CASCADE)
    pubdate = models.DateField()

    def __str__(self):
        return self.name
class Store(models.Model):
    # Store stocking many books; includes datetime and time fields for
    # annotation tests over temporal types.
    name = models.CharField(max_length=255)
    books = models.ManyToManyField(Book)
    original_opening = models.DateTimeField()
    friday_night_closing = models.TimeField()

    def __str__(self):
        return self.name
class DepartmentStore(Store):
    # Multi-table-inheritance child of Store.
    chain = models.CharField(max_length=255)

    def __str__(self):
        return '%s - %s ' % (self.chain, self.name)
class Employee(models.Model):
    # The order of these fields matter, do not change. Certain backends
    # rely on field ordering to perform database conversions, and this
    # model helps to test that.
    first_name = models.CharField(max_length=20)
    manager = models.BooleanField(default=False)
    last_name = models.CharField(max_length=20)
    store = models.ForeignKey(Store, models.CASCADE)
    age = models.IntegerField()
    salary = models.DecimalField(max_digits=8, decimal_places=2)

    def __str__(self):
        return '%s %s' % (self.first_name, self.last_name)
class Company(models.Model):
    # Company with several optional text fields (all nullable/blankable).
    name = models.CharField(max_length=200)
    motto = models.CharField(max_length=200, null=True, blank=True)
    ticker_name = models.CharField(max_length=10, null=True, blank=True)
    description = models.CharField(max_length=200, null=True, blank=True)

    def __str__(self):
        return 'Company(name=%s, motto=%s, ticker_name=%s, description=%s)' % (
            self.name, self.motto, self.ticker_name, self.description,
        )
class Ticket(models.Model):
    # Ticket with a DurationField for interval-arithmetic annotation tests.
    active_at = models.DateTimeField()
    duration = models.DurationField()

    def __str__(self):
        return '{} - {}'.format(self.active_at, self.duration)
| bsd-3-clause |
sexroute/commandergenius | project/jni/python/src/Lib/test/test_filecmp.py | 77 | 5389 |
import os, filecmp, shutil, tempfile, shutil
import unittest
from test import test_support
class FileCompareTestCase(unittest.TestCase):
    """Tests for filecmp.cmp() on individual files.

    setUp creates three files: two with identical contents and one with
    an extra trailing line; tearDown removes them.
    """

    def setUp(self):
        self.name = test_support.TESTFN
        self.name_same = test_support.TESTFN + '-same'
        self.name_diff = test_support.TESTFN + '-diff'
        data = 'Contents of file go here.\n'
        for name in [self.name, self.name_same, self.name_diff]:
            # try/finally ensures the handle is closed even if write() fails.
            output = open(name, 'w')
            try:
                output.write(data)
            finally:
                output.close()
        output = open(self.name_diff, 'a+')
        try:
            output.write('An extra line.\n')
        finally:
            output.close()
        self.dir = tempfile.gettempdir()

    def tearDown(self):
        os.unlink(self.name)
        os.unlink(self.name_same)
        os.unlink(self.name_diff)

    def test_matching(self):
        # assertTrue/assertFalse replace the deprecated failUnless/failIf.
        self.assertTrue(filecmp.cmp(self.name, self.name_same),
                        "Comparing file to itself fails")
        self.assertTrue(filecmp.cmp(self.name, self.name_same, shallow=False),
                        "Comparing file to itself fails")
        self.assertTrue(filecmp.cmp(self.name, self.name, shallow=False),
                        "Comparing file to identical file fails")
        self.assertTrue(filecmp.cmp(self.name, self.name),
                        "Comparing file to identical file fails")

    def test_different(self):
        self.assertFalse(filecmp.cmp(self.name, self.name_diff),
                         "Mismatched files compare as equal")
        self.assertFalse(filecmp.cmp(self.name, self.dir),
                         "File and directory compare as equal")
class DirCompareTestCase(unittest.TestCase):
    """Tests for filecmp.cmpfiles() and filecmp.dircmp.

    setUp creates three directories: 'dir' and 'dir-same' with one
    identical file each (named 'FiLe' in dir-same on case-insensitive
    filesystems), and 'dir-diff' with an extra file2.
    """

    def setUp(self):
        tmpdir = tempfile.gettempdir()
        self.dir = os.path.join(tmpdir, 'dir')
        self.dir_same = os.path.join(tmpdir, 'dir-same')
        self.dir_diff = os.path.join(tmpdir, 'dir-diff')
        self.caseinsensitive = os.path.normcase('A') == os.path.normcase('a')
        data = 'Contents of file go here.\n'
        for dir in [self.dir, self.dir_same, self.dir_diff]:
            shutil.rmtree(dir, True)
            os.mkdir(dir)
            if self.caseinsensitive and dir is self.dir_same:
                fn = 'FiLe'  # Verify case-insensitive comparison
            else:
                fn = 'file'
            output = open(os.path.join(dir, fn), 'w')
            try:
                output.write(data)
            finally:
                output.close()
        output = open(os.path.join(self.dir_diff, 'file2'), 'w')
        try:
            output.write('An extra file.\n')
        finally:
            output.close()

    def tearDown(self):
        shutil.rmtree(self.dir)
        shutil.rmtree(self.dir_same)
        shutil.rmtree(self.dir_diff)

    def test_cmpfiles(self):
        # assertEqual replaces failUnless(x == y) for better diagnostics.
        self.assertEqual(filecmp.cmpfiles(self.dir, self.dir, ['file']),
                         (['file'], [], []),
                         "Comparing directory to itself fails")
        self.assertEqual(filecmp.cmpfiles(self.dir, self.dir_same, ['file']),
                         (['file'], [], []),
                         "Comparing directory to same fails")
        # Try it with shallow=False
        self.assertEqual(filecmp.cmpfiles(self.dir, self.dir, ['file'],
                                          shallow=False),
                         (['file'], [], []),
                         "Comparing directory to itself fails")
        # BUG FIX: the original asserted the truth of the returned
        # 3-tuple, which is always truthy, so the assertion could never
        # fail. Compare against the expected result instead.
        self.assertEqual(filecmp.cmpfiles(self.dir, self.dir_same, ['file'],
                                          shallow=False),
                         (['file'], [], []),
                         "Comparing directory to same fails")
        # Add different file2
        output = open(os.path.join(self.dir, 'file2'), 'w')
        try:
            output.write('Different contents.\n')
        finally:
            output.close()
        # file2 is absent from dir_same, so cmpfiles puts it in the error
        # list; the result must therefore differ from this tuple.
        self.assertFalse(filecmp.cmpfiles(self.dir, self.dir_same,
                                          ['file', 'file2']) ==
                         (['file'], ['file2'], []),
                         "Comparing mismatched directories fails")

    def test_dircmp(self):
        # Check attributes for comparison of two identical directories
        d = filecmp.dircmp(self.dir, self.dir_same)
        if self.caseinsensitive:
            self.assertEqual([d.left_list, d.right_list], [['file'], ['FiLe']])
        else:
            self.assertEqual([d.left_list, d.right_list], [['file'], ['file']])
        self.assertEqual(d.common, ['file'])
        self.assertTrue(d.left_only == d.right_only == [])
        self.assertEqual(d.same_files, ['file'])
        self.assertEqual(d.diff_files, [])
        # Check attributes for comparison of two different directories
        d = filecmp.dircmp(self.dir, self.dir_diff)
        self.assertEqual(d.left_list, ['file'])
        self.assertEqual(d.right_list, ['file', 'file2'])
        self.assertEqual(d.common, ['file'])
        self.assertEqual(d.left_only, [])
        self.assertEqual(d.right_only, ['file2'])
        self.assertEqual(d.same_files, ['file'])
        self.assertEqual(d.diff_files, [])
        # Add different file2
        output = open(os.path.join(self.dir, 'file2'), 'w')
        try:
            output.write('Different contents.\n')
        finally:
            output.close()
        d = filecmp.dircmp(self.dir, self.dir_diff)
        self.assertEqual(d.same_files, ['file'])
        self.assertEqual(d.diff_files, ['file2'])
def test_main():
    """Entry point used by Python's regression-test driver."""
    test_support.run_unittest(FileCompareTestCase, DirCompareTestCase)

if __name__ == "__main__":
    test_main()
| lgpl-2.1 |
code-for-india/sahana_shelter_worldbank | modules/tests/runner.py | 28 | 1592 | from tests.HTMLTestRunner import HTMLTestRunner
class EdenHTMLTestRunner(HTMLTestRunner):
    """HTMLTestRunner subclass that decodes captured byte output as
    latin-1 before embedding it in the HTML report.
    """

    def _generate_report_test(self, rows, cid, tid, n, t, o, e):
        # Render one test result row and append it to `rows`.
        # n is the result code (0 used as "pass" below; 1/2 styled as
        # fail/error); t is the test case; o/e are captured stdout/stderr.
        # e.g. "pt1.1", "ft1.1", etc
        has_output = bool(o or e)
        # Row id: "p" prefix for n == 0, "f" otherwise.
        tid = (n == 0 and "p" or "f") + "t%s.%s" % (cid+1, tid+1)
        name = t.id().split(".")[-1]
        doc = t.shortDescription() or ""
        desc = doc and ("%s: %s" % (name, doc)) or name
        tmpl = has_output and self.REPORT_TEST_WITH_OUTPUT_TMPL or self.REPORT_TEST_NO_OUTPUT_TMPL
        # o and e should be byte string because they are collected from stdout and stderr?
        if isinstance(o, str):
            # TODO: some problem with "string_escape": it escapes \n and messes up formating
            # uo = unicode(o.encode("string_escape"))
            uo = o.decode("latin-1")
        else:
            uo = o
        if isinstance(e, str):
            # TODO: some problem with "string_escape": it escapes \n and messes up formating
            # ue = unicode(e.encode("string_escape"))
            ue = e.decode("latin-1")
        else:
            ue = e
        # Inline script block holding the test's combined output.
        script = self.REPORT_TEST_OUTPUT_TMPL % dict(
            id = tid,
            output = uo + ue,
        )
        row = tmpl % dict(
            tid = tid,
            # Passed rows start hidden; CSS class picks fail/error styling.
            Class = (n == 0 and "hiddenRow" or "none"),
            style = n == 2 and "errorCase" or (n == 1 and "failCase" or "none"),
            desc = desc,
            script = script,
            status = self.STATUS[n],
        )
        rows.append(row)
        if not has_output:
            return
| mit |
renatogames2/namebench | nb_third_party/dns/zone.py | 215 | 31930 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Zones."""
from __future__ import generators
import sys
import dns.exception
import dns.name
import dns.node
import dns.rdataclass
import dns.rdatatype
import dns.rdata
import dns.rrset
import dns.tokenizer
import dns.ttl
class BadZone(dns.exception.DNSException):
    """The zone is malformed."""
class NoSOA(BadZone):
    """The zone has no SOA RR at its origin."""
class NoNS(BadZone):
    """The zone has no NS RRset at its origin."""
class UnknownOrigin(BadZone):
    """The zone's origin is unknown."""
class Zone(object):
    """A DNS zone.

    A Zone is a mapping from names to nodes. The zone object may be
    treated like a Python dictionary, e.g. zone[name] will retrieve
    the node associated with that name. The I{name} may be a
    dns.name.Name object, or it may be a string. In the either case,
    if the name is relative it is treated as relative to the origin of
    the zone.

    @ivar rdclass: The zone's rdata class; the default is class IN.
    @type rdclass: int
    @ivar origin: The origin of the zone.
    @type origin: dns.name.Name object
    @ivar nodes: A dictionary mapping the names of nodes in the zone to the
    nodes themselves.
    @type nodes: dict
    @ivar relativize: should names in the zone be relativized?
    @type relativize: bool
    @cvar node_factory: the factory used to create a new node
    @type node_factory: class or callable
    """

    node_factory = dns.node.Node

    __slots__ = ['rdclass', 'origin', 'nodes', 'relativize']

    def __init__(self, origin, rdclass=dns.rdataclass.IN, relativize=True):
        """Initialize a zone object.

        @param origin: The origin of the zone.
        @type origin: dns.name.Name object
        @param rdclass: The zone's rdata class; the default is class IN.
        @type rdclass: int"""
        self.rdclass = rdclass
        self.origin = origin
        self.nodes = {}
        self.relativize = relativize

    def __eq__(self, other):
        """Two zones are equal if they have the same origin, class, and
        nodes.
        @rtype: bool
        """
        if not isinstance(other, Zone):
            return False
        if self.rdclass != other.rdclass or \
           self.origin != other.origin or \
           self.nodes != other.nodes:
            return False
        return True

    def __ne__(self, other):
        """Are two zones not equal?
        @rtype: bool
        """
        return not self.__eq__(other)

    def _validate_name(self, name):
        # Normalize to a dns.name.Name relative to (or contained in) the
        # zone's origin; raises KeyError for anything outside the zone.
        if isinstance(name, (str, unicode)):
            name = dns.name.from_text(name, None)
        elif not isinstance(name, dns.name.Name):
            raise KeyError("name parameter must be convertable to a DNS name")
        if name.is_absolute():
            if not name.is_subdomain(self.origin):
                raise KeyError("name parameter must be a subdomain of the zone origin")
            if self.relativize:
                name = name.relativize(self.origin)
        return name

    def __getitem__(self, key):
        key = self._validate_name(key)
        return self.nodes[key]

    def __setitem__(self, key, value):
        key = self._validate_name(key)
        self.nodes[key] = value

    def __delitem__(self, key):
        key = self._validate_name(key)
        del self.nodes[key]

    def __iter__(self):
        return self.nodes.iterkeys()

    def iterkeys(self):
        return self.nodes.iterkeys()

    def keys(self):
        return self.nodes.keys()

    def itervalues(self):
        return self.nodes.itervalues()

    def values(self):
        return self.nodes.values()

    def iteritems(self):
        return self.nodes.iteritems()

    def items(self):
        return self.nodes.items()

    def get(self, key):
        key = self._validate_name(key)
        return self.nodes.get(key)

    def __contains__(self, other):
        return other in self.nodes

    def find_node(self, name, create=False):
        """Find a node in the zone, possibly creating it.

        @param name: the name of the node to find
        @type name: dns.name.Name object or string
        @param create: should the node be created if it doesn't exist?
        @type create: bool
        @raises KeyError: the name is not known and create was not specified.
        @rtype: dns.node.Node object
        """
        name = self._validate_name(name)
        node = self.nodes.get(name)
        if node is None:
            if not create:
                raise KeyError
            node = self.node_factory()
            self.nodes[name] = node
        return node

    def get_node(self, name, create=False):
        """Get a node in the zone, possibly creating it.

        This method is like L{find_node}, except it returns None instead
        of raising an exception if the node does not exist and creation
        has not been requested.

        @param name: the name of the node to find
        @type name: dns.name.Name object or string
        @param create: should the node be created if it doesn't exist?
        @type create: bool
        @rtype: dns.node.Node object or None
        """
        try:
            node = self.find_node(name, create)
        except KeyError:
            node = None
        return node

    def delete_node(self, name):
        """Delete the specified node if it exists.

        It is not an error if the node does not exist.
        """
        name = self._validate_name(name)
        # 'in' replaces dict.has_key(), which is deprecated and removed
        # in Python 3.
        if name in self.nodes:
            del self.nodes[name]

    def find_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
                      create=False):
        """Look for rdata with the specified name and type in the zone,
        and return an rdataset encapsulating it.

        The I{name}, I{rdtype}, and I{covers} parameters may be
        strings, in which case they will be converted to their proper
        type.

        The rdataset returned is not a copy; changes to it will change
        the zone.

        KeyError is raised if the name or type are not found.
        Use L{get_rdataset} if you want to have None returned instead.

        @param name: the owner name to look for
        @type name: DNS.name.Name object or string
        @param rdtype: the rdata type desired
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        @param create: should the node and rdataset be created if they do not
        exist?
        @type create: bool
        @raises KeyError: the node or rdata could not be found
        @rtype: dns.rrset.RRset object
        """
        name = self._validate_name(name)
        if isinstance(rdtype, str):
            rdtype = dns.rdatatype.from_text(rdtype)
        if isinstance(covers, str):
            covers = dns.rdatatype.from_text(covers)
        node = self.find_node(name, create)
        return node.find_rdataset(self.rdclass, rdtype, covers, create)

    def get_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
                     create=False):
        """Look for rdata with the specified name and type in the zone,
        and return an rdataset encapsulating it.

        The I{name}, I{rdtype}, and I{covers} parameters may be
        strings, in which case they will be converted to their proper
        type.

        The rdataset returned is not a copy; changes to it will change
        the zone.

        None is returned if the name or type are not found.
        Use L{find_rdataset} if you want to have KeyError raised instead.

        @param name: the owner name to look for
        @type name: DNS.name.Name object or string
        @param rdtype: the rdata type desired
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        @param create: should the node and rdataset be created if they do not
        exist?
        @type create: bool
        @rtype: dns.rrset.RRset object
        """
        try:
            rdataset = self.find_rdataset(name, rdtype, covers, create)
        except KeyError:
            rdataset = None
        return rdataset

    def delete_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE):
        """Delete the rdataset matching I{rdtype} and I{covers}, if it
        exists at the node specified by I{name}.

        The I{name}, I{rdtype}, and I{covers} parameters may be
        strings, in which case they will be converted to their proper
        type.

        It is not an error if the node does not exist, or if there is no
        matching rdataset at the node.

        If the node has no rdatasets after the deletion, it will itself
        be deleted.

        @param name: the owner name to look for
        @type name: DNS.name.Name object or string
        @param rdtype: the rdata type desired
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        """
        name = self._validate_name(name)
        if isinstance(rdtype, str):
            rdtype = dns.rdatatype.from_text(rdtype)
        if isinstance(covers, str):
            covers = dns.rdatatype.from_text(covers)
        node = self.get_node(name)
        if node is not None:
            node.delete_rdataset(self.rdclass, rdtype, covers)
            if len(node) == 0:
                self.delete_node(name)

    def replace_rdataset(self, name, replacement):
        """Replace an rdataset at name.

        It is not an error if there is no rdataset matching I{replacement}.

        Ownership of the I{replacement} object is transferred to the zone;
        in other words, this method does not store a copy of I{replacement}
        at the node, it stores I{replacement} itself.

        If the I{name} node does not exist, it is created.

        @param name: the owner name
        @type name: DNS.name.Name object or string
        @param replacement: the replacement rdataset
        @type replacement: dns.rdataset.Rdataset
        """
        if replacement.rdclass != self.rdclass:
            raise ValueError('replacement.rdclass != zone.rdclass')
        node = self.find_node(name, True)
        node.replace_rdataset(replacement)

    def find_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
        """Look for rdata with the specified name and type in the zone,
        and return an RRset encapsulating it.

        The I{name}, I{rdtype}, and I{covers} parameters may be
        strings, in which case they will be converted to their proper
        type.

        This method is less efficient than the similar
        L{find_rdataset} because it creates an RRset instead of
        returning the matching rdataset. It may be more convenient
        for some uses since it returns an object which binds the owner
        name to the rdata.

        This method may not be used to create new nodes or rdatasets;
        use L{find_rdataset} instead.

        KeyError is raised if the name or type are not found.
        Use L{get_rrset} if you want to have None returned instead.

        @param name: the owner name to look for
        @type name: DNS.name.Name object or string
        @param rdtype: the rdata type desired
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        @raises KeyError: the node or rdata could not be found
        @rtype: dns.rrset.RRset object
        """
        name = self._validate_name(name)
        if isinstance(rdtype, str):
            rdtype = dns.rdatatype.from_text(rdtype)
        if isinstance(covers, str):
            covers = dns.rdatatype.from_text(covers)
        rdataset = self.nodes[name].find_rdataset(self.rdclass, rdtype, covers)
        rrset = dns.rrset.RRset(name, self.rdclass, rdtype, covers)
        rrset.update(rdataset)
        return rrset

    def get_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
        """Look for rdata with the specified name and type in the zone,
        and return an RRset encapsulating it.

        The I{name}, I{rdtype}, and I{covers} parameters may be
        strings, in which case they will be converted to their proper
        type.

        This method is less efficient than the similar L{get_rdataset}
        because it creates an RRset instead of returning the matching
        rdataset. It may be more convenient for some uses since it
        returns an object which binds the owner name to the rdata.

        This method may not be used to create new nodes or rdatasets;
        use L{find_rdataset} instead.

        None is returned if the name or type are not found.
        Use L{find_rrset} if you want to have KeyError raised instead.

        @param name: the owner name to look for
        @type name: DNS.name.Name object or string
        @param rdtype: the rdata type desired
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        @rtype: dns.rrset.RRset object
        """
        try:
            rrset = self.find_rrset(name, rdtype, covers)
        except KeyError:
            rrset = None
        return rrset

    def iterate_rdatasets(self, rdtype=dns.rdatatype.ANY,
                          covers=dns.rdatatype.NONE):
        """Return a generator which yields (name, rdataset) tuples for
        all rdatasets in the zone which have the specified I{rdtype}
        and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default,
        then all rdatasets will be matched.

        @param rdtype: int or string
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        """
        if isinstance(rdtype, str):
            rdtype = dns.rdatatype.from_text(rdtype)
        if isinstance(covers, str):
            covers = dns.rdatatype.from_text(covers)
        for (name, node) in self.iteritems():
            for rds in node:
                if rdtype == dns.rdatatype.ANY or \
                   (rds.rdtype == rdtype and rds.covers == covers):
                    yield (name, rds)

    def iterate_rdatas(self, rdtype=dns.rdatatype.ANY,
                       covers=dns.rdatatype.NONE):
        """Return a generator which yields (name, ttl, rdata) tuples for
        all rdatas in the zone which have the specified I{rdtype}
        and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default,
        then all rdatas will be matched.

        @param rdtype: int or string
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        """
        if isinstance(rdtype, str):
            rdtype = dns.rdatatype.from_text(rdtype)
        if isinstance(covers, str):
            covers = dns.rdatatype.from_text(covers)
        for (name, node) in self.iteritems():
            for rds in node:
                if rdtype == dns.rdatatype.ANY or \
                   (rds.rdtype == rdtype and rds.covers == covers):
                    for rdata in rds:
                        yield (name, rds.ttl, rdata)

    def to_file(self, f, sorted=True, relativize=True, nl=None):
        """Write a zone to a file.

        @param f: file or string. If I{f} is a string, it is treated
        as the name of a file to open.
        @param sorted: if True, the file will be written with the
        names sorted in DNSSEC order from least to greatest. Otherwise
        the names will be written in whatever order they happen to have
        in the zone's dictionary.
        @param relativize: if True, domain names in the output will be
        relativized to the zone's origin (if possible).
        @type relativize: bool
        @param nl: The end of line string. If not specified, the
        output will use the platform's native end-of-line marker (i.e.
        LF on POSIX, CRLF on Windows, CR on Macintosh).
        @type nl: string or None
        """
        if sys.hexversion >= 0x02030000:
            # allow Unicode filenames
            str_type = basestring
        else:
            str_type = str
        if nl is None:
            opts = 'w'
        else:
            opts = 'wb'
        if isinstance(f, str_type):
            f = file(f, opts)
            want_close = True
        else:
            want_close = False
        try:
            if sorted:
                names = self.keys()
                names.sort()
            else:
                names = self.iterkeys()
            for n in names:
                l = self[n].to_text(n, origin=self.origin,
                                    relativize=relativize)
                if nl is None:
                    print >> f, l
                else:
                    f.write(l)
                    f.write(nl)
        finally:
            # Only close handles we opened ourselves.
            if want_close:
                f.close()

    def check_origin(self):
        """Do some simple checking of the zone's origin.

        @raises dns.zone.NoSOA: there is no SOA RR
        @raises dns.zone.NoNS: there is no NS RRset
        @raises KeyError: there is no origin node
        """
        if self.relativize:
            name = dns.name.empty
        else:
            name = self.origin
        if self.get_rdataset(name, dns.rdatatype.SOA) is None:
            raise NoSOA
        if self.get_rdataset(name, dns.rdatatype.NS) is None:
            raise NoNS
class _MasterReader(object):
    """Read a DNS master file
    @ivar tok: The tokenizer
    @type tok: dns.tokenizer.Tokenizer object
    @ivar ttl: The default TTL
    @type ttl: int
    @ivar last_name: The last name read
    @type last_name: dns.name.Name object
    @ivar current_origin: The current origin
    @type current_origin: dns.name.Name object
    @ivar relativize: should names in the zone be relativized?
    @type relativize: bool
    @ivar zone: the zone
    @type zone: dns.zone.Zone object
    @ivar saved_state: saved reader state (used when processing $INCLUDE)
    @type saved_state: list of (tokenizer, current_origin, last_name, file)
    tuples.
    @ivar current_file: the file object of the $INCLUDed file being parsed
    (None if no $INCLUDE is active).
    @ivar allow_include: is $INCLUDE allowed?
    @type allow_include: bool
    @ivar check_origin: should sanity checks of the origin node be done?
    The default is True.
    @type check_origin: bool
    """

    def __init__(self, tok, origin, rdclass, relativize, zone_factory=Zone,
                 allow_include=False, check_origin=True):
        # The origin may be given as text (Python 2 str or unicode) or as
        # an already-built dns.name.Name object.
        if isinstance(origin, (str, unicode)):
            origin = dns.name.from_text(origin)
        self.tok = tok
        self.current_origin = origin
        self.relativize = relativize
        # Default TTL, until a $TTL directive or explicit per-RR TTL is seen.
        self.ttl = 0
        self.last_name = None
        self.zone = zone_factory(origin, rdclass, relativize=relativize)
        self.saved_state = []
        self.current_file = None
        self.allow_include = allow_include
        self.check_origin = check_origin

    def _eat_line(self):
        # Discard tokens up to and including the next end-of-line (or EOF).
        while 1:
            token = self.tok.get()
            if token.is_eol_or_eof():
                break

    def _rr_line(self):
        """Process one line from a DNS master file."""
        # Name
        if self.current_origin is None:
            raise UnknownOrigin
        token = self.tok.get(want_leading = True)
        if not token.is_whitespace():
            self.last_name = dns.name.from_text(token.value, self.current_origin)
        else:
            # Leading whitespace means "reuse the previous owner name".
            token = self.tok.get()
            if token.is_eol_or_eof():
                # treat leading WS followed by EOL/EOF as if they were EOL/EOF.
                return
            self.tok.unget(token)
        name = self.last_name
        if not name.is_subdomain(self.zone.origin):
            # Out-of-zone data is silently skipped.
            self._eat_line()
            return
        if self.relativize:
            name = name.relativize(self.zone.origin)
        token = self.tok.get()
        if not token.is_identifier():
            raise dns.exception.SyntaxError
        # TTL
        try:
            ttl = dns.ttl.from_text(token.value)
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
        except dns.ttl.BadTTL:
            # No explicit TTL on this RR; use the current default.
            ttl = self.ttl
        # Class
        try:
            rdclass = dns.rdataclass.from_text(token.value)
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
        except dns.exception.SyntaxError:
            raise dns.exception.SyntaxError
        except:
            # The token was not a class, so it must be the rdtype; fall
            # back to the zone's class.
            rdclass = self.zone.rdclass
        if rdclass != self.zone.rdclass:
            raise dns.exception.SyntaxError("RR class is not zone's class")
        # Type
        try:
            rdtype = dns.rdatatype.from_text(token.value)
        except:
            raise dns.exception.SyntaxError("unknown rdatatype '%s'" % token.value)
        # Get (or create) the node for this owner name.
        n = self.zone.nodes.get(name)
        if n is None:
            n = self.zone.node_factory()
            self.zone.nodes[name] = n
        try:
            rd = dns.rdata.from_text(rdclass, rdtype, self.tok,
                                     self.current_origin, False)
        except dns.exception.SyntaxError:
            # Catch and reraise.
            (ty, va) = sys.exc_info()[:2]
            raise va
        except:
            # All exceptions that occur in the processing of rdata
            # are treated as syntax errors. This is not strictly
            # correct, but it is correct almost all of the time.
            # We convert them to syntax errors so that we can emit
            # helpful filename:line info.
            (ty, va) = sys.exc_info()[:2]
            raise dns.exception.SyntaxError("caught exception %s: %s" % (str(ty), str(va)))
        rd.choose_relativity(self.zone.origin, self.relativize)
        covers = rd.covers()
        rds = n.find_rdataset(rdclass, rdtype, covers, True)
        rds.add(rd, ttl)

    def read(self):
        """Read a DNS master file and build a zone object.
        @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
        @raises dns.zone.NoNS: No NS RRset was found at the zone origin
        """
        try:
            while 1:
                token = self.tok.get(True, True).unescape()
                if token.is_eof():
                    if not self.current_file is None:
                        self.current_file.close()
                    if len(self.saved_state) > 0:
                        # EOF of an $INCLUDEd file: pop back to the state
                        # of the including file and keep reading it.
                        (self.tok,
                         self.current_origin,
                         self.last_name,
                         self.current_file,
                         self.ttl) = self.saved_state.pop(-1)
                        continue
                    break
                elif token.is_eol():
                    continue
                elif token.is_comment():
                    self.tok.get_eol()
                    continue
                elif token.value[0] == '$':
                    # Master file directive ($TTL, $ORIGIN, $INCLUDE).
                    u = token.value.upper()
                    if u == '$TTL':
                        token = self.tok.get()
                        if not token.is_identifier():
                            raise dns.exception.SyntaxError("bad $TTL")
                        self.ttl = dns.ttl.from_text(token.value)
                        self.tok.get_eol()
                    elif u == '$ORIGIN':
                        self.current_origin = self.tok.get_name()
                        self.tok.get_eol()
                        if self.zone.origin is None:
                            self.zone.origin = self.current_origin
                    elif u == '$INCLUDE' and self.allow_include:
                        token = self.tok.get()
                        if not token.is_quoted_string():
                            raise dns.exception.SyntaxError("bad filename in $INCLUDE")
                        filename = token.value
                        token = self.tok.get()
                        if token.is_identifier():
                            # Optional origin argument for the included file.
                            new_origin = dns.name.from_text(token.value, \
                                                            self.current_origin)
                            self.tok.get_eol()
                        elif not token.is_eol_or_eof():
                            raise dns.exception.SyntaxError("bad origin in $INCLUDE")
                        else:
                            new_origin = self.current_origin
                        # Save the current reader state so EOF of the
                        # included file can resume this one.
                        self.saved_state.append((self.tok,
                                                 self.current_origin,
                                                 self.last_name,
                                                 self.current_file,
                                                 self.ttl))
                        self.current_file = file(filename, 'r')
                        self.tok = dns.tokenizer.Tokenizer(self.current_file,
                                                           filename)
                        self.current_origin = new_origin
                    else:
                        raise dns.exception.SyntaxError("Unknown master file directive '" + u + "'")
                    continue
                self.tok.unget(token)
                self._rr_line()
        except dns.exception.SyntaxError, detail:
            # Re-raise with filename:line context from the tokenizer.
            (filename, line_number) = self.tok.where()
            if detail is None:
                detail = "syntax error"
            raise dns.exception.SyntaxError("%s:%d: %s" % (filename, line_number, detail))
        # Now that we're done reading, do some basic checking of the zone.
        if self.check_origin:
            self.zone.check_origin()
def from_text(text, origin = None, rdclass = dns.rdataclass.IN,
              relativize = True, zone_factory=Zone, filename=None,
              allow_include=False, check_origin=True):
    """Build a zone object from a master file format string.
    @param text: the master file format input
    @type text: string.
    @param origin: The origin of the zone; if not specified, the first
    $ORIGIN statement in the master file will determine the origin of the
    zone.
    @type origin: dns.name.Name object or string
    @param rdclass: The zone's rdata class; the default is class IN.
    @type rdclass: int
    @param relativize: should names be relativized? The default is True
    @type relativize: bool
    @param zone_factory: The zone factory to use
    @type zone_factory: function returning a Zone
    @param filename: The filename to emit when describing where an error
    occurred; the default is '<string>'.
    @type filename: string
    @param allow_include: is $INCLUDE allowed?
    @type allow_include: bool
    @param check_origin: should sanity checks of the origin node be done?
    The default is True.
    @type check_origin: bool
    @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
    @raises dns.zone.NoNS: No NS RRset was found at the zone origin
    @rtype: dns.zone.Zone object
    """
    # 'text' can also be a file, but we don't publish that fact
    # since it's an implementation detail. The official file
    # interface is from_file().
    if filename is None:
        filename = '<string>'
    tokenizer = dns.tokenizer.Tokenizer(text, filename)
    master_reader = _MasterReader(tokenizer, origin, rdclass, relativize,
                                  zone_factory, allow_include=allow_include,
                                  check_origin=check_origin)
    master_reader.read()
    return master_reader.zone
def from_file(f, origin = None, rdclass = dns.rdataclass.IN,
              relativize = True, zone_factory=Zone, filename=None,
              allow_include=True, check_origin=True):
    """Read a master file and build a zone object.
    @param f: file or string. If I{f} is a string, it is treated
    as the name of a file to open.
    @param origin: The origin of the zone; if not specified, the first
    $ORIGIN statement in the master file will determine the origin of the
    zone.
    @type origin: dns.name.Name object or string
    @param rdclass: The zone's rdata class; the default is class IN.
    @type rdclass: int
    @param relativize: should names be relativized? The default is True
    @type relativize: bool
    @param zone_factory: The zone factory to use
    @type zone_factory: function returning a Zone
    @param filename: The filename to emit when describing where an error
    occurred; the default is '<file>', or the value of I{f} if I{f} is a
    string.
    @type filename: string
    @param allow_include: is $INCLUDE allowed?
    @type allow_include: bool
    @param check_origin: should sanity checks of the origin node be done?
    The default is True.
    @type check_origin: bool
    @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
    @raises dns.zone.NoNS: No NS RRset was found at the zone origin
    @rtype: dns.zone.Zone object
    """
    if sys.hexversion >= 0x02030000:
        # allow Unicode filenames; turn on universal newline support
        str_type = basestring
        opts = 'rU'
    else:
        str_type = str
        opts = 'r'
    if isinstance(f, str_type):
        if filename is None:
            filename = f
        # f is a filename; since we opened the file, we must close it.
        f = file(f, opts)
        want_close = True
    else:
        if filename is None:
            filename = '<file>'
        want_close = False
    try:
        # from_text() accepts an open file as its 'text' argument.
        z = from_text(f, origin, rdclass, relativize, zone_factory,
                      filename, allow_include, check_origin)
    finally:
        if want_close:
            f.close()
    return z
def from_xfr(xfr, zone_factory=Zone, relativize=True):
    """Convert the output of a zone transfer generator into a zone object.
    @param xfr: The xfr generator
    @type xfr: generator of dns.message.Message objects
    @param relativize: should names be relativized? The default is True.
    It is essential that the relativize setting matches the one specified
    to dns.query.xfr().
    @type relativize: bool
    @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
    @raises dns.zone.NoNS: No NS RRset was found at the zone origin
    @rtype: dns.zone.Zone object
    """
    zone = None
    for message in xfr:
        if zone is None:
            # The first message establishes the zone's origin and class.
            if relativize:
                origin = message.origin
            else:
                origin = message.answer[0].name
            zone = zone_factory(origin, message.answer[0].rdclass,
                                relativize=relativize)
        for rrset in message.answer:
            node = zone.nodes.get(rrset.name)
            if not node:
                node = zone.node_factory()
                zone.nodes[rrset.name] = node
            rds = node.find_rdataset(rrset.rdclass, rrset.rdtype,
                                     rrset.covers, True)
            rds.update_ttl(rrset.ttl)
            for rdata in rrset:
                rdata.choose_relativity(zone.origin, relativize)
                rds.add(rdata)
    zone.check_origin()
    return zone
| apache-2.0 |
google-research/exoplanet-ml | exoplanet-ml/light_curve/util.py | 1 | 11300 | # Copyright 2018 The Exoplanet ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Light curve utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.interpolate
from six.moves import range # pylint:disable=redefined-builtin
def phase_fold_time(time, period, t0):
  """Creates a phase-folded time vector.

  result[i] is the unique number in [-period / 2, period / 2)
  such that result[i] = time[i] - t0 + k_i * period, for some integer k_i.

  Args:
    time: 1D numpy array of time values.
    period: A positive real scalar; the period to fold over.
    t0: The center of the resulting folded vector; this value is mapped to 0.

  Returns:
    A 1D numpy array.
  """
  # Shift so that t0 lands at half_period, fold with mod, then recenter
  # the result on zero.
  half_period = period / 2
  return np.mod(time + (half_period - t0), period) - half_period
def split(all_time, all_flux, gap_width=0.75):
"""Splits a light curve on discontinuities (gaps).
This function accepts a light curve that is either a single segment, or is
piecewise defined (e.g. split by quarter breaks or gaps in the in the data).
Args:
all_time: Numpy array or sequence of numpy arrays; each is a sequence of
time values.
all_flux: Numpy array or sequence of numpy arrays; each is a sequence of
flux values of the corresponding time array.
gap_width: Minimum gap size (in time units) for a split.
Returns:
out_time: List of numpy arrays; the split time arrays.
out_flux: List of numpy arrays; the split flux arrays.
"""
# Handle single-segment inputs.
if isinstance(all_time, np.ndarray) and all_time.ndim == 1:
all_time = [all_time]
all_flux = [all_flux]
out_time = []
out_flux = []
for time, flux in zip(all_time, all_flux):
start = 0
for end in range(1, len(time) + 1):
# Choose the largest endpoint such that time[start:end] has no gaps.
if end == len(time) or time[end] - time[end - 1] > gap_width:
out_time.append(time[start:end])
out_flux.append(flux[start:end])
start = end
return out_time, out_flux
def remove_events(all_time,
                  all_flux,
                  events,
                  width_factor=1.0,
                  include_empty_segments=True):
  """Removes events from a light curve.

  This function accepts either a single-segment or piecewise-defined light
  curve (e.g. one that is split by quarter breaks or gaps in the in the data).

  Args:
    all_time: Numpy array or sequence of numpy arrays; each is a sequence of
      time values.
    all_flux: Numpy array or sequence of numpy arrays; each is a sequence of
      flux values of the corresponding time array.
    events: List of Event objects to remove.
    width_factor: Fractional multiplier of the duration of each event to
      remove.
    include_empty_segments: Whether to include empty segments in the output.

  Returns:
    output_time: Numpy array or list of numpy arrays; the time arrays with
        events removed.
    output_flux: Numpy array or list of numpy arrays; the flux arrays with
        events removed.
  """
  # Handle single-segment inputs.
  if isinstance(all_time, np.ndarray) and all_time.ndim == 1:
    all_time = [all_time]
    all_flux = [all_flux]
    single_segment = True
  else:
    single_segment = False

  output_time = []
  output_flux = []
  for time, flux in zip(all_time, all_flux):
    # Fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin bool is the equivalent dtype.
    mask = np.ones_like(time, dtype=bool)
    for event in events:
      # Distance in time from each point to the nearest transit center.
      transit_dist = np.abs(phase_fold_time(time, event.period, event.t0))
      mask = np.logical_and(mask,
                            transit_dist > 0.5 * width_factor * event.duration)

    if single_segment:
      output_time = time[mask]
      output_flux = flux[mask]
    elif include_empty_segments or np.any(mask):
      output_time.append(time[mask])
      output_flux.append(flux[mask])

  return output_time, output_flux
def interpolate_missing_time(time, cadence_no=None, fill_value="extrapolate"):
  """Interpolates missing (NaN or Inf) time values.

  Args:
    time: A numpy array of monotonically increasing values, with missing
      values denoted by NaN or Inf.
    cadence_no: Optional numpy array of cadence numbers corresponding to the
      time values. If not provided, missing time values are assumed to be
      evenly spaced between present time values.
    fill_value: Specifies how missing time values should be treated at the
      beginning and end of the array. See scipy.interpolate.interp1d.

  Returns:
    A numpy array of the same length as the input time array, with NaN/Inf
    values replaced with interpolated values.

  Raises:
    ValueError: If fewer than 2 values of time are finite.
  """
  if cadence_no is None:
    cadence_no = np.arange(len(time))

  finite_mask = np.isfinite(time)
  n_finite = np.sum(finite_mask)
  if n_finite < 2:
    raise ValueError(
        "Cannot interpolate time with fewer than 2 finite values. Got "
        "len(time) = {} with {} finite values.".format(len(time), n_finite))

  # Fit a linear interpolator through the finite (cadence, time) pairs and
  # evaluate it at every cadence number.
  interpolator = scipy.interpolate.interp1d(
      cadence_no[finite_mask],
      time[finite_mask],
      copy=False,
      bounds_error=False,
      fill_value=fill_value,
      assume_sorted=True)
  return interpolator(cadence_no)
def interpolate_masked_spline(all_time, all_masked_time, all_masked_spline):
  """Linearly interpolates spline values across masked points.

  Args:
    all_time: List of numpy arrays; each is a sequence of time values.
    all_masked_time: List of numpy arrays; each is a sequence of time values
      with some values missing (masked).
    all_masked_spline: List of numpy arrays; the masked spline values
      corresponding to all_masked_time.

  Returns:
    interp_spline: List of numpy arrays; each is the masked spline with
      missing points linearly interpolated.
  """
  interp_spline = []
  segments = zip(all_time, all_masked_time, all_masked_spline)
  for full_time, known_time, known_spline in segments:
    if known_time.size:
      # Fill the gaps by linear interpolation over the surviving points.
      filled = np.interp(full_time, known_time, known_spline)
    else:
      # Everything in this segment was masked; emit all NaNs.
      filled = np.array([np.nan] * len(full_time))
    interp_spline.append(filled)
  return interp_spline
def reshard_arrays(xs, ys):
  """Reshards arrays in xs to match the lengths of arrays in ys.

  Args:
    xs: List of 1d numpy arrays with the same total length as ys.
    ys: List of 1d numpy arrays with the same total length as xs.

  Returns:
    A list of numpy arrays containing the same elements as xs, in the same
    order, but with array lengths matching the pairwise array in ys.

  Raises:
    ValueError: If xs and ys do not have the same total length.
  """
  flat = np.concatenate(xs)
  # Running totals of the ys lengths; the last entry is the total length.
  cut_points = np.cumsum([len(y) for y in ys])
  total = cut_points[-1]
  if len(flat) != total:
    raise ValueError(
        "xs and ys do not have the same total length ({} vs. {}).".format(
            len(flat), total))
  # np.split wants only the interior cut points, not the end boundary.
  return np.split(flat, cut_points[:-1])
def uniform_cadence_light_curve(cadence_no, time, flux):
  """Combines data into a single light curve with uniform cadence numbers.

  Args:
    cadence_no: numpy array; the cadence numbers of the light curve.
    time: numpy array; the time values of the light curve.
    flux: numpy array; the flux values of the light curve.

  Returns:
    cadence_no: numpy array; the cadence numbers of the light curve with no
      gaps. It starts and ends at the minimum and maximum cadence numbers in
      the input light curve, respectively.
    time: numpy array; the time values of the light curve. Missing data
      points have value zero and correspond to a False value in the mask.
    flux: numpy array; the time values of the light curve. Missing data
      points have value zero and correspond to a False value in the mask.
    mask: Boolean numpy array; False indicates missing data points, where
      missing data points are those that have no corresponding cadence number
      in the input or those where at least one of the cadence number, time
      value, or flux value is NaN/Inf.

  Raises:
    ValueError: If there are duplicate cadence numbers in the input.
  """
  min_cadence_no = np.min(cadence_no)
  max_cadence_no = np.max(cadence_no)

  # Output covers every cadence number in [min, max] with no gaps.
  out_cadence_no = np.arange(
      min_cadence_no, max_cadence_no + 1, dtype=cadence_no.dtype)
  out_time = np.zeros_like(out_cadence_no, dtype=time.dtype)
  out_flux = np.zeros_like(out_cadence_no, dtype=flux.dtype)
  # Fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24; the
  # builtin bool is the equivalent dtype.
  out_mask = np.zeros_like(out_cadence_no, dtype=bool)

  for c, t, f in zip(cadence_no, time, flux):
    # Only fully-finite (cadence, time, flux) triples are kept.
    if np.isfinite(c) and np.isfinite(t) and np.isfinite(f):
      i = int(c - min_cadence_no)
      if out_mask[i]:
        raise ValueError("Duplicate cadence number: {}".format(c))
      out_time[i] = t
      out_flux[i] = f
      out_mask[i] = True

  return out_cadence_no, out_time, out_flux, out_mask
def count_transit_points(time, event):
  """Computes the number of points in each transit of a given event.

  Args:
    time: Sorted numpy array of time values.
    event: An Event object.

  Returns:
    A numpy array containing the number of time points "in transit" for each
    transit occurring between the first and last time values.

  Raises:
    ValueError: If there are more than 10**6 transits.
  """
  t_min = np.min(time)
  t_max = np.max(time)

  # Tiny periods or erroneous time values could make this loop take forever.
  if (t_max - t_min) / event.period > 10**6:
    raise ValueError(
        "Too many transits! Time range is [{:.4f}, {:.4f}] and period is "
        "{:.4e}.".format(t_min, t_max, event.period))

  # Make sure t0 is in [t_min, t_min + period).
  t0 = np.mod(event.t0 - t_min, event.period) + t_min

  # Prepare loop variables. i and j are two pointers into `time` that only
  # ever move forward; time is sorted, so each transit window is scanned in
  # overall O(len(time)) across the whole loop.
  points_in_transit = []
  i, j = 0, 0

  for transit_midpoint in np.arange(t0, t_max, event.period):
    transit_begin = transit_midpoint - event.duration / 2
    transit_end = transit_midpoint + event.duration / 2

    # Move time[i] to the first point >= transit_begin.
    while time[i] < transit_begin:
      # transit_begin is guaranteed to be < np.max(t) (provided duration >= 0).
      # Therefore, i cannot go out of range.
      i += 1

    # Move time[j] to the first point > transit_end.
    while time[j] <= transit_end:
      j += 1

      # j went out of range. We're finished.
      if j >= len(time):
        break

    # The points in the current transit duration are precisely time[i:j].
    # Since j is an exclusive index, there are exactly j-i points in transit.
    points_in_transit.append(j - i)

  return np.array(points_in_transit)
| apache-2.0 |
kobolabs/calibre | src/calibre/library/caches.py | 2 | 47037 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import itertools, time, traceback, locale
from itertools import repeat, izip, imap
from datetime import timedelta
from threading import Thread
from calibre.utils.config import tweaks, prefs
from calibre.utils.date import parse_date, now, UNDEFINED_DATE, clean_date_for_sort
from calibre.utils.search_query_parser import SearchQueryParser
from calibre.utils.search_query_parser import ParseException
from calibre.utils.localization import (canonicalize_lang, lang_map, get_udc)
from calibre.db.search import CONTAINS_MATCH, EQUALS_MATCH, REGEXP_MATCH, _match
from calibre.ebooks.metadata import title_sort, author_to_author_sort
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre import prints
class MetadataBackup(Thread):  # {{{
    '''
    Continuously backup changed metadata into OPF files
    in the book directory. This class runs in its own
    thread and makes sure that the actual file write happens in the
    GUI thread to prevent Windows' file locking from causing problems.
    '''

    def __init__(self, db):
        Thread.__init__(self)
        self.daemon = True
        self.db = db
        self.keep_running = True
        from calibre.gui2 import FunctionDispatcher
        # FunctionDispatcher wrappers make these db calls execute in the
        # GUI thread (see the class docstring for why).
        self.do_write = FunctionDispatcher(self.write)
        self.get_metadata_for_dump = FunctionDispatcher(db.get_metadata_for_dump)
        self.clear_dirtied = FunctionDispatcher(db.clear_dirtied)
        self.set_dirtied = FunctionDispatcher(db.dirtied)

    def stop(self):
        # Ask the backup loop to exit after its current iteration.
        self.keep_running = False

    def break_cycles(self):
        # Break cycles so that this object doesn't hold references to db
        self.do_write = self.get_metadata_for_dump = self.clear_dirtied = \
            self.set_dirtied = self.db = None

    def run(self):
        # Main backup loop: repeatedly pick one dirtied book, render its
        # metadata to OPF and write it out, until stop() is called.
        while self.keep_running:
            try:
                time.sleep(2)  # Limit to one book per two seconds
                (id_, sequence) = self.db.get_a_dirtied_book()
                if id_ is None:
                    continue
                # print 'writer thread', id_, sequence
            except:
                # Happens during interpreter shutdown
                break
            if not self.keep_running:
                break
            # Each risky step below is retried once after a short pause
            # before the book is skipped; backup is best-effort.
            try:
                path, mi, sequence = self.get_metadata_for_dump(id_)
            except:
                prints('Failed to get backup metadata for id:', id_, 'once')
                traceback.print_exc()
                time.sleep(2)
                try:
                    path, mi, sequence = self.get_metadata_for_dump(id_)
                except:
                    prints('Failed to get backup metadata for id:', id_, 'again, giving up')
                    traceback.print_exc()
                    continue
            if mi is None:
                # Book no longer needs a backup (e.g. it was deleted).
                self.clear_dirtied(id_, sequence)
                continue
            if not self.keep_running:
                break
            # Give the GUI thread a chance to do something. Python threads don't
            # have priorities, so this thread would naturally keep the processor
            # until some scheduling event happens. The sleep makes such an event
            time.sleep(0.1)
            try:
                raw = metadata_to_opf(mi)
            except:
                prints('Failed to convert to opf for id:', id_)
                traceback.print_exc()
                continue
            if not self.keep_running:
                break
            time.sleep(0.1)  # Give the GUI thread a chance to do something
            try:
                self.do_write(path, raw)
            except:
                prints('Failed to write backup metadata for id:', id_, 'once')
                time.sleep(2)
                try:
                    self.do_write(path, raw)
                except:
                    prints('Failed to write backup metadata for id:', id_,
                            'again, giving up')
                    continue
            self.clear_dirtied(id_, sequence)
        self.break_cycles()

    def write(self, path, raw):
        # Runs in the GUI thread, via the self.do_write dispatcher.
        with lopen(path, 'wb') as f:
            f.write(raw)

# }}}
# Global utility function for get_match here and in gui2/library.py
# This is a global for performance
pref_use_primary_find_in_search = False

def set_use_primary_find_in_search(toWhat):
    # Keep the module-level flag in sync with the preference so the hot
    # search/match path can read a plain global instead of the prefs store.
    global pref_use_primary_find_in_search
    pref_use_primary_find_in_search = toWhat
def force_to_bool(val):
    '''
    Coerce a string value to True, False or None. Non-string values are
    returned unchanged. Localized yes/no words, 'true'/'false' and numeric
    strings are recognized; the empty string and anything unrecognizable
    map to None.
    '''
    if not isinstance(val, (str, unicode)):
        return val
    try:
        lowered = icu_lower(val)
        if not lowered:
            return None
        if lowered in (_('yes'), _('checked'), 'true'):
            return True
        if lowered in (_('no'), _('unchecked'), 'false'):
            return False
        # Fall back to numeric truthiness (e.g. '0' -> False, '1' -> True).
        return bool(int(lowered))
    except:
        return None
class CacheRow(list):  # {{{

    '''
    One row of cached book metadata, stored as a list of field values.

    Values of composite (templated) custom columns are computed lazily:
    they are left unset until one is actually read, at which point the
    book's full metadata is fetched and all composite values for this row
    are filled in at once. The series_sort value is likewise computed on
    first access.
    '''

    def __init__(self, db, composites, val, series_col, series_sort_col):
        self.db = db
        self._composites = composites  # {rec_index: composite field name}
        list.__init__(self, val)
        self._must_do = len(composites) > 0  # composites not yet computed?
        self._series_col = series_col
        self._series_sort_col = series_sort_col
        self._series_sort = None  # lazily computed title sort of the series

    def __getitem__(self, col):
        if self._must_do:
            is_comp = False
            if isinstance(col, slice):
                start = 0 if col.start is None else col.start
                # Fixed: a slice's step is col.step, not col.stop. The old
                # code used col.stop here, making the loop stride by the
                # slice's stop value (e.g. range(0, 10, 10) checks only
                # index 0) and so miss composite columns inside the slice.
                step = 1 if col.step is None else col.step
                for c in range(start, col.stop, step):
                    if c in self._composites:
                        is_comp = True
                        break
            elif col in self._composites:
                is_comp = True
            if is_comp:
                # Fetch the book's metadata once and fill in every
                # composite column for this row.
                id_ = list.__getitem__(self, 0)
                self._must_do = False
                mi = self.db.get_metadata(id_, index_is_id=True,
                                          get_user_categories=False)
                for c in self._composites:
                    self[c] = mi.get(self._composites[c])
        if col == self._series_sort_col and self._series_sort is None:
            if self[self._series_col]:
                self._series_sort = title_sort(self[self._series_col])
                self[self._series_sort_col] = self._series_sort
            else:
                self._series_sort = ''
                self[self._series_sort_col] = ''
        return list.__getitem__(self, col)

    def __getslice__(self, i, j):
        # Python 2 compatibility: route old-style slicing to __getitem__.
        return self.__getitem__(slice(i, j))

    def refresh_composites(self):
        # Invalidate cached composite values so they are recomputed on the
        # next access.
        for c in self._composites:
            self[c] = None
        self._must_do = True

# }}}
class ResultCache(SearchQueryParser): # {{{
'''
Stores sorted and filtered metadata in memory.
'''
    def __init__(self, FIELD_MAP, field_metadata, db_prefs=None):
        self.FIELD_MAP = FIELD_MAP
        self.db_prefs = db_prefs
        self.composites = {}
        self.udc = get_udc()
        # Map the record index of every composite (templated) column to its
        # lookup name, so CacheRow can compute those values lazily.
        for key in field_metadata:
            if field_metadata[key]['datatype'] == 'composite':
                self.composites[field_metadata[key]['rec_index']] = key
        self.series_col = field_metadata['series']['rec_index']
        self.series_sort_col = field_metadata['series_sort']['rec_index']
        # _data holds rows indexed by book id; _map/_map_filtered hold the
        # ids in display order, before/after search filtering.
        self._data = []
        self._map = self._map_filtered = []
        self.first_sort = True
        self.search_restriction = self.base_restriction = ''
        self.base_restriction_name = self.search_restriction_name = ''
        self.search_restriction_book_count = 0
        self.marked_ids_dict = {}
        self.field_metadata = field_metadata
        self.all_search_locations = field_metadata.get_search_terms()
        SearchQueryParser.__init__(self, self.all_search_locations, optimize=True)
        self.build_date_relop_dict()
        self.build_numeric_relop_dict()
        # Do this here so the var get updated when a library changes
        global pref_use_primary_find_in_search
        pref_use_primary_find_in_search = prefs['use_primary_find_in_search']
        self._uuid_column_index = self.FIELD_MAP['uuid']
        self._uuid_map = {}
def break_cycles(self):
self._data = self.field_metadata = self.FIELD_MAP = \
self.numeric_search_relops = self.date_search_relops = \
self.db_prefs = self.all_search_locations = None
self.sqp_change_locations([])
def __getitem__(self, row):
return self._data[self._map_filtered[row]]
def __len__(self):
return len(self._map_filtered)
def __iter__(self):
for id in self._map_filtered:
yield self._data[id]
def iterall(self):
for x in self._data:
if x is not None:
yield x
def iterallids(self):
idx = self.FIELD_MAP['id']
for x in self.iterall():
yield x[idx]
# Search functions {{{
def universal_set(self):
return set([i[0] for i in self._data if i is not None])
    def change_search_locations(self, locations):
        # Update both the query parser and our cached copy of the list of
        # searchable locations.
        self.sqp_change_locations(locations)
        self.all_search_locations = locations
    def build_date_relop_dict(self):
        '''
        Because the database dates have time in them, we can't use direct
        comparisons even when field_count == 3. The query has time = 0, but
        the database object has time == something. As such, a complete compare
        will almost never be correct.
        '''
        # Each relop compares only as many fields (year, month, day) as the
        # query actually specified, given by field_count (1, 2 or 3).
        def relop_eq(db, query, field_count):
            if db.year == query.year:
                if field_count == 1:
                    return True
                if db.month == query.month:
                    if field_count == 2:
                        return True
                    return db.day == query.day
            return False

        def relop_gt(db, query, field_count):
            if db.year > query.year:
                return True
            if field_count > 1 and db.year == query.year:
                if db.month > query.month:
                    return True
                return field_count == 3 and db.month == query.month and db.day > query.day
            return False

        def relop_lt(db, query, field_count):
            if db.year < query.year:
                return True
            if field_count > 1 and db.year == query.year:
                if db.month < query.month:
                    return True
                return field_count == 3 and db.month == query.month and db.day < query.day
            return False

        # The remaining operators are negations of the three above.
        def relop_ne(db, query, field_count):
            return not relop_eq(db, query, field_count)

        def relop_ge(db, query, field_count):
            return not relop_lt(db, query, field_count)

        def relop_le(db, query, field_count):
            return not relop_gt(db, query, field_count)

        # Map operator text -> (number of characters to strip from the
        # front of the query, comparison function).
        self.date_search_relops = {
                            '=' :[1, relop_eq],
                            '>' :[1, relop_gt],
                            '<' :[1, relop_lt],
                            '!=':[2, relop_ne],
                            '>=':[2, relop_ge],
                            '<=':[2, relop_le]
                        }
local_today = ('_today', icu_lower(_('today')))
local_yesterday = ('_yesterday', icu_lower(_('yesterday')))
local_thismonth = ('_thismonth', icu_lower(_('thismonth')))
local_daysago = icu_lower(_('daysago'))
local_daysago_len = len(local_daysago)
untrans_daysago = '_daysago'
untrans_daysago_len = len('_daysago')
    def get_dates_matches(self, location, query, candidates):
        # Evaluate a date-valued search clause (e.g. date:>2020-1-2,
        # date:yesterday, date:<=10daysago) over the candidate book ids and
        # return the set of matching ids.
        matches = set([])
        if len(query) < 2:
            return matches

        if location == 'date':
            location = 'timestamp'
        loc = self.field_metadata[location]['rec_index']

        if query == 'false':
            # 'false' matches books whose date is unset/undefined.
            for id_ in candidates:
                item = self._data[id_]
                if item is None:
                    continue
                v = item[loc]
                if isinstance(v, (str, unicode)):
                    v = parse_date(v)
                if v is None or v <= UNDEFINED_DATE:
                    matches.add(item[0])
            return matches
        if query == 'true':
            # 'true' matches books with any defined date.
            for id_ in candidates:
                item = self._data[id_]
                if item is None:
                    continue
                v = item[loc]
                if isinstance(v, (str, unicode)):
                    v = parse_date(v)
                if v is not None and v > UNDEFINED_DATE:
                    matches.add(item[0])
            return matches

        # Strip a leading relational operator; default is equality.
        relop = None
        for k in self.date_search_relops.keys():
            if query.startswith(k):
                (p, relop) = self.date_search_relops[k]
                query = query[p:]
        if relop is None:
            (p, relop) = self.date_search_relops['=']

        # Resolve keyword dates ('today', 'yesterday', 'thismonth',
        # 'Ndaysago') or parse an explicit date. field_count records how
        # many of year/month/day the query specified, which controls how
        # precisely the relop compares dates.
        if query in self.local_today:
            qd = now()
            field_count = 3
        elif query in self.local_yesterday:
            qd = now() - timedelta(1)
            field_count = 3
        elif query in self.local_thismonth:
            qd = now()
            field_count = 2
        elif query.endswith(self.local_daysago) or query.endswith(self.untrans_daysago):
            num = query[0:-(self.local_daysago_len if query.endswith(self.local_daysago) else self.untrans_daysago_len)]
            try:
                qd = now() - timedelta(int(num))
            except:
                raise ParseException(_('Number conversion error: {0}').format(num))
            field_count = 3
        else:
            try:
                qd = parse_date(query, as_utc=False)
            except:
                raise ParseException(_('Date conversion error: {0}').format(query))
            # Infer precision from the number of date separators used.
            if '-' in query:
                field_count = query.count('-') + 1
            else:
                field_count = query.count('/') + 1

        for id_ in candidates:
            item = self._data[id_]
            if item is None or item[loc] is None:
                continue
            v = item[loc]
            if isinstance(v, (str, unicode)):
                v = parse_date(v)
            if relop(v, qd, field_count):
                matches.add(item[0])
        return matches
    def build_numeric_relop_dict(self):
        # Map operator text -> (number of characters to strip from the
        # front of the query, comparison function). None (no value) never
        # satisfies an ordering comparison, only (in)equality.
        self.numeric_search_relops = {
                        '=':[1, lambda r, q: r == q],
                        '>':[1, lambda r, q: r is not None and r > q],
                        '<':[1, lambda r, q: r is not None and r < q],
                        '!=':[2, lambda r, q: r != q],
                        '>=':[2, lambda r, q: r is not None and r >= q],
                        '<=':[2, lambda r, q: r is not None and r <= q]
                    }
def get_numeric_matches(self, location, query, candidates, val_func=None):
    """Return the set of book ids in *candidates* whose numeric field at
    *location* satisfies *query* (e.g. ``>=3``, ``true``, ``2m``).

    :param val_func: optional callable extracting the raw value from a
        cached row; defaults to a plain column lookup.
    """
    matches = set([])
    if len(query) == 0:
        return matches
    if val_func is None:
        loc = self.field_metadata[location]['rec_index']
        # Bind loc as a default argument so the lambda captures the
        # current value, not a later rebinding.
        val_func = lambda item, loc=loc: item[loc]
    q = ''
    # Identity conversions unless overridden below for the datatype.
    cast = adjust = lambda x: x
    dt = self.field_metadata[location]['datatype']
    if query == 'false':
        if dt == 'rating' or location == 'cover':
            # Zero ratings / missing covers count as "false".
            relop = lambda x,y: not bool(x)
        else:
            relop = lambda x,y: x is None
    elif query == 'true':
        if dt == 'rating' or location == 'cover':
            relop = lambda x,y: bool(x)
        else:
            relop = lambda x,y: x is not None
    else:
        relop = None
        # Strip a leading relational operator, if any; '=' is the default.
        for k in self.numeric_search_relops.keys():
            if query.startswith(k):
                (p, relop) = self.numeric_search_relops[k]
                query = query[p:]
        if relop is None:
            (p, relop) = self.numeric_search_relops['=']
        if dt == 'int':
            cast = lambda x: int(x)
        elif dt == 'rating':
            cast = lambda x: 0 if x is None else int(x)
            # NOTE(review): Python-2 integer division: halves the stored
            # 0-10 rating down to 0-5 stars -- confirm if porting to py3.
            adjust = lambda x: x/2
        elif dt in ('float', 'composite'):
            cast = lambda x : float(x)
        else: # count operation
            cast = (lambda x: int(x))
        if len(query) > 1:
            # Optional size suffix: k/m/g multiply by powers of 1024.
            mult = query[-1:].lower()
            mult = {'k':1024.,'m': 1024.**2, 'g': 1024.**3}.get(mult, 1.0)
            if mult != 1.0:
                query = query[:-1]
        else:
            mult = 1.0
        try:
            q = cast(query) * mult
        except:
            raise ParseException(_('Non-numeric value in query: {0}').format(query))
    for id_ in candidates:
        item = self._data[id_]
        if item is None:
            continue
        try:
            v = cast(val_func(item))
        except:
            # Unparseable stored value: treat as missing.
            v = None
        if v:
            v = adjust(v)
        if relop(v, q):
            matches.add(item[0])
    return matches
def get_user_category_matches(self, location, query, candidates):
    """Search a user-defined category (an ``@name`` location), returning
    the matching book ids. ``query == 'false'`` inverts the result."""
    matches = set([])
    if self.db_prefs is None or len(query) < 2:
        return matches
    user_cats = self.db_prefs.get('user_categories', [])
    c = set(candidates)
    if query.startswith('.'):
        # A leading '.' means: also search sub-categories (location.child).
        check_subcats = True
        query = query[1:]
    else:
        check_subcats = False
    for key in user_cats:
        if key == location or (check_subcats and key.startswith(location + '.')):
            for (item, category, ign) in user_cats[key]:
                s = self.get_matches(category, '=' + item, candidates=c)
                # Shrink the candidate pool as matches accumulate, so each
                # book is only searched until it first matches.
                c -= s
                matches |= s
    if query == 'false':
        return candidates - matches
    return matches
def get_keypair_matches(self, location, query, candidates):
    """Search colon-separated key:value fields (e.g. identifiers).

    *query* may be ``key:value`` -- each half then gets its own match
    kind via _matchkind -- or a bare value matched against all values.
    ``value == 'false'`` matches books with no (matching) pairs at all.
    """
    matches = set([])
    if query.find(':') >= 0:
        q = [q.strip() for q in query.split(':')]
        if len(q) != 2:
            raise ParseException(
                _('Invalid query format for colon-separated search: {0}').format(query))
        (keyq, valq) = q
        keyq_mkind, keyq = self._matchkind(keyq)
        valq_mkind, valq = self._matchkind(valq)
    else:
        keyq = keyq_mkind = ''
        valq_mkind, valq = self._matchkind(query)
    loc = self.field_metadata[location]['rec_index']
    split_char = self.field_metadata[location]['is_multiple'].get(
            'cache_to_list', ',')
    for id_ in candidates:
        item = self._data[id_]
        if item is None:
            continue
        if item[loc] is None:
            if valq == 'false':
                # No pairs at all satisfies a 'false' value query.
                matches.add(id_)
            continue
        add_if_nothing_matches = valq == 'false'
        pairs = [p.strip() for p in item[loc].split(split_char)]
        for pair in pairs:
            parts = pair.split(':')
            if len(parts) != 2:
                continue
            # NOTE: k and v are one-element *lists* -- _match expects a
            # list of candidate values, not a bare string.
            k = parts[:1]
            v = parts[1:]
            if keyq and not _match(keyq, k, keyq_mkind,
                    use_primary_find_in_search=pref_use_primary_find_in_search):
                continue
            if valq:
                if valq == 'true':
                    if not v:
                        continue
                elif valq == 'false':
                    if v:
                        # A value exists, so the 'false' query fails overall.
                        add_if_nothing_matches = False
                        continue
                elif not _match(valq, v, valq_mkind,
                        use_primary_find_in_search=pref_use_primary_find_in_search):
                    continue
            matches.add(id_)
        if add_if_nothing_matches:
            matches.add(id_)
    return matches
def _matchkind(self, query):
    """Strip any leading match-type prefix from *query*.

    ``=`` selects an exact match, ``~`` a regexp match, and ``\\``
    escapes a literal first character; anything else is a contains
    match. Returns ``(matchkind, query)`` with the prefix removed and
    non-regexp queries lower-cased.
    """
    kind = CONTAINS_MATCH
    if len(query) > 1:
        prefix, rest = query[0], query[1:]
        if prefix == '\\':
            query = rest
        elif prefix == '=':
            kind, query = EQUALS_MATCH, rest
        elif prefix == '~':
            kind, query = REGEXP_MATCH, rest
    if kind != REGEXP_MATCH:
        # Case is significant inside regexps (e.g. \S vs \s), so only
        # lower-case non-regexp queries.
        query = icu_lower(query)
    return kind, query
# Localized spellings accepted by boolean searches, computed once at class
# definition time. `_` is the gettext translation function; icu_lower
# normalizes case for locale-aware comparison.
local_no = icu_lower(_('no'))
local_yes = icu_lower(_('yes'))
local_unchecked = icu_lower(_('unchecked'))
local_checked = icu_lower(_('checked'))
local_empty = icu_lower(_('empty'))
local_blank = icu_lower(_('blank'))
# The full set of valid boolean query words (localized and canonical).
local_bool_values = (
        local_no, local_unchecked, '_no', 'false',
        local_yes, local_checked, '_yes', 'true',
        local_empty, local_blank, '_empty')
def get_bool_matches(self, location, query, candidates):
    """Match a boolean column, honouring the tristate preference.

    In two-state mode None is treated as false. In tristate mode the
    empty/blank spellings (and 'false') select None, while 'true'
    selects any explicitly set value (both yes and no).
    """
    bools_are_tristate = self.db_prefs.get('bools_are_tristate')
    loc = self.field_metadata[location]['rec_index']
    matches = set()
    query = icu_lower(query)
    if query not in self.local_bool_values:
        raise ParseException(_('Invalid boolean query "{0}"').format(query))
    for id_ in candidates:
        item = self._data[id_]
        if item is None:
            continue
        val = force_to_bool(item[loc])
        if not bools_are_tristate:
            if val is None or not val: # item is None or set to false
                if query in (self.local_no, self.local_unchecked, '_no', 'false'):
                    matches.add(item[0])
            else: # item is explicitly set to true
                if query in (self.local_yes, self.local_checked, '_yes', 'true'):
                    matches.add(item[0])
        else:
            if val is None:
                if query in (self.local_empty, self.local_blank, '_empty', 'false'):
                    matches.add(item[0])
            elif not val: # is not None and false
                # Note: 'true' matches here too -- in tristate mode it
                # means "has any explicit value".
                if query in (self.local_no, self.local_unchecked, '_no', 'true'):
                    matches.add(item[0])
            else: # item is not None and true
                if query in (self.local_yes, self.local_checked, '_yes', 'true'):
                    matches.add(item[0])
    return matches
def get_matches(self, location, query, candidates=None,
                allow_recursion=True):
    """Return the set of book ids matching *query* in *location*.

    Dispatches to the type-specific matchers (dates, numbers, booleans,
    key:value pairs, user categories, grouped search terms) and
    otherwise performs text matching over the relevant cached columns.

    If candidates is not None, it must not be modified. Changing its
    value will break query optimization in the search parser.
    """
    matches = set([])
    if candidates is None:
        candidates = self.universal_set()
    if len(candidates) == 0:
        return matches
    if location not in self.all_search_locations:
        return matches

    if len(location) > 2 and location.startswith('@') and \
                location[1:] in self.db_prefs['grouped_search_terms']:
        location = location[1:]

    if query and query.strip():
        # get metadata key associated with the search term. Eliminates
        # dealing with plurals and other aliases
        original_location = location
        location = self.field_metadata.search_term_to_field_key(icu_lower(location.strip()))
        # grouped search terms
        if isinstance(location, list):
            if allow_recursion:
                if query.lower() == 'false':
                    # 'false' on a group means "matches no member field":
                    # search for 'true' and invert the result.
                    invert = True
                    query = 'true'
                else:
                    invert = False
                for loc in location:
                    c = candidates.copy()
                    m = self.get_matches(loc, query,
                            candidates=c, allow_recursion=False)
                    matches |= m
                    c -= m
                    if len(c) == 0:
                        break
                if invert:
                    matches = self.universal_set() - matches
                return matches
            raise ParseException(_('Recursive query group detected: {0}').format(query))

        # apply the limit if appropriate
        if location == 'all' and prefs['limit_search_columns'] and \
                prefs['limit_search_columns_to']:
            terms = set([])
            for l in prefs['limit_search_columns_to']:
                l = icu_lower(l.strip())
                if l and l != 'all' and l in self.all_search_locations:
                    terms.add(l)
            if terms:
                c = candidates.copy()
                for l in terms:
                    try:
                        m = self.get_matches(l, query,
                            candidates=c, allow_recursion=allow_recursion)
                        matches |= m
                        c -= m
                        if len(c) == 0:
                            break
                    except:
                        pass
                return matches

        if location in self.field_metadata:
            fm = self.field_metadata[location]
            # take care of dates special case
            if fm['datatype'] == 'datetime' or \
                    (fm['datatype'] == 'composite' and
                     fm['display'].get('composite_sort', '') == 'date'):
                return self.get_dates_matches(location, query.lower(), candidates)

            # take care of numbers special case
            if fm['datatype'] in ('rating', 'int', 'float') or \
                    (fm['datatype'] == 'composite' and
                     fm['display'].get('composite_sort', '') == 'number'):
                return self.get_numeric_matches(location, query.lower(), candidates)

            if fm['datatype'] == 'bool':
                return self.get_bool_matches(location, query, candidates)

            # take care of the 'count' operator for is_multiples
            # BUGFIX: this used to test query[1:1], which is always the
            # empty string and '' is "in" every string, so the operator
            # check never excluded anything. query[1:2] is the character
            # after the '#', which is what was intended.
            if fm['is_multiple'] and \
                    len(query) > 1 and query.startswith('#') and \
                    query[1:2] in '=<>!':
                vf = lambda item, loc=fm['rec_index'], \
                            ms=fm['is_multiple']['cache_to_list']:\
                        len(item[loc].split(ms)) if item[loc] is not None else 0
                return self.get_numeric_matches(location, query[1:],
                                                candidates, val_func=vf)

            # special case: colon-separated fields such as identifiers. isbn
            # is a special case within the case
            if fm.get('is_csp', False):
                if location == 'identifiers' and original_location == 'isbn':
                    return self.get_keypair_matches('identifiers',
                                               '=isbn:'+query, candidates)
                return self.get_keypair_matches(location, query, candidates)

        # check for user categories
        if len(location) >= 2 and location.startswith('@'):
            return self.get_user_category_matches(location[1:], query.lower(),
                                                  candidates)
        # everything else, or 'all' matches
        matchkind, query = self._matchkind(query)

        if not isinstance(query, unicode):
            query = query.decode('utf-8')

        db_col = {}
        exclude_fields = [] # fields to not check when matching against text.
        col_datatype = []
        is_multiple_cols = {}
        for x in range(len(self.FIELD_MAP)):
            col_datatype.append('')
        for x in self.field_metadata:
            if x.startswith('@'):
                continue
            if len(self.field_metadata[x]['search_terms']):
                db_col[x] = self.field_metadata[x]['rec_index']
                if self.field_metadata[x]['datatype'] not in \
                        ['composite', 'text', 'comments', 'series', 'enumeration']:
                    exclude_fields.append(db_col[x])
                col_datatype[db_col[x]] = self.field_metadata[x]['datatype']
                is_multiple_cols[db_col[x]] = \
                    self.field_metadata[x]['is_multiple'].get('cache_to_list', None)

        try:
            rating_query = int(query) * 2
        except:
            rating_query = None

        location = [location] if location != 'all' else list(db_col.keys())

        for i, loc in enumerate(location):
            location[i] = db_col[loc]

        current_candidates = candidates.copy()
        for loc in location: # location is now an array of field indices
            if loc == db_col['authors']:
                # DB stores authors with commas changed to bars, so change query
                if matchkind == REGEXP_MATCH:
                    q = query.replace(',', r'\|')
                else:
                    q = query.replace(',', '|')
            elif loc == db_col['languages']:
                q = canonicalize_lang(query)
                if q is None:
                    lm = lang_map()
                    rm = {v.lower():k for k,v in lm.iteritems()}
                    q = rm.get(query, query)
            else:
                q = query

            for id_ in current_candidates:
                item = self._data[id_]
                if item is None:
                    continue

                if not item[loc]:
                    if q == 'false' and matchkind == CONTAINS_MATCH:
                        matches.add(item[0])
                    continue # item is empty. No possible matches below
                if q == 'false' and matchkind == CONTAINS_MATCH:
                    # Field has something in it, so a false query does not match
                    continue

                if q == 'true' and matchkind == CONTAINS_MATCH:
                    if isinstance(item[loc], basestring):
                        if item[loc].strip() == '':
                            continue
                    matches.add(item[0])
                    continue

                if col_datatype[loc] == 'rating': # get here if 'all' query
                    if rating_query and rating_query == int(item[loc]):
                        matches.add(item[0])
                    continue

                try: # a conversion below might fail
                    # relationals are not supported in 'all' queries
                    if col_datatype[loc] == 'float':
                        if float(query) == item[loc]:
                            matches.add(item[0])
                        continue
                    if col_datatype[loc] == 'int':
                        if int(query) == item[loc]:
                            matches.add(item[0])
                        continue
                except:
                    # A conversion threw an exception. Because of the type,
                    # no further match is possible
                    continue

                if loc not in exclude_fields: # time for text matching
                    if is_multiple_cols[loc] is not None:
                        vals = [v.strip() for v in item[loc].split(is_multiple_cols[loc])]
                    else:
                        vals = [item[loc]] # make into list to make _match happy
                    if _match(q, vals, matchkind,
                              use_primary_find_in_search=pref_use_primary_find_in_search):
                        matches.add(item[0])
                        continue
            # Books already matched never need to be searched again.
            current_candidates -= matches
    return matches
def search(self, query, return_matches=False):
    """Evaluate *query* under the current search restriction.

    Returns the matching ids when *return_matches* is True; otherwise
    replaces the filtered id map with them.
    """
    ids = self.search_getting_ids(query, self.search_restriction,
                                  set_restriction_count=True)
    if return_matches:
        return ids
    self._map_filtered = ids
def _build_restriction_string(self, restriction):
if self.base_restriction:
if restriction:
return u'(%s) and (%s)' % (self.base_restriction, restriction)
else:
return self.base_restriction
else:
return restriction
def search_getting_ids(self, query, search_restriction,
                       set_restriction_count=False, use_virtual_library=True):
    """Parse and run *query*, returning the matching ids in _map order.

    *search_restriction* is AND-ed with the query. When
    *use_virtual_library* is True the base (virtual library)
    restriction is folded in as well.
    """
    if use_virtual_library:
        search_restriction = self._build_restriction_string(search_restriction)
    q = ''
    if not query or not query.strip():
        # No query: the restriction alone decides what is visible.
        q = search_restriction
    else:
        q = query
        if search_restriction:
            q = u'(%s) and (%s)' % (search_restriction, query)
    if not q:
        if set_restriction_count:
            self.search_restriction_book_count = len(self._map)
        return list(self._map)
    matches = self.parse(q)
    # Filter _map down to matches while preserving its sort order;
    # tmap is a dense membership table indexed by book id.
    tmap = list(itertools.repeat(False, len(self._data)))
    for x in matches:
        tmap[x] = True
    rv = [x for x in self._map if tmap[x]]
    if set_restriction_count and q == search_restriction:
        self.search_restriction_book_count = len(rv)
    return rv
# Trivial accessors for the current search / virtual-library restrictions.
def get_search_restriction(self):
    return self.search_restriction
def set_search_restriction(self, s):
    self.search_restriction = s
def get_base_restriction(self):
    return self.base_restriction
def set_base_restriction(self, s):
    self.base_restriction = s
def get_base_restriction_name(self):
    return self.base_restriction_name
def set_base_restriction_name(self, s):
    self.base_restriction_name = s
def get_search_restriction_name(self):
    return self.search_restriction_name
def set_search_restriction_name(self, s):
    self.search_restriction_name = s
def search_restriction_applied(self):
    # True when either kind of restriction is currently active.
    return bool(self.search_restriction) or bool((self.base_restriction))
def get_search_restriction_book_count(self):
    return self.search_restriction_book_count
def set_marked_ids(self, id_dict):
    '''
    ids in id_dict are "marked". They can be searched for by
    using the search term ``marked:true``. Pass in an empty dictionary or
    set to clear marked ids.

    :param id_dict: Either a dictionary mapping ids to values or a set
    of ids. In the latter case, the value is set to 'true' for all ids. If
    a mapping is provided, then the search can be used to search for
    particular values: ``marked:value``
    '''
    if not hasattr(id_dict, 'items'):
        # Simple list. Make it a dict of string 'true'
        self.marked_ids_dict = dict.fromkeys(id_dict, u'true')
    else:
        # Ensure that all the items in the dict are text
        self.marked_ids_dict = dict(izip(id_dict.iterkeys(), imap(unicode,
            id_dict.itervalues())))
    # Set the values in the cache: clear every row's mark first, then
    # write the new marks.
    marked_col = self.FIELD_MAP['marked']
    for r in self.iterall():
        r[marked_col] = None
    for id_, val in self.marked_ids_dict.iteritems():
        try:
            self._data[id_][marked_col] = val
        except:
            # Deleted or out-of-range ids are silently ignored.
            pass
# }}}
def remove(self, id):
    """Drop book *id* from the cache and from both id maps.

    Out-of-range ids and ids absent from a map are silently ignored.
    """
    try:
        record = self._data[id]
        self._uuid_map.pop(record[self._uuid_column_index], None)
    except IndexError:
        pass # id is out of bounds -- no uuid in the map to remove
    try:
        self._data[id] = None
    except IndexError:
        pass # id is out of bounds, no point setting it to None anyway
    for id_list in (self._map, self._map_filtered):
        try:
            id_list.remove(id)
        except ValueError:
            pass # id was not present in this map
def set(self, row, col, val, row_is_id=False):
    """Store *val* at (row, col), keeping the uuid->id map in sync.

    *row* is a position in the filtered map unless *row_is_id* is True,
    in which case it is the book id itself.
    """
    id = self._map_filtered[row] if not row_is_id else row
    record = self._data[id]
    is_uuid_col = col == self._uuid_column_index
    if is_uuid_col:
        # The uuid is being replaced: forget the old mapping first.
        self._uuid_map.pop(record[col], None)
    record[col] = val
    if is_uuid_col:
        self._uuid_map[val] = id
    record.refresh_composites()
def get(self, row, col, row_is_id=False):
    """Return the cached value at (row, col).

    *row* is a filtered-map position unless *row_is_id* is True, in
    which case it is the book id itself.
    """
    book_id = row if row_is_id else self._map_filtered[row]
    return self._data[book_id][col]
def index(self, id, cache=False):
    """Position of book *id* in the filtered map (or, when *cache* is
    True, in the full unfiltered map)."""
    source = self._map if cache else self._map_filtered
    return source.index(id)
def row(self, id):
    # Row number of book *id* in the filtered (visible) map.
    return self.index(id)
def has_id(self, id):
    """True if book *id* is present (and not deleted) in the cache."""
    try:
        return self._data[id] is not None
    except IndexError:
        # id is past the end of the cache list.
        return False
def refresh_ids(self, db, ids):
    '''
    Refresh the data in the cache for books identified by ids.
    Returns a list of affected rows or None if the rows are filtered.
    '''
    for id in ids:
        try:
            self._data[id] = CacheRow(db, self.composites,
                    db.conn.get('SELECT * from meta2 WHERE id=?', (id,))[0],
                    self.series_col, self.series_sort_col)
            # Extra cache-only columns: on-device string, marked value,
            # and the series-sort placeholder.
            self._data[id].append(db.book_on_device_string(id))
            self._data[id].append(self.marked_ids_dict.get(id, None))
            self._data[id].append(None)
            self._uuid_map[self._data[id][self._uuid_column_index]] = id
        except IndexError:
            return None
    try:
        return map(self.row, ids)
    except ValueError:
        # At least one id is not in the filtered map.
        pass
    return None
def books_added(self, ids, db):
    """Load freshly-added books into the cache and prepend their ids to
    both id maps (new books appear first)."""
    if not ids:
        return
    # Grow the dense id-indexed cache list so the largest new id fits.
    self._data.extend(repeat(None, max(ids)-len(self._data)+2))
    for id in ids:
        self._data[id] = CacheRow(db, self.composites,
                    db.conn.get('SELECT * from meta2 WHERE id=?', (id,))[0],
                    self.series_col, self.series_sort_col)
        # Extra cache-only columns, matching refresh()/refresh_ids().
        self._data[id].append(db.book_on_device_string(id))
        self._data[id].append(self.marked_ids_dict.get(id, None))
        self._data[id].append(None) # Series sort column
        self._uuid_map[self._data[id][self._uuid_column_index]] = id
    self._map[0:0] = ids
    self._map_filtered[0:0] = ids
def books_deleted(self, ids):
    """Remove every book in *ids* from the cache and the id maps."""
    for book_id in ids:
        self.remove(book_id)
def count(self):
    # Total number of books in the library (unfiltered).
    return len(self._map)
def refresh_ondevice(self, db):
    """Re-query the on-device status string for every cached book and
    refresh any composite columns that may display it."""
    col = self.FIELD_MAP['ondevice']
    for record in self._data:
        if record is None:
            continue
        # record[0] is the book id.
        record[col] = db.book_on_device_string(record[0])
        record.refresh_composites()
def refresh(self, db, field=None, ascending=True):
    """Rebuild the entire cache from the database, optionally sorting by
    *field*, and re-apply any active search restriction."""
    # reinitialize the template cache in case a composite column has changed
    db.initialize_template_cache()
    temp = db.conn.get('SELECT * FROM meta2')
    # Dense list indexed by book id; temp[-1][0] is the largest id.
    self._data = list(itertools.repeat(None, temp[-1][0]+2)) if temp else []
    for r in temp:
        self._data[r[0]] = CacheRow(db, self.composites, r,
                                    self.series_col, self.series_sort_col)
        self._uuid_map[self._data[r[0]][self._uuid_column_index]] = r[0]
    for item in self._data:
        if item is not None:
            item.append(db.book_on_device_string(item[0]))
            # Temp mark and series_sort columns
            item.extend((None, None))
    # Re-apply any marks that were set before the refresh.
    marked_col = self.FIELD_MAP['marked']
    for id_,val in self.marked_ids_dict.iteritems():
        try:
            self._data[id_][marked_col] = val
        except:
            pass
    self._map = [i[0] for i in self._data if i is not None]
    if field is not None:
        self.sort(field, ascending)
    self._map_filtered = list(self._map)
    if self.search_restriction or self.base_restriction:
        # Re-run the (empty) search so restrictions filter the new map.
        self.search('', return_matches=False)
# Sorting functions {{{
def sanitize_sort_field_name(self, field):
    """Map a user-visible sort field name to the column actually sorted
    on: title -> sort, authors -> author_sort; anything else unchanged
    after search-term normalization."""
    key = self.field_metadata.search_term_to_field_key(field.strip().lower())
    # translate some fields to their hidden equivalent
    return {'title': 'sort', 'authors': 'author_sort'}.get(key, key)
def sort(self, field, ascending, subsort=False):
    # Single-field sort, delegated to multisort().
    # NOTE(review): *subsort* is accepted but not forwarded -- verify
    # against callers whether it was ever meant to be passed through.
    self.multisort([(field, ascending)])
def multisort(self, fields=[], subsort=False, only_ids=None):
    '''
    fields is a list of 2-tuple, each tuple is of the form
    (field_name, is_ascending)

    If only_ids is a list of ids, this function will sort that list instead
    of the internal mapping of ids.
    '''
    # The mutable default is safe here: fields is rebound, never mutated.
    fields = [(self.sanitize_sort_field_name(x), bool(y)) for x, y in fields]
    keys = self.field_metadata.sortable_field_keys()
    # Drop fields that cannot be sorted on.
    fields = [x for x in fields if x[0] in keys]
    if subsort and 'sort' not in [x[0] for x in fields]:
        fields += [('sort', True)]
    if not fields:
        # Default: newest first.
        fields = [('timestamp', False)]
    keyg = SortKeyGenerator(fields, self.field_metadata, self._data, self.db_prefs)
    if only_ids is None:
        self._map.sort(key=keyg)
        # Rebuild the filtered map so it follows the new order; tmap is
        # a dense membership table indexed by book id.
        tmap = list(itertools.repeat(False, len(self._data)))
        for x in self._map_filtered:
            tmap[x] = True
        self._map_filtered = [x for x in self._map if tmap[x]]
    else:
        # Sort the caller's list in place.
        only_ids.sort(key=keyg)
class SortKey(object):
    """Multi-field comparison key.

    Python 2 only: relies on __cmp__ and the builtin cmp(). *orders*
    holds +1/-1 multipliers (ascending/descending) parallel to the
    pre-computed field *values*.
    """
    def __init__(self, orders, values):
        self.orders, self.values = orders, values
    def __cmp__(self, other):
        for i, ascending in enumerate(self.orders):
            ans = cmp(self.values[i], other.values[i])
            if ans != 0:
                # Flip the comparison for descending (-1) fields; equal
                # values fall through to the next field.
                return ans * ascending
        return 0
class SortKeyGenerator(object):
    """Callable mapping a book id to a SortKey, for use as the key=
    function of multisort()."""
    def __init__(self, fields, field_metadata, data, db_prefs):
        from calibre.utils.icu import sort_key
        self.field_metadata = field_metadata
        self.db_prefs = db_prefs
        # +1 for ascending, -1 for descending; parallel to self.entries.
        self.orders = [1 if x[1] else -1 for x in fields]
        self.entries = [(x[0], field_metadata[x[0]]) for x in fields]
        self.library_order = tweaks['title_series_sorting'] == 'library_order'
        self.data = data
        self.string_sort_key = sort_key
        self.lang_idx = field_metadata['languages']['rec_index']
    def __call__(self, record):
        # *record* is a book id; self.data is the id-indexed cache.
        values = tuple(self.itervals(self.data[record]))
        return SortKey(self.orders, values)
    def itervals(self, record):
        """Yield one sortable value per sort field of *record*."""
        for name, fm in self.entries:
            dt = fm['datatype']
            val = record[fm['rec_index']]
            if dt == 'composite':
                # Composite columns sort by their rendered text unless
                # the column is configured to sort as date/number/bool.
                sb = fm['display'].get('composite_sort', 'text')
                if sb == 'date':
                    try:
                        val = parse_date(val)
                    except:
                        val = UNDEFINED_DATE
                    dt = 'datetime'
                elif sb == 'number':
                    try:
                        # Strip a size suffix (B..EB) into a multiplier.
                        p = 1
                        for i, candidate in enumerate(
                                ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB')):
                            if val.endswith(candidate):
                                p = 1024**(i)
                                val = val[:-len(candidate)].strip()
                                break
                        val = locale.atof(val) * p
                    except:
                        val = 0.0
                    dt = 'float'
                elif sb == 'bool':
                    val = force_to_bool(val)
                    dt = 'bool'
            if dt == 'datetime':
                if val is None:
                    val = UNDEFINED_DATE
                if tweaks['sort_dates_using_visible_fields']:
                    # Truncate to the precision the GUI displays so the
                    # sort order matches what the user sees.
                    format = None
                    if name == 'timestamp':
                        format = tweaks['gui_timestamp_display_format']
                    elif name == 'pubdate':
                        format = tweaks['gui_pubdate_display_format']
                    elif name == 'last_modified':
                        format = tweaks['gui_last_modified_display_format']
                    elif fm['is_custom']:
                        format = fm['display'].get('date_format', None)
                    val = clean_date_for_sort(val, format)
            elif dt == 'series':
                if val is None:
                    val = ('', 1)
                else:
                    if self.library_order:
                        try:
                            # First language of the book, for title_sort.
                            lang = record[self.lang_idx].partition(u',')[0]
                        except (AttributeError, ValueError, KeyError,
                                IndexError, TypeError):
                            lang = None
                        val = title_sort(val, order='library_order', lang=lang)
                    sidx_fm = self.field_metadata[name + '_index']
                    sidx = record[sidx_fm['rec_index']]
                    # Sort by (series name, index within the series).
                    val = (self.string_sort_key(val), sidx)
            elif dt in ('text', 'comments', 'composite', 'enumeration'):
                if val:
                    if fm['is_multiple']:
                        jv = fm['is_multiple']['list_to_ui']
                        sv = fm['is_multiple']['cache_to_list']
                        if '&' in jv:
                            # Author-like field: sort on author-sort forms.
                            val = jv.join(
                                [author_to_author_sort(v) for v in val.split(sv)])
                        else:
                            val = jv.join(sorted(val.split(sv),
                                          key=self.string_sort_key))
                val = self.string_sort_key(val)
            elif dt == 'bool':
                # Map booleans to small ints so None has a stable place.
                if not self.db_prefs.get('bools_are_tristate'):
                    val = {True: 1, False: 2, None: 2}.get(val, 2)
                else:
                    val = {True: 1, False: 2, None: 3}.get(val, 3)
            yield val
# }}}
# }}}
| gpl-3.0 |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.5/Lib/encodings/cp500.py | 593 | 13377 | """ Python Character Mapping Codec cp500 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP500.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp500 codec backed by the charmap tables below."""
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is context-free, so no state is kept between calls.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Single-byte charmap decoding needs no state between calls.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits encode() from Codec; no extra behavior needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits decode() from Codec; no extra behavior needed.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo used to register this module as 'cp500'."""
    return codecs.CodecInfo(
        name='cp500',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'[' # 0x4A -> LEFT SQUARE BRACKET
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'!' # 0x4F -> EXCLAMATION MARK
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u']' # 0x5A -> RIGHT SQUARE BRACKET
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'^' # 0x5F -> CIRCUMFLEX ACCENT
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xa6' # 0x6A -> BROKEN BAR
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
u'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
u'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
u'\xae' # 0xAF -> REGISTERED SIGN
u'\xa2' # 0xB0 -> CENT SIGN
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'\xac' # 0xBA -> NOT SIGN
u'|' # 0xBB -> VERTICAL LINE
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
# Inverse of decoding_table: maps unicode characters back to cp500 bytes.
encoding_table=codecs.charmap_build(decoding_table)
| mit |
gpfreitas/bokeh | sphinx/source/conf.py | 1 | 11340 | # -*- coding: utf-8 -*-
#
# Bokeh documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 12 23:43:03 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.graphviz',
'sphinx.ext.ifconfig',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'bokeh.sphinxext.bokeh_autodoc',
'bokeh.sphinxext.bokeh_gallery',
'bokeh.sphinxext.bokeh_github',
'bokeh.sphinxext.bokeh_jinja',
'bokeh.sphinxext.bokeh_model',
'bokeh.sphinxext.bokeh_palette',
'bokeh.sphinxext.bokeh_plot',
'bokeh.sphinxext.bokeh_prop',
'bokeh.sphinxext.collapsible_code_block',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Bokeh'
copyright = u'2013, Continuum Analytics'
# Get the standard computed Bokeh version string to use for |version|
# and |release|
from bokeh import __version__
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# Check for version override (e.g. when re-deploying a previously released
# docs, or when pushing test docs that do not have a corresponding BokehJS
# available on CDN)
from bokeh.settings import settings
if settings.docs_version():
version = release = settings.docs_version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# Sort members by type
autodoc_member_order = 'groupwise'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
#'navbar_title': "Demo",
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Site",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
'navbar_links': [
("Gallery", "docs/gallery"),
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': True,
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 3,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar navbar-inverse",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "false",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "footer",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing with "" (default) or the name of a valid theme
# such as "amelia" or "cosmo".
#'bootswatch_theme': "united",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/bokeh_white_32.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
standard_sidebars = ['sidebartoc.html', 'sourcelink.html', 'searchbox.html']
html_sidebars = {
'*': standard_sidebars,
'docs/*': standard_sidebars,
'docs/dev_guide/**': standard_sidebars,
'docs/reference/**': standard_sidebars,
'docs/tutorials/**': standard_sidebars,
'docs/user_guide/**': standard_sidebars,
'docs/gallery': [],
'docs/gallery/*': [],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bokehdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Bokeh.tex', u'Bokeh Documentation',
u'Continuum Analytics', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bokeh', u'Bokeh Documentation',
[u'Continuum Analytics'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Bokeh', u'Bokeh Documentation',
u'Continuum Analytics', 'Bokeh', 'Interactive Web Plotting for Python',
'Graphics'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# intersphinx settings
intersphinx_mapping = {'python': ('https://docs.python.org/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None)}
| bsd-3-clause |
lowRISC/ibex | vendor/lowrisc_ip/util/dvsim/dvsim.py | 2 | 26756 | #!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
"""dvsim is a command line tool to deploy ASIC tool flows such as regressions
for design verification (DV), formal property verification (FPV), linting and
synthesis.
It uses hjson as the format for specifying what to build and run. It is an
end-to-end regression manager that can deploy multiple builds (where some tests
might need a different set of compile-time options requiring a uniquely built sim
executable) in parallel followed by tests in parallel using the load balancer
of your choice.
dvsim is built to be tool-agnostic so that you can easily switch between the
tools at your disposal. dvsim uses fusesoc as the starting step to resolve all
inter-package dependencies and provide us with a filelist that will be consumed
by the sim tool.
"""
import argparse
import datetime
import logging as log
import os
import shlex
import subprocess
import sys
import textwrap
from pathlib import Path
import Launcher
import LauncherFactory
import LocalLauncher
from CfgFactory import make_cfg
from Deploy import RunTest
from Timer import Timer
from utils import (TS_FORMAT, TS_FORMAT_LONG, VERBOSE, rm_path,
run_cmd_with_timeout)
# TODO: add dvsim_cfg.hjson to retrieve this info
# Tool version string reported by the --version switch (see parse_args).
version = 0.1
# The different categories that can be passed to the --list argument.
_LIST_CATEGORIES = ["build_modes", "run_modes", "tests", "regressions"]
# Function to resolve the scratch root directory among the available options:
# If set on the command line, then use that as a preference.
# Else, check if $SCRATCH_ROOT env variable exists and is a directory.
# Else use the default (<proj_root>/scratch)
# Try to create the directory if it does not already exist.
def resolve_scratch_root(arg_scratch_root, proj_root):
    """Resolve the scratch root directory among the available options.

    Precedence: the command-line value (made absolute); else an accessible
    $SCRATCH_ROOT environment variable; else <proj_root>/scratch. The chosen
    directory is created if needed and must be writable, otherwise the tool
    exits with an error.
    """
    default_scratch_root = proj_root + "/scratch"
    env_scratch_root = os.environ.get('SCRATCH_ROOT')
    if arg_scratch_root:
        chosen = os.path.realpath(arg_scratch_root)
    elif env_scratch_root is None:
        chosen = default_scratch_root
    else:
        # Scratch space could be mounted in a filesystem (such as NFS) on a
        # network drive. If the network is down, it could cause the access
        # check to hang. So run a simple ls command with a timeout to prevent
        # the hang.
        (out, status) = run_cmd_with_timeout(cmd="ls -d " + env_scratch_root,
                                             timeout=1,
                                             exit_on_failure=0)
        if status == 0 and out != "":
            chosen = env_scratch_root
        else:
            chosen = default_scratch_root
            log.warning(
                "Env variable $SCRATCH_ROOT=\"{}\" is not accessible.\n"
                "Using \"{}\" instead.".format(env_scratch_root, chosen))
    # Create the directory if needed, then verify we can actually write to it.
    try:
        os.makedirs(chosen, exist_ok=True)
    except PermissionError as e:
        log.fatal("Failed to create scratch root {}:\n{}.".format(chosen, e))
        sys.exit(1)
    if not os.access(chosen, os.W_OK):
        log.fatal("Scratch root {} is not writable!".format(chosen))
        sys.exit(1)
    return chosen
def read_max_parallel(arg):
    """Parse a --max-parallel value, accepting only positive integers."""
    try:
        value = int(arg)
    except ValueError:
        value = 0  # any unparsable input falls through to the error below
    if value <= 0:
        raise argparse.ArgumentTypeError(
            'Bad argument for --max-parallel '
            '({!r}): must be a positive integer.'.format(arg))
    return value
def resolve_max_parallel(arg):
    """Pick a max_parallel value: CLI arg, else $DVSIM_MAX_PARALLEL, else 16."""
    if arg is not None:
        assert arg > 0
        return arg
    env_value = os.environ.get('DVSIM_MAX_PARALLEL')
    if env_value is None:
        return 16
    try:
        return read_max_parallel(env_value)
    except argparse.ArgumentTypeError:
        # A malformed environment variable is ignored with a warning.
        log.warning('DVSIM_MAX_PARALLEL environment variable has value '
                    '{!r}, which is not a positive integer. Using default '
                    'value (16).'.format(env_value))
    return 16
def resolve_branch(branch):
    """Choose a branch name for output files.

    If the --branch argument was passed on the command line, it is returned
    unchanged. Otherwise git is asked for the branch checked out in the
    working directory, with "default" as the fallback when that fails.
    """
    if branch is not None:
        return branch
    proc = subprocess.run(["git", "rev-parse", "--abbrev-ref", "HEAD"],
                          stdout=subprocess.PIPE)
    detected = proc.stdout.decode("utf-8").strip()
    if detected:
        return detected
    log.warning("Failed to find current git branch. "
                "Setting it to \"default\"")
    return "default"
# Get the project root directory path - this is used to construct the full paths
def get_proj_root():
    """Return the root of the enclosing git repository, exiting on failure."""
    cmd = ["git", "rev-parse", "--show-toplevel"]
    result = subprocess.run(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    proj_root = result.stdout.decode("utf-8").strip()
    if proj_root:
        return proj_root
    log.error(
        "Attempted to find the root of this GitHub repository by running:\n"
        "{}\n"
        "But this command has failed:\n"
        "{}".format(' '.join(cmd), result.stderr.decode("utf-8")))
    sys.exit(1)
def resolve_proj_root(args):
    """Update proj_root based on how DVSim is invoked.

    If the --remote switch is set (and this is not a dry run), a location in
    the scratch area is chosen as the new proj_root and the entire repo is
    copied over to it. Otherwise the proj_root is the one given on the
    command line, falling back to get_proj_root().

    Returns a (src, dest) pair of proj_root paths; the two are identical
    unless a remote copy actually happened.
    """
    src = args.proj_root or get_proj_root()
    dest = src
    # Jobs dispatched to external compute machines need the repo copied to a
    # scratch area that those machines can access.
    if args.remote and not args.dry_run:
        dest = os.path.join(args.scratch_root, args.branch, "repo_top")
        # If --purge arg is set, then purge the repo_top that was copied
        # before.
        if args.purge:
            rm_path(dest)
        copy_repo(src, dest)
    return src, dest
def copy_repo(src, dest):
    """Mirror the repository from src into dest.

    The copy uses rsync, which can take a file of exclude patterns; an
    existing `.gitignore` in the src area serves that purpose well for GitHub
    repos. The rsync invocation is wrapped in `flock` so concurrent dvsim
    launches serialize their updates of the shared dest area.
    """
    rsync_cmd = ["rsync", "--recursive", "--links", "--checksum", "--update",
                 "--inplace", "--no-group"]
    # Supply `.gitignore` from the src area to skip temp files.
    gitignore = os.path.join(src, ".gitignore")
    if os.path.exists(gitignore):
        # TODO: hack - include hw/foundry since it is excluded in .gitignore.
        rsync_cmd.append("--include=hw/foundry")
        rsync_cmd.append("--exclude-from={}".format(gitignore))
        rsync_cmd.append("--exclude=.*")
    rsync_cmd.append(src + "/.")
    rsync_cmd.append(dest)
    quoted_rsync = ' '.join(shlex.quote(word) for word in rsync_cmd)
    cmd = ["flock", "--timeout", "600", dest, "--command", quoted_rsync]
    log.info("[copy_repo] [dest]: %s", dest)
    log.log(VERBOSE, "[copy_repo] [cmd]: \n%s", ' '.join(cmd))
    # Make sure the dest exists first.
    os.makedirs(dest, exist_ok=True)
    try:
        subprocess.run(cmd,
                       check=True,
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE)
    except subprocess.CalledProcessError as e:
        # Best effort: log the rsync stderr but do not abort the whole run.
        log.error("Failed to copy over %s to %s: %s", src, dest,
                  e.stderr.decode("utf-8").strip())
    log.info("Done.")
def wrapped_docstring():
    """Return the module docstring re-wrapped paragraph by paragraph."""
    paragraphs = []
    current = []
    for raw_line in __doc__.strip().split('\n'):
        stripped = raw_line.strip()
        if stripped:
            current.append(stripped)
        elif current:
            # A blank line ends the paragraph being accumulated.
            paragraphs.append('\n'.join(current))
            current = []
    if current:
        paragraphs.append('\n'.join(current))
    return '\n\n'.join(textwrap.fill(p) for p in paragraphs)
def parse_args():
    """Build the dvsim command-line parser and return the parsed args.

    After parsing, this also: exits immediately on --version, expands a bare
    --list into all known categories, and resolves max_parallel from the
    command line / DVSIM_MAX_PARALLEL environment variable / default of 16.

    Fixes two typos in user-facing help strings for --list ("the the" and
    "space-separated of categories" -> "space-separated list of categories").
    """
    parser = argparse.ArgumentParser(
        description=wrapped_docstring(),
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument("cfg",
                        metavar="<cfg-hjson-file>",
                        help="""Configuration hjson file.""")

    parser.add_argument("--version",
                        action='store_true',
                        help="Print version and exit")

    parser.add_argument(
        "--tool",
        "-t",
        help=("Explicitly set the tool to use. This is "
              "optional for running simulations (where it can "
              "be set in an .hjson file), but is required for "
              "other flows. Possible tools include: vcs, "
              "xcelium, ascentlint, veriblelint, verilator, dc."))

    parser.add_argument("--list",
                        "-l",
                        nargs="*",
                        metavar='CAT',
                        choices=_LIST_CATEGORIES,
                        help=('Parse the given .hjson config file, list '
                              'the things that can be run, then exit. The '
                              'list can be filtered with a space-separated '
                              'list of categories from: {}.'.format(
                                  ', '.join(_LIST_CATEGORIES))))

    whatg = parser.add_argument_group('Choosing what to run')

    whatg.add_argument("-i",
                       "--items",
                       nargs="*",
                       default=["smoke"],
                       help=('Specify the regressions or tests to run. '
                             'Defaults to "smoke", but can be a '
                             'space separated list of test or regression '
                             'names.'))

    whatg.add_argument("--select-cfgs",
                       nargs="*",
                       metavar="CFG",
                       help=('The .hjson file is a primary config. Only run '
                             'the given configs from it. If this argument is '
                             'not used, dvsim will process all configs listed '
                             'in a primary config.'))

    disg = parser.add_argument_group('Dispatch options')

    disg.add_argument("--job-prefix",
                      default="",
                      metavar="PFX",
                      help=('Prepend this string when running each tool '
                            'command.'))

    disg.add_argument("--local",
                      action='store_true',
                      help=('Force jobs to be dispatched locally onto user\'s '
                            'machine.'))

    disg.add_argument("--remote",
                      action='store_true',
                      help=('Trigger copying of the repo to scratch area.'))

    disg.add_argument("--max-parallel",
                      "-mp",
                      type=read_max_parallel,
                      metavar="N",
                      help=('Run only up to N builds/tests at a time. '
                            'Default value 16, unless the DVSIM_MAX_PARALLEL '
                            'environment variable is set, in which case that '
                            'is used. Only applicable when launching jobs '
                            'locally.'))

    pathg = parser.add_argument_group('File management')

    pathg.add_argument("--scratch-root",
                       "-sr",
                       metavar="PATH",
                       help=('Destination for build / run directories. If not '
                             'specified, uses the path in the SCRATCH_ROOT '
                             'environment variable, if set, or ./scratch '
                             'otherwise.'))

    pathg.add_argument("--proj-root",
                       "-pr",
                       metavar="PATH",
                       help=('The root directory of the project. If not '
                             'specified, dvsim will search for a git '
                             'repository containing the current directory.'))

    pathg.add_argument("--branch",
                       "-br",
                       metavar='B',
                       help=('By default, dvsim creates files below '
                             '{scratch-root}/{dut}.{flow}.{tool}/{branch}. '
                             'If --branch is not specified, dvsim assumes the '
                             'current directory is a git repository and uses '
                             'the name of the current branch.'))

    pathg.add_argument("--max-odirs",
                       "-mo",
                       type=int,
                       default=5,
                       metavar="N",
                       help=('When tests are run, older runs are backed '
                             'up. Discard all but the N most recent (defaults '
                             'to 5).'))

    pathg.add_argument("--purge",
                       action='store_true',
                       help="Clean the scratch directory before running.")

    buildg = parser.add_argument_group('Options for building')

    buildg.add_argument("--build-only",
                        "-bu",
                        action='store_true',
                        help=('Stop after building executables for the given '
                              'items.'))

    buildg.add_argument("--build-unique",
                        action='store_true',
                        help=('Append a timestamp to the directory in which '
                              'files are built. This is suitable for the case '
                              'when another test is already running and you '
                              'want to run something else from a different '
                              'terminal without affecting it.'))

    buildg.add_argument("--build-opts",
                        "-bo",
                        nargs="+",
                        default=[],
                        metavar="OPT",
                        help=('Additional options passed on the command line '
                              'each time a build tool is run.'))

    buildg.add_argument("--build-modes",
                        "-bm",
                        nargs="+",
                        default=[],
                        metavar="MODE",
                        help=('The options for each build_mode in this list '
                              'are applied to all build and run targets.'))

    # NOTE(review): --gui is registered on the Dispatch group (disg) even
    # though it is declared here among the build options, so it appears under
    # "Dispatch options" in --help. Confirm this placement is intentional.
    disg.add_argument("--gui",
                      action='store_true',
                      help=('Run the flow in interactive mode instead of the '
                            'batch mode.'))

    rung = parser.add_argument_group('Options for running')

    rung.add_argument("--run-only",
                      "-ru",
                      action='store_true',
                      help=('Skip the build step (assume that simulation '
                            'executables have already been built).'))

    rung.add_argument("--run-opts",
                      "-ro",
                      nargs="+",
                      default=[],
                      metavar="OPT",
                      help=('Additional options passed on the command line '
                            'each time a test is run.'))

    rung.add_argument("--run-modes",
                      "-rm",
                      nargs="+",
                      default=[],
                      metavar="MODE",
                      help=('The options for each run_mode in this list are '
                            'applied to each simulation run.'))

    rung.add_argument("--profile",
                      "-p",
                      nargs="?",
                      choices=['time', 'mem'],
                      const="time",
                      metavar="P",
                      help=('Turn on simulation profiling (where P is time '
                            'or mem).'))

    rung.add_argument("--xprop-off",
                      action='store_true',
                      help="Turn off X-propagation in simulation.")

    rung.add_argument("--no-rerun",
                      action='store_true',
                      help=("Disable the default behaviour, where failing "
                            "tests are automatically rerun with waves "
                            "enabled."))

    rung.add_argument("--verbosity",
                      "-v",
                      choices=['n', 'l', 'm', 'h', 'f', 'd'],
                      metavar='V',
                      help=('Set tool/simulation verbosity to none (n), low '
                            '(l), medium (m), high (h), full (f) or debug (d).'
                            ' The default value is set in config files.'))

    seedg = parser.add_argument_group('Test seeds')

    seedg.add_argument("--seeds",
                       "-s",
                       nargs="+",
                       default=[],
                       metavar="S",
                       help=('A list of seeds for tests. Note that these '
                             'specific seeds are applied to items being run '
                             'in the order they are passed.'))

    seedg.add_argument("--fixed-seed",
                       type=int,
                       metavar='S',
                       help=('Run all items with the seed S. This implies '
                             '--reseed 1.'))

    seedg.add_argument("--reseed",
                       "-r",
                       type=int,
                       metavar="N",
                       help=('Override any reseed value in the test '
                             'configuration and run each test N times, with '
                             'a new seed each time.'))

    seedg.add_argument("--reseed-multiplier",
                       "-rx",
                       type=int,
                       default=1,
                       metavar="N",
                       help=('Scale each reseed value in the test '
                             'configuration by N. This allows e.g. running '
                             'the tests 10 times as much as normal while '
                             'maintaining the ratio of numbers of runs '
                             'between different tests.'))

    waveg = parser.add_argument_group('Dumping waves')

    waveg.add_argument(
        "--waves",
        "-w",
        nargs="?",
        choices=["default", "fsdb", "shm", "vpd", "vcd", "evcd", "fst"],
        const="default",
        help=("Enable dumping of waves. It takes an optional "
              "argument to pick the desired wave format. If "
              "the optional argument is not supplied, it picks "
              "whatever is the default for the chosen tool. "
              "By default, dumping waves is not enabled."))

    waveg.add_argument("--max-waves",
                       "-mw",
                       type=int,
                       default=5,
                       metavar="N",
                       help=('Only dump waves for the first N tests run. This '
                             'includes both tests scheduled for run and those '
                             'that are automatically rerun.'))

    covg = parser.add_argument_group('Generating simulation coverage')

    covg.add_argument("--cov",
                      "-c",
                      action='store_true',
                      help="Enable collection of coverage data.")

    covg.add_argument("--cov-merge-previous",
                      action='store_true',
                      help=('Only applicable with --cov. Merge any previous '
                            'coverage database directory with the new '
                            'coverage database.'))

    covg.add_argument("--cov-unr",
                      action='store_true',
                      help=('Run coverage UNR analysis and generate report. '
                            'This only supports VCS now.'))

    covg.add_argument("--cov-analyze",
                      action='store_true',
                      help=('Rather than building or running any tests, '
                            'analyze the coverage from the last run.'))

    pubg = parser.add_argument_group('Generating and publishing results')

    pubg.add_argument("--map-full-testplan",
                      action='store_true',
                      help=("Show complete testplan annotated results "
                            "at the end."))

    pubg.add_argument("--publish",
                      action='store_true',
                      help="Publish results to reports.opentitan.org.")

    dvg = parser.add_argument_group('Controlling DVSim itself')

    dvg.add_argument("--print-interval",
                     "-pi",
                     type=int,
                     default=10,
                     metavar="N",
                     help="Print status every N seconds.")

    dvg.add_argument("--verbose",
                     nargs="?",
                     choices=['default', 'debug'],
                     const="default",
                     metavar="D",
                     help=('With no argument, print verbose dvsim tool '
                           'messages. With --verbose=debug, the volume of '
                           'messages is even higher.'))

    dvg.add_argument("--dry-run",
                     "-n",
                     action='store_true',
                     help=("Print dvsim tool messages but don't actually "
                           "run any command"))

    args = parser.parse_args()

    if args.version:
        print(version)
        sys.exit()

    # We want the --list argument to default to "all categories", but allow
    # filtering. If args.list is None, then --list wasn't supplied. If it is
    # [], then --list was supplied with no further arguments and we want to
    # list all categories.
    if args.list == []:
        args.list = _LIST_CATEGORIES

    # Get max_parallel from environment if it wasn't specified on the command
    # line.
    args.max_parallel = resolve_max_parallel(args.max_parallel)
    assert args.max_parallel > 0

    return args
def main():
    """Top-level dvsim entry point.

    Parses the command line, configures logging, resolves the branch /
    proj_root / scratch area, builds the flow configuration from the hjson
    file, and then performs the requested action: --list, --cov-unr,
    --cov-analyze, or the normal build+run (optionally publishing results).
    Exits non-zero on configuration or run errors.
    """
    args = parse_args()
    # Add log level 'VERBOSE' between INFO and DEBUG
    log.addLevelName(VERBOSE, 'VERBOSE')
    log_format = '%(levelname)s: [%(module)s] %(message)s'
    # Default INFO; --verbose raises it to VERBOSE or DEBUG.
    log_level = log.INFO
    if args.verbose == "default":
        log_level = VERBOSE
    elif args.verbose == "debug":
        log_level = log.DEBUG
    log.basicConfig(format=log_format, level=log_level)
    if not os.path.exists(args.cfg):
        log.fatal("Path to config file %s appears to be invalid.", args.cfg)
        sys.exit(1)
    # If publishing results, then force full testplan mapping of results.
    if args.publish:
        args.map_full_testplan = True
    args.branch = resolve_branch(args.branch)
    proj_root_src, proj_root = resolve_proj_root(args)
    args.scratch_root = resolve_scratch_root(args.scratch_root, proj_root)
    log.info("[proj_root]: %s", proj_root)
    # Create an empty FUSESOC_IGNORE file in scratch_root. This ensures that
    # any fusesoc invocation from a job won't search within scratch_root for
    # core files.
    (Path(args.scratch_root) / 'FUSESOC_IGNORE').touch()
    args.cfg = os.path.abspath(args.cfg)
    # With --remote, rewrite the cfg path to point at the copied repo.
    if args.remote:
        cfg_path = args.cfg.replace(proj_root_src + "/", "")
        args.cfg = os.path.join(proj_root, cfg_path)
    # Add timestamp to args that all downstream objects can use.
    curr_ts = datetime.datetime.utcnow()
    setattr(args, "timestamp_long", curr_ts.strftime(TS_FORMAT_LONG))
    setattr(args, "timestamp", curr_ts.strftime(TS_FORMAT))
    # Register the seeds from command line with RunTest class.
    RunTest.seeds = args.seeds
    # If we are fixing a seed value, no point in tests having multiple reseeds.
    if args.fixed_seed:
        args.reseed = 1
    RunTest.fixed_seed = args.fixed_seed
    # Register the common deploy settings.
    Timer.print_interval = args.print_interval
    LocalLauncher.LocalLauncher.max_parallel = args.max_parallel
    Launcher.Launcher.max_odirs = args.max_odirs
    LauncherFactory.set_launcher_type(args.local)
    # Build infrastructure from hjson file and create the list of items to
    # be deployed.
    # NOTE(review): cfg is kept module-global; the reason is not visible in
    # this file (possibly for interactive/debug access) -- confirm before
    # relying on it.
    global cfg
    cfg = make_cfg(args.cfg, args, proj_root)
    # List items available for run if --list switch is passed, and exit.
    if args.list is not None:
        cfg.print_list()
        sys.exit(0)
    # Purge the scratch path if --purge option is set.
    if args.purge:
        cfg.purge()
    # If --cov-unr is passed, run UNR to generate report for unreachable
    # exclusion file.
    if args.cov_unr:
        cfg.cov_unr()
        cfg.deploy_objects()
        sys.exit(0)
    # In simulation mode: if --cov-analyze switch is passed, then run the GUI
    # tool.
    if args.cov_analyze:
        cfg.cov_analyze()
        cfg.deploy_objects()
        sys.exit(0)
    # Deploy the builds and runs
    if args.items != []:
        # Create deploy objects.
        cfg.create_deploy_objects()
        results = cfg.deploy_objects()
        # Generate results.
        cfg.gen_results(results)
        # Publish results
        if args.publish:
            cfg.publish_results()
    else:
        log.info("No items specified to be run.")
    # Exit with non-zero status if there were errors or failures.
    if cfg.has_errors():
        log.error("Errors were encountered in this run.")
        sys.exit(1)
# Script entry point: only run main() when invoked directly, not on import.
if __name__ == '__main__':
    main()
| apache-2.0 |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/mne/io/brainvision/tests/test_brainvision.py | 2 | 8037 | """Data Equivalence Tests"""
from __future__ import print_function
# Author: Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import inspect
from nose.tools import assert_equal, assert_raises, assert_true
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose)
from mne.utils import _TempDir, run_tests_if_main
from mne import pick_types, concatenate_raws, find_events
from mne.io.constants import FIFF
from mne.io import Raw, read_raw_brainvision
# Locate the on-disk test fixtures relative to this test module.
FILE = inspect.getfile(inspect.currentframe())
data_dir = op.join(op.dirname(op.abspath(FILE)), 'data')
# Brain Vision header / marker files and a high-pass variant used below.
vhdr_path = op.join(data_dir, 'test.vhdr')
vmrk_path = op.join(data_dir, 'test.vmrk')
vhdr_highpass_path = op.join(data_dir, 'test_highpass.vhdr')
montage = op.join(data_dir, 'test.hpts')
# Reference FIF file generated with MNE-C, used for equivalence checks.
eeg_bin = op.join(data_dir, 'test_bin_raw.fif')
# Channel names to treat as EOG when reading the raw files.
eog = ['HL', 'HR', 'Vb']
def test_brainvision_data_filters():
    """Check filter settings read from a high-pass Brain Vision header."""
    raw_highpass = read_raw_brainvision(vhdr_highpass_path, montage,
                                        eog=eog, preload=True)
    assert_equal(raw_highpass.info['highpass'], 0.1)
    assert_equal(raw_highpass.info['lowpass'], 250.)
    # Clearing the lowpass entry must not break filtering.
    raw_highpass.info['lowpass'] = None
    raw_highpass.filter(1, 30)
def test_brainvision_data():
    """Test reading raw Brain Vision files
    """
    # A .vmrk (marker) file is not a valid header, and 'scale' must be numeric.
    assert_raises(IOError, read_raw_brainvision, vmrk_path)
    assert_raises(ValueError, read_raw_brainvision, vhdr_path, montage,
                  preload=True, scale="foo")
    raw_py = read_raw_brainvision(vhdr_path, montage, eog=eog, preload=True)
    raw_py.load_data()  # currently does nothing
    assert_true('RawBrainVision' in repr(raw_py))
    assert_equal(raw_py.info['highpass'], 0.)
    assert_equal(raw_py.info['lowpass'], 250.)
    picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
    data_py, times_py = raw_py[picks]
    print(raw_py)  # to test repr
    print(raw_py.info)  # to test Info repr
    # compare with a file that was generated using MNE-C
    raw_bin = Raw(eeg_bin, preload=True)
    picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
    data_bin, times_bin = raw_bin[picks]
    assert_array_almost_equal(data_py, data_bin)
    assert_array_almost_equal(times_py, times_bin)
    # Make sure EOG channels are marked correctly
    raw_py = read_raw_brainvision(vhdr_path, montage, eog=eog,
                                  preload=True)
    for ch in raw_py.info['chs']:
        if ch['ch_name'] in eog:
            assert_equal(ch['kind'], FIFF.FIFFV_EOG_CH)
        elif ch['ch_name'] == 'STI 014':
            assert_equal(ch['kind'], FIFF.FIFFV_STIM_CH)
        elif ch['ch_name'] in raw_py.info['ch_names']:
            assert_equal(ch['kind'], FIFF.FIFFV_EEG_CH)
        else:
            raise RuntimeError("Unknown Channel: %s" % ch['ch_name'])
    # Make sure concatenation works
    raw_concat = concatenate_raws([raw_py.copy(), raw_py])
    assert_equal(raw_concat.n_times, 2 * raw_py.n_times)
def test_events():
    """Test reading and modifying events"""
    tempdir = _TempDir()
    # check that events are read and stim channel is synthesized correcly
    raw = read_raw_brainvision(vhdr_path, eog=eog, preload=True)
    events = raw.get_brainvision_events()
    assert_array_equal(events, [[487, 1, 253],
                                [497, 1, 255],
                                [1770, 1, 254],
                                [1780, 1, 255],
                                [3253, 1, 254],
                                [3263, 1, 255],
                                [4936, 1, 253],
                                [4946, 1, 255],
                                [6000, 1, 255],
                                [6620, 1, 254],
                                [6630, 1, 255]])
    # check that events are read and stim channel is synthesized correcly and
    # response triggers are shifted like they're supposed to be.
    # (the response event 255 at sample 6000 becomes 255 + 1000 = 1255)
    raw = read_raw_brainvision(vhdr_path, eog=eog, preload=True,
                               response_trig_shift=1000)
    events = raw.get_brainvision_events()
    assert_array_equal(events, [[487, 1, 253],
                                [497, 1, 255],
                                [1770, 1, 254],
                                [1780, 1, 255],
                                [3253, 1, 254],
                                [3263, 1, 255],
                                [4936, 1, 253],
                                [4946, 1, 255],
                                [6000, 1, 1255],
                                [6620, 1, 254],
                                [6630, 1, 255]])
    # check that events are read and stim channel is synthesized correcly and
    # response triggers are ignored.
    # (the response event at sample 6000 is dropped entirely)
    raw = read_raw_brainvision(vhdr_path, eog=eog, preload=True,
                               response_trig_shift=None)
    events = raw.get_brainvision_events()
    assert_array_equal(events, [[487, 1, 253],
                                [497, 1, 255],
                                [1770, 1, 254],
                                [1780, 1, 255],
                                [3253, 1, 254],
                                [3263, 1, 255],
                                [4936, 1, 253],
                                [4946, 1, 255],
                                [6620, 1, 254],
                                [6630, 1, 255]])
    # response_trig_shift must be an integer or None, never a float
    assert_raises(TypeError, read_raw_brainvision, vhdr_path, eog=eog,
                  preload=True, response_trig_shift=0.1)
    assert_raises(TypeError, read_raw_brainvision, vhdr_path, eog=eog,
                  preload=True, response_trig_shift=np.nan)
    mne_events = find_events(raw, stim_channel='STI 014')
    assert_array_equal(events[:, [0, 2]], mne_events[:, [0, 2]])
    # modify events and check that stim channel is updated
    index = events[:, 2] == 255
    events = events[index]
    raw.set_brainvision_events(events)
    mne_events = find_events(raw, stim_channel='STI 014')
    assert_array_equal(events[:, [0, 2]], mne_events[:, [0, 2]])
    # remove events
    nchan = raw.info['nchan']
    ch_name = raw.info['chs'][-2]['ch_name']
    events = np.empty((0, 3))
    raw.set_brainvision_events(events)
    # the synthesized stim channel must be kept (but zeroed), so the channel
    # count, data length and neighbouring channel name are unchanged
    assert_equal(raw.info['nchan'], nchan)
    assert_equal(len(raw._data), nchan)
    assert_equal(raw.info['chs'][-2]['ch_name'], ch_name)
    assert_equal(len(find_events(raw, 'STI 014')), 0)
    assert_allclose(raw[-1][0], 0.)
    fname = op.join(tempdir, 'evt_raw.fif')
    raw.save(fname)
    # add events back in
    events = [[10, 1, 2]]
    raw.set_brainvision_events(events)
    assert_equal(raw.info['nchan'], nchan)
    assert_equal(len(raw._data), nchan)
    assert_equal(raw.info['chs'][-1]['ch_name'], 'STI 014')
def test_read_segment():
    """Test writing raw eeg files when preload is False
    """
    tempdir = _TempDir()
    raw1 = read_raw_brainvision(vhdr_path, eog=eog, preload=False)
    raw1_file = op.join(tempdir, 'test1-raw.fif')
    raw1.save(raw1_file, overwrite=True)
    raw11 = Raw(raw1_file, preload=True)
    data1, times1 = raw1[:, :]
    data11, times11 = raw11[:, :]
    # data saved without preload must round-trip (to 8 decimal places)
    assert_array_almost_equal(data1, data11, 8)
    assert_array_almost_equal(times1, times11)
    assert_equal(sorted(raw1.info.keys()), sorted(raw11.info.keys()))
    # saving with preload=True must produce identical data and times
    raw2 = read_raw_brainvision(vhdr_path, eog=eog, preload=True)
    raw2_file = op.join(tempdir, 'test2-raw.fif')
    raw2.save(raw2_file, overwrite=True)
    data2, times2 = raw2[:, :]
    assert_array_equal(data1, data2)
    assert_array_equal(times1, times2)
    raw1 = Raw(raw1_file, preload=True)
    raw2 = Raw(raw2_file, preload=True)
    assert_array_equal(raw1._data, raw2._data)
    # save with buffer size smaller than file
    raw3_file = op.join(tempdir, 'test3-raw.fif')
    raw3 = read_raw_brainvision(vhdr_path, eog=eog)
    raw3.save(raw3_file, buffer_size_sec=2)
    raw3 = Raw(raw3_file, preload=True)
    assert_array_equal(raw3._data, raw1._data)
run_tests_if_main()
| bsd-3-clause |
ojengwa/oh-mainline | vendor/packages/scrapy/scrapy/xlib/ordereddict.py | 254 | 4221 | # Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
    """Dictionary that remembers insertion order (Python 2 recipe).

    Order is tracked with a circular doubly linked list of
    [key, prev, next] cells -- self.__end is the sentinel node -- plus a
    key -> cell map, so set, delete and iterate are O(1) per element.
    """
    def __init__(self, *args, **kwds):
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Already initialized (e.g. re-entered via __reduce__/copy)?
            self.__end
        except AttributeError:
            self.clear()
        self.update(*args, **kwds)
    def clear(self):
        self.__end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.__map = {}                 # key --> [key, prev, next]
        dict.clear(self)
    def __setitem__(self, key, value):
        if key not in self:
            # New key: splice a cell in just before the sentinel (the end).
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink the cell from the ordering list.
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev
    def __iter__(self):
        # Walk the list forwards, yielding keys in insertion order.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]
    def __reversed__(self):
        # Walk the list backwards.
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]
    def popitem(self, last=True):
        """Remove and return a (key, value) pair; LIFO if last, else FIFO."""
        if not self:
            raise KeyError('dictionary is empty')
        if last:
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value
    def __reduce__(self):
        # Pickle support: the linked-list cells are self-referential, so they
        # are dropped from the instance dict and rebuilt from the item list.
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def keys(self):
        return list(self)
    # Derive the remaining mapping API from DictMixin (built on the
    # order-aware __iter__/__getitem__ primitives above).
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems
    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())
    def copy(self):
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    def __eq__(self, other):
        # Order-sensitive comparison against another OrderedDict;
        # order-insensitive (plain dict) comparison otherwise.
        if isinstance(other, OrderedDict):
            if len(self) != len(other):
                return False
            for p, q in zip(self.items(), other.items()):
                if p != q:
                    return False
            return True
        return dict.__eq__(self, other)
    def __ne__(self, other):
        return not self == other
| agpl-3.0 |
ychfan/tensorflow | tensorflow/contrib/keras/api/keras/applications/vgg19/__init__.py | 74 | 1127 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""VGG19 Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras._impl.keras.applications.vgg19 import decode_predictions
from tensorflow.python.keras._impl.keras.applications.vgg19 import preprocess_input
from tensorflow.python.keras._impl.keras.applications.vgg19 import VGG19
del absolute_import
del division
del print_function
| apache-2.0 |
izonder/intellij-community | python/lib/Lib/BaseHTTPServer.py | 91 | 21289 | """HTTP server base class.
Note: the class in this module doesn't implement any HTTP request; see
SimpleHTTPServer for simple implementations of GET, HEAD and POST
(including CGI scripts). It does, however, optionally implement HTTP/1.1
persistent connections, as of version 0.3.
Contents:
- BaseHTTPRequestHandler: HTTP request handler base class
- test: test function
XXX To do:
- log requests even later (to capture byte count)
- log user-agent header and other interesting goodies
- send error log to separate file
"""
# See also:
#
# HTTP Working Group T. Berners-Lee
# INTERNET-DRAFT R. T. Fielding
# <draft-ietf-http-v10-spec-00.txt> H. Frystyk Nielsen
# Expires September 8, 1995 March 8, 1995
#
# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
#
# and
#
# Network Working Group R. Fielding
# Request for Comments: 2616 et al
# Obsoletes: 2068 June 1999
# Category: Standards Track
#
# URL: http://www.faqs.org/rfcs/rfc2616.html
# Log files
# ---------
#
# Here's a quote from the NCSA httpd docs about log file format.
#
# | The logfile format is as follows. Each line consists of:
# |
# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
# |
# | host: Either the DNS name or the IP number of the remote client
# | rfc931: Any information returned by identd for this person,
# | - otherwise.
# | authuser: If user sent a userid for authentication, the user name,
# | - otherwise.
# | DD: Day
# | Mon: Month (calendar name)
# | YYYY: Year
# | hh: hour (24-hour format, the machine's timezone)
# | mm: minutes
# | ss: seconds
# | request: The first line of the HTTP request as sent by the client.
# | ddd: the status code returned by the server, - if not available.
# | bbbb: the total number of bytes sent,
# | *not including the HTTP/1.0 header*, - if not available
# |
# | You can determine the name of the file accessed through request.
#
# (Actually, the latter is only true if you know the server configuration
# at the time the request was made!)
__version__ = "0.3"
__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
import sys
import time
import socket # For gethostbyaddr()
import mimetools
import SocketServer
# Default error message
DEFAULT_ERROR_MESSAGE = """\
<head>
<title>Error response</title>
</head>
<body>
<h1>Error response</h1>
<p>Error code %(code)d.
<p>Message: %(message)s.
<p>Error code explanation: %(code)s = %(explain)s.
</body>
"""
def _quote_html(html):
return html.replace("&", "&").replace("<", "<").replace(">", ">")
class HTTPServer(SocketServer.TCPServer):
    """TCP server that records its resolved host name and port on bind."""
    # Allows quick restarts on the same port without waiting for the old
    # socket to leave TIME_WAIT ("Address already in use").
    allow_reuse_address = 1    # Seems to make sense in testing environment
    def server_bind(self):
        """Override server_bind to store the server name."""
        SocketServer.TCPServer.server_bind(self)
        host, port = self.socket.getsockname()[:2]
        # Resolve to a fully-qualified name; used for logging and headers.
        self.server_name = socket.getfqdn(host)
        self.server_port = port
class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
"""HTTP request handler base class.
The following explanation of HTTP serves to guide you through the
code as well as to expose any misunderstandings I may have about
HTTP (so you don't need to read the code to figure out I'm wrong
:-).
HTTP (HyperText Transfer Protocol) is an extensible protocol on
top of a reliable stream transport (e.g. TCP/IP). The protocol
recognizes three parts to a request:
1. One line identifying the request type and path
2. An optional set of RFC-822-style headers
3. An optional data part
The headers and data are separated by a blank line.
The first line of the request has the form
<command> <path> <version>
where <command> is a (case-sensitive) keyword such as GET or POST,
<path> is a string containing path information for the request,
and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
<path> is encoded using the URL encoding scheme (using %xx to signify
the ASCII character with hex code xx).
The specification specifies that lines are separated by CRLF but
for compatibility with the widest range of clients recommends
servers also handle LF. Similarly, whitespace in the request line
is treated sensibly (allowing multiple spaces between components
and allowing trailing whitespace).
Similarly, for output, lines ought to be separated by CRLF pairs
but most clients grok LF characters just fine.
If the first line of the request has the form
<command> <path>
(i.e. <version> is left out) then this is assumed to be an HTTP
0.9 request; this form has no optional headers and data part and
the reply consists of just the data.
The reply form of the HTTP 1.x protocol again has three parts:
1. One line giving the response code
2. An optional set of RFC-822-style headers
3. The data
Again, the headers and data are separated by a blank line.
The response code line has the form
<version> <responsecode> <responsestring>
where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
<responsecode> is a 3-digit response code indicating success or
failure of the request, and <responsestring> is an optional
human-readable string explaining what the response code means.
This server parses the request and the headers, and then calls a
function specific to the request type (<command>). Specifically,
a request SPAM will be handled by a method do_SPAM(). If no
such method exists the server sends an error response to the
client. If it exists, it is called with no arguments:
do_SPAM()
Note that the request name is case sensitive (i.e. SPAM and spam
are different requests).
The various request details are stored in instance variables:
- client_address is the client IP address in the form (host,
port);
- command, path and version are the broken-down request line;
- headers is an instance of mimetools.Message (or a derived
class) containing the header information;
- rfile is a file object open for reading positioned at the
start of the optional input data part;
- wfile is a file object open for writing.
IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
The first thing to be written must be the response line. Then
follow 0 or more header lines, then a blank line, and then the
actual data (if any). The meaning of the header lines depends on
the command executed by the server; in most cases, when data is
returned, there should be at least one header line of the form
Content-type: <type>/<subtype>
where <type> and <subtype> should be registered MIME types,
e.g. "text/html" or "text/plain".
"""
# The Python system version, truncated to its first component.
sys_version = "Python/" + sys.version.split()[0]
# The server software version. You may want to override this.
# The format is multiple whitespace-separated strings,
# where each string is of the form name[/version].
server_version = "BaseHTTP/" + __version__
def parse_request(self):
"""Parse a request (internal).
The request should be stored in self.raw_requestline; the results
are in self.command, self.path, self.request_version and
self.headers.
Return True for success, False for failure; on failure, an
error is sent back.
"""
self.command = None # set in case of error on the first line
self.request_version = version = "HTTP/0.9" # Default
self.close_connection = 1
requestline = self.raw_requestline
if requestline[-2:] == '\r\n':
requestline = requestline[:-2]
elif requestline[-1:] == '\n':
requestline = requestline[:-1]
self.requestline = requestline
words = requestline.split()
if len(words) == 3:
[command, path, version] = words
if version[:5] != 'HTTP/':
self.send_error(400, "Bad request version (%r)" % version)
return False
try:
base_version_number = version.split('/', 1)[1]
version_number = base_version_number.split(".")
# RFC 2145 section 3.1 says there can be only one "." and
# - major and minor numbers MUST be treated as
# separate integers;
# - HTTP/2.4 is a lower version than HTTP/2.13, which in
# turn is lower than HTTP/12.3;
# - Leading zeros MUST be ignored by recipients.
if len(version_number) != 2:
raise ValueError
version_number = int(version_number[0]), int(version_number[1])
except (ValueError, IndexError):
self.send_error(400, "Bad request version (%r)" % version)
return False
if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
self.close_connection = 0
if version_number >= (2, 0):
self.send_error(505,
"Invalid HTTP Version (%s)" % base_version_number)
return False
elif len(words) == 2:
[command, path] = words
self.close_connection = 1
if command != 'GET':
self.send_error(400,
"Bad HTTP/0.9 request type (%r)" % command)
return False
elif not words:
return False
else:
self.send_error(400, "Bad request syntax (%r)" % requestline)
return False
self.command, self.path, self.request_version = command, path, version
# Examine the headers and look for a Connection directive
self.headers = self.MessageClass(self.rfile, 0)
conntype = self.headers.get('Connection', "")
if conntype.lower() == 'close':
self.close_connection = 1
elif (conntype.lower() == 'keep-alive' and
self.protocol_version >= "HTTP/1.1"):
self.close_connection = 0
return True
def handle_one_request(self):
"""Handle a single HTTP request.
You normally don't need to override this method; see the class
__doc__ string for information on how to handle specific HTTP
commands such as GET and POST.
"""
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
return
if not self.parse_request(): # An error code has been sent, just exit
return
mname = 'do_' + self.command
if not hasattr(self, mname):
self.send_error(501, "Unsupported method (%r)" % self.command)
return
method = getattr(self, mname)
method()
def handle(self):
"""Handle multiple requests if necessary."""
self.close_connection = 1
self.handle_one_request()
while not self.close_connection:
self.handle_one_request()
def send_error(self, code, message=None):
"""Send and log an error reply.
Arguments are the error code, and a detailed message.
The detailed message defaults to the short entry matching the
response code.
This sends an error response (so it must be called before any
output has been generated), logs the error, and finally sends
a piece of HTML explaining the error to the user.
"""
try:
short, long = self.responses[code]
except KeyError:
short, long = '???', '???'
if message is None:
message = short
explain = long
self.log_error("code %d, message %s", code, message)
# using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
content = (self.error_message_format %
{'code': code, 'message': _quote_html(message), 'explain': explain})
self.send_response(code, message)
self.send_header("Content-Type", "text/html")
self.send_header('Connection', 'close')
self.end_headers()
if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
self.wfile.write(content)
error_message_format = DEFAULT_ERROR_MESSAGE
def send_response(self, code, message=None):
"""Send the response header and log the response code.
Also send two standard headers with the server software
version and the current date.
"""
self.log_request(code)
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ''
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s %d %s\r\n" %
(self.protocol_version, code, message))
# print (self.protocol_version, code, message)
self.send_header('Server', self.version_string())
self.send_header('Date', self.date_time_string())
def send_header(self, keyword, value):
"""Send a MIME header."""
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s: %s\r\n" % (keyword, value))
if keyword.lower() == 'connection':
if value.lower() == 'close':
self.close_connection = 1
elif value.lower() == 'keep-alive':
self.close_connection = 0
def end_headers(self):
"""Send the blank line ending the MIME headers."""
if self.request_version != 'HTTP/0.9':
self.wfile.write("\r\n")
def log_request(self, code='-', size='-'):
"""Log an accepted request.
This is called by send_response().
"""
self.log_message('"%s" %s %s',
self.requestline, str(code), str(size))
def log_error(self, *args):
"""Log an error.
This is called when a request cannot be fulfilled. By
default it passes the message on to log_message().
Arguments are the same as for log_message().
XXX This should go to the separate error log.
"""
self.log_message(*args)
def log_message(self, format, *args):
"""Log an arbitrary message.
This is used by all other logging functions. Override
it if you have specific logging wishes.
The first argument, FORMAT, is a format string for the
message to be logged. If the format string contains
any % escapes requiring parameters, they should be
specified as subsequent arguments (it's just like
printf!).
The client host and current date/time are prefixed to
every message.
"""
sys.stderr.write("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format%args))
def version_string(self):
"""Return the server software version string."""
return self.server_version + ' ' + self.sys_version
def date_time_string(self, timestamp=None):
"""Return the current date and time formatted for a message header."""
if timestamp is None:
timestamp = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
self.weekdayname[wd],
day, self.monthname[month], year,
hh, mm, ss)
return s
def log_date_time_string(self):
"""Return the current time formatted for logging."""
now = time.time()
year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
s = "%02d/%3s/%04d %02d:%02d:%02d" % (
day, self.monthname[month], year, hh, mm, ss)
return s
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def address_string(self):
"""Return the client address formatted for logging.
This version looks up the full hostname using gethostbyaddr(),
and tries to find a name that contains at least one dot.
"""
host, port = self.client_address[:2]
return socket.getfqdn(host)
# Essentially static class variables
# The version of the HTTP protocol we support.
# Set this to HTTP/1.1 to enable automatic keepalive
protocol_version = "HTTP/1.0"
# The Message-like class used to parse headers
MessageClass = mimetools.Message
# Table mapping response codes to messages; entries have the
# form {code: (shortmessage, longmessage)}.
# See RFC 2616.
responses = {
100: ('Continue', 'Request received, please continue'),
101: ('Switching Protocols',
'Switching to new protocol; obey Upgrade header'),
200: ('OK', 'Request fulfilled, document follows'),
201: ('Created', 'Document created, URL follows'),
202: ('Accepted',
'Request accepted, processing continues off-line'),
203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
204: ('No Content', 'Request fulfilled, nothing follows'),
205: ('Reset Content', 'Clear input form for further input.'),
206: ('Partial Content', 'Partial content follows.'),
300: ('Multiple Choices',
'Object has several resources -- see URI list'),
301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
302: ('Found', 'Object moved temporarily -- see URI list'),
303: ('See Other', 'Object moved -- see Method and URL list'),
304: ('Not Modified',
'Document has not changed since given time'),
305: ('Use Proxy',
'You must use proxy specified in Location to access this '
'resource.'),
307: ('Temporary Redirect',
'Object moved temporarily -- see URI list'),
400: ('Bad Request',
'Bad request syntax or unsupported method'),
401: ('Unauthorized',
'No permission -- see authorization schemes'),
402: ('Payment Required',
'No payment -- see charging schemes'),
403: ('Forbidden',
'Request forbidden -- authorization will not help'),
404: ('Not Found', 'Nothing matches the given URI'),
405: ('Method Not Allowed',
'Specified method is invalid for this server.'),
406: ('Not Acceptable', 'URI not available in preferred format.'),
407: ('Proxy Authentication Required', 'You must authenticate with '
'this proxy before proceeding.'),
408: ('Request Timeout', 'Request timed out; try again later.'),
409: ('Conflict', 'Request conflict.'),
410: ('Gone',
'URI no longer exists and has been permanently removed.'),
411: ('Length Required', 'Client must specify Content-Length.'),
412: ('Precondition Failed', 'Precondition in headers is false.'),
413: ('Request Entity Too Large', 'Entity is too large.'),
414: ('Request-URI Too Long', 'URI is too long.'),
415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
416: ('Requested Range Not Satisfiable',
'Cannot satisfy request range.'),
417: ('Expectation Failed',
'Expect condition could not be satisfied.'),
500: ('Internal Server Error', 'Server got itself in trouble'),
501: ('Not Implemented',
'Server does not support this operation'),
502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
503: ('Service Unavailable',
'The server cannot process the request due to a high load'),
504: ('Gateway Timeout',
'The gateway server did not receive a timely response'),
505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
}
def test(HandlerClass = BaseHTTPRequestHandler,
         ServerClass = HTTPServer, protocol="HTTP/1.0"):
    """Test the HTTP request handler class.

    This runs an HTTP server on port 8000 (or the first command line
    argument).  Runs forever; interrupt with Ctrl-C.
    """
    if sys.argv[1:]:
        port = int(sys.argv[1])
    else:
        port = 8000
    server_address = ('', port)
    # Set on the class (not an instance): applies to every request handled
    # by this process.
    HandlerClass.protocol_version = protocol
    httpd = ServerClass(server_address, HandlerClass)
    sa = httpd.socket.getsockname()
    print "Serving HTTP on", sa[0], "port", sa[1], "..."
    httpd.serve_forever()
if __name__ == '__main__':
test()
| apache-2.0 |
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.3/django/shortcuts/__init__.py | 254 | 4642 | """
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
from django.template import loader, RequestContext
from django.http import HttpResponse, Http404
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from django.core import urlresolvers
def render_to_response(*args, **kwargs):
    """
    Returns a HttpResponse whose content is filled with the result of calling
    django.template.loader.render_to_string() with the passed arguments.
    """
    # 'mimetype' is meant for HttpResponse, not the template loader, so pop
    # it here before delegating the remaining arguments.
    httpresponse_kwargs = {'mimetype': kwargs.pop('mimetype', None)}
    return HttpResponse(loader.render_to_string(*args, **kwargs), **httpresponse_kwargs)
def render(request, *args, **kwargs):
    """
    Returns a HttpResponse whose content is filled with the result of calling
    django.template.loader.render_to_string() with the passed arguments.

    Uses a RequestContext by default.  'content_type' and 'status' are
    forwarded to HttpResponse; 'current_app' is only honoured when this
    function builds the context itself.
    """
    httpresponse_kwargs = {
        'content_type': kwargs.pop('content_type', None),
        'status': kwargs.pop('status', None),
    }
    if 'context_instance' in kwargs:
        context_instance = kwargs.pop('context_instance')
        if kwargs.get('current_app', None):
            # current_app must be set when the RequestContext is created, so
            # a caller-supplied context and current_app are incompatible.
            raise ValueError('If you provide a context_instance you must '
                             'set its current_app before calling render()')
    else:
        current_app = kwargs.pop('current_app', None)
        context_instance = RequestContext(request, current_app=current_app)
    kwargs['context_instance'] = context_instance
    return HttpResponse(loader.render_to_string(*args, **kwargs),
                        **httpresponse_kwargs)
def redirect(to, *args, **kwargs):
    """
    Returns an HttpResponseRedirect to the apropriate URL for the arguments
    passed.

    The arguments could be:

        * A model: the model's `get_absolute_url()` function will be called.

        * A view name, possibly with arguments: `urlresolvers.reverse()` will
          be used to reverse-resolve the name.

        * A URL, which will be used as-is for the redirect location.

    By default issues a temporary redirect; pass permanent=True to issue a
    permanent redirect
    """
    if kwargs.pop('permanent', False):
        redirect_class = HttpResponsePermanentRedirect
    else:
        redirect_class = HttpResponseRedirect
    # If it's a model, use get_absolute_url()
    if hasattr(to, 'get_absolute_url'):
        return redirect_class(to.get_absolute_url())
    # Next try a reverse URL resolution.
    try:
        return redirect_class(urlresolvers.reverse(to, args=args, kwargs=kwargs))
    except urlresolvers.NoReverseMatch:
        # If this is a callable, re-raise.
        if callable(to):
            raise
        # If this doesn't "feel" like a URL, re-raise: a bare name with no
        # '/' or '.' was almost certainly meant to be a view name, so the
        # NoReverseMatch is more helpful than a redirect to a broken path.
        if '/' not in to and '.' not in to:
            raise
    # Finally, fall back and assume it's a URL
    return redirect_class(to)
def _get_queryset(klass):
    """
    Normalize *klass* -- a Model, Manager, or QuerySet -- to a QuerySet.

    Shared helper that keeps get_object_or_404 and get_list_or_404 DRY.
    """
    if isinstance(klass, QuerySet):
        return klass
    # A Manager is usable directly; a Model falls back to its default
    # manager.  Either way, .all() produces the QuerySet we hand back.
    manager = klass if isinstance(klass, Manager) else klass._default_manager
    return manager.all()
def get_object_or_404(klass, *args, **kwargs):
    """
    Uses get() to return an object, or raises a Http404 exception if the object
    does not exist.

    klass may be a Model, Manager, or QuerySet object. All other passed
    arguments and keyword arguments are used in the get() query.

    Note: Like with get(), an MultipleObjectsReturned will be raised if more than one
    object is found.
    """
    queryset = _get_queryset(klass)
    try:
        return queryset.get(*args, **kwargs)
    except queryset.model.DoesNotExist:
        # Only DoesNotExist maps to a 404; MultipleObjectsReturned is left to
        # propagate on purpose (it indicates a programming error).
        raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
def get_list_or_404(klass, *args, **kwargs):
    """
    Uses filter() to return a list of objects, or raise a Http404 exception if
    the list is empty.

    klass may be a Model, Manager, or QuerySet object. All other passed
    arguments and keyword arguments are used in the filter() query.
    """
    queryset = _get_queryset(klass)
    obj_list = list(queryset.filter(*args, **kwargs))
    if obj_list:
        return obj_list
    # An empty result set maps to a 404 for the caller.
    raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
| bsd-3-clause |
ufoai/ufoai | contrib/buildbot/ufoai.py | 3 | 4229 | # code to deliver build status through twisted.words (instant messaging
# protocols: irc, etc)
import os, re, shlex, glob
import ConfigParser
from zope.interface import Interface, implements
from twisted.internet import protocol, reactor
from twisted.words.protocols import irc
from twisted.python import log, failure
from twisted.application import internet
from buildbot import interfaces, util
from buildbot import version
from buildbot.sourcestamp import SourceStamp
from buildbot.status import base
from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, EXCEPTION, SKIPPED
from buildbot import scheduler
from buildbot.steps.shell import ShellCommand
from string import join, capitalize, lower
from CUnitTest import CUnitTest
def get_config(filename):
    """Load *filename* into a SafeConfigParser and return it.

    Any failure is re-raised as a generic Exception on purpose: the original
    error text could echo file contents (e.g. passwords) to stdout.
    """
    properties = ConfigParser.SafeConfigParser()
    try:
        properties.read(filename)
    except Exception:
        # Bug fix: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt.  Catching Exception keeps the password-hiding
        # behaviour while letting interpreter-level exits through.
        raise Exception("Can't load %s file" % filename)
    return properties
class Publish(ShellCommand):
    """Buildbot step that copies a built artifact to its publish location
    via a single rm/mkdir/cp/chmod shell pipeline."""
    name = "publish"
    haltOnFailure = 1
    flunkOnFailure = 1
    description = [ "publishing" ]
    descriptionDone = [ "publish" ]
    def __init__(self, **kwargs):
        """Extract the custom src/dest arguments before delegating the rest
        of the kwargs to ShellCommand."""
        self.src = kwargs["src"]
        del kwargs["src"]
        self.dest = kwargs["dest"]
        del kwargs["dest"]
        ShellCommand.__init__(self, **kwargs)
        # Re-register the custom arguments so the build factory can persist them.
        self.addFactoryArguments(src = self.src)
        self.addFactoryArguments(dest = self.dest)
    def start(self):
        """Build and run the publish command; skipped unless the build set
        the "package" property."""
        properties = self.build.getProperties()
        if not properties.has_key("package"):
            # NOTE(review): returning SKIPPED from start() -- confirm buildbot
            # treats this as a skipped step rather than ignoring the value.
            return SKIPPED
        # Remove any stale copy, ensure the target directory exists, then
        # install the file world-readable.
        self.command = ""
        self.command += "rm -f %s && " % self.dest
        self.command += "mkdir -p $(dirname %s) && " % self.dest
        self.command += "cp %s %s && " % (self.src, self.dest)
        self.command += "chmod 644 %s" % self.dest
        ShellCommand.start(self)
class Package(ShellCommand):
    """Buildbot step that archives a list of files into a .zip, .tar.bz2 or
    .tar.gz package inside the build workdir."""
    name = "package"
    haltOnFailure = 1
    flunkOnFailure = 1
    description = [ "packaging" ]
    descriptionDone = [ "package" ]
    def __init__(self, **kwargs):
        """Extract the custom workdir/output/files arguments before
        delegating the remaining kwargs to ShellCommand."""
        self.workdir = kwargs["workdir"]
        del kwargs["workdir"]
        self.output = kwargs["output"]
        del kwargs["output"]
        self.files = kwargs["files"]
        del kwargs["files"]
        ShellCommand.__init__(self, **kwargs)
        # Re-register the custom arguments so the build factory can persist them.
        self.addFactoryArguments(workdir = self.workdir)
        self.addFactoryArguments(output = self.output)
        self.addFactoryArguments(files = self.files)
    def start(self):
        """Build and run the archive command; skipped unless the build set
        the "package" property.  The archiver is chosen from the output
        file extension."""
        properties = self.build.getProperties()
        if not properties.has_key("package"):
            return SKIPPED
        self.command = ""
        self.command += "cd %s && " % self.workdir
        self.command += "mkdir -p $(dirname %s) && " % self.output
        if self.output.endswith('.zip'):
            self.command += "zip %s %s && " % (self.output, " ".join(self.files))
        elif self.output.endswith('.tar.bz2'):
            self.command += "tar cvjf %s %s && " % (self.output, " ".join(self.files))
        elif self.output.endswith('.tar.gz'):
            self.command += "tar cvzf %s %s && " % (self.output, " ".join(self.files))
        else:
            print 'Output extension from "%s" unknown' % self.output
            # NOTE(review): returning FAILURE from start() -- confirm buildbot
            # records this as a failed step rather than discarding the value.
            return FAILURE
        self.command += "chmod 644 %s" % self.output
        ShellCommand.start(self)
class UFOAICUnitTest(CUnitTest):
    """CUnit step that runs a single named test suite via ./testall and
    collects its XML results file."""
    def __init__(self, suite, **kwargs):
        name = suite
        # ./testall writes its CUnit report as "<prefix>-Results.xml".
        result = name + "-Results.xml"
        # NOTE(review): these three instance assignments appear to be
        # overwritten when CUnitTest.__init__ processes the kwargs set just
        # below -- confirm whether they are still needed.
        self.name = [ "test_" + suite ]
        self.description = [ "testing " + suite ]
        self.descriptionDone = [ "test " + suite ]
        kwargs["name"] = "test " + suite
        kwargs["description"] = [ "testing " + suite ]
        kwargs["descriptionDone"] = [ "test " + suite ]
        # Run only the requested suite, non-interactively.
        kwargs["command"] = ["./testall", "--automated", "--only-" + suite, "--output-prefix=" + name]
        kwargs["cunit_result_file"] = result
        CUnitTest.__init__(self, **kwargs)
        self.addFactoryArguments(suite = suite)
class UFOAICUnitOtherTests(CUnitTest):
    """CUnit step that runs every suite EXCEPT the given ones, i.e. the
    remainder not covered by dedicated UFOAICUnitTest steps."""
    name = "test other tests"
    description = [ "testing other tests" ]
    descriptionDone = [ "test other tests" ]
    def __init__(self, suites, **kwargs):
        name = "other"
        # ./testall writes its CUnit report as "<prefix>-Results.xml".
        result = name + "-Results.xml"
        command = ["./testall", "--automated", "--output-prefix=" + name]
        # Disable each explicitly-covered suite so only the rest run.
        for suite in suites:
            command.append("--disable-" + suite)
        kwargs["command"] = command
        # NOTE(review): plain string here, while sibling steps use a list for
        # "description" -- confirm this inconsistency is intentional.
        kwargs["description"] = "OtherTests"
        kwargs["cunit_result_file"] = result
        CUnitTest.__init__(self, **kwargs)
        self.addFactoryArguments(suites = suites)
| gpl-2.0 |
l11x0m7/Wangdao | Wangdao.py | 1 | 3055 | # -*- coding: utf-8 -*-
import re,sys,os
import urllib, urllib2
from docx import Document
class PhotoSpider:
def __init__(self, page_from, page_to):
self.start = page_from
self.to = page_to
self.url = 'http://m.byr.cn/board/Friends'
self.head = 'http://m.byr.cn'
self.dirname = 'photos'
if not os.path.exists('photos'):
os.mkdir('photos')
def PagePhoto(self, page):
real_url = self.url + '?p=' + str(page)
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = { 'User-Agent' : user_agent }
req = urllib2.Request(real_url, headers = headers)
try:
myResponse = urllib2.urlopen(req)
except urllib2.URLError, e:
print '[PagePhoto]:', e.code
html = myResponse.read()
match = re.findall(r'<li.*?<div><a href="(.*?)".*?>(.*?)</a>', html, re.S)
for each in match:
if '王道' in each[1].decode():
now_url = self.head + each[0]
self.SaveImg(now_url, each[1])
def RangePhoto(self):
start = self.start
to = self.to
count = 1
for i in range(start, to+1):
self.PagePhoto(i)
print "Status:%.2f%%" % (float(count*100)/(to-start+1))
count += 1
print "Finish!"
def SaveImg(self, url, tiezi):
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = { 'User-Agent' : user_agent }
req = urllib2.Request(url, headers = headers)
try:
myResponse = urllib2.urlopen(req)
except urllib2.URLError, e:
print '[SaveImg]:', e
html = myResponse.read()
match = re.findall(r'a target="_blank" href="(/att.*?)">单击此查看原图', html, re.S)
for each in match:
name = each.strip().split('/')
name = tiezi.decode() + name[-2] + name[-1]
path1 = self.dirname + '/' + name + '.jpg'
path2 = self.dirname + '/' + name[-2] + name[-1] + '.jpg'
imgurl = self.head + each
if os.path.exists(path1) or os.path.exists(path2):
continue
try:
urllib.urlretrieve(imgurl, path1)
except IOError, e:
try:
urllib.urlretrieve(imgurl, path2)
except IOError, e:
print '[SaveImg]:', e
if __name__ == '__main__':
    # Py2-only hack: reload(sys) re-exposes setdefaultencoding so the
    # UTF-8 banner below can be re-encoded for a GBK (Chinese Windows) console.
    reload(sys)
    sys.setdefaultencoding('utf-8')
    print u"""
    ---------------------------------------
    程序:BYR论坛照片爬取程序
    版本:0.1
    作者:lxm
    日期:2016-01-02
    语言:Python 2.7
    功能:爬取北邮人论坛上的照片
    使用:输入要爬取的网页范围(一页30个帖子)
    ---------------------------------------
    """.decode().encode('gbk')
    # NOTE(review): Py2 input() eval()s what the user types -- the values are
    # expected to be integer page numbers; int(raw_input(...)) would be safer.
    start = input("where to start? ")
    to = input("where to end? ")
    spider = PhotoSpider(start, to)
    spider.RangePhoto()
| lgpl-3.0 |
h5py/h5py | h5py/tests/common.py | 1 | 6454 | # This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 Andrew Collette and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
import sys
import os
import shutil
import inspect
import tempfile
import subprocess
from contextlib import contextmanager
from functools import wraps
import numpy as np
import h5py
import unittest as ut
# Check if non-ascii filenames are supported
# Evidently this is the most reliable way to check
# See also h5py issue #263 and ipython #466
# To test for this, run the testsuite with LC_ALL=C
# Probe once at import time: chr(0x03b7) is a Greek eta, so mkstemp fails
# with UnicodeError on filesystems/locales that reject non-ASCII filenames.
try:
    testfile, fname = tempfile.mkstemp(chr(0x03b7))
except UnicodeError:
    UNICODE_FILENAMES = False
else:
    UNICODE_FILENAMES = True
    os.close(testfile)
    os.unlink(fname)
    del fname
    del testfile
class TestCase(ut.TestCase):
    """
    Base class for unit tests.

    Provides a per-class temp directory, an automatically opened HDF5 file
    (``self.f``), and NumPy/HDF5-aware assertion helpers.
    """
    @classmethod
    def setUpClass(cls):
        # One scratch directory shared by all tests of the class.
        cls.tempdir = tempfile.mkdtemp(prefix='h5py-test_')

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tempdir)

    def mktemp(self, suffix='.hdf5', prefix='', dir=None):
        # Return a fresh path inside the class temp dir (file is not created).
        if dir is None:
            dir = self.tempdir
        return tempfile.mktemp(suffix, prefix, dir=dir)

    def mktemp_mpi(self, comm=None, suffix='.hdf5', prefix='', dir=None):
        # Rank 0 picks the name; the broadcast makes all MPI ranks agree on
        # a single shared file path.
        if comm is None:
            from mpi4py import MPI
            comm = MPI.COMM_WORLD
        fname = None
        if comm.Get_rank() == 0:
            fname = self.mktemp(suffix, prefix, dir)
        fname = comm.bcast(fname, 0)
        return fname

    def setUp(self):
        # Fresh writable HDF5 file for every test.
        self.f = h5py.File(self.mktemp(), 'w')

    def tearDown(self):
        # Best-effort close.  NOTE(review): the bare except also swallows
        # KeyboardInterrupt; `except Exception` would be safer.
        try:
            if self.f:
                self.f.close()
        except:
            pass

    def assertSameElements(self, a, b):
        # Order-insensitive comparison that also works for unhashable
        # elements -- hence the O(n*m) double loop instead of set()s.
        for x in a:
            match = False
            for y in b:
                if x == y:
                    match = True
            if not match:
                raise AssertionError("Item '%s' appears in a but not b" % x)
        for x in b:
            match = False
            for y in a:
                if x == y:
                    match = True
            if not match:
                raise AssertionError("Item '%s' appears in b but not a" % x)

    def assertArrayEqual(self, dset, arr, message=None, precision=None):
        """ Make sure dset and arr have the same shape, dtype and contents, to
        within the given precision.
        Note that dset may be a NumPy array or an HDF5 dataset.
        """
        if precision is None:
            precision = 1e-5
        if message is None:
            message = ''
        else:
            message = ' (%s)' % message
        # Scalar path: both sides must be scalar, compared approximately.
        if np.isscalar(dset) or np.isscalar(arr):
            assert np.isscalar(dset) and np.isscalar(arr), \
                'Scalar/array mismatch ("%r" vs "%r")%s' % (dset, arr, message)
            assert dset - arr < precision, \
                "Scalars differ by more than %.3f%s" % (precision, message)
            return
        assert dset.shape == arr.shape, \
            "Shape mismatch (%s vs %s)%s" % (dset.shape, arr.shape, message)
        assert dset.dtype == arr.dtype, \
            "Dtype mismatch (%s vs %s)%s" % (dset.dtype, arr.dtype, message)
        # Compound dtypes are compared field by field, recursively.
        if arr.dtype.names is not None:
            for n in arr.dtype.names:
                message = '[FIELD %s] %s' % (n, message)
                self.assertArrayEqual(dset[n], arr[n], message=message, precision=precision)
        elif arr.dtype.kind in ('i', 'f'):
            # Numeric data: elementwise comparison within `precision`.
            assert np.all(np.abs(dset[...] - arr[...]) < precision), \
                "Arrays differ by more than %.3f%s" % (precision, message)
        else:
            # Everything else (strings, bools, ...): exact comparison.
            assert np.all(dset[...] == arr[...]), \
                "Arrays are not equal (dtype %s) %s" % (arr.dtype.str, message)

    def assertNumpyBehavior(self, dset, arr, s):
        """ Apply slicing arguments "s" to both dset and arr.
        Succeeds if the results of the slicing are identical, or the
        exception raised is of the same type for both.
        "arr" must be a Numpy array; "dset" may be a NumPy array or dataset.
        """
        exc = None
        try:
            arr_result = arr[s]
        except Exception as e:
            exc = type(e)
        if exc is None:
            self.assertArrayEqual(dset[s], arr_result)
        else:
            # NumPy raised: the dataset must raise the same exception type.
            with self.assertRaises(exc):
                dset[s]
# (major, minor) of the NumPy in use, for version-gated tests.
NUMPY_RELEASE_VERSION = tuple(int(part) for part in np.__version__.split(".")[:2])
@contextmanager
def closed_tempfile(suffix='', text=None):
    """
    Context manager which yields the path to a closed temporary file with the
    suffix `suffix`. The file is deleted on exiting the context, even if the
    body raises. An additional argument `text` can be provided to have the
    file contain `text`.
    """
    # delete=False + the closed `with` block give us a closed file that
    # survives until we remove it ourselves.
    with tempfile.NamedTemporaryFile(
        'w+t', suffix=suffix, delete=False
    ) as test_file:
        file_name = test_file.name
        if text is not None:
            test_file.write(text)
            test_file.flush()
    try:
        yield file_name
    finally:
        # Bug fix: shutil.rmtree() only removes directories; calling it on a
        # regular file fails, and ignore_errors=True silently leaked the temp
        # file.  Remove the file itself, tolerating prior deletion by the body.
        try:
            os.remove(file_name)
        except OSError:
            pass
def insubprocess(f):
    """Runs a test in its own subprocess.

    The parent re-invokes the single decorated test through ``pytest`` in a
    child process.  An environment variable derived from the test's
    "file::name" acts as a recursion guard: the child sees it set and runs
    the real test body instead of forking again.
    """
    @wraps(f)
    def wrapper(request, *args, **kwargs):
        curr_test = inspect.getsourcefile(f) + "::" + request.node.name
        # get block around test name
        insub = "IN_SUBPROCESS_" + curr_test
        for c in "/\\,:.":
            insub = insub.replace(c, "_")
        defined = os.environ.get(insub, None)
        # fork process
        if defined:
            # Child: guard is set, run the actual test body.
            return f(request, *args, **kwargs)
        else:
            # Parent: set the guard, add any @subproc_env variables, and run
            # just this test under pytest, capturing its combined output.
            os.environ[insub] = '1'
            env = os.environ.copy()
            env[insub] = '1'
            env.update(getattr(f, 'subproc_env', {}))
            with closed_tempfile() as stdout:
                with open(stdout, 'w+t') as fh:
                    rtn = subprocess.call([sys.executable, '-m', 'pytest', curr_test],
                                          stdout=fh, stderr=fh, env=env)
                with open(stdout, 'rt') as fh:
                    out = fh.read()
                # Surface the child's output when it failed.
                assert rtn == 0, "\n" + out
    return wrapper
def subproc_env(d):
    """Attach *d* as the environment-variable dict consumed by @insubprocess."""
    def _attach(func):
        setattr(func, "subproc_env", d)
        return func
    return _attach
| bsd-3-clause |
wso2-dev/device-cloud-appliances | DigitalDisplay/modules/sequence_runner.py | 5 | 4297 | #!/usr/bin/env python
# encoding: utf-8
# Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
#
# WSO2 Inc. licenses this file to you under the Apache License,
# Version 2.0 (the "License"); you may not use this file except
# in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
WSO2 Display Agent server implementation for python.
Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
To use, simply 'import sequence_runner' and run sequence away!
"""
import time
import threading
import modules.kernel_utils as kernel_utils
import modules.resource_types as resource_types
class SequenceRunnerError(Exception):
    """Raised for SequenceRunner configuration and runtime errors."""
class SequenceRunner(object):
    """
    SequenceRunner class for executing a sequence of display resources.

    Builds resource instances from a parsed configuration and plays them in
    an endless round-robin loop in the attached web browser.
    """
    # Resource currently on screen (class-wide, readable by other modules).
    current_phase_resource = None

    def __init__(self, resources_conf, web_browser):
        """Validate and store the resource configuration and browser handle.

        :raises SequenceRunnerError: if either argument is None.
        """
        if resources_conf is None:
            raise SequenceRunnerError("Initialization error: resources_conf cannot be null!")
        if web_browser is None:
            raise SequenceRunnerError("Initialization error: web_browser cannot be null!")
        self.current_resources_conf = resources_conf
        self.current_web_browser = web_browser
        # get all subclasses implementing ResourceTypeBase
        self.resource_types = resource_types.get_all_resource_types()

    def __get_resource_type(self, type_):
        """
        Returns implementing class for specific resourceTypes.
        @return resourceType
        @throws NotImplementedError when resourceType is Unknown
        """
        try:
            # Match on the class-level `name` attribute declared by each type.
            resource_type = [cls for cls in self.resource_types if cls.__dict__['name'] == type_][0]
        except IndexError:
            raise NotImplementedError("Unknown ResourceType found: `" + type_ + "`")
        return resource_type

    def __create_sequence(self, resources_conf):
        """
        Returns a new sequence queue built from the configuration.
        @return sequence_queue
        @throws ConfigError
        """
        sequence_queue = []
        # Bug-prone `type(x) is list` replaced with isinstance so list
        # subclasses are handled too.
        if isinstance(resources_conf, list):
            # multiple resources: loop through them
            for resource in resources_conf:
                # "@..." keys carry the resource's attributes.
                args = [(key, resource[key]) for key in resource.keys() if key.startswith("@")]
                rv = self.__get_resource_type(resource['@type'])
                instance = rv(args)
                sequence_queue.append(instance)
        else:
            # just one resource found
            args = [(key, resources_conf[key]) for key in resources_conf.keys() if
                    key.startswith("@")]
            rv = self.__get_resource_type(resources_conf['@type'])
            instance = rv(args)
            sequence_queue.append(instance)
        return sequence_queue

    def run_sequence(self, sequence=None, sequence_id=None):
        """Show one resource, then re-schedule itself for the next one.

        On the first call (no arguments) the sequence is created from the
        stored configuration and started at index 0.
        """
        if sequence is None:
            # First invocation: build a fresh sequence.
            sequence = self.__create_sequence(self.current_resources_conf)
            sequence_id = 0
        # Pick and publish the resource for this phase.
        resource = sequence[sequence_id]
        SequenceRunner.current_phase_resource = resource
        args = {'browser': self.current_web_browser,
                'port': self.current_web_browser.port,
                'browser_path': self.current_web_browser.path,
                'delay': 10}
        # Display the resource for its configured show-up time, then stop it.
        resource.run(args)
        show_up_time = resource.time
        time.sleep(kernel_utils.get_seconds(show_up_time))
        resource.stop(args)
        # Advance round-robin, wrapping back to the first resource.
        sequence_id = (sequence_id + 1) % len(sequence)
        # Re-schedule immediately on a fresh timer thread (keeps the current
        # thread's stack from growing).  (Also fixes the stray license text
        # that had been fused onto this line by the file extraction.)
        threading.Timer(0, self.run_sequence, [sequence, sequence_id]).start()
mKeRix/home-assistant | homeassistant/components/bom/weather.py | 7 | 3419 | """Support for Australian BOM (Bureau of Meteorology) weather service."""
import logging
import voluptuous as vol
from homeassistant.components.weather import PLATFORM_SCHEMA, WeatherEntity
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS
from homeassistant.helpers import config_validation as cv
# Reuse data and API logic from the sensor implementation
from .sensor import CONF_STATION, BOMCurrentData, closest_station, validate_station
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_STATION): validate_station}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the BOM weather platform.

    An explicitly configured station wins; otherwise the closest station to
    the configured latitude/longitude is used.  Returns False when no
    station can be resolved or the initial data fetch fails.
    """
    station = config.get(CONF_STATION) or closest_station(
        config.get(CONF_LATITUDE), config.get(CONF_LONGITUDE), hass.config.config_dir
    )
    if station is None:
        _LOGGER.error("Could not get BOM weather station from lat/lon")
        return False
    bom_data = BOMCurrentData(station)
    try:
        # Prime the data cache so the entity starts with real readings.
        bom_data.update()
    except ValueError as err:
        _LOGGER.error("Received error from BOM_Current: %s", err)
        return False
    add_entities([BOMWeather(bom_data, config.get(CONF_NAME))], True)
class BOMWeather(WeatherEntity):
    """Representation of a weather condition."""
    def __init__(self, bom_data, stationname=None):
        """Initialise the platform with a data instance and station name."""
        self.bom_data = bom_data
        # Fall back to the station name reported by the BOM feed itself.
        self.stationname = stationname or self.bom_data.latest_data.get("name")
    def update(self):
        """Update current conditions."""
        self.bom_data.update()
    @property
    def name(self):
        """Return the name of the sensor."""
        return f"BOM {self.stationname or '(unknown station)'}"
    @property
    def condition(self):
        """Return the current condition."""
        return self.bom_data.get_reading("weather")
    # Now implement the WeatherEntity interface
    @property
    def temperature(self):
        """Return the platform temperature."""
        return self.bom_data.get_reading("air_temp")
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS
    @property
    def pressure(self):
        """Return the mean sea-level pressure."""
        return self.bom_data.get_reading("press_msl")
    @property
    def humidity(self):
        """Return the relative humidity."""
        return self.bom_data.get_reading("rel_hum")
    @property
    def wind_speed(self):
        """Return the wind speed."""
        return self.bom_data.get_reading("wind_spd_kmh")
    @property
    def wind_bearing(self):
        """Return the wind bearing in degrees, mapped from the compass
        direction reported by the feed (16-point rose, 22.5° per step)."""
        # NOTE(review): this list/dict is rebuilt on every property access --
        # could be hoisted to a class-level constant.
        directions = [
            "N",
            "NNE",
            "NE",
            "ENE",
            "E",
            "ESE",
            "SE",
            "SSE",
            "S",
            "SSW",
            "SW",
            "WSW",
            "W",
            "WNW",
            "NW",
            "NNW",
        ]
        wind = {name: idx * 360 / 16 for idx, name in enumerate(directions)}
        # Unknown/missing direction yields None.
        return wind.get(self.bom_data.get_reading("wind_dir"))
    @property
    def attribution(self):
        """Return the attribution."""
        return "Data provided by the Australian Bureau of Meteorology"
| mit |
kevinbaijnath/MultiUserBlog | src/blog.py | 1 | 2222 | """
Contains the WebApphandlers for the MultiUserBlog
"""
import webapp2
from handlers.mainhandler import MainHandler
from handlers.blogfronthandler import BlogFrontHandler
from handlers.newblogposthandler import NewBlogPostHandler
from handlers.blogposthandler import BlogPostHandler
from handlers.editblogposthandler import EditBlogPostHandler
from handlers.deleteblogposthandler import DeleteBlogPostHandler as DelBlog
from handlers.starblogposthandler import StarBlogPostHandler
from handlers.unstarblogposthandler import UnstarBlogPostHandler as Unstar
from handlers.starredblogposthandler import StarredBlogPostHandler
from handlers.newcommenthandler import NewCommentHandler as NewComment
from handlers.deletecommenthandler import DeleteCommentHandler as DelCom
from handlers.editcommenthandler import EditCommentHandler as EditCom
from handlers.registrationhandler import RegistrationHandler
from handlers.welcomehandler import WelcomeHandler
from handlers.logouthandler import LogoutHandler
from handlers.loginhandler import LoginHandler
# WSGI routing table: maps each URL pattern to its handler class.  Numeric
# groups (\d+) capture blog-post and comment ids passed to the handlers.
app = webapp2.WSGIApplication([("/", MainHandler),
                               (r"/blog/?", BlogFrontHandler),
                               ("/blog/newpost/?", NewBlogPostHandler),
                               (r"/blog/(\d+)/?", BlogPostHandler),
                               (r"/blog/edit/(\d+)/?", EditBlogPostHandler),
                               (r"/blog/delete/(\d+)/?", DelBlog),
                               (r"/blog/star/(\d+)/?", StarBlogPostHandler),
                               (r"/blog/unstar/(\d+)/?", Unstar),
                               ("/blog/starred", StarredBlogPostHandler),
                               (r"/blog/newcomment/(\d+)/?", NewComment),
                               (r"/blog/deletecomment/(\d+)/(\d+)/?", DelCom),
                               (r"/blog/editcomment/(\d+)/(\d+)/?", EditCom),
                               ("/register/?", RegistrationHandler),
                               ("/welcome/?", WelcomeHandler),
                               ("/logout/?", LogoutHandler),
                               ("/login/?", LoginHandler)
                               ],
                              debug=True)
| mit |
mancoast/CPythonPyc_test | cpython/275_test_gl.py | 92 | 6753 | #! /usr/bin/env python
"""Very simple test script for the SGI gl library extension module
taken mostly from the documentation.
Roger E. Masse
"""
import unittest
from test.test_support import verbose, import_module
import time
gl = import_module('gl')
GL = import_module('GL')
# Exhaustive list of public attributes of the legacy SGI `gl` module; the
# test touches each one via getattr() to catch missing/broken bindings.
glattrs = ['RGBcolor', 'RGBcursor', 'RGBmode', 'RGBrange', 'RGBwritemask',
    '__doc__', '__name__', 'addtopup', 'altgetmatrix', 'arc', 'arcf',
    'arcfi', 'arcfs', 'arci', 'arcs', 'attachcursor', 'backbuffer',
    'backface', 'bbox2', 'bbox2i', 'bbox2s', 'bgnclosedline', 'bgnline',
    'bgnpoint', 'bgnpolygon', 'bgnsurface', 'bgntmesh', 'bgntrim',
    'blankscreen', 'blanktime', 'blendfunction', 'blink', 'c3f', 'c3i',
    'c3s', 'c4f', 'c4i', 'c4s', 'callobj', 'charstr', 'chunksize', 'circ',
    'circf', 'circfi', 'circfs', 'circi', 'circs', 'clear',
    'clearhitcode', 'clkoff', 'clkon', 'closeobj', 'cmode', 'cmov',
    'cmov2', 'cmov2i', 'cmov2s', 'cmovi', 'cmovs', 'color', 'colorf',
    'compactify', 'concave', 'cpack', 'crv', 'crvn', 'curorigin',
    'cursoff', 'curson', 'curstype', 'curvebasis', 'curveit',
    'curveprecision', 'cyclemap', 'czclear', 'defbasis', 'defcursor',
    'deflinestyle', 'delobj', 'deltag', 'depthcue', 'devport', 'dglclose',
    'dglopen', 'dither', 'dopup', 'doublebuffer', 'draw', 'draw2',
    'draw2i', 'draw2s', 'drawi', 'drawmode', 'draws', 'editobj',
    'endclosedline', 'endfullscrn', 'endline', 'endpick', 'endpoint',
    'endpolygon', 'endpupmode', 'endselect', 'endsurface', 'endtmesh',
    'endtrim', 'finish', 'font', 'foreground', 'freepup', 'frontbuffer',
    'fudge', 'fullscrn', 'gRGBcolor', 'gRGBmask', 'gammaramp', 'gbegin',
    'gconfig', 'genobj', 'gentag', 'getbackface', 'getbuffer',
    'getbutton', 'getcmmode', 'getcolor', 'getcpos', 'getcursor',
    'getdcm', 'getdepth', 'getdescender', 'getdisplaymode', 'getdrawmode',
    'getfont', 'getgdesc', 'getgpos', 'getheight', 'gethitcode',
    'getlsbackup', 'getlsrepeat', 'getlstyle', 'getlwidth', 'getmap',
    'getmatrix', 'getmcolor', 'getmmode', 'getmonitor',
    'getnurbsproperty', 'getopenobj', 'getorigin', 'getothermonitor',
    'getpattern', 'getplanes', 'getport', 'getresetls', 'getscrmask',
    'getshade', 'getsize', 'getsm', 'gettp', 'getvaluator', 'getvideo',
    'getviewport', 'getwritemask', 'getzbuffer', 'gewrite', 'gflush',
    'ginit', 'glcompat', 'greset', 'gselect', 'gsync', 'gversion',
    'iconsize', 'icontitle', 'imakebackground', 'initnames', 'ismex',
    'isobj', 'isqueued', 'istag', 'keepaspect', 'lRGBrange', 'lampoff',
    'lampon', 'linesmooth', 'linewidth', 'lmbind', 'lmcolor', 'lmdef',
    'loadmatrix', 'loadname', 'logicop', 'lookat', 'lrectread',
    'lrectwrite', 'lsbackup', 'lsetdepth', 'lshaderange', 'lsrepeat',
    'makeobj', 'maketag', 'mapcolor', 'mapw', 'mapw2', 'maxsize',
    'minsize', 'mmode', 'move', 'move2', 'move2i', 'move2s', 'movei',
    'moves', 'multimap', 'multmatrix', 'n3f', 'newpup', 'newtag',
    'noborder', 'noise', 'noport', 'normal', 'nurbscurve', 'nurbssurface',
    'nvarray', 'objdelete', 'objinsert', 'objreplace', 'onemap', 'ortho',
    'ortho2', 'overlay', 'packrect', 'pagecolor', 'pagewritemask',
    'passthrough', 'patch', 'patchbasis', 'patchcurves', 'patchprecision',
    'pclos', 'pdr', 'pdr2', 'pdr2i', 'pdr2s', 'pdri', 'pdrs',
    'perspective', 'pick', 'picksize', 'pixmode', 'pmv', 'pmv2', 'pmv2i',
    'pmv2s', 'pmvi', 'pmvs', 'pnt', 'pnt2', 'pnt2i', 'pnt2s', 'pnti',
    'pnts', 'pntsmooth', 'polarview', 'polf', 'polf2', 'polf2i', 'polf2s',
    'polfi', 'polfs', 'poly', 'poly2', 'poly2i', 'poly2s', 'polyi',
    'polys', 'popattributes', 'popmatrix', 'popname', 'popviewport',
    'prefposition', 'prefsize', 'pupmode', 'pushattributes', 'pushmatrix',
    'pushname', 'pushviewport', 'pwlcurve', 'qdevice', 'qenter', 'qgetfd',
    'qread', 'qreset', 'qtest', 'rcrv', 'rcrvn', 'rdr', 'rdr2', 'rdr2i',
    'rdr2s', 'rdri', 'rdrs', 'readdisplay', 'readsource', 'rect',
    'rectcopy', 'rectf', 'rectfi', 'rectfs', 'recti', 'rects', 'rectzoom',
    'resetls', 'reshapeviewport', 'ringbell', 'rmv', 'rmv2', 'rmv2i',
    'rmv2s', 'rmvi', 'rmvs', 'rot', 'rotate', 'rpatch', 'rpdr', 'rpdr2',
    'rpdr2i', 'rpdr2s', 'rpdri', 'rpdrs', 'rpmv', 'rpmv2', 'rpmv2i',
    'rpmv2s', 'rpmvi', 'rpmvs', 'sbox', 'sboxf', 'sboxfi', 'sboxfs',
    'sboxi', 'sboxs', 'scale', 'screenspace', 'scrmask', 'setbell',
    'setcursor', 'setdepth', 'setlinestyle', 'setmap', 'setmonitor',
    'setnurbsproperty', 'setpattern', 'setpup', 'setshade', 'setvaluator',
    'setvideo', 'shademodel', 'shaderange', 'singlebuffer', 'smoothline',
    'spclos', 'splf', 'splf2', 'splf2i', 'splf2s', 'splfi', 'splfs',
    'stepunit', 'strwidth', 'subpixel', 'swapbuffers', 'swapinterval',
    'swaptmesh', 'swinopen', 'textcolor', 'textinit', 'textport',
    'textwritemask', 'tie', 'tpoff', 'tpon', 'translate', 'underlay',
    'unpackrect', 'unqdevice', 'v2d', 'v2f', 'v2i', 'v2s', 'v3d', 'v3f',
    'v3i', 'v3s', 'v4d', 'v4f', 'v4i', 'v4s', 'varray', 'videocmd',
    'viewport', 'vnarray', 'winattach', 'winclose', 'winconstraints',
    'windepth', 'window', 'winget', 'winmove', 'winopen', 'winpop',
    'winposition', 'winpush', 'winset', 'wintitle', 'wmpack', 'writemask',
    'writepixels', 'xfpt', 'xfpt2', 'xfpt2i', 'xfpt2s', 'xfpt4', 'xfpt4i',
    'xfpt4s', 'xfpti', 'xfpts', 'zbuffer', 'zclear', 'zdraw', 'zfunction',
    'zsource', 'zwritemask']
def test_main():
    """Smoke-test the legacy SGI gl bindings: touch every exported
    attribute, then draw a small 'CrissCross' window for two seconds.
    Requires an X display; skipped otherwise.  (Python 2 only.)"""
    # insure that we at least have an X display before continuing.
    import os
    try:
        display = os.environ['DISPLAY']
    except:
        # NOTE(review): a bare except here also hides unrelated errors; a
        # KeyError catch would be sufficient.
        raise unittest.SkipTest, "No $DISPLAY -- skipping gl test"
    # touch all the attributes of gl without doing anything
    if verbose:
        print 'Touching gl module attributes...'
    for attr in glattrs:
        if verbose:
            print 'touching: ', attr
        getattr(gl, attr)
    # create a small 'Crisscross' window
    if verbose:
        print 'Creating a small "CrissCross" window...'
        print 'foreground'
    gl.foreground()
    if verbose:
        print 'prefposition'
    gl.prefposition(500, 900, 500, 900)
    if verbose:
        print 'winopen "CrissCross"'
    w = gl.winopen('CrissCross')
    if verbose:
        print 'clear'
    gl.clear()
    if verbose:
        print 'ortho2'
    gl.ortho2(0.0, 400.0, 0.0, 400.0)
    if verbose:
        print 'color WHITE'
    gl.color(GL.WHITE)
    if verbose:
        print 'color RED'
    gl.color(GL.RED)
    # First diagonal: bottom-left to top-right.
    if verbose:
        print 'bgnline'
    gl.bgnline()
    if verbose:
        print 'v2f'
    gl.v2f(0.0, 0.0)
    gl.v2f(400.0, 400.0)
    if verbose:
        print 'endline'
    gl.endline()
    # Second diagonal: bottom-right to top-left (completes the cross).
    if verbose:
        print 'bgnline'
    gl.bgnline()
    if verbose:
        print 'v2i'
    gl.v2i(400, 0)
    gl.v2i(0, 400)
    if verbose:
        print 'endline'
    gl.endline()
    if verbose:
        print 'Displaying window for 2 seconds...'
    time.sleep(2)
    if verbose:
        print 'winclose'
    gl.winclose(w)
if __name__ == '__main__':
    test_main()
| gpl-3.0 |
reyoung/Paddle | python/paddle/trainer_config_helpers/evaluators.py | 7 | 25289 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer.config_parser import *
from default_decorators import *
__all__ = [
"evaluator_base",
"classification_error_evaluator",
"auc_evaluator",
"pnpair_evaluator",
"precision_recall_evaluator",
"ctc_error_evaluator",
"chunk_evaluator",
"sum_evaluator",
"column_sum_evaluator",
"value_printer_evaluator",
"gradient_printer_evaluator",
"maxid_printer_evaluator",
"maxframe_printer_evaluator",
"seqtext_printer_evaluator",
"classification_error_printer_evaluator",
"detection_map_evaluator",
]
class EvaluatorAttribute(object):
    """Bit flags classifying evaluators, plus the matching attribute names
    that @evaluator stamps onto decorated functions."""
    FOR_CLASSIFICATION = 1
    FOR_REGRESSION = 1 << 1
    FOR_RANK = 1 << 2
    FOR_PRINT = 1 << 3
    FOR_UTILS = 1 << 4
    FOR_DETECTION = 1 << 5

    KEYS = [
        "for_classification", "for_regression", "for_rank", "for_print",
        "for_utils", "for_detection"
    ]

    @staticmethod
    def to_key(idx):
        """Map a single attribute bit to its key string (None if no match)."""
        mask = 1
        for key in EvaluatorAttribute.KEYS:
            if idx == mask:
                return key
            mask <<= 1
def evaluator(*attrs):
    """Decorator factory: mark a function as an evaluator and stamp the
    attribute-key flags corresponding to each bit in *attrs*."""
    def _mark(method):
        method.is_evaluator = True
        for flag in attrs:
            setattr(method, EvaluatorAttribute.to_key(flag), True)
        return method
    return _mark
def evaluator_base(input,
                   type,
                   label=None,
                   weight=None,
                   name=None,
                   chunk_scheme=None,
                   num_chunk_types=None,
                   classification_threshold=None,
                   positive_label=None,
                   dict_file=None,
                   result_file=None,
                   num_results=None,
                   delimited=None,
                   top_k=None,
                   excluded_chunk_types=None,
                   overlap_threshold=None,
                   background_id=None,
                   evaluate_difficult=None,
                   ap_type=None):
    """
    Evaluator will evaluate the network status while training/testing.

    User can use evaluator by classify/regression job. For example.

    .. code-block:: python

       classify(prediction, output, evaluator=classification_error_evaluator)

    And user could define evaluator separately as follow.

    .. code-block:: python

       classification_error_evaluator("ErrorRate", prediction, label)

    The evaluator often contains a name parameter. It will also be printed when
    evaluating network. The printed information may look like the following.

    .. code-block:: text

       Batch=200 samples=20000 AvgCost=0.679655 CurrentCost=0.662179 Eval:
       classification_error_evaluator=0.4486
       CurrentEval: ErrorRate=0.3964

    :param name: Evaluator name, used as the printed label.
    :type name: basestring|None
    :param type: Evaluator type string understood by the config parser
                 (e.g. "classification_error", "detection_map").
    :type type: basestring
    :param input: Input layers, a object of LayerOutput or a list of
                  LayerOutput.
    :type input: list|LayerOutput
    :param label: An input layer containing the ground truth label.
    :type label: LayerOutput|None
    :param weight: An input layer which is a weight for each sample.
                   Each evaluator may calculate differently to use this weight.
    :type weight: LayerOutput.
    :param top_k: number k in top-k error rate
    :type top_k: int
    :param overlap_threshold: In detection tasks to filter detection results
    :type overlap_threshold: float
    :param background_id: Identifier of background class
    :type background_id: int
    :param evaluate_difficult: Whether to evaluate difficult objects
    :type evaluate_difficult: bool
    :param ap_type: How to calculate average persicion
    :type ap_type: str
    """
    # inputs type assertions.
    assert classification_threshold is None or isinstance(
        classification_threshold, float)
    assert positive_label is None or isinstance(positive_label, int)
    assert num_results is None or isinstance(num_results, int)
    assert top_k is None or isinstance(top_k, int)
    if not isinstance(input, list):
        input = [input]
    # Label and weight layers are appended to the evaluator's input list.
    if label:
        input.append(label)
    if weight:
        input.append(weight)
    # Register the evaluator with the global network configuration; all
    # remaining keyword arguments are forwarded verbatim.
    Evaluator(
        name=name,
        type=type,
        inputs=[i.name for i in input],
        chunk_scheme=chunk_scheme,
        num_chunk_types=num_chunk_types,
        classification_threshold=classification_threshold,
        positive_label=positive_label,
        dict_file=dict_file,
        result_file=result_file,
        delimited=delimited,
        num_results=num_results,
        top_k=top_k,
        excluded_chunk_types=excluded_chunk_types,
        overlap_threshold=overlap_threshold,
        background_id=background_id,
        evaluate_difficult=evaluate_difficult,
        ap_type=ap_type)
@evaluator(EvaluatorAttribute.FOR_DETECTION)
@wrap_name_default()
def detection_map_evaluator(input,
                            label,
                            overlap_threshold=0.5,
                            background_id=0,
                            evaluate_difficult=False,
                            ap_type="11point",
                            name=None):
    """
    Detection mAP Evaluator. It will print mean Average Precision (mAP) for detection.

    The detection mAP Evaluator based on the output of detection_output layer counts
    the true positive and the false positive bbox and integral them to get the
    mAP.

    The simple usage is:

    .. code-block:: python

       eval = detection_map_evaluator(input=det_output,label=lbl)

    :param input: Input layer.
    :type input: LayerOutput
    :param label: Label layer.
    :type label: LayerOutput
    :param overlap_threshold: The bbox overlap threshold of a true positive.
    :type overlap_threshold: float
    :param background_id: The background class index.
    :type background_id: int
    :param evaluate_difficult: Whether evaluate a difficult ground truth.
    :type evaluate_difficult: bool
    :param ap_type: How the average precision is computed (e.g. "11point").
    :type ap_type: str
    :param name: Evaluator name (auto-generated when None).
    :type name: basestring|None
    """
    if not isinstance(input, list):
        input = [input]
    # NOTE(review): label is appended to `input` here AND evaluator_base
    # appends `label` again internally, so it ends up in the inputs twice --
    # confirm whether this double-append is intentional.
    if label:
        input.append(label)
    evaluator_base(
        name=name,
        type="detection_map",
        input=input,
        label=label,
        overlap_threshold=overlap_threshold,
        background_id=background_id,
        evaluate_difficult=evaluate_difficult,
        ap_type=ap_type)
@evaluator(EvaluatorAttribute.FOR_CLASSIFICATION)
@wrap_name_default()
def classification_error_evaluator(input,
                                   label,
                                   name=None,
                                   weight=None,
                                   top_k=None,
                                   threshold=None):
    """
    Classification Error Evaluator. It will print error rate for classification.

    The classification error is:

    .. math::

       classification\\_error = \\frac{NumOfWrongPredicts}{NumOfAllSamples}

    The simple usage is:

    .. code-block:: python

       eval = classification_error_evaluator(input=prob,label=lbl)

    :param name: Evaluator name.
    :type name: basestring
    :param input: Input Layer name. The output prediction of network.
    :type input: LayerOutput
    :param label: Label layer name.
    :type label: basestring
    :param weight: Weight Layer name. It should be a matrix with size
                   [sample_num, 1]. And will just multiply to NumOfWrongPredicts
                   and NumOfAllSamples. So, the elements of weight are all one,
                   then means not set weight. The larger weight it is, the more
                   important this sample is.
    :type weight: LayerOutput
    :param top_k: number k in top-k error rate
    :type top_k: int
    :param threshold: The classification threshold.
    :type threshold: float
    :return: None.
    """
    # Thin wrapper: `threshold` maps onto evaluator_base's
    # `classification_threshold` parameter.
    evaluator_base(
        name=name,
        type="classification_error",
        input=input,
        label=label,
        weight=weight,
        top_k=top_k,
        classification_threshold=threshold, )
@evaluator(EvaluatorAttribute.FOR_CLASSIFICATION)
@wrap_name_default()
def auc_evaluator(input, label, name=None, weight=None):
    """
    AUC Evaluator, suited to binary classification.

    The simple usage:

    .. code-block:: python

       eval = auc_evaluator(input, label)

    :param name: Evaluator name.
    :type name: None|basestring
    :param input: Input Layer name. The output prediction of network.
    :type input: LayerOutput
    :param label: Label layer name.
    :type label: None|basestring
    :param weight: Weight Layer name. It should be a matrix with size
                   [sample_num, 1].
    :type weight: LayerOutput
    """
    # The AUC is computed over the last column of the input predictions.
    evaluator_base(
        type="last-column-auc",
        name=name,
        input=input,
        label=label,
        weight=weight)
@evaluator(EvaluatorAttribute.FOR_RANK)
@wrap_name_default()
def pnpair_evaluator(input,
                     label,
                     query_id,
                     weight=None,
                     name=None):
    """
    Positive-negative pair rate Evaluator, which adapts to rank tasks like
    learning to rank. This evaluator must contain at least three layers.

    The simple usage:

    .. code-block:: python

       eval = pnpair_evaluator(input, label, query_id)

    :param input: Input Layer name. The output prediction of network.
    :type input: LayerOutput
    :param label: Label layer name.
    :type label: LayerOutput
    :param query_id: Query_id layer name. Query_id indicates which query
                     each sample belongs to. Its shape should be
                     the same as the output of the Label layer.
    :type query_id: LayerOutput
    :param weight: Weight Layer name. It should be a matrix with size
                   [sample_num, 1] which indicates the weight of each
                   sample. The default weight of a sample is 1 if the
                   weight layer is None, and a pair's weight is the mean
                   of the two samples' weights.
    :type weight: LayerOutput
    :param name: Evaluator name.
    :type name: None|basestring
    """
    # Normalize to a *fresh* list so the caller's list argument is never
    # mutated when the label/query_id layers are appended below (the
    # original code appended directly into the argument).
    if not isinstance(input, list):
        input = [input]
    else:
        input = list(input)
    if label:
        input.append(label)
    if query_id:
        input.append(query_id)
    evaluator_base(
        input=input,
        type="pnpair",
        weight=weight,
        name=name)
@evaluator(EvaluatorAttribute.FOR_CLASSIFICATION)
@wrap_name_default()
def precision_recall_evaluator(input,
                               label,
                               positive_label=None,
                               weight=None,
                               name=None):
    """
    An Evaluator that calculates precision, recall and F1-score.
    It adapts to tasks with multiple labels.

    - If positive_label=-1, it prints the average precision, recall and
      F1-score over all labels.
    - If a specific positive_label is given, it prints the precision,
      recall and F1-score of that label only.

    The simple usage:

    .. code-block:: python

       eval = precision_recall_evaluator(input, label)

    :param name: Evaluator name.
    :type name: None|basestring
    :param input: Input Layer name. The output prediction of network.
    :type input: LayerOutput
    :param label: Label layer name.
    :type label: LayerOutput
    :param positive_label: The input label layer.
    :type positive_label: LayerOutput.
    :param weight: Weight Layer name. It should be a matrix with size
                   [sample_num, 1]. (TODO, explanation)
    :type weight: LayerOutput
    """
    evaluator_base(
        type="precision_recall",
        name=name,
        input=input,
        label=label,
        positive_label=positive_label,
        weight=weight)
@evaluator(EvaluatorAttribute.FOR_CLASSIFICATION)
@wrap_name_default()
def ctc_error_evaluator(input, label, name=None):
    """
    An evaluator that calculates the sequence-to-sequence edit distance.

    The simple usage is:

    .. code-block:: python

       eval = ctc_error_evaluator(input=input, label=lbl)

    :param name: Evaluator name.
    :type name: None|basestring
    :param input: Input Layer. Should be the same as the input for ctc_layer.
    :type input: LayerOutput
    :param label: input label, which is a data_layer. Should be the same as
                  the label for ctc_layer.
    :type label: LayerOutput
    """
    evaluator_base(
        type="ctc_edit_distance", name=name, input=input, label=label)
@evaluator(EvaluatorAttribute.FOR_CLASSIFICATION)
@wrap_name_default()
def chunk_evaluator(input,
                    label,
                    chunk_scheme,
                    num_chunk_types,
                    name=None,
                    excluded_chunk_types=None):
    """
    Chunk evaluator measures segment-labelling accuracy for a sequence:
    it calculates precision, recall and F1 scores for chunk detection.

    Two concepts are needed to read the label layout:

    * **Chunk type** is the type of a whole chunk, which spans one or
      several words (e.g. in NER: ORG for organization names, PER for
      person names).
    * **Tag type** is the position of a word inside a chunk (B for begin,
      I for inside, E for end, S for a single-word chunk).

    A label combines both, e.g. B-ORG marks the beginning of an
    organization name.

    The label dictionary must obey the following rules:

    - Use one of the supported labelling schemes, which differ in how
      chunk boundaries are indicated:

      .. code-block:: text

         Scheme Description
         plain  Use the same label for the whole chunk.
         IOB    Two labels per chunk type X: B-X for the chunk beginning and I-X for the inside.
         IOE    Two labels per chunk type X: E-X for the chunk ending and I-X for the inside.
         IOBES  Four labels per chunk type X: B-X begin, I-X inside, E-X end, S-X single-word chunk.

      As an NER example, assume three chunk types ORG, PER and LOC under
      the 'IOB' scheme. The label set then becomes B-ORG, I-ORG, B-PER,
      I-PER, B-LOC, I-LOC and O, where B-ORG marks the beginning of an
      ORG chunk and I-ORG its inside. The prefixes B and I are the two
      tag types, and the training data must be labelled accordingly.

    - Assign label ids so that the following equations recover the tag
      type and chunk type from a label id:

      .. code-block:: text

         tagType = label % numTagType
         chunkType = label / numTagType
         otherChunkType = numChunkTypes

      The mapping between tagType values and tags per scheme is:

      .. code-block:: text

         Scheme Begin Inside End   Single
         plain  0     -      -     -
         IOB    0     1      -     -
         IOE    -     0      1     -
         IOBES  0     1      2     3

      Continuing the NER example, a label dictionary that satisfies the
      equations above looks like:

      .. code-block:: text

         B-ORG  0
         I-ORG  1
         B-PER  2
         I-PER  3
         B-LOC  4
         I-LOC  5
         O      6

      Here chunkType has three values: 0 for ORG, 1 for PER and 2 for
      LOC; because the scheme is "IOB", tagType has two values: 0 for B
      and 1 for I. Taking I-LOC as an example: its label id is 5, giving
      tagType=1 and chunkType=2, i.e. a word tagged I inside a LOC chunk.

    The simple usage is:

    .. code-block:: python

       eval = chunk_evaluator(input, label, chunk_scheme, num_chunk_types)

    :param input: The input layers.
    :type input: LayerOutput
    :param label: An input layer containing the ground truth label.
    :type label: LayerOutput
    :param chunk_scheme: The labelling scheme, one of "IOB", "IOE",
                         "IOBES" or "plain". It is required.
    :type chunk_scheme: basestring
    :param num_chunk_types: number of chunk types other than "other"
    :param name: The Evaluator name, it is optional.
    :type name: basename|None
    :param excluded_chunk_types: chunks of these types are not considered
    :type excluded_chunk_types: list of integer|None
    """
    evaluator_base(
        type="chunk",
        name=name,
        input=input,
        label=label,
        chunk_scheme=chunk_scheme,
        num_chunk_types=num_chunk_types,
        excluded_chunk_types=excluded_chunk_types)
@evaluator(EvaluatorAttribute.FOR_UTILS)
@wrap_name_default()
def sum_evaluator(input, name=None, weight=None):
    """
    An Evaluator that sums the result of its input.

    The simple usage:

    .. code-block:: python

       eval = sum_evaluator(input)

    :param name: Evaluator name.
    :type name: None|basestring
    :param input: Input Layer name.
    :type input: LayerOutput
    :param weight: Weight Layer name. It should be a matrix with size
                   [sample_num, 1]. (TODO, explanation)
    :type weight: LayerOutput
    """
    evaluator_base(type="sum", name=name, input=input, weight=weight)
@evaluator(EvaluatorAttribute.FOR_UTILS)
@wrap_name_default()
def column_sum_evaluator(input, name=None, weight=None):
    """
    This Evaluator sums the last column of its input.

    The simple usage is:

    .. code-block:: python

       eval = column_sum_evaluator(input)

    :param name: Evaluator name.
    :type name: None|basestring
    :param input: Input Layer name.
    :type input: LayerOutput
    :param weight: Weight Layer name. It should be a matrix with size
                   [sample_num, 1].
    :type weight: LayerOutput
    """
    evaluator_base(
        type="last-column-sum", name=name, input=input, weight=weight)
"""
The following are printer Evaluators which are usually used to
print the result, like value or gradient of input layers, the
results generated in machine translation, the classification error etc.
"""
@evaluator(EvaluatorAttribute.FOR_PRINT)
@wrap_name_default()
def value_printer_evaluator(input, name=None):
    """
    This Evaluator prints the values of its input layers. It accepts
    one or more input layers.

    The simple usage is:

    .. code-block:: python

       eval = value_printer_evaluator(input)

    :param input: One or more input layers.
    :type input: LayerOutput|list
    :param name: Evaluator name.
    :type name: None|basestring
    """
    evaluator_base(type="value_printer", name=name, input=input)
@evaluator(EvaluatorAttribute.FOR_PRINT)
@wrap_name_default()
def gradient_printer_evaluator(input, name=None):
    """
    This Evaluator prints the gradients of its input layers. It accepts
    one or more input layers.

    The simple usage is:

    .. code-block:: python

       eval = gradient_printer_evaluator(input)

    :param input: One or more input layers.
    :type input: LayerOutput|list
    :param name: Evaluator name.
    :type name: None|basestring
    """
    evaluator_base(type="gradient_printer", name=name, input=input)
@evaluator(EvaluatorAttribute.FOR_PRINT)
@wrap_name_default()
def maxid_printer_evaluator(input, num_results=None, name=None):
    """
    This Evaluator prints the top-k maximum values and their indexes for
    each row of its input layers. It accepts one or more input layers;
    k is specified by num_results.

    The simple usage is:

    .. code-block:: python

       eval = maxid_printer_evaluator(input)

    :param input: Input Layer name.
    :type input: LayerOutput|list
    :param num_results: This number is used to specify the top k numbers.
                        It is 1 by default.
    :type num_results: int.
    :param name: Evaluator name.
    :type name: None|basestring
    """
    evaluator_base(
        type="max_id_printer", name=name, input=input,
        num_results=num_results)
@evaluator(EvaluatorAttribute.FOR_PRINT)
@wrap_name_default()
def maxframe_printer_evaluator(input, num_results=None, name=None):
    """
    This Evaluator prints the top-k frames of each input layer. The input
    layers should carry sequence info or be of a sequence type; k is
    specified by num_results. It accepts one or more input layers.

    Note:
        The width of each frame is 1.

    The simple usage is:

    .. code-block:: python

       eval = maxframe_printer_evaluator(input)

    :param input: Input Layer name.
    :type input: LayerOutput|list
    :param num_results: number k of top frames to print.
    :type num_results: int
    :param name: Evaluator name.
    :type name: None|basestring
    """
    evaluator_base(
        type="max_frame_printer",
        name=name,
        input=input,
        num_results=num_results)
@evaluator(EvaluatorAttribute.FOR_PRINT)
@wrap_name_default()
def seqtext_printer_evaluator(input,
                              result_file,
                              id_input=None,
                              dict_file=None,
                              delimited=None,
                              name=None):
    """
    Sequence text printer: prints text according to an index matrix and a
    dictionary. There can be multiple inputs to this layer:

    1. If there is no id_input, the input must be a matrix containing
       the sequence of indices;
    2. If there is id_input, it should be ids, and is interpreted as
       sample ids.

    The output format will be:

    1. sequence without sub-sequence, and there is probability.

       .. code-block:: python

          id \t prob space_seperated_tokens_from_dictionary_according_to_seq

    2. sequence without sub-sequence, and there is not probability.

       .. code-block:: python

          id \t space_seperated_tokens_from_dictionary_according_to_seq

    3. sequence with sub-sequence, and there is not probability.

       .. code-block:: python

          id \t space_seperated_tokens_from_dictionary_according_to_sub_seq
          \t \t space_seperated_tokens_from_dictionary_according_to_sub_seq
          ...

    Typically a SequenceTextPrinter layer takes the output of maxid or a
    RecurrentGroup with maxid (when generating) as its input.

    The simple usage is:

    .. code-block:: python

       eval = seqtext_printer_evaluator(input=maxid_layer,
                                        id_input=sample_id,
                                        dict_file=dict_file,
                                        result_file=result_file)

    :param input: Input Layer name.
    :type input: LayerOutput|list
    :param result_file: Path of the file to store the generated results.
    :type result_file: basestring
    :param id_input: Index of the input sequence; the specified index will
                     be printed in the generated results. This is an
                     optional parameter.
    :type id_input: LayerOutput
    :param dict_file: Path of dictionary. This is an optional parameter.
                      Every line is a word in the dictionary with
                      (line number - 1) as the word index.
                      If this parameter is set to None, or to an empty
                      string, only word indexes are printed in the
                      generated results.
    :type dict_file: basestring
    :param delimited: Whether to use space to separate output tokens.
                      Default is True. No space is added if set to False.
    :type delimited: bool
    :param name: Evaluator name.
    :type name: None|basestring
    :return: The seq_text_printer that prints the generated sequence to a
             file.
    :rtype: evaluator
    """
    assert isinstance(result_file, basestring)
    if id_input is None:
        inputs = [input]
    else:
        # The sample-id layer goes first; also record the dependency so
        # the id layer is evaluated together with the text layer.
        inputs = [id_input, input]
        input.parents.append(id_input)
    evaluator_base(
        type="seq_text_printer",
        name=name,
        input=inputs,
        dict_file=dict_file,
        result_file=result_file,
        delimited=delimited)
@evaluator(EvaluatorAttribute.FOR_PRINT)
@wrap_name_default()
def classification_error_printer_evaluator(input,
                                           label,
                                           threshold=0.5,
                                           name=None):
    """
    This Evaluator prints the classification error of each sample.

    The simple usage is:

    .. code-block:: python

       eval = classification_error_printer_evaluator(input)

    :param input: Input layer.
    :type input: LayerOutput
    :param label: Input label layer.
    :type label: LayerOutput
    :param threshold: The classification threshold.
    :type threshold: float
    :param name: Evaluator name.
    :type name: None|basestring
    """
    evaluator_base(
        type="classification_error_printer",
        name=name,
        input=input,
        label=label,
        classification_threshold=threshold)
| apache-2.0 |
schlueter/ansible | contrib/inventory/fleet.py | 46 | 3064 | #!/usr/bin/env python
"""
fleetctl base external inventory script. Automatically finds the IPs of the booted coreos instances and
returns it under the host group 'coreos'
"""
# Copyright (C) 2014 Andrew Rothstein <andrew.rothstein at gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Thanks to the vagrant.py inventory script for giving me the basic structure
# of this.
#
import sys
import subprocess
import re
import string
from optparse import OptionParser
# Prefer the stdlib json module (Python >= 2.6); fall back to the external
# simplejson package on older interpreters. Catch only ImportError so that
# unrelated failures inside the import are not silently masked.
try:
    import json
except ImportError:
    import simplejson as json
# Options
# ------------------------------
# Ansible invokes dynamic-inventory scripts with either --list (dump the
# whole inventory) or --host <machine> (dump one host's variables).
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option(
    '--list', action="store_true", dest="list", default=False,
    help="Produce a JSON consumable grouping of servers in your fleet")
parser.add_option(
    '--host', dest="host", default=None,
    help="Generate additional host specific details for given host for Ansible")
(options, args) = parser.parse_args()
#
# helper functions
#
def get_ssh_config():
    """Return one ssh-config dict per running machine in the fleet."""
    return [get_a_ssh_config(box) for box in list_running_boxes()]
# list all the running instances in the fleet
def list_running_boxes():
    """Return the machine IPs of all running instances in the fleet."""
    boxes = []
    output = subprocess.check_output(["fleetctl", "list-machines"])
    for line in output.split('\n'):
        # Second whitespace-separated column holds the machine IP.
        matcher = re.search(r"[^\s]+[\s]+([^\s]+).+", line)
        # Skip the header row, whose second column is the literal "IP".
        if matcher and matcher.group(1) != "IP":
            boxes.append(matcher.group(1))
    return boxes
def get_a_ssh_config(box_name):
    """Build the Ansible connection variables for one coreos machine."""
    return {
        'Host': box_name,
        'ansible_ssh_user': 'core',
        'ansible_python_interpreter': '/opt/bin/python',
    }
# List out servers that vagrant has running
# ------------------------------
if options.list:
    # Emit the full inventory: a single "coreos" group listing every
    # running machine in the fleet.
    ssh_config = get_ssh_config()
    hosts = {'coreos': []}
    for data in ssh_config:
        hosts['coreos'].append(data['Host'])
    print(json.dumps(hosts))
    # Exit 0 on success: Ansible treats a non-zero exit from an inventory
    # script as a failure (the original exited 1 here).
    sys.exit(0)
# Get out the host details
# ------------------------------
elif options.host:
    result = {}
    ssh_config = get_ssh_config()
    # Materialize the matches in a list so this also works on Python 3,
    # where filter() returns a lazy iterator without len().
    details = [x for x in ssh_config if x['Host'] == options.host]
    if len(details) > 0:
        # pass through the port, in case it's non standard.
        result = details[0]
    print(json.dumps(result))
    sys.exit(0)
# Print out help
# ------------------------------
else:
    # Neither --list nor --host given: show usage and signal failure.
    parser.print_help()
    sys.exit(1)
| gpl-3.0 |
AndreasMadsen/tensorflow | tensorflow/contrib/session_bundle/gc_test.py | 24 | 4527 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for session_bundle.gc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.session_bundle import gc
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
def tearDownModule():
    """Remove the temporary directory shared by this module's tests."""
    temp_dir = tf.test.get_temp_dir()
    gfile.DeleteRecursively(temp_dir)
class GcTest(test_util.TensorFlowTestCase):
    """Unit tests for the gc (export garbage collection) path filters.

    Each filter under test takes a list of gc.Path(path, export_version)
    tuples and returns the subset that should be *kept*.
    """

    def testLargestExportVersions(self):
        # Keep only the 2 paths with the largest export versions.
        paths = [gc.Path("/foo", 8), gc.Path("/foo", 9), gc.Path("/foo", 10)]
        newest = gc.largest_export_versions(2)
        n = newest(paths)
        self.assertEquals(n, [gc.Path("/foo", 9), gc.Path("/foo", 10)])

    def testLargestExportVersionsDoesNotDeleteZeroFolder(self):
        # Version 0 must be treated as a real export version, not skipped.
        paths = [gc.Path("/foo", 0), gc.Path("/foo", 3)]
        newest = gc.largest_export_versions(2)
        n = newest(paths)
        self.assertEquals(n, [gc.Path("/foo", 0), gc.Path("/foo", 3)])

    def testModExportVersion(self):
        # Keep paths whose export version is divisible by the modulus.
        paths = [gc.Path("/foo", 4), gc.Path("/foo", 5), gc.Path("/foo", 6),
                 gc.Path("/foo", 9)]
        mod = gc.mod_export_version(2)
        self.assertEquals(mod(paths), [gc.Path("/foo", 4), gc.Path("/foo", 6)])
        mod = gc.mod_export_version(3)
        self.assertEquals(mod(paths), [gc.Path("/foo", 6), gc.Path("/foo", 9)])

    def testOneOfEveryNExportVersions(self):
        # Keep the largest version inside each window [n*i+1, n*(i+1)].
        paths = [gc.Path("/foo", 0), gc.Path("/foo", 1), gc.Path("/foo", 3),
                 gc.Path("/foo", 5), gc.Path("/foo", 6), gc.Path("/foo", 7),
                 gc.Path("/foo", 8), gc.Path("/foo", 33)]
        one_of = gc.one_of_every_n_export_versions(3)
        self.assertEquals(one_of(paths),
                          [gc.Path("/foo", 3), gc.Path("/foo", 6),
                           gc.Path("/foo", 8), gc.Path("/foo", 33)])

    def testOneOfEveryNExportVersionsZero(self):
        # Zero is a special case since it gets rolled into the first interval.
        # Test that here.
        paths = [gc.Path("/foo", 0), gc.Path("/foo", 4), gc.Path("/foo", 5)]
        one_of = gc.one_of_every_n_export_versions(3)
        self.assertEquals(one_of(paths),
                          [gc.Path("/foo", 0), gc.Path("/foo", 5)])

    def testUnion(self):
        # union() keeps a path if *either* filter keeps it.
        paths = []
        for i in xrange(10):
            paths.append(gc.Path("/foo", i))
        f = gc.union(gc.largest_export_versions(3), gc.mod_export_version(3))
        self.assertEquals(
            f(paths), [gc.Path("/foo", 0), gc.Path("/foo", 3),
                       gc.Path("/foo", 6), gc.Path("/foo", 7),
                       gc.Path("/foo", 8), gc.Path("/foo", 9)])

    def testNegation(self):
        # negation() keeps exactly the paths the wrapped filter drops.
        paths = [gc.Path("/foo", 4), gc.Path("/foo", 5), gc.Path("/foo", 6),
                 gc.Path("/foo", 9)]
        mod = gc.negation(gc.mod_export_version(2))
        self.assertEquals(
            mod(paths), [gc.Path("/foo", 5), gc.Path("/foo", 9)])
        mod = gc.negation(gc.mod_export_version(3))
        self.assertEquals(
            mod(paths), [gc.Path("/foo", 4), gc.Path("/foo", 5)])

    def testPathsWithParse(self):
        # get_paths() should list only directories the parser recognizes,
        # with export_version extracted from the directory name.
        base_dir = os.path.join(tf.test.get_temp_dir(), "paths_parse")
        self.assertFalse(gfile.Exists(base_dir))
        for p in xrange(3):
            gfile.MakeDirs(os.path.join(base_dir, "%d" % p))
        # add a base_directory to ignore
        gfile.MakeDirs(os.path.join(base_dir, "ignore"))

        # create a simple parser that pulls the export_version from the directory.
        def parser(path):
            match = re.match("^" + base_dir + "/(\\d+)$", path.path)
            if not match:
                return None
            return path._replace(export_version=int(match.group(1)))

        self.assertEquals(
            gc.get_paths(base_dir, parser=parser),
            [gc.Path(os.path.join(base_dir, "0"), 0),
             gc.Path(os.path.join(base_dir, "1"), 1),
             gc.Path(os.path.join(base_dir, "2"), 2)])
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
auferack08/edx-platform | common/djangoapps/student/migrations/0027_add_active_flag_and_mode_to_courseware_enrollment.py | 69 | 15223 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the `is_active` and `mode` columns to
    the student_courseenrollment table (both with defaults, so existing
    rows remain valid).
    """

    def forwards(self, orm):
        # Adding field 'CourseEnrollment.is_active'
        db.add_column('student_courseenrollment', 'is_active',
                      self.gf('django.db.models.fields.BooleanField')(default=True),
                      keep_default=False)

        # Adding field 'CourseEnrollment.mode'
        db.add_column('student_courseenrollment', 'mode',
                      self.gf('django.db.models.fields.CharField')(default='honor', max_length=100),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'CourseEnrollment.is_active'
        db.delete_column('student_courseenrollment', 'is_active')

        # Deleting field 'CourseEnrollment.mode'
        db.delete_column('student_courseenrollment', 'mode')

    # Frozen snapshot of the app's models at the time of this migration.
    # South-generated; must not be edited by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'student.courseenrollment': {
            'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'student.courseenrollmentallowed': {
            'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
            'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'student.pendingemailchange': {
            'Meta': {'object_name': 'PendingEmailChange'},
            'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.pendingnamechange': {
            'Meta': {'object_name': 'PendingNameChange'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.registration': {
            'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
            'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.testcenterregistration': {
            'Meta': {'object_name': 'TestCenterRegistration'},
            'accommodation_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'accommodation_request': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'authorization_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'client_authorization_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
            'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'eligibility_appointment_date_first': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
            'eligibility_appointment_date_last': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
            'exam_series_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'testcenter_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['student.TestCenterUser']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
            'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
        },
        'student.testcenteruser': {
            'Meta': {'object_name': 'TestCenterUser'},
            'address_1': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'address_2': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'address_3': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'candidate_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'client_candidate_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'company_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
            'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'extension': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'blank': 'True'}),
            'fax': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}),
            'fax_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
            'phone_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}),
            'postal_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
            'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'salutation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
            'suffix': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
            'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'unique': 'True'}),
            'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
        },
        'student.userprofile': {
            'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
            'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
            'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
            'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
        },
        'student.usertestgroup': {
            'Meta': {'object_name': 'UserTestGroup'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
        }
    }

    complete_apps = ['student']
leonhong/hadoop-common | src/contrib/hod/hodlib/Common/xmlrpc.py | 182 | 2374 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import xmlrpclib, time, random, signal
from hodlib.Common.util import hodInterrupt, HodInterruptException
class hodXRClient(xmlrpclib.ServerProxy):
    """XML-RPC client proxy with a SIGALRM-based per-call timeout and
    automatic retries.

    Each request arms a ``SIGALRM`` timer for ``timeOut`` seconds; the
    installed handler raises, aborting a hung socket call.  Failed requests
    are retried up to 30 times with a small randomized back-off when
    ``retryRequests`` is true, otherwise a ``hodXRClientTimeout`` exception
    is raised immediately.
    """

    def __init__(self, uri, transport=None, encoding=None, verbose=0,
                 allow_none=0, installSignalHandlers=1, retryRequests=True, timeOut=15):
        xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding, verbose,
                                       allow_none)
        self.__retryRequests = retryRequests
        self.__timeOut = timeOut
        # Signal handlers can only be installed on the main thread; callers
        # on other threads must pass installSignalHandlers=0.
        if (installSignalHandlers != 0):
            self.__set_alarm()

    def __set_alarm(self):
        def alarm_handler(sigNum, sigHandler):
            raise Exception("XML-RPC socket timeout.")
        signal.signal(signal.SIGALRM, alarm_handler)

    def __request(self, methodname, params):
        """Issue the XML-RPC call, retrying on failure.

        Returns the server response, or None if all 30 retryable attempts
        failed (pre-existing behavior, preserved for compatibility).
        """
        response = None
        retryWaitTime = 5 + random.randint(0, 5)
        for i in range(0, 30):
            signal.alarm(self.__timeOut)
            try:
                response = self._ServerProxy__request(methodname, params)
                signal.alarm(0)
                break
            except Exception:
                # Bug fix: cancel the pending alarm before sleeping or
                # re-raising.  Previously the alarm stayed armed on the
                # failure path and could fire in the middle of the retry
                # sleep, escaping as an unrelated exception.
                signal.alarm(0)
                if self.__retryRequests:
                    if hodInterrupt.isSet():
                        raise HodInterruptException()
                    time.sleep(retryWaitTime)
                else:
                    raise Exception("hodXRClientTimeout")
        return response

    def __getattr__(self, name):
        # magic method dispatcher: route attribute access through our
        # retrying __request instead of ServerProxy's default.
        return xmlrpclib._Method(self.__request, name)
| apache-2.0 |
synergeticsedx/deployment-wipro | common/djangoapps/util/tests/test_django_utils.py | 60 | 2110 | """
THE TESTS IN THIS MODULE SHOULD BE RUN ON THE SAME PROCESS TO BE MEANINGFUL!!!
The tests in this module look kind of goofy, but the idea is to make sure that
cache values can't leak between different TestCase classes and methods. The need
for this will go away whenever Django merges the fix to reset the caches between
tests (https://code.djangoproject.com/ticket/11505).
"""
from django.core.cache import caches
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
class CacheCheckMixin(object):
    """Mixin supplying the shared cache-isolation assertion."""

    def check_caches(self, key):
        """Assert *key* is absent from every configured cache backend,
        then populate it so a later test can detect leakage."""
        for backend in caches.all():
            self.assertIsNone(backend.get(key))
            backend.set(key, "Not None")
class CacheModuleStoreTestCaseParent(ModuleStoreTestCase, CacheCheckMixin):
    """Make sure that we're clearing cache values between tests."""

    def test_cache_reset_1(self):
        """Check to make sure cache is empty, and add values to it."""
        # Either ordering of _1/_2 works: whichever runs first seeds the
        # cache, and the other fails if the values leaked through.
        self.check_caches("mstc_cache_test_key")

    def test_cache_reset_2(self):
        """Check to make sure cache is empty, and add values to it."""
        self.check_caches("mstc_cache_test_key")


class CacheModuleStoreTestCaseChild(CacheModuleStoreTestCaseParent):  # pylint: disable=test-inherits-tests
    """Make sure that we're clearing cache values between classes."""
    # Inherits the parent's tests unchanged; re-running them in a second
    # class verifies the cache is also reset across TestCase classes.
class CacheSharedModuleStoreTestCaseParent(SharedModuleStoreTestCase, CacheCheckMixin):
    """Make sure that we're clearing cache values between tests."""

    def test_cache_reset_1(self):
        """Check to make sure cache is empty, and add values to it."""
        # Distinct key from the ModuleStoreTestCase variant so the two
        # base-class flavors cannot mask each other's leaks.
        self.check_caches("smstc_cache_test_key")

    def test_cache_reset_2(self):
        """Check to make sure cache is empty, and add values to it."""
        self.check_caches("smstc_cache_test_key")


class CacheSharedModuleStoreTestCaseChild(CacheSharedModuleStoreTestCaseParent):  # pylint: disable=test-inherits-tests
    """Make sure that we're clearing cache values between classes."""
israeleriston/scientific-week | backend/venv/lib/python3.5/site-packages/jinja2/nodes.py | 130 | 29392 | # -*- coding: utf-8 -*-
"""
jinja2.nodes
~~~~~~~~~~~~
This module implements additional nodes derived from the ast base node.
It also provides some node tree helper functions like `in_lineno` and
`get_nodes` used by the parser and translator in order to normalize
python and jinja nodes.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import types
import operator
from collections import deque
from jinja2.utils import Markup
from jinja2._compat import izip, with_metaclass, text_type, PY2
#: the types we support for context functions
_context_function_types = (types.FunctionType, types.MethodType)


#: maps binary operator symbols to callables used by
#: :meth:`BinExpr.as_const` for compile-time constant folding.
_binop_to_func = {
    '*': operator.mul,
    '/': operator.truediv,
    '//': operator.floordiv,
    '**': operator.pow,
    '%': operator.mod,
    '+': operator.add,
    '-': operator.sub
}

#: unary operator symbols -> callables, used by :meth:`UnaryExpr.as_const`.
_uaop_to_func = {
    'not': operator.not_,
    '+': operator.pos,
    '-': operator.neg
}

#: comparison operator names -> callables, used by :meth:`Compare.as_const`.
_cmpop_to_func = {
    'eq': operator.eq,
    'ne': operator.ne,
    'gt': operator.gt,
    'gteq': operator.ge,
    'lt': operator.lt,
    'lteq': operator.le,
    'in': lambda a, b: a in b,
    'notin': lambda a, b: a not in b
}
class Impossible(Exception):
    """Raised if the node could not perform a requested action.

    Typically raised by ``as_const`` implementations when an expression
    cannot be folded to a compile-time constant.
    """
class NodeType(type):
    """Metaclass for nodes that merges the ``fields`` and ``attributes``
    declarations of a node class with those of its (single) base class,
    so a child automatically inherits the parent's layout."""

    def __new__(cls, name, bases, d):
        for attr in ('fields', 'attributes'):
            inherited = list(getattr(bases[0], attr, ()))
            declared = list(d.get(attr, ()))
            combined = inherited + declared
            assert len(bases) == 1, 'multiple inheritance not allowed'
            assert len(combined) == len(set(combined)), 'layout conflict'
            d[attr] = tuple(combined)
        # Nodes are concrete unless they explicitly opt out.
        d.setdefault('abstract', False)
        return type.__new__(cls, name, bases, d)
class EvalContext(object):
    """Holds evaluation time information.  Custom attributes can be attached
    to it in extensions.
    """

    def __init__(self, environment, template_name=None):
        self.environment = environment
        # autoescape may be a plain bool or a callable keyed on the
        # template name; normalize it to a bool here.
        autoescape = environment.autoescape
        if callable(autoescape):
            autoescape = autoescape(template_name)
        self.autoescape = autoescape
        self.volatile = False

    def save(self):
        """Snapshot the current state as a plain dict."""
        return dict(self.__dict__)

    def revert(self, old):
        """Restore a state previously captured with :meth:`save`."""
        self.__dict__.clear()
        self.__dict__.update(old)
def get_eval_context(node, ctx):
    """Return *ctx* if one was supplied; otherwise build a fresh
    :class:`EvalContext` from the node's attached environment."""
    if ctx is not None:
        return ctx
    if node.environment is None:
        raise RuntimeError('if no eval context is passed, the '
                           'node must have an attached '
                           'environment.')
    return EvalContext(node.environment)
class Node(with_metaclass(NodeType, object)):
    """Baseclass for all Jinja2 nodes.  There are a number of nodes available
    of different types.  There are four major types:

    -   :class:`Stmt`: statements
    -   :class:`Expr`: expressions
    -   :class:`Helper`: helper nodes
    -   :class:`Template`: the outermost wrapper node

    All nodes have fields and attributes.  Fields may be other nodes, lists,
    or arbitrary values.  Fields are passed to the constructor as regular
    positional arguments, attributes as keyword arguments.  Each node has
    two attributes: `lineno` (the line number of the node) and `environment`.
    The `environment` attribute is set at the end of the parsing process for
    all nodes automatically.
    """
    fields = ()
    attributes = ('lineno', 'environment')
    abstract = True

    def __init__(self, *fields, **attributes):
        if self.abstract:
            raise TypeError('abstract nodes are not instanciable')
        if fields:
            # Positional arguments must map 1:1 onto the declared fields.
            if len(fields) != len(self.fields):
                if not self.fields:
                    raise TypeError('%r takes 0 arguments' %
                                    self.__class__.__name__)
                raise TypeError('%r takes 0 or %d argument%s' % (
                    self.__class__.__name__,
                    len(self.fields),
                    len(self.fields) != 1 and 's' or ''
                ))
            for name, arg in izip(self.fields, fields):
                setattr(self, name, arg)
        # Unspecified attributes (lineno, environment, ...) default to None.
        for attr in self.attributes:
            setattr(self, attr, attributes.pop(attr, None))
        if attributes:
            raise TypeError('unknown attribute %r' %
                            next(iter(attributes)))

    def iter_fields(self, exclude=None, only=None):
        """This method iterates over all fields that are defined and yields
        ``(key, value)`` tuples.  Per default all fields are returned, but
        it's possible to limit that to some fields by providing the `only`
        parameter or to exclude some using the `exclude` parameter.  Both
        should be sets or tuples of field names.
        """
        for name in self.fields:
            # `exclude is only is None` is a chained comparison meaning
            # "both filters are unset", i.e. yield every field.
            if (exclude is only is None) or \
               (exclude is not None and name not in exclude) or \
               (only is not None and name in only):
                try:
                    yield name, getattr(self, name)
                except AttributeError:
                    # Fields not set on this instance are silently skipped.
                    pass

    def iter_child_nodes(self, exclude=None, only=None):
        """Iterates over all direct child nodes of the node.  This iterates
        over all fields and yields the values if they are nodes.  If the value
        of a field is a list all the nodes in that list are returned.
        """
        for field, item in self.iter_fields(exclude, only):
            if isinstance(item, list):
                for n in item:
                    if isinstance(n, Node):
                        yield n
            elif isinstance(item, Node):
                yield item

    def find(self, node_type):
        """Find the first node of a given type.  If no such node exists the
        return value is `None`.
        """
        for result in self.find_all(node_type):
            return result

    def find_all(self, node_type):
        """Find all the nodes of a given type.  If the type is a tuple,
        the check is performed for any of the tuple items.
        """
        # Depth-first pre-order traversal of the subtree below this node;
        # note the node itself is never yielded.
        for child in self.iter_child_nodes():
            if isinstance(child, node_type):
                yield child
            for result in child.find_all(node_type):
                yield result

    def set_ctx(self, ctx):
        """Reset the context of a node and all child nodes.  Per default the
        parser will generate all nodes with a 'load' context as it's the
        most common one.  This method is used in the parser to set assignment
        targets and other nodes to a store context.
        """
        # Iterative breadth-first walk instead of recursion.
        todo = deque([self])
        while todo:
            node = todo.popleft()
            if 'ctx' in node.fields:
                node.ctx = ctx
            todo.extend(node.iter_child_nodes())
        return self

    def set_lineno(self, lineno, override=False):
        """Set the line numbers of the node and children."""
        todo = deque([self])
        while todo:
            node = todo.popleft()
            if 'lineno' in node.attributes:
                # Only fill in missing line numbers unless told to override.
                if node.lineno is None or override:
                    node.lineno = lineno
            todo.extend(node.iter_child_nodes())
        return self

    def set_environment(self, environment):
        """Set the environment for all nodes."""
        todo = deque([self])
        while todo:
            node = todo.popleft()
            node.environment = environment
            todo.extend(node.iter_child_nodes())
        return self

    def __eq__(self, other):
        # Nodes compare equal when they are the same class with equal fields;
        # attributes (lineno, environment) are deliberately ignored.
        return type(self) is type(other) and \
               tuple(self.iter_fields()) == tuple(other.iter_fields())

    def __ne__(self, other):
        return not self.__eq__(other)

    # Restore Python 2 hashing behavior on Python 3
    __hash__ = object.__hash__

    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            ', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
                      arg in self.fields)
        )

    def dump(self):
        # Render the node tree as a reconstructable `nodes.X(...)` string,
        # mainly for debugging.
        def _dump(node):
            if not isinstance(node, Node):
                buf.append(repr(node))
                return

            buf.append('nodes.%s(' % node.__class__.__name__)
            if not node.fields:
                buf.append(')')
                return
            for idx, field in enumerate(node.fields):
                if idx:
                    buf.append(', ')
                value = getattr(node, field)
                if isinstance(value, list):
                    buf.append('[')
                    for idx, item in enumerate(value):
                        if idx:
                            buf.append(', ')
                        _dump(item)
                    buf.append(']')
                else:
                    _dump(value)
            buf.append(')')
        buf = []
        _dump(self)
        return ''.join(buf)
class Stmt(Node):
    """Base node for all statements."""
    abstract = True


class Helper(Node):
    """Nodes that exist in a specific context only."""
    abstract = True


class Template(Node):
    """Node that represents a template.  This must be the outermost node that
    is passed to the compiler.
    """
    fields = ('body',)


class Output(Stmt):
    """A node that holds multiple expressions which are then printed out.
    This is used both for the `print` statement and the regular template data.
    """
    fields = ('nodes',)


class Extends(Stmt):
    """Represents an extends statement."""
    fields = ('template',)


class For(Stmt):
    """The for loop.  `target` is the target for the iteration (usually a
    :class:`Name` or :class:`Tuple`), `iter` the iterable.  `body` is a list
    of nodes that are used as loop-body, and `else_` a list of nodes for the
    `else` block.  If no else node exists it has to be an empty list.

    For filtered nodes an expression can be stored as `test`, otherwise `None`.
    """
    fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')


class If(Stmt):
    """If `test` is true, `body` is rendered, else `else_`."""
    fields = ('test', 'body', 'else_')


class Macro(Stmt):
    """A macro definition.  `name` is the name of the macro, `args` a list of
    arguments and `defaults` a list of defaults if there are any.  `body` is
    a list of nodes for the macro body.
    """
    fields = ('name', 'args', 'defaults', 'body')


class CallBlock(Stmt):
    """Like a macro without a name but a call instead.  `call` is called with
    the unnamed macro as `caller` argument this node holds.
    """
    fields = ('call', 'args', 'defaults', 'body')


class FilterBlock(Stmt):
    """Node for filter sections."""
    fields = ('body', 'filter')


class With(Stmt):
    """Specific node for with statements.  In older versions of Jinja the
    with statement was implemented on the base of the `Scope` node instead.

    .. versionadded:: 2.9.3
    """
    fields = ('targets', 'values', 'body')


class Block(Stmt):
    """A node that represents a block."""
    fields = ('name', 'body', 'scoped')


class Include(Stmt):
    """A node that represents the include tag."""
    fields = ('template', 'with_context', 'ignore_missing')


class Import(Stmt):
    """A node that represents the import tag."""
    fields = ('template', 'target', 'with_context')


class FromImport(Stmt):
    """A node that represents the from import tag.  It's important to not
    pass unsafe names to the name attribute.  The compiler translates the
    attribute lookups directly into getattr calls and does *not* use the
    subscript callback of the interface.  As exported variables may not
    start with double underscores (which the parser asserts) this is not a
    problem for regular Jinja code, but if this node is used in an extension
    extra care must be taken.

    The list of names may contain tuples if aliases are wanted.
    """
    fields = ('template', 'names', 'with_context')


class ExprStmt(Stmt):
    """A statement that evaluates an expression and discards the result."""
    fields = ('node',)


class Assign(Stmt):
    """Assigns an expression to a target."""
    fields = ('target', 'node')


class AssignBlock(Stmt):
    """Assigns a block to a target."""
    fields = ('target', 'body')
class Expr(Node):
    """Baseclass for all expressions."""
    abstract = True

    def as_const(self, eval_ctx=None):
        """Return the value of the expression as constant or raise
        :exc:`Impossible` if this was not possible.

        An :class:`EvalContext` can be provided, if none is given
        a default context is created which requires the nodes to have
        an attached environment.

        .. versionchanged:: 2.4
           the `eval_ctx` parameter was added.
        """
        # Base expressions are never constant-foldable; subclasses override.
        raise Impossible()

    def can_assign(self):
        """Check if it's possible to assign something to this node."""
        return False
class BinExpr(Expr):
    """Baseclass for all binary expressions."""
    fields = ('left', 'right')
    operator = None  # symbol set by each concrete subclass (e.g. '+')
    abstract = True

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # intercepted operators cannot be folded at compile time
        if self.environment.sandboxed and \
           self.operator in self.environment.intercepted_binops:
            raise Impossible()
        f = _binop_to_func[self.operator]
        try:
            return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
        except Exception:
            # Any runtime failure (TypeError, ZeroDivisionError, ...) just
            # means the fold is impossible; defer to runtime evaluation.
            raise Impossible()


class UnaryExpr(Expr):
    """Baseclass for all unary expressions."""
    fields = ('node',)
    operator = None  # symbol set by each concrete subclass (e.g. 'not')
    abstract = True

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # intercepted operators cannot be folded at compile time
        if self.environment.sandboxed and \
           self.operator in self.environment.intercepted_unops:
            raise Impossible()
        f = _uaop_to_func[self.operator]
        try:
            return f(self.node.as_const(eval_ctx))
        except Exception:
            raise Impossible()
class Name(Expr):
    """Looks up a name or stores a value in a name.
    The `ctx` of the node can be one of the following values:

    -   `store`: store a value in the name
    -   `load`: load that name
    -   `param`: like `store` but if the name was defined as function parameter.
    """
    fields = ('name', 'ctx')

    def can_assign(self):
        """A name is assignable unless it is one of the reserved
        boolean/none literals (in either lower- or title-case)."""
        reserved = ('true', 'false', 'none', 'True', 'False', 'None')
        return self.name not in reserved
class Literal(Expr):
    """Baseclass for literals."""
    abstract = True


class Const(Literal):
    """All constant values.  The parser will return this node for simple
    constants such as ``42`` or ``"foo"`` but it can be used to store more
    complex values such as lists too.  Only constants with a safe
    representation (objects where ``eval(repr(x)) == x`` is true).
    """
    fields = ('value',)

    def as_const(self, eval_ctx=None):
        rv = self.value
        # On Python 2, optionally downgrade unicode to a native str when the
        # environment's compiler.ascii_str policy asks for it.
        if PY2 and type(rv) is text_type and \
           self.environment.policies['compiler.ascii_str']:
            try:
                rv = rv.encode('ascii')
            except UnicodeError:
                pass
        return rv

    @classmethod
    def from_untrusted(cls, value, lineno=None, environment=None):
        """Return a const object if the value is representable as
        constant value in the generated code, otherwise it will raise
        an `Impossible` exception.
        """
        from .compiler import has_safe_repr
        if not has_safe_repr(value):
            raise Impossible()
        return cls(value, lineno=lineno, environment=environment)
class TemplateData(Literal):
    """A constant template string."""
    fields = ('data',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if eval_ctx.volatile:
            # Autoescape state unknown at compile time -> cannot fold.
            raise Impossible()
        if eval_ctx.autoescape:
            # Template data is trusted markup, so wrap it as safe.
            return Markup(self.data)
        return self.data
class Tuple(Literal):
    """For loop unpacking and some other things like multiple arguments
    for subscripts.  Like for :class:`Name` `ctx` specifies if the tuple
    is used for loading the names or storing.
    """
    fields = ('items', 'ctx')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return tuple(item.as_const(eval_ctx) for item in self.items)

    def can_assign(self):
        """A tuple is a valid assignment target only when every one of
        its items is."""
        return all(item.can_assign() for item in self.items)
class List(Literal):
    """Any list literal such as ``[1, 2, 3]``"""
    fields = ('items',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return [x.as_const(eval_ctx) for x in self.items]


class Dict(Literal):
    """Any dict literal such as ``{1: 2, 3: 4}``.  The items must be a list of
    :class:`Pair` nodes.
    """
    fields = ('items',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # Each Pair folds to a (key, value) tuple, so dict() assembles them.
        return dict(x.as_const(eval_ctx) for x in self.items)


class Pair(Helper):
    """A key, value pair for dicts."""
    fields = ('key', 'value')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)


class Keyword(Helper):
    """A key, value pair for keyword arguments where key is a string."""
    fields = ('key', 'value')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # The key is already a plain string; only the value needs folding.
        return self.key, self.value.as_const(eval_ctx)
class CondExpr(Expr):
    """A conditional expression (inline if expression).  (``{{
    foo if bar else baz }}``)
    """
    fields = ('test', 'expr1', 'expr2')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if self.test.as_const(eval_ctx):
            return self.expr1.as_const(eval_ctx)

        # if we evaluate to an undefined object, we better do that at runtime
        # (the else-branch may be omitted in the template source).
        if self.expr2 is None:
            raise Impossible()

        return self.expr2.as_const(eval_ctx)
class Filter(Expr):
    """This node applies a filter on an expression.  `name` is the name of
    the filter, the rest of the fields are the same as for :class:`Call`.

    If the `node` of a filter is `None` the contents of the last buffer are
    filtered.  Buffers are created by macros and filter blocks.
    """
    fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # Buffer filters (node is None) and volatile contexts cannot fold.
        if eval_ctx.volatile or self.node is None:
            raise Impossible()
        # we have to be careful here because we call filter_ below.
        # if this variable would be called filter, 2to3 would wrap the
        # call in a list because it is assuming we are talking about the
        # builtin filter function here which no longer returns a list in
        # python 3.  because of that, do not rename filter_ to filter!
        filter_ = self.environment.filters.get(self.name)
        # Context filters need the runtime template context -> not foldable.
        if filter_ is None or getattr(filter_, 'contextfilter', False):
            raise Impossible()

        # We cannot constant handle async filters, so we need to make sure
        # to not go down this path.
        if eval_ctx.environment.is_async and \
           getattr(filter_, 'asyncfiltervariant', False):
            raise Impossible()

        obj = self.node.as_const(eval_ctx)
        args = [obj] + [x.as_const(eval_ctx) for x in self.args]
        # Eval-context / environment filters receive their extra first arg.
        if getattr(filter_, 'evalcontextfilter', False):
            args.insert(0, eval_ctx)
        elif getattr(filter_, 'environmentfilter', False):
            args.insert(0, self.environment)
        kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
        if self.dyn_args is not None:
            try:
                args.extend(self.dyn_args.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        if self.dyn_kwargs is not None:
            try:
                kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        try:
            return filter_(*args, **kwargs)
        except Exception:
            # Any failure during the actual filter call defers to runtime.
            raise Impossible()
class Test(Expr):
    """Applies a test on an expression.  `name` is the name of the test, the
    rest of the fields are the same as for :class:`Call`.
    """
    fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')


class Call(Expr):
    """Calls an expression.  `args` is a list of arguments, `kwargs` a list
    of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
    and `dyn_kwargs` has to be either `None` or a node that is used as
    node for dynamic positional (``*args``) or keyword (``**kwargs``)
    arguments.
    """
    fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Getitem(Expr):
    """Get an attribute or item from an expression and prefer the item."""
    fields = ('node', 'arg', 'ctx')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # Only load contexts can be folded; store/param need runtime state.
        if self.ctx != 'load':
            raise Impossible()
        try:
            return self.environment.getitem(self.node.as_const(eval_ctx),
                                            self.arg.as_const(eval_ctx))
        except Exception:
            raise Impossible()

    def can_assign(self):
        return False


class Getattr(Expr):
    """Get an attribute or item from an expression that is a ascii-only
    bytestring and prefer the attribute.
    """
    fields = ('node', 'attr', 'ctx')

    def as_const(self, eval_ctx=None):
        if self.ctx != 'load':
            raise Impossible()
        try:
            eval_ctx = get_eval_context(self, eval_ctx)
            return self.environment.getattr(self.node.as_const(eval_ctx),
                                            self.attr)
        except Exception:
            raise Impossible()

    def can_assign(self):
        return False
class Slice(Expr):
    """Represents a slice object.  This must only be used as argument for
    :class:`Subscript`.
    """
    fields = ('start', 'stop', 'step')

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # Fold each bound independently; omitted bounds stay None so the
        # resulting slice() behaves like the literal ``[a:b:c]`` form.
        def const(obj):
            if obj is None:
                return None
            return obj.as_const(eval_ctx)
        return slice(const(self.start), const(self.stop), const(self.step))


class Concat(Expr):
    """Concatenates the list of expressions provided after converting them to
    unicode.
    """
    fields = ('nodes',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
    """Compares an expression with some other expressions.  `ops` must be a
    list of :class:`Operand`\\s.
    """
    fields = ('expr', 'ops')

    def as_const(self, eval_ctx=None):
        """Constant-fold a (possibly chained) comparison.

        Python chained comparisons (``a < b < c``) are the conjunction of
        every pairwise comparison.  The previous implementation returned
        only the *last* comparison's result, so e.g. ``1 < 5 < 3`` folded
        to True; we now short-circuit to False as soon as any link fails.
        """
        eval_ctx = get_eval_context(self, eval_ctx)
        result = value = self.expr.as_const(eval_ctx)
        try:
            for op in self.ops:
                new_value = op.expr.as_const(eval_ctx)
                result = _cmpop_to_func[op.op](value, new_value)

                if not result:
                    return False

                value = new_value
        except Exception:
            raise Impossible()
        return result
class Operand(Helper):
    """Holds an operator and an expression."""
    fields = ('op', 'expr')

# Extend the docstring with the operator list only in non-optimized runs
# (__doc__ is None under ``python -OO``, so the append would fail there).
if __debug__:
    Operand.__doc__ += '\nThe following operators are available: ' + \
        ', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
                  set(_uaop_to_func) | set(_cmpop_to_func)))
class Mul(BinExpr):
    """Multiplies the left with the right node."""
    operator = '*'


class Div(BinExpr):
    """Divides the left by the right node."""
    operator = '/'


class FloorDiv(BinExpr):
    """Divides the left by the right node and truncates the result to
    an integer (``//``).
    """
    operator = '//'


class Add(BinExpr):
    """Add the left to the right node."""
    operator = '+'


class Sub(BinExpr):
    """Subtract the right from the left node."""
    operator = '-'


class Mod(BinExpr):
    """Left modulo right."""
    operator = '%'


class Pow(BinExpr):
    """Left to the power of right."""
    operator = '**'


class And(BinExpr):
    """Short circuited AND."""
    operator = 'and'

    def as_const(self, eval_ctx=None):
        # Overridden: `and`/`or` are not in _binop_to_func since they must
        # short-circuit rather than eagerly evaluate both operands.
        eval_ctx = get_eval_context(self, eval_ctx)
        return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)


class Or(BinExpr):
    """Short circuited OR."""
    operator = 'or'

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)


class Not(UnaryExpr):
    """Negate the expression."""
    operator = 'not'


class Neg(UnaryExpr):
    """Make the expression negative."""
    operator = '-'


class Pos(UnaryExpr):
    """Make the expression positive (noop for most expressions)"""
    operator = '+'
# Helpers for extensions
class EnvironmentAttribute(Expr):
    """Loads an attribute from the environment object.  This is useful for
    extensions that want to call a callback stored on the environment.
    """
    fields = ('name',)


class ExtensionAttribute(Expr):
    """Returns the attribute of an extension bound to the environment.
    The identifier is the identifier of the :class:`Extension`.

    This node is usually constructed by calling the
    :meth:`~jinja2.ext.Extension.attr` method on an extension.
    """
    fields = ('identifier', 'name')


class ImportedName(Expr):
    """If created with an import name the import name is returned on node
    access.  For example ``ImportedName('cgi.escape')`` returns the `escape`
    function from the cgi module on evaluation.  Imports are optimized by the
    compiler so there is no need to assign them to local variables.
    """
    fields = ('importname',)


class InternalName(Expr):
    """An internal name in the compiler.  You cannot create these nodes
    yourself but the parser provides a
    :meth:`~jinja2.parser.Parser.free_identifier` method that creates
    a new identifier for you.  This identifier is not available from the
    template and is not threated specially by the compiler.
    """
    fields = ('name',)

    def __init__(self):
        # Deliberately unconstructible: only the parser may mint these via
        # free_identifier(), which bypasses __init__.
        raise TypeError('Can\'t create internal names.  Use the '
                        '`free_identifier` method on a parser.')
class MarkSafe(Expr):
    """Mark the wrapped expression as safe (wrap it as `Markup`)."""
    fields = ('expr',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return Markup(self.expr.as_const(eval_ctx))


class MarkSafeIfAutoescape(Expr):
    """Mark the wrapped expression as safe (wrap it as `Markup`) but
    only if autoescaping is active.

    .. versionadded:: 2.5
    """
    fields = ('expr',)

    def as_const(self, eval_ctx=None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if eval_ctx.volatile:
            # Autoescape state unknown at compile time -> cannot fold.
            raise Impossible()
        expr = self.expr.as_const(eval_ctx)
        if eval_ctx.autoescape:
            return Markup(expr)
        return expr
class ContextReference(Expr):
    """Returns the current template context.  It can be used like a
    :class:`Name` node, with a ``'load'`` ctx and will return the
    current :class:`~jinja2.runtime.Context` object.

    Here an example that assigns the current template name to a
    variable named `foo`::

        Assign(Name('foo', ctx='store'),
               Getattr(ContextReference(), 'name'))
    """


class Continue(Stmt):
    """Continue a loop."""


class Break(Stmt):
    """Break a loop."""


class Scope(Stmt):
    """An artificial scope."""
    fields = ('body',)


class EvalContextModifier(Stmt):
    """Modifies the eval context.  For each option that should be modified,
    a :class:`Keyword` has to be added to the :attr:`options` list.

    Example to change the `autoescape` setting::

        EvalContextModifier(options=[Keyword('autoescape', Const(True))])
    """
    fields = ('options',)


class ScopedEvalContextModifier(EvalContextModifier):
    """Modifies the eval context and reverts it later.  Works exactly like
    :class:`EvalContextModifier` but will only modify the
    :class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
    """
    fields = ('body',)
# make sure nobody creates custom nodes
def _failing_new(*args, **kwargs):
    """Replacement for :meth:`NodeType.__new__` that always refuses,
    locking the node class hierarchy against external subclassing."""
    raise TypeError('can\'t create custom node types')


NodeType.__new__ = staticmethod(_failing_new)
del _failing_new
| mit |
grevutiu-gabriel/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/watchlist/changedlinepattern_unittest.py | 124 | 3268 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Unit tests for changedlinepattern.py.'''
import re
import unittest2 as unittest
from webkitpy.common.watchlist.changedlinepattern import ChangedLinePattern
class ChangedLinePatternTest(unittest.TestCase):
    """Unit tests for ChangedLinePattern matching against a parsed diff."""

    # A quick note about the diff file structure.
    # The first column indicated the old line number.
    # The second column indicates the new line number.
    # 0 in either column indicates it had no old or new line number.
    _DIFF_FILE = ((0, 1, 'hi'),
                  (1, 0, 'bye'),
                  (2, 2, 'other'),
                  (3, 0, 'both'),
                  (0, 3, 'both'),
                  )

    def run_changed_line_pattern_match(self, pattern, index_for_zero_value):
        """Match *pattern* against _DIFF_FILE; index_for_zero_value selects
        added lines (0 = no old line number) or removed lines (1 = no new
        line number)."""
        return ChangedLinePattern(re.compile(pattern), index_for_zero_value).match(None, self._DIFF_FILE)

    def test_added_lines(self):
        """Patterns should match only lines that were added by the diff."""
        self.assertTrue(self.run_changed_line_pattern_match('hi', 0))
        self.assertTrue(self.run_changed_line_pattern_match('h.', 0))
        self.assertTrue(self.run_changed_line_pattern_match('both', 0))
        self.assertFalse(self.run_changed_line_pattern_match('bye', 0))
        self.assertFalse(self.run_changed_line_pattern_match('y', 0))
        self.assertFalse(self.run_changed_line_pattern_match('other', 0))

    def test_removed_lines(self):
        """Patterns should match only lines that were removed by the diff."""
        self.assertFalse(self.run_changed_line_pattern_match('hi', 1))
        self.assertFalse(self.run_changed_line_pattern_match('h.', 1))
        self.assertTrue(self.run_changed_line_pattern_match('both', 1))
        self.assertTrue(self.run_changed_line_pattern_match('bye', 1))
        self.assertTrue(self.run_changed_line_pattern_match('y', 1))
        self.assertFalse(self.run_changed_line_pattern_match('other', 1))
| bsd-3-clause |
import os
import shutil
import time
import urllib

import xbmc
import xbmcgui
def download(url, dest, dp = None):
    """Download *url* to the filesystem path *dest*, driving a Kodi
    progress dialog via the _pbhook reporthook.

    If no DialogProgress is supplied, one is created and shown at 0%.
    Uses Python 2's ``urllib.urlretrieve``.
    """
    if not dp:
        dp = xbmcgui.DialogProgress()
        dp.create("[COLORgreen]The OptimusGREEN Build[/COLOR]","Downloading & Copying Files",' ', ' ')
        dp.update(0)
    # url=url binds the current URL as a default arg so the lambda does not
    # close over a later value; urlretrieve calls it as (nb, bs, fs).
    urllib.urlretrieve(url,dest,lambda nb, bs, fs, url=url: _pbhook(nb,bs,fs,url,dp))
def _pbhook(numblocks, blocksize, filesize, url, dp):
try:
percent = min((numblocks*blocksize*100)/filesize, 100)
dp.update(percent)
except:
percent = 100
dp.update(percent)
if dp.iscanceled():
raise Exception("Canceled")
dp.close()
################################################################################
################################################################################
##### Build Upgrade to new codename version #####
################################################################################
################################################################################
def build_upgrader(url, url2):
    """Probe the primary then fallback download server and run the installer.

    Tries *url* first, falls back to *url2*; if neither is reachable, informs
    the user and quits Kodi. file_exists() is a project helper defined
    elsewhere in this addon.
    """
    import time  # bug fix: 'time' was used below but never imported in this module

    name = "The_OptimusGREEN_Build"
    dp = xbmcgui.DialogProgress()
    dp.create("[COLORgreen]The OptimusGREEN Build Installer[/COLOR]", "Checking server 1.......")
    if file_exists(url) is True:
        OGB_installer(name, url)
        return
    dp.update(0, "Checking Server 2......")
    if file_exists(url2) is True:
        OGB_installer(name, url2)
        return
    # Both servers unreachable: tell the user, pause so they can read it, quit.
    dp.update(0, "WOW!! Both servers are unreachable, Please try again later.")
    time.sleep(5)
    dp.update(0, "[COLORred]The app will now close[/COLOR], Closing.......")
    xbmc.executebuiltin("Quit")
def OGB_installer(name, url):
    """Download, extract, and post-process the chosen build variant.

    Asks the user to pick Full Fat vs Semi-Skimmed; both variants share the
    same download/extract sequence, but the Semi-Skimmed path additionally
    runs LtL_install() to strip heavyweight add-ons after extraction.
    extract is a project helper module imported elsewhere in this addon.
    """
    import time  # bug fix: 'time' was used below but never imported in this module

    dialog = xbmcgui.Dialog()
    home_fldr = xbmc.translatePath(os.path.join('special://', 'home'))
    addons_fldr = xbmc.translatePath('special://home/addons')
    # NOTE: xbmcgui yesno button order is (nolabel, yeslabel), so True here
    # corresponds to the "Semi-Skimmed" choice -- TODO confirm against the
    # xbmcgui version targeted by this addon.
    device_q = dialog.yesno("[COLORgreen]OptimusGREEN Installer[/COLOR]", "Please Choose A Build", "[COLORgreen]FULL FAT[/COLOR] - Everything included. For more powerful devices.", "[COLORgreen]SEMI-SKIMMED[/COLOR] - Lighter for low end devices like fire stick 1 and boxes with 1gb ram.", "Full Fat", "Semi-Skimmed")

    # Shared download/extract sequence (was duplicated in both branches).
    path = xbmc.translatePath(os.path.join('special://home/addons', 'packages'))
    dp = xbmcgui.DialogProgress()
    dp.create("[COLORgreen]The OptimusGREEN Build[/COLOR]", "Processing.... ", '', 'Please Wait')
    dp.update(0, "", "[COLORgold]Downloading.....[/COLOR]")
    lib = os.path.join(path, name + '.zip')
    try:
        os.remove(lib)  # drop any stale package from a previous run
    except OSError:
        pass
    download(url, lib, dp)
    time.sleep(2)
    dp.update(0, "", "[COLORgold]Extracting Zip Please Wait[/COLOR]")
    print('=======================================')
    # Bug fix: the first branch printed 'addonfolder' before it was ever
    # assigned (NameError); both branches meant the extraction target.
    print(home_fldr)
    print('=======================================')
    extract.all(lib, home_fldr, dp)
    if device_q == True:
        # Semi-Skimmed build: lighten the load before purging leftovers.
        LtL_install(addons_fldr)
    purge_old_stuff_auto(addons_fldr)
################################################################################
################################################################################
##### Cleanup tools #####
################################################################################
################################################################################
def LtL_install(local_folder):
    """Delete heavyweight add-on folders named in the remote 'LtL' list.

    Downloads the list file, intersects it with the directories found under
    *local_folder*, and removes each match (plain files and anything with
    'packages' in its path are left alone).
    """
    remote_file = 'https://www.dropbox.com/s/xs3lltbe5mq3yz1/LtL.txt?dl=1'
    ogtools = 'plugin.program.ogtools'
    path = xbmc.translatePath('special://home/userdata/addon_data/plugin.program.ogtools')
    remote = xbmc.translatePath("special://home/userdata/addon_data/plugin.program.ogtools/ltl.txt")
    addon_data = xbmc.translatePath('special://home/userdata/addon_data/')
    addons = xbmc.translatePath('special://home/userdata/addons/')
    dialog = xbmcgui.Dialog()
    dp = xbmcgui.DialogProgress()
    dp.create('[COLORgreen]OptimusGREEN Tools[/COLOR]', 'Lightening The Load...')
    if not os.path.exists(path):
        os.mkdir(path)
    urllib.urlretrieve(remote_file, remote)
    xbmc.log('### retrieving %s to %s' % (remote_file, remote))
    rfile = open(remote, 'r')
    # NOTE(review): isfile() is tested against the bare entry name (relative
    # to the CWD, not local_folder) -- preserved from the original; verify.
    local_list = [entry for entry in os.listdir(local_folder) if not os.path.isfile(entry)]
    xbmc.log('### adding %s to %s' % ('local dirs', local_list))
    remote_list = [line.strip('\n').strip('\r') for line in rfile.readlines()]
    xbmc.log('### copying %s to %s' % ('file lines', remote_list))
    listed = set(remote_list)
    theload = [entry for entry in local_list if entry in listed]
    for item in theload:
        target = os.path.join(local_folder, item)
        if os.path.isfile(target) == True:
            continue  # never rmtree a plain file
        if 'packages' in target:
            continue  # keep the package cache intact
        shutil.rmtree(target)
        xbmc.log('### removing %s from %s' % (item, local_folder))
    rfile.close()
    dp.close()
def purge_old_stuff_auto(local_folder):
    """Remove local add-on folders that are no longer in the remote build list.

    Downloads the canonical add-on list, then rmtree()s every directory under
    *local_folder* whose name is absent from it (plain files are skipped).
    """
    remote_file = 'https://www.dropbox.com/s/tqyytq10ylks5uu/addon_list.txt?dl=1'
    remote = xbmc.translatePath("special://home/userdata/addon_data/plugin.program.ogtools/remote_file.txt")
    addons = xbmc.translatePath('special://home/addons/')
    addon_data = xbmc.translatePath('special://home/userdata/addon_data/')
    dialog = xbmcgui.Dialog()
    dp = xbmcgui.DialogProgress()
    dp.create('[COLORgreen]OptimusGREEN Tools[/COLOR]', 'Purging old build data...')
    urllib.urlretrieve(remote_file, remote)
    xbmc.log('### retrieving %s to %s' % (remote_file, remote))
    rfile = open(remote, 'r')
    # NOTE(review): isfile() is tested against the bare entry name (relative
    # to the CWD, not local_folder) -- preserved from the original; verify.
    local_list = [entry for entry in os.listdir(local_folder) if not os.path.isfile(entry)]
    xbmc.log('### adding %s to %s' % ('local dirs', local_list))
    remote_list = [line.strip('\n').strip('\r') for line in rfile.readlines()]
    xbmc.log('### copying %s to %s' % ('file lines', remote_list))
    keep = set(remote_list)
    leftovers = [entry for entry in local_list if entry not in keep]
    for item in leftovers:
        target = os.path.join(local_folder, item)
        if os.path.isfile(target) == True:
            continue  # never rmtree a plain file
        shutil.rmtree(target)
        xbmc.log('### removing %s from %s' % (item, local_folder))
    rfile.close()
    dp.close()
ltucker/melk.util | melk/util/functional.py | 1 | 2082 | # Copyright (C) 2008 The Open Planning Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301
# USA
## 'any' and 'all' are not in __builtins__ before
## python 2.5, so define them here if necessary
try:
    # Probe for the builtins: raises NameError on Python < 2.5.
    any, all
except NameError:
    from itertools import ifilter, ifilterfalse

    def all(seq, pred=None):
        "Returns True if pred(x) is true for every element in the iterable"
        for elem in ifilterfalse(pred, seq):
            return False
        return True

    def any(seq, pred=None):
        "Returns True if pred(x) is true for at least one element in the iterable"
        for elem in ifilter(pred, seq):
            return True
        return False

# Re-bind at module level so "from melk.util.functional import any, all"
# works whether the builtins or the fallbacks above are in effect.
any = any
all = all
def flatten(x):
    """flatten(sequence) -> list

    Return a single, flat list which contains all elements retrieved from
    the sequence and all recursively contained sub-sequences (iterables).
    Strings (and bytes) are treated as atomic values, not as iterables.

    Adapted from http://kogs-www.informatik.uni-hamburg.de/~meine/python_tricks

    Examples:
    >>> flatten([1, 2, [3, 4], (5, 6)])
    [1, 2, 3, 4, 5, 6]
    >>> flatten([[[1, 2, 3], (42, None)], [4, 5], [6], 7])
    [1, 2, 3, 42, None, 4, 5, 6, 7]
    """
    # Compat fix: 'basestring' does not exist on Python 3, where the
    # original would raise NameError; fall back to (str, bytes) there.
    try:
        string_types = basestring  # Python 2
    except NameError:
        string_types = (str, bytes)  # Python 3
    result = []
    for el in x:
        if hasattr(el, "__iter__") and not isinstance(el, string_types):
            result.extend(flatten(el))
        else:
            result.append(el)
    return result
| gpl-2.0 |
lvapeab/nmt-keras | setup.py | 1 | 2100 | # -*- coding: utf-8 -*-
from setuptools import setup
# Package metadata and dependencies for NMT-Keras.
# Bug fix: 'numpy' was listed twice in install_requires; the duplicate entry
# has been removed (the dependency set is otherwise unchanged).
setup(name='nmt_keras',
      version='0.6',
      description='Neural Machine Translation with Keras (Theano and Tensorflow).',
      author='Marc Bolaños - Alvaro Peris',
      author_email='lvapeab@gmail.com',
      url='https://github.com/lvapeab/nmt-keras',
      download_url='https://github.com/lvapeab/nmt-keras/archive/master.zip',
      license='MIT',
      classifiers=[
          'Intended Audience :: Developers',
          'Intended Audience :: Education',
          'Intended Audience :: Science/Research',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Topic :: Software Development :: Libraries',
          'Topic :: Software Development :: Libraries :: Python Modules',
          "License :: OSI Approved :: MIT License"
      ],
      install_requires=[
          'cloudpickle',
          'future',
          'keras @ https://github.com/MarcBS/keras/archive/master.zip',
          'keras_applications',
          'keras_preprocessing',
          'h5py',
          'matplotlib',
          'multimodal-keras-wrapper',
          'numpy',
          'scikit-image',
          'scikit-learn',
          'six',
          'tables',
          'pandas',
          'sacrebleu',
          'sacremoses',
          'scipy',
          'tensorflow<2'
      ],
      package_dir={'nmt_keras': '.',
                   'nmt_keras.utils': 'utils',
                   'nmt_keras.data_engine': 'data_engine',
                   'nmt_keras.nmt_keras': 'nmt_keras',
                   'nmt_keras.demo-web': 'demo-web',
                   },
      packages=['nmt_keras',
                'nmt_keras.utils',
                'nmt_keras.data_engine',
                'nmt_keras.nmt_keras',
                'nmt_keras.demo-web'
                ],
      package_data={
          'nmt_keras': ['examples/*']
      }
      )
| mit |
boneyao/sentry | src/sentry/migrations/0177_fill_member_counters.py | 34 | 36807 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
    """Backfill OrganizationMember.counter with 1-based sequential values.

    Walks every organization (with a progress bar) and numbers its members
    in member_set order, writing the counter via a queryset update so no
    model save() hooks fire.
    """
    from sentry.utils.query import RangeQuerySetWrapperWithProgressBar

    Organization = orm['sentry.Organization']
    OrganizationMember = orm['sentry.OrganizationMember']

    all_orgs = Organization.objects.all()
    for org in RangeQuerySetWrapperWithProgressBar(all_orgs):
        for position, member in enumerate(org.member_set.all(), start=1):
            OrganizationMember.objects.filter(id=member.id).update(counter=position)
def backwards(self, orm):
    # Intentionally a no-op: this data migration's counter backfill is
    # not undone on rollback.
    pass
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'audit_actors'", 'to': "orm['sentry.User']"}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'storage': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'storage_options': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.helppage': {
'Meta': {'object_name': 'HelpPage'},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}),
'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'), ('organization', 'counter'))", 'object_name': 'OrganizationMember'},
'counter': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry']
symmetrical = True
| bsd-3-clause |
nkgilley/home-assistant | homeassistant/components/matrix/notify.py | 10 | 1256 | """Support for Matrix notifications."""
import logging
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_MESSAGE,
ATTR_TARGET,
PLATFORM_SCHEMA,
BaseNotificationService,
)
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN, SERVICE_SEND_MESSAGE
_LOGGER = logging.getLogger(__name__)
# Config key naming the room used when a notification carries no explicit target.
CONF_DEFAULT_ROOM = "default_room"
# The platform requires a default room to be configured.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_DEFAULT_ROOM): cv.string})
def get_service(hass, config, discovery_info=None):
    """Set up and return the Matrix notification service."""
    default_room = config[CONF_DEFAULT_ROOM]
    return MatrixNotificationService(default_room)
class MatrixNotificationService(BaseNotificationService):
    """Notification service that relays messages to a Matrix room."""

    def __init__(self, default_room):
        """Store the room to use when no explicit target is given."""
        self._default_room = default_room

    def send_message(self, message="", **kwargs):
        """Forward the message to the matrix integration's send service."""
        targets = kwargs.get(ATTR_TARGET)
        if not targets:
            # No (or empty) target list: fall back to the configured room.
            targets = [self._default_room]
        payload = {ATTR_TARGET: targets, ATTR_MESSAGE: message}
        return self.hass.services.call(
            DOMAIN, SERVICE_SEND_MESSAGE, service_data=payload
        )
| apache-2.0 |
matthewjwolff/LoveLetter | engine/GameEngine.py | 1 | 4596 | '''
Created on Nov 3, 2016
The game loop runner
@author: mjw
'''
from .Deck import Deck
from engine.Princess import Princess
from engine.Action import Action
class GameEngine(object):
    '''
    The engine registers players, instantiates the game, runs the gameplay
    loop, and executes actions that transition the game from state to state.
    '''

    def __init__(self):
        '''Create an engine with no registered players and a fresh deck.'''
        self.origplayers = []  # every registered player; never mutated mid-game
        self.deck = Deck()
        self.running = False
        self.grave = []  # Actions already played (the public discard pile)
        self.discarded = None  # card removed face-down at game start
        self.eliminatedThisRound = None  # player knocked out this turn, if any

    def addPlayer(self, player):
        '''Register a player to take part in subsequent games.'''
        self.origplayers.append(player)

    def runGame(self):
        '''
        Play one complete game and return the winning player.

        The winner is the last surviving player or, if the deck empties
        first, the surviving player holding the highest-value card.
        '''
        # Make a NEW list so eliminations never touch origplayers.
        self.players = list(self.origplayers)
        assert len(self.players) >= 2
        for player in self.players:
            player.assignHand(self.deck.getCard(), self.players)
        # One card is discarded face-down before play begins.
        self.discarded = self.deck.getCard()
        self.running = True
        # Initialize the handmaid (protection) flag for everyone.
        for player in self.players:
            player.handmaidenFlag = False
        while self.running:
            # Iterate over a snapshot: self.eliminate() removes entries from
            # self.players, and mutating a list while iterating over it makes
            # the loop silently skip the player following the eliminated one.
            for player in list(self.players):
                if player not in self.players:
                    # Eliminated earlier in this pass -- no turn for them.
                    continue
                self.eliminatedThisRound = None
                # Handmaid protection lasts until the player's next turn.
                player.handmaidenFlag = False
                card = self.deck.getCard()
                # Players protected by a handmaid cannot be targeted.  The
                # previous remove()-while-iterating filter dropped only every
                # other protected player; a comprehension filters correctly.
                nonHandmaid = [p for p in self.players if not p.handmaidenFlag]
                # The player is told the card dealt, the number of cards left
                # in the deck, the graveyard, and the targetable players.
                action = player.getAction(card, len(self.deck.shuffled),
                                          self.grave, nonHandmaid)
                if action.playedCard == player.hand:
                    # The player played the card he had kept, so the freshly
                    # drawn card becomes his hand.
                    player.hand = card
                action.playedCard.perform(action, self.players, self, self.deck)
                # Tell the other players that a play occurred...
                for oplayer in self.origplayers:
                    if oplayer != player:
                        oplayer.notifyOfAction(action, self.grave)
                # ...then tell players if someone was eliminated by it.
                if self.eliminatedThisRound != None:
                    self.notifyAllEliminate(self.eliminatedThisRound)
                self.grave += [action]
                # End the game if one player remains or the deck is empty.
                if len(self.players) == 1 or self.deck.size() == 0:
                    self.running = False
                    break
        # Highest card among the survivors wins.
        winner = self.players[0]
        # TODO: handle ties?
        for player in self.players:
            if player.hand.value > winner.hand.value:
                winner = player
        return winner

    def eliminate(self, player):
        '''Remove player from the game, recording his hand in the graveyard.'''
        assert(self.eliminatedThisRound == None)
        self.grave.append(Action(player, player.hand, None, None))
        self.players.remove(player)
        self.eliminatedThisRound = player

    def notifyAllEliminate(self, eliminated):
        '''Tell every surviving player that someone was eliminated.'''
        for player in self.players:
            player.notifyEliminate(eliminated)
        self.eliminatedThisRound = None

    def abnormalDiscard(self, player, card):
        '''
        Safely force a player to discard a card, and apply any effects if
        necessary.  Ex: the prince forces a princess discard, and that
        player should lose.  This should only ever be called by the prince.
        '''
        self.grave.append(Action(player, card, None, None))
        if isinstance(card, Princess):
            self.eliminate(player)
        else:
            # The player must be given another card.
            newCard = self.deck.getCard()
            assert(self.discarded != None)
            if newCard != None:
                # The deck gave us a new card.
                player.hand = newCard
            else:
                # The deck is out of cards: give the player the card that was
                # discarded at setup.  This only ever happens if the last card
                # played is the prince.
                player.hand = self.discarded
                self.discarded = None
| gpl-3.0 |
jagill/treeano | examples/REINFORCE/linear.py | 2 | 2456 | from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
from treeano.sandbox.nodes import REINFORCE
# Shorthand for theano's configured float dtype.
fX = theano.config.floatX
# Fixed random affine target (W, b) that the policy must learn to match;
# it is never trained, only used to score sampled actions.
TARGET_WEIGHT = np.random.randn(10, 2).astype(fX)
TARGET_BIAS = np.random.randn(2).astype(fX)
class RewardNode(treeano.NodeImpl):
    # Treeano node that scores sampled actions against the fixed affine
    # target: per row, reward = -||sampled - (state . W + b)||^2.
    input_keys = ("state", "sampled")

    def compute_output(self, network, state_vw, sampled_vw):
        """Compute per-sample rewards and publish them as node outputs."""
        # The target map is baked in as theano constants, so it is not
        # trainable -- the policy has to discover it through reward alone.
        W = T.constant(TARGET_WEIGHT)
        b = T.constant(TARGET_BIAS)
        # dimshuffle("x", 0) broadcasts the bias over the batch axis.
        target = T.dot(state_vw.variable, W) + b.dimshuffle("x", 0)
        # Negative squared error summed over action dimensions: the closer
        # the sample is to the target, the higher the reward.
        reward = -T.sqr(sampled_vw.variable - target).sum(axis=1)
        # Scalar mean reward, exposed for monitoring only.
        network.create_vw(
            "raw_reward",
            variable=T.mean(reward),
            shape=(),
        )
        # NOTE(review): this constant offset merely shifts rewards toward
        # positive values; it is not the variance-reducing REINFORCE
        # baseline -- confirm intent before changing it.
        baseline_reward = 100
        network.create_vw(
            "default",
            variable=reward + baseline_reward,
            shape=(state_vw.shape[0],),
            tags={"output"},
        )
# Number of 10-dimensional states fed per update.
BATCH_SIZE = 64

# Graph: state -> mu (policy mean) -> Gaussian sample -> reward, plus a
# REINFORCE cost node wired to every tensor it needs.
graph = tn.GraphNode(
    "graph",
    [[tn.InputNode("state", shape=(BATCH_SIZE, 10)),
      tn.DenseNode("mu", num_units=2),
      tn.ConstantNode("sigma", value=1.),
      REINFORCE.NormalSampleNode("sampled"),
      RewardNode("reward"),
      REINFORCE.NormalREINFORCECostNode("REINFORCE")],
     [{"from": "state", "to": "mu"},
      {"from": "mu", "to": "sampled", "to_key": "mu"},
      {"from": "sigma", "to": "sampled", "to_key": "sigma"},
      {"from": "sampled", "to": "reward", "to_key": "sampled"},
      {"from": "state", "to": "reward", "to_key": "state"},
      {"from": "state", "to": "REINFORCE", "to_key": "state"},
      {"from": "mu", "to": "REINFORCE", "to_key": "mu"},
      {"from": "sigma", "to": "REINFORCE", "to_key": "sigma"},
      {"from": "reward", "to": "REINFORCE", "to_key": "reward"},
      {"from": "sampled", "to": "REINFORCE", "to_key": "sampled"},
      {"from": "REINFORCE"}]]
)

# Minimize the REINFORCE cost with Adam.
network = tn.AdamNode(
    "adam",
    {"subtree": graph,
     "cost": tn.ReferenceNode("cost", reference="REINFORCE")},
    learning_rate=0.1
).network()

# Compiled function: takes a state batch, returns the mean raw reward, and
# applies the Adam parameter updates as a side effect.
fn = network.function(
    ["state"], [("reward", "raw_reward")], include_updates=True)

# NOTE(review): despite the names 'error'/'errors', these values are the
# mean raw rewards being maximized, not errors.
errors = []
for i in range(5000):
    error, = fn(np.random.randn(BATCH_SIZE, 10).astype(fX))
    if i % 100 == 0:
        print("Iter:", i, "Error:", error)
    errors.append(error)
print("mean reward:", np.mean(errors))
| apache-2.0 |
laborautonomo/bitmask_client | pkg/utils.py | 11 | 2846 | # -*- coding: utf-8 -*-
# utils.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Utils to help in the setup process
"""
import os
import re
import sys
def get_reqs_from_files(reqfiles):
    """
    Return the contents of the first requirement file found as a list
    of lines.  Returns an empty list when none of the candidate files
    exists (the previous implicit ``None`` return broke callers that
    iterate over the result).

    @param reqfiles: requirement files to parse
    @type reqfiles: list of str
    """
    for reqfile in reqfiles:
        if os.path.isfile(reqfile):
            # Close the handle deterministically instead of leaking it
            # until garbage collection.
            with open(reqfile, 'r') as req_file:
                return req_file.read().split('\n')
    return []
def parse_requirements(reqfiles=['requirements.txt',
                                 'requirements.pip',
                                 'pkg/requirements.pip']):
    """
    Parse the first requirement file found and return the package list.

    Lines handled specially: '-e' editable/vcs entries and '-f' index
    locations are dropped; http(s) zipball URLs are reduced to their
    #egg= name; 'argparse' is dropped on Python >= 2.7 (it is in the
    stdlib there); 'PySide' is dropped when LEAP_VENV_SKIP_PYSIDE is
    set to a non-"0" value; comments and blank lines are ignored.

    @param reqfiles: requirement files to parse
    @type reqfiles: list of str
    """
    skip_pyside = os.getenv("LEAP_VENV_SKIP_PYSIDE", "0") != "0"
    requirements = []
    for raw in get_reqs_from_files(reqfiles):
        # -e git://foo.bar/baz/master#egg=foobar -- skip vcs externals.
        if re.match(r'\s*-e\s+', raw):
            continue
        # http://foo.bar/baz/foobar/zipball/master#egg=foobar
        if re.match(r'\s*https?:', raw):
            requirements.append(
                re.sub(r'\s*https?:.*#egg=(.*)$', r'\1', raw))
        elif re.match(r'\s*-f\s+', raw):
            # -f lines are index locations; nothing to install.
            continue
        elif raw == 'argparse' and sys.version_info >= (2, 7):
            # argparse joined the stdlib in 2.7; listing it breaks
            # distro installs.
            continue
        elif raw == 'PySide' and skip_pyside:
            continue
        elif raw.lstrip().startswith('#'):
            # Comment line.
            continue
        elif raw != '':
            requirements.append(raw)
    return requirements
| gpl-3.0 |
jpasosa/parquelareja | plugins/apostrophePlugin/web/js/fckeditor/editor/filemanager/connectors/py/fckcommands.py | 44 | 6491 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2009 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
import os
try: # Windows needs stdio set for binary mode for file upload to work.
import msvcrt
msvcrt.setmode (0, os.O_BINARY) # stdin = 0
msvcrt.setmode (1, os.O_BINARY) # stdout = 1
except ImportError:
pass
from fckutil import *
from fckoutput import *
import config as Config
class GetFoldersCommandMixin (object):
    """Implements the connector's GetFolders command."""

    def getFolders(self, resourceType, currentFolder):
        """Return an XML <Folders> fragment naming every subdirectory of
        currentFolder, mapped into the user files area."""
        # Map the virtual path to our local server path.
        serverPath = mapServerFolder(self.userFilesFolder, currentFolder)
        pieces = ["<Folders>"]
        for entry in os.listdir(serverPath):
            entryPath = mapServerFolder(serverPath, entry)
            if os.path.isdir(entryPath):
                pieces.append('<Folder name="%s" />'
                              % convertToXmlAttribute(entry))
        pieces.append("</Folders>")
        return "".join(pieces)
class GetFoldersAndFilesCommandMixin (object):
    """Implements the connector's GetFoldersAndFiles command."""

    def getFoldersAndFiles(self, resourceType, currentFolder):
        """
        Purpose: command to receive a list of folders and files.

        Returns an XML fragment: a <Folders> node listing subdirectories
        followed by a <Files> node listing files with their sizes.
        """
        # Map the virtual path to our local server
        serverPath = mapServerFolder(self.userFilesFolder, currentFolder)
        # Open the folders / files node
        folders = """<Folders>"""
        files = """<Files>"""
        for someObject in os.listdir(serverPath):
            someObjectPath = mapServerFolder(serverPath, someObject)
            if os.path.isdir(someObjectPath):
                folders += """<Folder name="%s" />""" % (
                    convertToXmlAttribute(someObject)
                )
            elif os.path.isfile(someObjectPath):
                # Stat the file once and reuse the result (previously the
                # size was computed, ignored, and getsize() called again).
                size = os.path.getsize(someObjectPath)
                files += """<File name="%s" size="%s" />""" % (
                    convertToXmlAttribute(someObject),
                    size
                )
        # Close the folders / files node
        folders += """</Folders>"""
        files += """</Files>"""
        return folders + files
class CreateFolderCommandMixin (object):
    # Implements the connector's CreateFolder command (Python 2 code).

    def createFolder(self, resourceType, currentFolder):
        """
        Purpose: command to create a new folder

        OS errors are mapped to FCKeditor error numbers: 0 = ok
        (including "already exists"), 102 = invalid name, 103 =
        permission denied, 110 = unknown error.
        """
        # NOTE(review): if "NewFolderName" is absent, errorNo stays 0 and
        # success is reported without creating anything -- confirm intent.
        errorNo = 0; errorMsg ='';
        if self.request.has_key("NewFolderName"):
            newFolder = self.request.get("NewFolderName", None)
            newFolder = sanitizeFolderName (newFolder)
            try:
                newFolderPath = mapServerFolder(self.userFilesFolder, combinePaths(currentFolder, newFolder))
                self.createServerFolder(newFolderPath)
            except Exception, e:
                errorMsg = str(e).decode('iso-8859-1').encode('utf-8') # warning with encodigns!!!
                if hasattr(e,'errno'):
                    if e.errno==17: #file already exists
                        errorNo=0
                    elif e.errno==13: # permission denied
                        errorNo = 103
                    elif e.errno==36 or e.errno==2 or e.errno==22: # filename too long / no such file / invalid name
                        errorNo = 102
                    else:
                        errorNo = 110
                else:
                    # Exception without an errno: report it as invalid name.
                    errorNo = 102
        return self.sendErrorNode ( errorNo, errorMsg )

    def createServerFolder(self, folderPath):
        "Purpose: physically creates a folder on the server"
        # No need to check if the parent exists, just create all hierachy
        try:
            permissions = Config.ChmodOnFolderCreate
            if not permissions:
                # Chmod explicitly disabled: use default creation modes.
                os.makedirs(folderPath)
        except AttributeError: #ChmodOnFolderCreate undefined
            permissions = 0755
        if permissions:
            # Clear the umask so the requested mode is applied verbatim.
            oldumask = os.umask(0)
            # NOTE(review): the mode is hard-coded to 0755 rather than using
            # the configured 'permissions' value -- confirm intent.
            os.makedirs(folderPath,mode=0755)
            os.umask( oldumask )
class UploadFileCommandMixin (object):
    # Implements the connector's FileUpload command (Python 2 code).

    def uploadFile(self, resourceType, currentFolder):
        """
        Purpose: command to upload files to server (same as FileUpload)

        Error numbers used: 0 = ok, 201 = uploaded but renamed to avoid
        a collision, 202 = no file in the request, 203 = extension not
        allowed for this resource type.
        """
        errorNo = 0
        if self.request.has_key("NewFile"):
            # newFile has all the contents we need
            newFile = self.request.get("NewFile", "")
            # Get the file name
            newFileName = newFile.filename
            newFileName = sanitizeFileName( newFileName )
            newFileNameOnly = removeExtension(newFileName)
            newFileExtension = getExtension(newFileName).lower()
            allowedExtensions = Config.AllowedExtensions[resourceType]
            deniedExtensions = Config.DeniedExtensions[resourceType]
            # An allow-list takes precedence over a deny-list; with neither
            # configured, every extension is accepted.
            if (allowedExtensions):
                # Check for allowed
                isAllowed = False
                if (newFileExtension in allowedExtensions):
                    isAllowed = True
            elif (deniedExtensions):
                # Check for denied
                isAllowed = True
                if (newFileExtension in deniedExtensions):
                    isAllowed = False
            else:
                # No extension limitations
                isAllowed = True
            if (isAllowed):
                # Upload to operating system
                # Map the virtual path to the local server path
                currentFolderPath = mapServerFolder(self.userFilesFolder, currentFolder)
                i = 0
                # Loop until a free file name is found; each collision appends
                # an incrementing "(NNNN)" suffix and retries.
                # NOTE(review): exists()-then-write is racy (TOCTOU) if two
                # uploads target the same name concurrently -- confirm whether
                # that matters in this deployment.
                while (True):
                    newFilePath = os.path.join (currentFolderPath,newFileName)
                    if os.path.exists(newFilePath):
                        i += 1
                        newFileName = "%s(%04d).%s" % (
                            newFileNameOnly, i, newFileExtension
                        )
                        errorNo= 201 # file renamed
                    else:
                        # Read file contents and write to the desired path (similar to php's move_uploaded_file)
                        fout = file(newFilePath, 'wb')
                        # Copy the upload stream in 100 KB chunks.
                        while (True):
                            chunk = newFile.file.read(100000)
                            if not chunk: break
                            fout.write (chunk)
                        fout.close()
                        if os.path.exists ( newFilePath ):
                            # Optionally chmod the new file per configuration.
                            doChmod = False
                            try:
                                doChmod = Config.ChmodOnUpload
                                permissions = Config.ChmodOnUpload
                            except AttributeError: #ChmodOnUpload undefined
                                doChmod = True
                                permissions = 0755
                            if ( doChmod ):
                                # Clear the umask so the mode applies verbatim.
                                oldumask = os.umask(0)
                                os.chmod( newFilePath, permissions )
                                os.umask( oldumask )
                        newFileUrl = self.webUserFilesFolder + currentFolder + newFileName
                        return self.sendUploadResults( errorNo , newFileUrl, newFileName )
            else:
                return self.sendUploadResults( errorNo = 203, customMsg = "Extension not allowed" )
        else:
            return self.sendUploadResults( errorNo = 202, customMsg = "No File" )
| mit |
hwu25/AppPkg | Applications/Python/Python-2.7.2/Lib/test/fork_wait.py | 12 | 2370 | """This test case provides support for checking forking and wait behavior.
To test different wait behavior, override the wait_impl method.
We want fork1() semantics -- only the forking thread survives in the
child after a fork().
On some systems (e.g. Solaris without posix threads) we find that all
active threads survive in the child after a fork(); this is an error.
While BeOS doesn't officially support fork and native threading in
the same application, the present example should work just fine. DC
"""
import os, sys, time, unittest
import test.test_support as test_support
thread = test_support.import_module('thread')
LONGSLEEP = 2
SHORTSLEEP = 0.5
NUM_THREADS = 4
class ForkWait(unittest.TestCase):
    """Check that only the forking thread survives in the child after fork().

    Subclasses override wait_impl() to exercise different wait()-family
    calls against the forked child.
    """

    def setUp(self):
        self.alive = {}  # thread id -> pid most recently observed by that thread
        self.stop = 0    # set to 1 to make the worker threads exit

    def f(self, id):
        # Worker loop: keep recording our current pid until told to stop.
        while not self.stop:
            self.alive[id] = os.getpid()
            try:
                time.sleep(SHORTSLEEP)
            except IOError:
                # sleep() may be interrupted; just retry.
                pass

    def wait_impl(self, cpid):
        # Default wait strategy: poll with WNOHANG so a wedged child cannot
        # hang the whole test run.
        for i in range(10):
            # waitpid() shouldn't hang, but some of the buildbots seem to hang
            # in the forking tests. This is an attempt to fix the problem.
            spid, status = os.waitpid(cpid, os.WNOHANG)
            if spid == cpid:
                break
            time.sleep(2 * SHORTSLEEP)
        self.assertEqual(spid, cpid)
        # Low byte of status = termination cause, high byte = exit code.
        self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))

    def test_wait(self):
        for i in range(NUM_THREADS):
            thread.start_new(self.f, (i,))
        # Give every worker time to record itself in self.alive.
        time.sleep(LONGSLEEP)
        a = self.alive.keys()  # Python 2: keys() returns a sortable list
        a.sort()
        self.assertEqual(a, range(NUM_THREADS))
        prefork_lives = self.alive.copy()
        if sys.platform in ['unixware7']:
            cpid = os.fork1()  # fork1(): only the calling thread survives
        else:
            cpid = os.fork()
        if cpid == 0:
            # Child: wait, then count entries whose pid changed -- a change
            # means a worker thread survived the fork and wrote the child's
            # pid.  Exit with that count so the parent's status==0 check
            # fails if any thread survived.
            time.sleep(LONGSLEEP)
            n = 0
            for key in self.alive:
                if self.alive[key] != prefork_lives[key]:
                    n += 1
            os._exit(n)
        else:
            # Parent
            self.wait_impl(cpid)
            # Tell threads to die
            self.stop = 1
            time.sleep(2*SHORTSLEEP) # Wait for threads to die
| bsd-2-clause |
larroy/clearskies_core | tools/gyp/test/variables/commands/gyptest-commands-ignore-env.py | 330 | 1466 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test that environment variables are ignored when --ignore-environment is
specified.
"""
import os
import TestGyp
# Build the test driver using the 'gypd' (debug dump) generator.
test = TestGyp.TestGyp(format='gypd')

# Poison every GYP_* environment variable; with --ignore-environment none
# of these values may influence the run.
os.environ['GYP_DEFINES'] = 'FOO=BAR'
os.environ['GYP_GENERATORS'] = 'foo'
os.environ['GYP_GENERATOR_FLAGS'] = 'genflag=foo'
os.environ['GYP_GENERATOR_OUTPUT'] = 'somedir'

# Expected stdout transcript, with Windows line endings normalized.
expect = test.read('commands.gyp.ignore-env.stdout').replace('\r\n', '\n')

test.run_gyp('commands.gyp',
             '--debug', 'variables',
             '--ignore-environment',
             stdout=expect, ignore_line_numbers=True)

# Verify the commands.gypd against the checked-in expected contents.
#
# Normally, we should canonicalize line endings in the expected
# contents file setting the Subversion svn:eol-style to native,
# but that would still fail if multiple systems are sharing a single
# workspace on a network-mounted file system.  Consequently, we
# massage the Windows line endings ('\r\n') in the output to the
# checked-in UNIX endings ('\n').
contents = test.read('commands.gypd').replace('\r', '')
expect = test.read('commands.gypd.golden').replace('\r', '')
if not test.match(contents, expect):
  # Python 2 print statement (this script targets Python 2).
  print "Unexpected contents of `commands.gypd'"
  test.diff(expect, contents, 'commands.gypd ')
  test.fail_test()
test.pass_test()
| lgpl-3.0 |
bigfootproject/sahara | sahara/plugins/cdh/v5_3_0/db_helper.py | 6 | 2948 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import six
from sahara import conductor
from sahara import context
from sahara.utils import files
conductor = conductor.API
def get_hive_db_password(cluster):
    """Return the Hive metastore DB password for the cluster, generating
    and persisting a random one in the cluster extra data on first use."""
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, cluster.id)
    current = cluster.extra.get('hive_db_password') if cluster.extra else None
    if current:
        return current
    # First request for this cluster: mint and store a random password.
    generated = six.text_type(uuid.uuid4())
    extra = cluster.extra.to_dict() if cluster.extra else {}
    extra['hive_db_password'] = generated
    conductor.cluster_update(ctx, cluster, {'extra': extra})
    return generated
def create_hive_database(cluster, remote):
    """Create the Hive metastore database on the manager node by uploading
    and running the bundled SQL script, then removing it."""
    script_name = 'create_hive_db.sql'
    sql = files.get_file_text(
        'plugins/cdh/v5_3_0/resources/create_hive_db.sql') % (
            get_hive_db_password(cluster))
    remote.write_file_to(script_name, sql)
    # Authenticate as cloudera-scm with the password generated by the
    # embedded postgres install and run the script against the 'scm' DB.
    run_sql = ('PGPASSWORD=$(sudo head -1 /var/lib/cloudera-scm-server-db'
               '/data/generated_password.txt) psql -U cloudera-scm '
               '-h localhost -p 7432 -d scm -f %s') % script_name
    remote.execute_command(run_sql)
    remote.execute_command('rm %s' % script_name)
def get_sentry_db_password(cluster):
    """Return the Sentry DB password for the cluster, generating and
    persisting a random one in the cluster extra data on first use."""
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, cluster.id)
    current = (cluster.extra.get('sentry_db_password')
               if cluster.extra else None)
    if current:
        return current
    # First request for this cluster: mint and store a random password.
    generated = six.text_type(uuid.uuid4())
    extra = cluster.extra.to_dict() if cluster.extra else {}
    extra['sentry_db_password'] = generated
    conductor.cluster_update(ctx, cluster, {'extra': extra})
    return generated
def create_sentry_database(cluster, remote):
    """Create the Sentry database on the manager node by uploading and
    running the bundled SQL script, then removing it."""
    script_name = 'create_sentry_db.sql'
    sql = files.get_file_text(
        'plugins/cdh/v5_3_0/resources/create_sentry_db.sql') % (
            get_sentry_db_password(cluster))
    remote.write_file_to(script_name, sql)
    # Authenticate as cloudera-scm with the password generated by the
    # embedded postgres install and run the script against the 'scm' DB.
    run_sql = ('PGPASSWORD=$(sudo head -1 /var/lib/cloudera-scm-server-db'
               '/data/generated_password.txt) psql -U cloudera-scm '
               '-h localhost -p 7432 -d scm -f %s') % script_name
    remote.execute_command(run_sql)
    remote.execute_command('rm %s' % script_name)
| apache-2.0 |
manderson23/NewsBlur | utils/monitor_redis_bgsave.py | 4 | 1284 | #!/srv/newsblur/venv/newsblur/bin/python
import sys
sys.path.append('/srv/newsblur')
import os
import datetime
import requests
import settings
import socket
def main():
    """Email an alert via Mailgun if the Redis RDB dump looks stale."""
    # mtime of the RDB dump as epoch seconds, via stat(1); keep first line.
    t = os.popen('stat -c%Y /var/lib/redis/dump.rdb')
    timestamp = t.read().split('\n')[0]
    modified = datetime.datetime.fromtimestamp(int(timestamp))
    ten_min_ago = datetime.datetime.now() - datetime.timedelta(minutes=10)
    hostname = socket.gethostname()
    # NOTE(review): despite the name, this is a timedelta (age of the
    # dump), not a number of minutes.
    modified_minutes = datetime.datetime.now() - modified
    # Recent server log excerpt to include in the alert body.
    log_tail = os.popen('tail -n 100 /var/log/redis.log').read()
    if modified < ten_min_ago:
        # Dump is stale: email the first configured admin through the
        # Mailgun HTTP API.
        requests.post(
            "https://api.mailgun.net/v2/%s/messages" % settings.MAILGUN_SERVER_NAME,
            auth=("api", settings.MAILGUN_ACCESS_KEY),
            data={"from": "NewsBlur Redis Monitor: %s <admin@%s.newsblur.com>" % (hostname, hostname),
                  "to": [settings.ADMINS[0][1]],
                  "subject": "%s hasn't bgsave'd redis in %s!" % (hostname, modified_minutes),
                  "text": "Last modified %s: %s ago\n\n----\n\n%s" % (hostname, modified_minutes, log_tail)})
    else:
        # Python 2 print statement (this script targets Python 2).
        print " ---> Redis bgsave fine: %s / %s ago" % (hostname, modified_minutes)
if __name__ == '__main__':
main()
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.