repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
ptisserand/ansible | lib/ansible/modules/system/svc.py | 63 | 9262 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: interface stability and support tier.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: svc
author:
- Brian Coca (@bcoca)
version_added: "1.9"
short_description: Manage daemontools services
description:
- Controls daemontools services on remote hosts using the svc utility.
options:
name:
description:
- Name of the service to manage.
required: true
state:
description:
- C(Started)/C(stopped) are idempotent actions that will not run
commands unless necessary. C(restarted) will always bounce the
svc (svc -t) and C(killed) will always bounce the svc (svc -k).
C(reloaded) will send a sigusr1 (svc -1).
C(once) will run a normally downed svc once (svc -o), not really
an idempotent operation.
choices: [ killed, once, reloaded, restarted, started, stopped ]
downed:
description:
- Should a 'down' file exist or not, if it exists it disables auto startup.
defaults to no. Downed does not imply stopped.
type: bool
default: 'no'
enabled:
description:
- Whether the service is enabled or not, if disabled it also implies stopped.
Make note that a service can be enabled and downed (no auto restart).
type: bool
service_dir:
description:
- directory svscan watches for services
default: /service
service_src:
description:
- directory where services are defined, the source of symlinks to service_dir.
'''
EXAMPLES = '''
- name: Start svc dnscache, if not running
svc:
name: dnscache
state: started
- name: Stop svc dnscache, if running
svc:
name: dnscache
state: stopped
- name: Kill svc dnscache, in all cases
svc:
name: dnscache
state: killed
- name: Restart svc dnscache, in all cases
svc:
name: dnscache
state: restarted
- name: Reload svc dnscache, in all cases
svc:
name: dnscache
state: reloaded
- name: Using alternative svc directory location
svc:
name: dnscache
state: reloaded
service_dir: /var/service
'''
import os
import re
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def _load_dist_subclass(cls, *args, **kwargs):
'''
Used for derivative implementations
'''
subclass = None
distro = kwargs['module'].params['distro']
# get the most specific superclass for this platform
if distro is not None:
for sc in cls.__subclasses__():
if sc.distro is not None and sc.distro == distro:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
class Svc(object):
    """
    Main class that handles daemontools, can be subclassed and overridden in case
    we want to use a 'derivative' like encore, s6, etc
    """

    # def __new__(cls, *args, **kwargs):
    # return _load_dist_subclass(cls, args, kwargs)

    def __init__(self, module):
        # Extra directories searched for the svc/svstat binaries.
        self.extra_paths = ['/command', '/usr/local/bin']
        # Attribute names exposed to callers via report().
        self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']

        self.module = module

        self.name = module.params['name']
        self.service_dir = module.params['service_dir']
        self.service_src = module.params['service_src']
        self.enabled = None
        self.downed = None
        self.full_state = None
        self.state = None
        self.pid = None
        self.duration = None

        self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths)
        self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths)
        # Path of the (sym)link svscan watches, and of the service source dir.
        self.svc_full = '/'.join([self.service_dir, self.name])
        self.src_full = '/'.join([self.service_src, self.name])

        # A service is "enabled" when its link exists in the scanned dir.
        self.enabled = os.path.lexists(self.svc_full)
        if self.enabled:
            # A 'down' file disables automatic startup, not the service itself.
            self.downed = os.path.lexists('%s/down' % self.svc_full)
            self.get_status()
        else:
            self.downed = os.path.lexists('%s/down' % self.src_full)
            self.state = 'stopped'

    def enable(self):
        """Enable the service by linking its source dir into service_dir."""
        if os.path.exists(self.src_full):
            try:
                os.symlink(self.src_full, self.svc_full)
            except OSError as e:
                self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
        else:
            self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)

    def disable(self):
        """Disable the service: remove the link, then stop it and its logger."""
        try:
            os.unlink(self.svc_full)
        except OSError as e:
            self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))
        # svc -dx: bring the service down and tell supervise to exit.
        self.execute_command([self.svc_cmd, '-dx', self.src_full])

        src_log = '%s/log' % self.src_full
        if os.path.exists(src_log):
            self.execute_command([self.svc_cmd, '-dx', src_log])

    def get_status(self):
        """Parse svstat output into state/pid/duration/full_state attributes."""
        (rc, out, err) = self.execute_command([self.svstat_cmd, self.svc_full])

        if err is not None and err:
            # svstat itself failed; expose its stderr as the state.
            self.full_state = self.state = err
        else:
            self.full_state = out
            # Typical output: "/service/foo: up (pid 123) 4 seconds"

            m = re.search(r'\(pid (\d+)\)', out)
            if m:
                self.pid = m.group(1)

            m = re.search(r'(\d+) seconds', out)
            if m:
                self.duration = m.group(1)

            # Base state stem; the 'ing'/'ed' suffix is appended below
            # ('stopp' + 'ed' -> 'stopped', matched by the stopp() alias).
            if re.search(' up ', out):
                self.state = 'start'
            elif re.search(' down ', out):
                self.state = 'stopp'
            else:
                self.state = 'unknown'
                return

            # ' want ' in the output means a transition is still in progress.
            if re.search(' want ', out):
                self.state += 'ing'
            else:
                self.state += 'ed'

    def start(self):
        return self.execute_command([self.svc_cmd, '-u', self.svc_full])

    def stopp(self):
        # Alias used by main()'s dispatch: 'stopped'[:-2] == 'stopp'.
        return self.stop()

    def stop(self):
        return self.execute_command([self.svc_cmd, '-d', self.svc_full])

    def once(self):
        return self.execute_command([self.svc_cmd, '-o', self.svc_full])

    def reload(self):
        # svc -1 sends SIGUSR1 to the service.
        return self.execute_command([self.svc_cmd, '-1', self.svc_full])

    def restart(self):
        return self.execute_command([self.svc_cmd, '-t', self.svc_full])

    def kill(self):
        return self.execute_command([self.svc_cmd, '-k', self.svc_full])

    def execute_command(self, cmd):
        """Run cmd (a list of arguments); return (rc, stdout, stderr)."""
        try:
            (rc, out, err) = self.module.run_command(' '.join(cmd))
        except Exception as e:
            self.module.fail_json(msg="failed to execute: %s" % to_native(e), exception=traceback.format_exc())
        return (rc, out, err)

    def report(self):
        """Refresh status and return a dict of the attributes in report_vars."""
        self.get_status()
        states = {}
        for k in self.report_vars:
            states[k] = self.__dict__[k]
        return states
# ===========================================
# Main control flow
def main():
    """Module entry point: parse arguments and converge the service state.

    Handles three independent knobs in order: enabled (link in
    service_dir), state (svc commands) and downed ('down' file).
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']),
            enabled=dict(type='bool'),
            downed=dict(type='bool'),
            dist=dict(type='str', default='daemontools'),
            service_dir=dict(type='str', default='/service'),
            service_src=dict(type='str', default='/etc/service'),
        ),
        supports_check_mode=True,
    )

    # Force a C locale so svstat output can be parsed reliably.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    state = module.params['state']
    enabled = module.params['enabled']
    downed = module.params['downed']

    svc = Svc(module)
    changed = False

    if enabled is not None and enabled != svc.enabled:
        changed = True
        if not module.check_mode:
            try:
                if enabled:
                    svc.enable()
                else:
                    svc.disable()
            except (OSError, IOError) as e:
                # Fixed message (was "Could change service link").
                module.fail_json(msg="Could not change service link: %s" % to_native(e))

    if state is not None and state != svc.state:
        changed = True
        if not module.check_mode:
            # Map the requested state to the Svc method implementing it.
            # state[:-2] turns 'started' -> 'start', 'stopped' -> 'stopp'
            # (a deliberate alias on Svc), etc., but would turn 'once' into
            # the nonexistent method 'on', so 'once' is dispatched explicitly.
            if state == 'once':
                svc.once()
            else:
                getattr(svc, state[:-2])()

    if downed is not None and downed != svc.downed:
        changed = True
        if not module.check_mode:
            d_file = "%s/down" % svc.svc_full
            try:
                if downed:
                    # Create (or touch) the 'down' file to disable auto start.
                    open(d_file, "a").close()
                else:
                    os.unlink(d_file)
            except (OSError, IOError) as e:
                # Fixed message (was "Could change downed file").
                module.fail_json(msg="Could not change downed file: %s " % (to_native(e)))

    module.exit_json(changed=changed, svc=svc.report())


if __name__ == '__main__':
    main()
| gpl-3.0 |
waiducom/jaikuengine | common/component.py | 34 | 2277 | # Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import os.path
from django.conf import settings
# public variable with the intent of referencing it in templates
# and allowing tests to easily adjust the values
# Maps component name -> imported module object; populated by
# install_components() below.
loaded = {}
def install_components():
  """Import every enabled package under components/ into `loaded`.

  A component may be turned off by setting COMPONENT_<NAME>_DISABLED = True
  in Django settings.  Non-directory entries, bad module names (.svn, ...)
  and components that fail to import are skipped with a debug log.
  """
  global loaded
  root_dir = os.path.dirname(os.path.dirname(__file__))
  component_dir = os.path.join(root_dir, 'components')
  possibles = os.listdir(component_dir)
  logging.info("Trying to load components in %s...", possibles)
  for p in possibles:
    # Skip anything explicitly disabled in settings.
    # BUG FIX: the setting is named COMPONENT_*_DISABLED, so a True value
    # must *skip* the component.  The previous code read the flag into a
    # variable called `is_enabled` defaulting to True and skipped on falsy
    # values, meaning COMPONENT_X_DISABLED = True never disabled anything
    # (and False perversely did).
    is_disabled = getattr(settings, 'COMPONENT_%s_DISABLED' % (p.upper()), False)
    if is_disabled:
      continue
    path = os.path.join(component_dir, p)
    if not os.path.isdir(path):
      logging.debug("Not a dir %s", p)
      continue
    try:
      loaded[p] = __import__('components.%s' % p, {}, {}, p)
      logging.debug('Loaded component: %s', p)
    except ValueError:
      # bad module name, things like .svn and whatnot trigger this
      continue
    except ImportError:
      import traceback
      logging.debug('Exception loading component: %s', traceback.format_exc())
      continue
def include(*names):
  """Return the first truthy loaded component among `names`.

  When nothing matches, the result of the final lookup (normally None)
  is returned instead.
  """
  global loaded
  for candidate in names:
    found = loaded.get(candidate)
    if found:
      return found
  return found
def require(*names):
  """Like include(), but raise when no component could be found."""
  component = include(*names)
  if not component:
    raise Exception("Ultimate doom")
  return component
class LoadedOrDummy(object):
  """Mapping-like view over components, falling back to dummy_<key>."""

  def __getitem__(self, key):
    component = include(key, "dummy_%s" % key)
    if component:
      return component
    raise KeyError(key)

  def __contains__(self, key):
    return bool(include(key, "dummy_%s" % key))


best = LoadedOrDummy()
| apache-2.0 |
3nids/QGIS | python/plugins/processing/algs/grass7/ext/v_net_distance.py | 30 | 3316 | # -*- coding: utf-8 -*-
"""
***************************************************************************
v_net_distance.py
---------------------
Date : December 2015
Copyright : (C) 2015 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'December 2015'
__copyright__ = '(C) 2015, Médéric Ribreux'
import os
from .v_net import variableOutput
from processing.tools.system import getTempFilename
from qgis.core import QgsProcessingParameterString
def processCommand(alg, parameters, context, feedback):
    """ Handle data preparation for v.net.distance:
    * Integrate point layers into network vector map.
    * Make v.net.distance use those layers.
    * Delete the threshold parameter.
    * If where statement, connect to the db
    """
    # Grab the point layer and delete this parameter
    lineLayer = alg.exportedLayers['input']
    fromLayer = alg.exportedLayers['flayer']
    toLayer = alg.exportedLayers['tlayer']
    # Temporary GRASS map names for the intermediate and final networks.
    intLayer = 'bufnet' + os.path.basename(getTempFilename())
    netLayer = 'net' + os.path.basename(getTempFilename())
    threshold = alg.parameterAsDouble(parameters, 'threshold', context)

    # Create the v.net connect command for from_layer integration
    # (the points become node layer 2 of the network).
    command = 'v.net -s input={} points={} output={} operation=connect threshold={} arc_layer=1 node_layer=2'.format(
        lineLayer, fromLayer, intLayer, threshold)
    alg.commands.append(command)

    # Do it again with to_layer (as node layer 3).
    command = 'v.net -s input={} points={} output={} operation=connect threshold={} arc_layer=1 node_layer=3'.format(
        intLayer, toLayer, netLayer, threshold)
    alg.commands.append(command)

    # Connect the point layer database to the layer 2 of the network
    command = 'v.db.connect -o map={} table={} layer=2'.format(netLayer, fromLayer)
    alg.commands.append(command)
    command = 'v.db.connect -o map={} table={} layer=3'.format(netLayer, toLayer)
    alg.commands.append(command)

    # remove undesired parameters and make the merged network the input
    alg.removeParameter('flayer')
    alg.removeParameter('tlayer')
    alg.removeParameter('threshold')
    alg.exportedLayers['input'] = netLayer

    # Add the two new parameters; layer indices 2 and 3 match the node
    # layers created by the v.net commands above.
    fLayer = QgsProcessingParameterString('from_layer', None, 2, False, False)
    alg.addParameter(fLayer)
    tLayer = QgsProcessingParameterString('to_layer', None, 3, False, False)
    alg.addParameter(tLayer)

    alg.processCommand(parameters, context, feedback)
def processOutputs(alg, parameters, context, feedback):
    """Declare the single line-vector 'output' map produced by v.net.distance."""
    spec = {'output': ['output', 'line', 1, True]}
    variableOutput(alg, spec, parameters, context)
| gpl-2.0 |
geodynamics/gale | boost/libs/python/test/calling_conventions.py | 44 | 1638 | # Copyright Nicolas Lelong, 2010. Distributed under the Boost
# Software License, Version 1.0 (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
"""
>>> from calling_conventions_ext import *
>>> f_0__cdecl()
17041
>>> f_1__cdecl(1)
1
>>> f_2__cdecl(1, 2)
21
>>> f_3__cdecl(1, 2, 3)
321
>>> f_4__cdecl(1, 2, 3, 4)
4321
>>> f_5__cdecl(1, 2, 3, 4, 5)
54321
>>> f_6__cdecl(1, 2, 3, 4, 5, 6)
654321
>>> f_7__cdecl(1, 2, 3, 4, 5, 6, 7)
7654321
>>> f_8__cdecl(1, 2, 3, 4, 5, 6, 7, 8)
87654321
>>> f_9__cdecl(1, 2, 3, 4, 5, 6, 7, 8, 9)
987654321
>>> f_0__stdcall()
17041
>>> f_1__stdcall(1)
1
>>> f_2__stdcall(1, 2)
21
>>> f_3__stdcall(1, 2, 3)
321
>>> f_4__stdcall(1, 2, 3, 4)
4321
>>> f_5__stdcall(1, 2, 3, 4, 5)
54321
>>> f_6__stdcall(1, 2, 3, 4, 5, 6)
654321
>>> f_7__stdcall(1, 2, 3, 4, 5, 6, 7)
7654321
>>> f_8__stdcall(1, 2, 3, 4, 5, 6, 7, 8)
87654321
>>> f_9__stdcall(1, 2, 3, 4, 5, 6, 7, 8, 9)
987654321
>>> f_0__fastcall()
17041
>>> f_1__fastcall(1)
1
>>> f_2__fastcall(1, 2)
21
>>> f_3__fastcall(1, 2, 3)
321
>>> f_4__fastcall(1, 2, 3, 4)
4321
>>> f_5__fastcall(1, 2, 3, 4, 5)
54321
>>> f_6__fastcall(1, 2, 3, 4, 5, 6)
654321
>>> f_7__fastcall(1, 2, 3, 4, 5, 6, 7)
7654321
>>> f_8__fastcall(1, 2, 3, 4, 5, 6, 7, 8)
87654321
>>> f_9__fastcall(1, 2, 3, 4, 5, 6, 7, 8, 9)
987654321
"""
def run(args=None):
    """Run this module's doctests, optionally overriding sys.argv.

    Returns the doctest.testmod() result tuple (failed, attempted).
    """
    import doctest
    import sys
    if args is not None:
        sys.argv = args
    module = sys.modules.get(__name__)
    return doctest.testmod(module)
if __name__ == '__main__':
    # Python 2 script entry point: run the doctests above and exit with the
    # number of failures as the process status (0 == success).
    print "running..."
    import sys
    status = run()[0]
    if (status == 0): print "Done."
    sys.exit(status)
| gpl-2.0 |
4Kaylum/SkyBot | cogs/utils/objection_frame.py | 1 | 3991 | class ObjectionFrame(object):
DEFENSE_CHARACTERS = {
"Apollo Justice": 60,
"Mia Fey": 15,
"Miles Edgeworth (Defense)": 690,
"Phoenix Wright": 1,
}
PROSECUTION_CHARACTERS = {
"Franziska von Karma": 21,
"Godot": 175,
"Klavier Gavin": 65,
"Manfred von Karma": 27,
"Miles Edgeworth": 5,
"Miles Edgeworth (Young)": 10,
"Winston Payne": 19,
"Winston Payne (Old)": 438,
"Winston Payne (Young)": 564,
}
COUNSEL_CHARACTERS = {
"Diego Armando": 45,
# "Ema Skye": 353,
"Kristoph Gavin": 459,
"Maggey Byrde": 370,
"Marvin Grossberg": 360,
"Maya Fey": 103,
"Mia (as Maya)": 725,
"Mia Fey": 121,
"Phoenix Wright (Old)": 434,
"Trucy Wright": 560,
}
JUDGE_CHARACTERS = {
"Judge's Brother": 125,
"Judge": 30,
"Judge (AJ)": 606,
}
WITNESS_CHARACTERS = {
"Acro": 322,
"Adrian Andrews": 550,
"Angel Starr": 419,
"April May": 313,
"BellBoy": 548,
"Benjamin": 633,
"Benjamin & Trilo": 634,
"Bikini": 641,
"Cody Hackins": 276,
"Delila Hawthorne": 164,
"Damon Gant": 92,
"Daryan Crescend": 734,
"Dee Vasquez": 426,
"Desiree DeLite": 649,
"Dick Gumshoe": 130,
"Diego Armando": 653,
"Director Hotti": 672,
"Drew Misham": 655,
"Elise Deuxnim": 678,
"Ema Skye": 496,
"Frank Sahwit": 71,
"Franziska von Karma": 361,
"Furio Tigre": 285,
"Godot": 482,
"Guy Eldoon": 659,
"Ini Miney": 245,
"Iris": 261,
"Jake Marshall": 152,
"Jean Armstrong": 489,
"Klavier Gavin": 702,
"Kristoph Gavin": 523,
"Lana Skye": 194,
"Larry Butz": 206,
"Lisa Basil": 675,
"Lotta Hart": 113,
"Luke Atmey": 234,
"Maggey Byrde": 386,
"Manfred von Karma": 366,
"Marvin Grossberg": 668,
"Matt Engarde": 253,
"Maxamillion Galactica": 579,
"Maya Fey": 77,
"Mia (as Maya)": 730,
"Mia Fey": 541,
"Mike Meekins": 391,
"Miles Edgeworth": 220,
"Moe": 572,
"Morgan Fey": 554,
"Pearl Fey": 469,
"Penny Nichols": 630,
"Phoenix Wright": 303,
"Phoenix Wright (Old)": 461,
"Redd White": 107,
"Regina Berry": 294,
"Richard Wellington": 371,
"Ron DeLite": 331,
"Sal Manella": 347,
"Shelly de Killer": 617,
"Shark Brushel": 708,
"Terry Fawles": 620,
"Trucy Wright": 589,
"Vera Misham": 444,
"Victor Kudo": 379,
"Viola Cadaverini": 680,
"Wendy Oldbag": 228,
"Will Powers": 584,
"Winfred Kitaki": 722,
"Wocky Kitaki": 712,
"Yanni Yogi": 338,
}
def __init__(self, text:str, character:str):
self.text = text
self.character = self.get_character_id(character)
@classmethod
def get_character_id(cls, name):
for characters in [cls.DEFENSE_CHARACTERS, cls.PROSECUTION_CHARACTERS, cls.COUNSEL_CHARACTERS, cls.JUDGE_CHARACTERS, cls.WITNESS_CHARACTERS]:
for i, o in characters.items():
if name.lower() == i.lower():
return o
raise KeyError()
def to_json(self):
return {
"id": -1,
"text": self.text,
"poseId": self.character,
"bubbleType": 0,
"username":"",
"mergeNext": False,
"doNotTalk": False,
"goNext": False,
"poseAnimation": False,
"flipped": False,
"frameActions": [
],
"frameFades": [
],
"background": None,
"characterId": None,
"popupId": None
}
| gpl-3.0 |
google-research/language | language/conpono/cpc/preproc/ccnews_preproc_pipeline.py | 1 | 6627 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Beam pipeline to convert CC News to shareded TFRecords."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import app
from absl import flags
import apache_beam as beam
from bert import tokenization
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
import unidecode
flags.DEFINE_string(
"input_file", None, "Path to raw input files."
"Assumes the filenames wiki.{train|valid|test}.raw")
flags.DEFINE_string("output_file", None, "Output TF example file.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_integer("max_sent_length", 70, "Maximum sequence length.")
flags.DEFINE_integer("max_para_length", 30, "Maximum sequence length.")
flags.DEFINE_integer("random_seed", 12345, "A random seed")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_enum("dataset", "ccnews", ["ccnews", "wiki"], "Dataset name.")
FLAGS = flags.FLAGS
def read_file(filename):
  """Read the contents of filename (str) and split it into documents.

  Each line of the input file is a sentence; empty lines delimit
  paragraphs, and each paragraph becomes its own document, capped at
  FLAGS.max_para_length sentences.  Documents with fewer than 8
  sentences are dropped.

  Returns:
    A list of documents, each a list of sentence strings.
  """
  # Input file format:
  # See internal README for an example
  # Each file is a list of lines. Each line is a sentence. Empty lines denote
  # delimiters between paragraphs. Each paragraph is its own document.
  # (The redundant `all_documents = []` initialization that was immediately
  # overwritten has been removed.)
  all_documents = [[]]
  with tf.gfile.GFile(filename, "r") as reader:
    for line in reader:
      line = line.strip()
      if not line:
        # Paragraph boundary: start a new document if the current one
        # has content.
        if all_documents[-1]:
          all_documents.append([])
        continue
      all_documents[-1].append(line)
      if len(all_documents[-1]) == FLAGS.max_para_length:
        all_documents.append([])

  # Drop short documents (fewer than 8 sentences), which also removes any
  # empty trailing bucket.
  return [x for x in all_documents if len(x) >= 8]
def create_bytes_feature(value):
  """Wrap a bytestring (or an eager tf.Tensor holding one) in a TF Feature."""
  eager_tensor_type = type(tf.constant(0))
  raw = value.numpy() if isinstance(value, eager_tensor_type) else value
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[raw]))
def create_int_feature(values):
  """Wrap an iterable of ints in a TF Example int64 Feature."""
  return tf.train.Feature(
      int64_list=tf.train.Int64List(value=list(values)))
def convert_instance_to_tf_example(tokenizer, sent_tokens, max_sent_length,
                                   max_para_length):
  """Convert a list of strings into a tf.Example.

  Args:
    tokenizer: BERT FullTokenizer used to map tokens to vocabulary ids.
    sent_tokens: list of tokenized sentences (each a list of token strings).
    max_sent_length: every sentence is padded/trimmed to this many ids.
    max_para_length: the paragraph is padded/trimmed to this many sentences.

  Returns:
    A tf.train.Example with a single int64 feature "sents" of length
    max_para_length * max_sent_length.
  """
  input_ids_list = [
      tokenizer.convert_tokens_to_ids(tokens) for tokens in sent_tokens
  ]

  features = collections.OrderedDict()

  # pack or trim sentences to max_sent_length
  # pack paragraph to max_para_length
  sent_tensor = []
  for i in range(max_para_length):
    if i >= len(input_ids_list):
      # Pad the paragraph with all-zero sentences.
      sent_tensor.append([0] * max_sent_length)
    else:
      # Right-pad with max_sent_length zeros, then trim so every sentence
      # ends up exactly max_sent_length ids long (truncating long ones).
      padded_ids = np.pad(
          input_ids_list[i], (0, max_sent_length),
          mode="constant")[:max_sent_length]
      sent_tensor.append(padded_ids)
  # Flatten the (max_para_length, max_sent_length) matrix into one vector.
  sent_tensor = np.ravel(np.stack(sent_tensor))
  features["sents"] = create_int_feature(sent_tensor)
  tf_example = tf.train.Example(features=tf.train.Features(feature=features))
  return tf_example
def preproc_doc(document):
  """Convert document to list of TF Examples for binary order classification.

  Args:
    document: a CCNews article (ie. a list of sentences)

  Returns:
    A list of tfexamples of binary orderings of pairs of sentences in the
    document. The tfexamples are serialized to string to be written directly
    to TFRecord.
  """
  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)

  # Transliterate to ASCII (unidecode) and normalize to unicode text before
  # tokenizing; input lines arrive as UTF-8 bytes.
  document = [
      tokenization.convert_to_unicode(
          unidecode.unidecode(line.decode("utf-8"))) for line in document
  ]

  # Tokenize, dropping empty sentences and single-token sentences.
  sent_tokens = [tokenizer.tokenize(sent) for sent in document if sent]
  sent_tokens = [sent for sent in sent_tokens if len(sent) > 1]
  # Documents with fewer than 8 usable sentences are discarded entirely.
  if len(sent_tokens) < 8:
    return []

  # Convert token lists into ids and add any needed tokens and padding for BERT
  tf_example = convert_instance_to_tf_example(tokenizer, sent_tokens,
                                              FLAGS.max_sent_length,
                                              FLAGS.max_para_length)

  # Serialize TFExample for writing to file.
  tf_examples = [tf_example.SerializeToString()]

  return tf_examples
def ccnews_pipeline():
  """Read CCNews/Wikipedia shard names and build the Beam pipeline.

  Returns:
    A pipeline function taking a Beam root, which reads the train/test
    shards, preprocesses each document and writes sharded TFRecords.
  """
  if FLAGS.dataset == "ccnews":
    data_filename = "ccnews.txt-%05d-of-01000"
    datasize = 1000
    testsize = 100
  else:
    data_filename = "wikipedia.txt-%05d-of-00500"
    datasize = 500
    testsize = 50
  # Train split: the first (datasize - testsize) shards.
  train_files = [
      FLAGS.input_file + data_filename % i for i in range(datasize - testsize)
  ]
  # Test split: the final `testsize` shards, i.e. [datasize - testsize,
  # datasize).  BUG FIX: the upper bound was previously `testsize`, making
  # the range empty (e.g. range(900, 100)) so the test pipeline never read
  # any input files.
  test_files = [
      FLAGS.input_file + data_filename % i
      for i in range(datasize - testsize, datasize)
  ]

  def pipeline(root):
    """Beam pipeline for converting CCNews files to TF Examples."""
    _ = (
        root | "Create test files" >> beam.Create(test_files)
        | "Read test files" >> beam.FlatMap(read_file)
        | "test Shuffle" >> beam.Reshuffle()
        | "Preproc test docs" >> beam.FlatMap(preproc_doc)
        | "record test Shuffle" >> beam.Reshuffle()
        | "Write to test tfrecord" >> beam.io.WriteToTFRecord(
            FLAGS.output_file + ".cc_cpc.test.tfrecord", num_shards=testsize))
    _ = (
        root | "Create train files" >> beam.Create(train_files)
        | "Read train files" >> beam.FlatMap(read_file)
        | "train Shuffle" >> beam.Reshuffle()
        | "Preproc train docs" >> beam.FlatMap(preproc_doc)
        | "record train Shuffle" >> beam.Reshuffle()
        | "Write to train tfrecord" >> beam.io.WriteToTFRecord(
            FLAGS.output_file + ".cc_cpc.train.tfrecord",
            num_shards=datasize - testsize))
    return

  return pipeline
def main(_):
  # The Beam pipeline built by ccnews_pipeline() is executed by an external
  # runner; nothing happens when the script is invoked directly.  The `pass`
  # is required: a comment alone is not a valid function body (the previous
  # comment-only body was a SyntaxError).
  pass


if __name__ == "__main__":
  app.run(main)
| apache-2.0 |
davidharrigan/django | tests/utils_tests/test_jslex.py | 153 | 9837 | # -*- coding: utf-8 -*-
"""Tests for jslex."""
# originally from https://bitbucket.org/ned/jslex
from __future__ import unicode_literals
from django.test import SimpleTestCase
from django.utils.jslex import JsLexer, prepare_js_for_gettext
class JsTokensTest(SimpleTestCase):
    """Tokenizer fixtures: each pair is (javascript source, expected tokens).

    Expected tokens are "<kind> <value>" strings; whitespace tokens are
    filtered out before comparison, and one test method per pair is
    attached dynamically by the module-level loop below this class.
    """
    LEX_CASES = [
        # ids
        ("a ABC $ _ a123", ["id a", "id ABC", "id $", "id _", "id a123"]),
        ("\\u1234 abc\\u0020 \\u0065_\\u0067", ["id \\u1234", "id abc\\u0020", "id \\u0065_\\u0067"]),

        # numbers
        ("123 1.234 0.123e-3 0 1E+40 1e1 .123", [
            "dnum 123", "dnum 1.234", "dnum 0.123e-3", "dnum 0", "dnum 1E+40",
            "dnum 1e1", "dnum .123",
        ]),
        ("0x1 0xabCD 0XABcd", ["hnum 0x1", "hnum 0xabCD", "hnum 0XABcd"]),
        ("010 0377 090", ["onum 010", "onum 0377", "dnum 0", "dnum 90"]),
        ("0xa123ghi", ["hnum 0xa123", "id ghi"]),

        # keywords
        ("function Function FUNCTION", ["keyword function", "id Function", "id FUNCTION"]),
        ("const constructor in inherits", ["keyword const", "id constructor", "keyword in", "id inherits"]),
        ("true true_enough", ["reserved true", "id true_enough"]),

        # strings
        (''' 'hello' "hello" ''', ["string 'hello'", 'string "hello"']),
        (r""" 'don\'t' "don\"t" '"' "'" '\'' "\"" """, [
            r"""string 'don\'t'""", r'''string "don\"t"''', r"""string '"'""",
            r'''string "'"''', r"""string '\''""", r'''string "\""'''
        ]),
        (r'"ƃuıxǝ⅂ ʇdıɹɔsɐʌɐſ\""', [r'string "ƃuıxǝ⅂ ʇdıɹɔsɐʌɐſ\""']),

        # comments
        ("a//b", ["id a", "linecomment //b"]),
        ("/****/a/=2//hello", ["comment /****/", "id a", "punct /=", "dnum 2", "linecomment //hello"]),
        ("/*\n * Header\n */\na=1;", ["comment /*\n * Header\n */", "id a", "punct =", "dnum 1", "punct ;"]),

        # punctuation
        ("a+++b", ["id a", "punct ++", "punct +", "id b"]),

        # regex
        (r"a=/a*/,1", ["id a", "punct =", "regex /a*/", "punct ,", "dnum 1"]),
        (r"a=/a*[^/]+/,1", ["id a", "punct =", "regex /a*[^/]+/", "punct ,", "dnum 1"]),
        (r"a=/a*\[^/,1", ["id a", "punct =", r"regex /a*\[^/", "punct ,", "dnum 1"]),
        (r"a=/\//,1", ["id a", "punct =", r"regex /\//", "punct ,", "dnum 1"]),

        # next two are from http://www.mozilla.org/js/language/js20-2002-04/rationale/syntax.html#regular-expressions
        ("""for (var x = a in foo && "</x>" || mot ? z:/x:3;x<5;y</g/i) {xyz(x++);}""",
         ["keyword for", "punct (", "keyword var", "id x", "punct =", "id a", "keyword in",
          "id foo", "punct &&", 'string "</x>"', "punct ||", "id mot", "punct ?", "id z",
          "punct :", "regex /x:3;x<5;y</g", "punct /", "id i", "punct )", "punct {",
          "id xyz", "punct (", "id x", "punct ++", "punct )", "punct ;", "punct }"]),

        ("""for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y</g/i) {xyz(x++);}""",
         ["keyword for", "punct (", "keyword var", "id x", "punct =", "id a", "keyword in",
          "id foo", "punct &&", 'string "</x>"', "punct ||", "id mot", "punct ?", "id z",
          "punct /", "id x", "punct :", "dnum 3", "punct ;", "id x", "punct <", "dnum 5",
          "punct ;", "id y", "punct <", "regex /g/i", "punct )", "punct {",
          "id xyz", "punct (", "id x", "punct ++", "punct )", "punct ;", "punct }"]),

        # Various "illegal" regexes that are valid according to the std.
        (r"""/????/, /++++/, /[----]/ """, ["regex /????/", "punct ,", "regex /++++/", "punct ,", "regex /[----]/"]),

        # Stress cases from http://stackoverflow.com/questions/5533925/what-javascript-constructs-does-jslex-incorrectly-lex/5573409#5573409  # NOQA
        (r"""/\[/""", [r"""regex /\[/"""]),
        (r"""/[i]/""", [r"""regex /[i]/"""]),
        (r"""/[\]]/""", [r"""regex /[\]]/"""]),
        (r"""/a[\]]/""", [r"""regex /a[\]]/"""]),
        (r"""/a[\]]b/""", [r"""regex /a[\]]b/"""]),
        (r"""/[\]/]/gi""", [r"""regex /[\]/]/gi"""]),
        (r"""/\[[^\]]+\]/gi""", [r"""regex /\[[^\]]+\]/gi"""]),
        ("""
            rexl.re = {
            NAME: /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/,
            UNQUOTED_LITERAL: /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/,
            QUOTED_LITERAL: /^'(?:[^']|'')*'/,
            NUMERIC_LITERAL: /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/,
            SYMBOL: /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/
            };
            """,  # NOQA
         ["id rexl", "punct .", "id re", "punct =", "punct {",
          "id NAME", "punct :", r"""regex /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/""", "punct ,",
          "id UNQUOTED_LITERAL", "punct :", r"""regex /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/""",
          "punct ,",
          "id QUOTED_LITERAL", "punct :", r"""regex /^'(?:[^']|'')*'/""", "punct ,",
          "id NUMERIC_LITERAL", "punct :", r"""regex /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/""", "punct ,",
          "id SYMBOL", "punct :", r"""regex /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/""",  # NOQA
          "punct }", "punct ;"
          ]),
        ("""
            rexl.re = {
            NAME: /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/,
            UNQUOTED_LITERAL: /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/,
            QUOTED_LITERAL: /^'(?:[^']|'')*'/,
            NUMERIC_LITERAL: /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/,
            SYMBOL: /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/
            };
            str = '"';
            """,  # NOQA
         ["id rexl", "punct .", "id re", "punct =", "punct {",
          "id NAME", "punct :", r"""regex /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/""", "punct ,",
          "id UNQUOTED_LITERAL", "punct :", r"""regex /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/""",
          "punct ,",
          "id QUOTED_LITERAL", "punct :", r"""regex /^'(?:[^']|'')*'/""", "punct ,",
          "id NUMERIC_LITERAL", "punct :", r"""regex /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/""", "punct ,",
          "id SYMBOL", "punct :", r"""regex /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/""",  # NOQA
          "punct }", "punct ;",
          "id str", "punct =", """string '"'""", "punct ;",
          ]),
        (r""" this._js = "e.str(\"" + this.value.replace(/\\/g, "\\\\").replace(/"/g, "\\\"") + "\")"; """,
         ["keyword this", "punct .", "id _js", "punct =", r'''string "e.str(\""''', "punct +", "keyword this",
          "punct .", "id value", "punct .", "id replace", "punct (", r"regex /\\/g", "punct ,", r'string "\\\\"',
          "punct )",
          "punct .", "id replace", "punct (", r'regex /"/g', "punct ,", r'string "\\\""', "punct )", "punct +",
          r'string "\")"', "punct ;"]),
    ]
def make_function(input, toks):
    # Build one test method that lexes `input` and compares the
    # non-whitespace tokens against the expected `toks` list.
    def test_func(self):
        lexer = JsLexer()
        result = ["%s %s" % (name, tok) for name, tok in lexer.lex(input) if name != 'ws']
        self.assertListEqual(result, toks)
    return test_func


# Attach one generated test method per LEX_CASES pair to JsTokensTest.
for i, (input, toks) in enumerate(JsTokensTest.LEX_CASES):
    setattr(JsTokensTest, "test_case_%d" % i, make_function(input, toks))
GETTEXT_CASES = (
(
r"""
a = 1; /* /[0-9]+/ */
b = 0x2a0b / 1; // /[0-9]+/
c = 3;
""",
r"""
a = 1; /* /[0-9]+/ */
b = 0x2a0b / 1; // /[0-9]+/
c = 3;
"""
), (
r"""
a = 1.234e-5;
/*
* /[0-9+/
*/
b = .0123;
""",
r"""
a = 1.234e-5;
/*
* /[0-9+/
*/
b = .0123;
"""
), (
r"""
x = y / z;
alert(gettext("hello"));
x /= 3;
""",
r"""
x = y / z;
alert(gettext("hello"));
x /= 3;
"""
), (
r"""
s = "Hello \"th/foo/ere\"";
s = 'He\x23llo \'th/foo/ere\'';
s = 'slash quote \", just quote "';
""",
r"""
s = "Hello \"th/foo/ere\"";
s = "He\x23llo \'th/foo/ere\'";
s = "slash quote \", just quote \"";
"""
), (
r"""
s = "Line continuation\
continued /hello/ still the string";/hello/;
""",
r"""
s = "Line continuation\
continued /hello/ still the string";"REGEX";
"""
), (
r"""
var regex = /pattern/;
var regex2 = /matter/gm;
var regex3 = /[*/]+/gm.foo("hey");
""",
r"""
var regex = "REGEX";
var regex2 = "REGEX";
var regex3 = "REGEX".foo("hey");
"""
), (
r"""
for (var x = a in foo && "</x>" || mot ? z:/x:3;x<5;y</g/i) {xyz(x++);}
for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y</g/i) {xyz(x++);}
""",
r"""
for (var x = a in foo && "</x>" || mot ? z:"REGEX"/i) {xyz(x++);}
for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y<"REGEX") {xyz(x++);}
"""
), (
"""
\\u1234xyz = gettext('Hello there');
""", r"""
Uu1234xyz = gettext("Hello there");
"""
)
)
class JsToCForGettextTest(SimpleTestCase):
    # Test methods are attached dynamically below, one per GETTEXT_CASES pair.
    pass


def make_function(js, c):
    # Build one test method asserting prepare_js_for_gettext(js) == c.
    def test_func(self):
        self.assertMultiLineEqual(prepare_js_for_gettext(js), c)
    return test_func


for i, pair in enumerate(GETTEXT_CASES):
    setattr(JsToCForGettextTest, "test_case_%d" % i, make_function(*pair))
| bsd-3-clause |
nirs/vdsm | lib/vdsm/momIF.py | 2 | 5451 | #
# Copyright (C) 2012, IBM Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
from __future__ import division
import logging
import socket
import threading
import six
from vdsm.common.units import MiB
from vdsm.config import config
from vdsm import throttledlog
from vdsm.common.cpuarch import PAGE_SIZE_BYTES
from vdsm.common.time import monotonic_time
from vdsm.common import unixrpc
# Register throttling for these warning keys (interval 100; exact semantics
# are defined in vdsm.throttledlog) so a missing MOM does not flood the log.
throttledlog.throttle('MomNotAvailable', 100)
throttledlog.throttle('MomNotAvailableKSM', 100)

# Timeout, in seconds, when connecting to MOM over its unix socket.
CONNECTION_TIMEOUT_SEC = 2
# Seconds a failed ThrottledClient stays inactive before calls are retried.
THROTTLE_INTERVAL = 5
class ThrottledClient(object):
    """
    Proxy around an RPC client that throttles calls after a failure: once a
    forwarded call raises, subsequent calls raise ``Inactive`` (instead of
    hitting the unreachable server) until THROTTLE_INTERVAL seconds have
    elapsed since the failure.
    """

    class Inactive(Exception):
        """Raised instead of forwarding a call while the client is throttled."""
        pass

    def __init__(self, client):
        self._client = client
        # _active is False while calls are throttled; _last_active holds the
        # monotonic timestamp of creation or of the most recent failure.
        self._active = True
        self._last_active = monotonic_time()
        self._lock = threading.Lock()

    def __getattr__(self, name):
        # Only called for attributes not found on the proxy itself; every
        # such attribute is assumed to be a remote method and is wrapped.
        def method(*args, **kwargs):
            now = monotonic_time()
            # Lazily re-activate once the back-off interval has elapsed.
            # The condition is re-checked under the lock (double-checked
            # pattern) so concurrent callers agree on the state flip.
            if not self._active and \
                    self._last_active + THROTTLE_INTERVAL < now:
                with self._lock:
                    if self._last_active + THROTTLE_INTERVAL < now:
                        self._active = True
            if not self._active:
                # Still inside the back-off window.
                raise ThrottledClient.Inactive()
            client_method = getattr(self._client, name)
            try:
                return client_method(*args, **kwargs)
            except Exception:
                # Any failure deactivates the client and restarts the
                # back-off window; the original exception still propagates.
                with self._lock:
                    self._active = False
                    self._last_active = monotonic_time()
                raise
        return method
class MomClient(object):
    """
    Client for the MOM (Memory Overcommitment Manager) daemon, reached over
    a unix-socket XML-RPC connection wrapped in a ThrottledClient.  All
    public methods degrade gracefully when MOM is unreachable.
    """

    def __init__(self, sock_path):
        self.log = logging.getLogger("MOM")
        self.log.info("Preparing MOM interface")
        self._sock_path = sock_path
        self._mom = None
        # Cache of tuning variables pushed via setPolicyParameters().
        self._policy = {}

    def connect(self):
        """Create the throttled RPC client once; later calls are no-ops."""
        if self._mom is not None:
            return
        self.log.info("MOM: Using named unix socket: %s", self._sock_path)
        rpc_client = unixrpc.UnixXmlRpcClient(
            self._sock_path,
            CONNECTION_TIMEOUT_SEC
        )
        self._mom = ThrottledClient(rpc_client)

    def getKsmStats(self):
        """
        Get information about KSM and convert memory data from page
        based values to MiB.
        """
        result = {}
        try:
            host_stats = self._mom.getStatistics()['host']
            result['ksmState'] = bool(host_stats['ksm_run'])
            result['ksmPages'] = host_stats['ksm_pages_to_scan']
            result['ksmMergeAcrossNodes'] = bool(
                host_stats['ksm_merge_across_nodes'])
            result['memShared'] = (
                host_stats['ksm_pages_sharing'] * PAGE_SIZE_BYTES // MiB)
            result['ksmCpu'] = host_stats['ksmd_cpu_usage']
        except (ThrottledClient.Inactive, AttributeError, socket.error) as error:
            throttledlog.warning(
                'MomNotAvailableKSM',
                "MOM not available, KSM stats will be missing. Error: %s",
                str(error)
            )
        return result

    def setPolicy(self, policyStr):
        """Push a complete policy to MOM; failures are logged, not raised."""
        try:
            # mom.setPolicy will raise an exception on failure.
            self._mom.setPolicy(policyStr)
        except (ThrottledClient.Inactive, AttributeError, socket.error) as error:
            self.log.warning(
                "MOM not available, Policy could not be set. Error: %s",
                str(error)
            )

    def setPolicyParameters(self, key_value_store):
        """
        Merge key_value_store into the cached tuning variables and push them
        to MOM as a named policy; failures are logged, not raised.
        """
        # mom.setNamedPolicy will raise an exception on failure.
        self._policy.update(key_value_store)
        # Render the cached variables in MoM policy language.  Python bool
        # values are defined in 00-defines.policy so need no conversion here.
        policy_lines = ["(set %s %r)" % (key, value)
                        for key, value in six.iteritems(self._policy)]
        policy_string = "\n".join(policy_lines)
        try:
            self._mom.setNamedPolicy(config.get("mom", "tuning_policy"),
                                     policy_string)
        except (ThrottledClient.Inactive, AttributeError, socket.error) as error:
            self.log.warning(
                "MOM not available, Policy could not be set. Error: %s",
                str(error)
            )

    def getStatus(self):
        """Return 'active' when MOM answers a ping, 'inactive' otherwise."""
        try:
            return 'active' if self._mom.ping() else 'inactive'
        except (ThrottledClient.Inactive, AttributeError, socket.error) as error:
            throttledlog.warning(
                'MomNotAvailable',
                "MOM not available. Error: %s",
                str(error)
            )
            return 'inactive'
| gpl-2.0 |
kennethlove/django | django/core/files/uploadedfile.py | 8 | 4156 | """
Classes representing uploaded files.
"""
import errno
import os
from io import BytesIO

from django.conf import settings
from django.core.files import temp as tempfile
from django.core.files.base import File
from django.utils.encoding import smart_str
__all__ = ('UploadedFile', 'TemporaryUploadedFile', 'InMemoryUploadedFile',
'SimpleUploadedFile')
class UploadedFile(File):
    """
    An abstract uploaded file (``TemporaryUploadedFile`` and
    ``InMemoryUploadedFile`` are the built-in concrete subclasses).

    An ``UploadedFile`` object behaves somewhat like a file object and
    represents some file data that the user submitted with a form.
    """
    DEFAULT_CHUNK_SIZE = 64 * 2 ** 10

    def __init__(self, file=None, name=None, content_type=None, size=None, charset=None):
        super(UploadedFile, self).__init__(file, name)
        self.size = size
        self.content_type = content_type
        self.charset = charset

    def __repr__(self):
        description = "<%s: %s (%s)>" % (
            self.__class__.__name__, self.name, self.content_type)
        return smart_str(description)

    def _get_name(self):
        return self._name

    def _set_name(self, name):
        # Sanitize the file name so that it can't be dangerous.
        if name is None:
            self._name = None
            return
        # Keep only the basename -- any directory component is dangerous.
        name = os.path.basename(name)
        # File names longer than 255 characters can cause problems on
        # older OSes, so truncate the stem while preserving the extension.
        if len(name) > 255:
            stem, ext = os.path.splitext(name)
            name = stem[:255 - len(ext)] + ext
        self._name = name

    name = property(_get_name, _set_name)
class TemporaryUploadedFile(UploadedFile):
    """
    A file uploaded to a temporary location (i.e. stream-to-disk).
    """
    def __init__(self, name, content_type, size, charset):
        """Create the backing NamedTemporaryFile, honouring the optional
        FILE_UPLOAD_TEMP_DIR setting."""
        if settings.FILE_UPLOAD_TEMP_DIR:
            file = tempfile.NamedTemporaryFile(suffix='.upload',
                                               dir=settings.FILE_UPLOAD_TEMP_DIR)
        else:
            file = tempfile.NamedTemporaryFile(suffix='.upload')
        super(TemporaryUploadedFile, self).__init__(file, name, content_type, size, charset)

    def temporary_file_path(self):
        """
        Returns the full path of this file.
        """
        return self.file.name

    def close(self):
        """
        Close the underlying temporary file.

        A missing file (ENOENT) is ignored: it means the file was moved or
        deleted before the tempfile could unlink it.  Closing still sets
        ``self.file.close_called`` and calls ``self.file.file.close()``
        before the exception is raised, so nothing is leaked.
        """
        try:
            return self.file.close()
        except OSError as e:
            # Was a bare magic number (2); errno.ENOENT states the intent.
            if e.errno != errno.ENOENT:
                raise
class InMemoryUploadedFile(UploadedFile):
    """
    A file uploaded into memory (i.e. stream-to-memory).
    """
    def __init__(self, file, field_name, name, content_type, size, charset):
        super(InMemoryUploadedFile, self).__init__(file, name, content_type, size, charset)
        self.field_name = field_name

    def open(self, mode=None):
        # "Reopening" just rewinds the in-memory stream; ``mode`` is ignored.
        self.file.seek(0)

    def close(self):
        # Nothing to release for an in-memory file.
        pass

    def multiple_chunks(self, chunk_size=None):
        # Since it's in memory, we'll never have multiple chunks.
        return False

    def chunks(self, chunk_size=None):
        # The whole payload fits in memory, so yield it as a single chunk;
        # ``chunk_size`` is ignored.
        self.file.seek(0)
        yield self.read()
class SimpleUploadedFile(InMemoryUploadedFile):
    """
    A simple representation of a file, which just has content, size, and a name.
    """
    def __init__(self, name, content, content_type='text/plain'):
        data = content or b''
        super(SimpleUploadedFile, self).__init__(
            BytesIO(data), None, name, content_type, len(data), None)

    @classmethod
    def from_dict(cls, file_dict):
        """
        Creates a SimpleUploadedFile object from a dictionary with the
        following keys:
           - filename
           - content-type
           - content
        """
        return cls(file_dict['filename'],
                   file_dict['content'],
                   file_dict.get('content-type', 'text/plain'))
| bsd-3-clause |
adiseshu219/visualisation | django-admin-sortable/adminsortable/templatetags/adminsortable_tags.py | 5 | 1394 | from django import template
# Tag library registry; Django picks this up when the templatetag module
# is loaded with {% load %}.
register = template.Library()
@register.simple_tag(takes_context=True)
def render_sortable_objects(context, objects,
        sortable_objects_template='adminsortable/shared/objects.html'):
    """Render ``objects`` with the (overridable) sortable objects template."""
    context.update({'objects': objects})
    return template.loader.get_template(sortable_objects_template).render(context)
@register.simple_tag(takes_context=True)
def render_nested_sortable_objects(context, objects, group_expression,
        sortable_nested_objects_template='adminsortable/shared/nested_objects.html'):
    """Render ``objects`` grouped by ``group_expression`` with the nested template."""
    context.update({'objects': objects, 'group_expression': group_expression})
    return template.loader.get_template(
        sortable_nested_objects_template).render(context)
@register.simple_tag(takes_context=True)
def render_list_items(context, list_objects,
        sortable_list_items_template='adminsortable/shared/list_items.html'):
    """Render ``list_objects`` with the sortable list-items template."""
    context.update({'list_objects': list_objects})
    return template.loader.get_template(sortable_list_items_template).render(context)
@register.simple_tag(takes_context=True)
def render_object_rep(context, obj,
        sortable_object_rep_template='adminsortable/shared/object_rep.html'):
    """Render the representation of a single sortable ``obj``."""
    context.update({'object': obj})
    return template.loader.get_template(sortable_object_rep_template).render(context)
| agpl-3.0 |
JCBarahona/edX | cms/djangoapps/contentstore/tests/test_users_default_role.py | 115 | 5183 | """
Unit tests for checking default forum role "Student" of a user when he creates a course or
after deleting it creates same course again
"""
from contentstore.tests.utils import AjaxEnabledTestClient
from contentstore.utils import delete_course_and_groups, reverse_url
from courseware.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from student.models import CourseEnrollment
class TestUsersDefaultRole(ModuleStoreTestCase):
    """
    Verifies enrollment and the default "Student" forum role of a logged-in
    user across course deletion and re-creation.
    """

    def setUp(self):
        """Create a staff user, log in, and create a course for the tests."""
        super(TestUsersDefaultRole, self).setUp()
        # A staff user is needed to create courses through the view handler.
        self.user = UserFactory(is_staff=True)
        self.client = AjaxEnabledTestClient()
        self.client.login(username=self.user.username, password='test')
        # Create the course through the course_handler view, like a real client.
        self.course_key = self.store.make_course_key('Org_1', 'Course_1', 'Run_1')
        self._create_course_with_given_location(self.course_key)

    def _create_course_with_given_location(self, course_key):
        """POST to the course_handler view to create a course at course_key."""
        payload = {
            'org': course_key.org,
            'number': course_key.course,
            'display_name': 'test course',
            'run': course_key.run,
        }
        return self.client.ajax_post(reverse_url('course_handler'), payload)

    def tearDown(self):
        """Log the client out before the base class cleans up."""
        self.client.logout()
        super(TestUsersDefaultRole, self).tearDown()

    def test_user_forum_default_role_on_course_deletion(self):
        """
        Deleting a course must leave its creator enrolled and must keep the
        default "Student" forum role he received when creating the course.
        """
        # Enrollment and the default "Student" forum role exist after creation.
        self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course_key))
        self.assertTrue(self.user.roles.filter(name="Student", course_id=self.course_key))
        delete_course_and_groups(self.course_key, self.user.id)
        # Both must survive the deletion of the course.
        self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course_key))
        self.assertTrue(self.user.roles.filter(name="Student", course_id=self.course_key))

    def test_user_role_on_course_recreate(self):
        """
        Re-creating a course after deleting it must give the creator his
        default "Student" forum role for that course again.
        """
        # Enrollment and the default "Student" forum role exist after creation.
        self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course_key))
        self.assertTrue(self.user.roles.filter(name="Student", course_id=self.course_key))
        # Delete the course, then create it again with the same user.
        delete_course_and_groups(self.course_key, self.user.id)
        response = self._create_course_with_given_location(self.course_key)
        self.assertEqual(response.status_code, 200)
        # Enrollment and the default "Student" forum role are back.
        self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course_key))
        self.assertTrue(self.user.roles.filter(name="Student", course_id=self.course_key))

    def test_user_role_on_course_recreate_with_change_name_case(self):
        """
        Re-creating the course with a different name case must still grant
        the creator the default "Student" forum role for the new course key.
        """
        # Enrollment exists for the originally-created course.
        self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course_key))
        delete_course_and_groups(self.course_key, self.user.id)
        # Recreate the same course, but with the course part upper-cased.
        new_course_key = self.course_key.replace(course=self.course_key.course.upper())
        response = self._create_course_with_given_location(new_course_key)
        self.assertEqual(response.status_code, 200)
        # The default "Student" forum role exists for the re-cased course key.
        self.assertTrue(
            self.user.roles.filter(name="Student", course_id=new_course_key)
        )
        # Disabled due to case-sensitive test db (sqlite3)
        # # check that there user has only one "Student" forum role (with new updated course_id)
        # self.assertEqual(self.user.roles.filter(name='Student').count(), 1)
        # self.assertEqual(self.user.roles.filter(name='Student')[0].course_id, new_course_location.course_key)
| agpl-3.0 |
Onager/plaso | plaso/parsers/sqlite_plugins/skype.py | 1 | 37870 | # -*- coding: utf-8 -*-
"""SQLite parser plugin for Skype database files."""
from dfdatetime import posix_time as dfdatetime_posix_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
class SkypeChatEventData(events.EventData):
  """Skype chat event data.

  Attributes:
    from_account (str): from display name and the author.
    text (str): body XML.
    title (str): title.
    to_account (str): accounts, excluding the author, of the conversation.
  """

  DATA_TYPE = 'skype:event:chat'

  def __init__(self):
    """Initializes event data."""
    super(SkypeChatEventData, self).__init__(data_type=self.DATA_TYPE)
    # Attributes default to None and are filled in by the plugin's parse
    # callbacks (presumably ParseChat; see SkypePlugin.QUERIES).
    self.from_account = None
    self.text = None
    self.title = None
    self.to_account = None
class SkypeAccountEventData(events.EventData):
  """Skype account event data.

  Attributes:
    country (str): home country of the account holder.
    display_name (str): display name of the account holder.
    email (str): registered email address of the account holder.
    offset (int): identifier ('id' column) of the row in the Accounts table.
    username (str): full name of the Skype account holder and display name.
  """

  DATA_TYPE = 'skype:event:account'

  def __init__(self):
    """Initialize event data."""
    super(SkypeAccountEventData, self).__init__(data_type=self.DATA_TYPE)
    self.country = None
    self.display_name = None
    self.email = None
    # Set to the Accounts table 'id' value in ParseAccountInformation.
    self.offset = None
    self.username = None
class SkypeSMSEventData(events.EventData):
  """Skype SMS event data.

  Attributes:
    number (str): phone number where the SMS was sent.
    text (str): text (SMS body) that was sent.
  """

  DATA_TYPE = 'skype:event:sms'

  def __init__(self):
    """Initialize event data."""
    super(SkypeSMSEventData, self).__init__(data_type=self.DATA_TYPE)
    # Attributes default to None and are filled in by the plugin's parse
    # callbacks (presumably ParseSMS; see SkypePlugin.QUERIES).
    self.number = None
    self.text = None
class SkypeCallEventData(events.EventData):
  """Skype call event data.

  Attributes:
    call_type (str): call type, such as: WAITING, STARTED, FINISHED.
    dst_call (str): account which received the call.
    src_call (str): account which started the call.
    user_start_call (bool): True if the owner account started the call.
    video_conference (bool): True if the call was a video conference.
  """

  DATA_TYPE = 'skype:event:call'

  def __init__(self):
    """Initialize event data."""
    super(SkypeCallEventData, self).__init__(data_type=self.DATA_TYPE)
    # Attributes default to None and are filled in by the plugin's parse
    # callbacks (presumably ParseCall; see SkypePlugin.QUERIES).
    self.call_type = None
    self.dst_call = None
    self.src_call = None
    self.user_start_call = None
    self.video_conference = None
class SkypeTransferFileEventData(events.EventData):
  """Skype file transfer event data.

  Attributes:
    action_type (str): action type such as: "GETSOLICITUDE", "SENDSOLICITUDE",
        "ACCEPTED" or "FINISHED".
    destination (str): account that received the file.
    source (str): account that sent the file.
    transferred_filename (str): name of the file transferred.
    transferred_filepath (str): path of the file transferred.
    transferred_filesize (int): size of the file transferred.
  """

  DATA_TYPE = 'skype:event:transferfile'

  def __init__(self):
    """Initialize event data."""
    super(SkypeTransferFileEventData, self).__init__(data_type=self.DATA_TYPE)
    # Attributes default to None and are filled in by the plugin's parse
    # callbacks (presumably ParseFileTransfer; see SkypePlugin.QUERIES).
    self.action_type = None
    self.destination = None
    self.source = None
    self.transferred_filename = None
    self.transferred_filepath = None
    self.transferred_filesize = None
class SkypePlugin(interface.SQLitePlugin):
"""SQLite parser plugin for Skype database files."""
NAME = 'skype'
DATA_FORMAT = 'Skype SQLite database (main.db) file'
REQUIRED_STRUCTURE = {
'Accounts': frozenset([
'id', 'fullname', 'given_displayname', 'emails', 'country',
'profile_timestamp', 'authreq_timestamp', 'lastonline_timestamp',
'mood_timestamp', 'sent_authrequest_time']),
'Calls': frozenset([
'id', 'is_incoming', 'begin_timestamp']),
'CallMembers': frozenset([
'guid', 'call_db_id', 'videostatus', 'start_timestamp',
'call_duration']),
'Chats': frozenset([
'id', 'participants', 'friendlyname', 'dialog_partner', 'name']),
'Messages': frozenset([
'author', 'from_dispname', 'body_xml', 'timestamp', 'chatname']),
'SMSes': frozenset([
'id', 'target_numbers', 'timestamp', 'body']),
'Transfers': frozenset([
'parent_id', 'partner_handle', 'partner_dispname', 'pk_id', 'id',
'offer_send_list', 'starttime', 'accepttime', 'finishtime',
'filepath', 'filename', 'filesize', 'status'])}
# Queries for building cache.
QUERY_DEST_FROM_TRANSFER = (
'SELECT parent_id, partner_handle AS skypeid, '
'partner_dispname AS skypename FROM transfers')
QUERY_SOURCE_FROM_TRANSFER = (
'SELECT pk_id, partner_handle AS skypeid, '
'partner_dispname AS skypename FROM transfers')
QUERIES = [
(('SELECT c.id, c.participants, c.friendlyname AS title, '
'm.author AS author, m.from_dispname AS from_displayname, '
'm.body_xml, m.timestamp, c.dialog_partner FROM Chats c, Messages m '
'WHERE c.name = m.chatname'), 'ParseChat'),
(('SELECT id, fullname, given_displayname, emails, '
'country, profile_timestamp, authreq_timestamp, '
'lastonline_timestamp, mood_timestamp, sent_authrequest_time, '
'lastused_timestamp FROM Accounts'), 'ParseAccountInformation'),
(('SELECT id, target_numbers AS dstnum_sms, timestamp AS time_sms, '
'body AS msg_sms FROM SMSes'), 'ParseSMS'),
(('SELECT id, partner_handle, partner_dispname, offer_send_list, '
'starttime, accepttime, finishtime, filepath, filename, filesize, '
'status, parent_id, pk_id FROM Transfers'), 'ParseFileTransfer'),
(('SELECT c.id, cm.guid, c.is_incoming, '
'cm.call_db_id, cm.videostatus, c.begin_timestamp AS try_call, '
'cm.start_timestamp AS accept_call, cm.call_duration '
'FROM Calls c, CallMembers cm '
'WHERE c.id = cm.call_db_id;'), 'ParseCall')]
SCHEMAS = [{
'Accounts': (
'CREATE TABLE Accounts (id INTEGER NOT NULL PRIMARY KEY, '
'is_permanent INTEGER, status INTEGER, pwdchangestatus INTEGER, '
'logoutreason INTEGER, commitstatus INTEGER, suggested_skypename '
'TEXT, skypeout_balance_currency TEXT, skypeout_balance INTEGER, '
'skypeout_precision INTEGER, skypein_numbers TEXT, subscriptions '
'TEXT, cblsyncstatus INTEGER, offline_callforward TEXT, chat_policy '
'INTEGER, skype_call_policy INTEGER, pstn_call_policy INTEGER, '
'avatar_policy INTEGER, buddycount_policy INTEGER, timezone_policy '
'INTEGER, webpresence_policy INTEGER, phonenumbers_policy INTEGER, '
'voicemail_policy INTEGER, authrequest_policy INTEGER, ad_policy '
'INTEGER, partner_optedout TEXT, service_provider_info TEXT, '
'registration_timestamp INTEGER, nr_of_other_instances INTEGER, '
'partner_channel_status TEXT, flamingo_xmpp_status INTEGER, '
'federated_presence_policy INTEGER, liveid_membername TEXT, '
'roaming_history_enabled INTEGER, cobrand_id INTEGER, '
'owner_under_legal_age INTEGER, type INTEGER, skypename TEXT, '
'pstnnumber TEXT, fullname TEXT, birthday INTEGER, gender INTEGER, '
'languages TEXT, country TEXT, province TEXT, city TEXT, phone_home '
'TEXT, phone_office TEXT, phone_mobile TEXT, emails TEXT, homepage '
'TEXT, about TEXT, profile_timestamp INTEGER, received_authrequest '
'TEXT, displayname TEXT, refreshing INTEGER, given_authlevel '
'INTEGER, aliases TEXT, authreq_timestamp INTEGER, mood_text TEXT, '
'timezone INTEGER, nrof_authed_buddies INTEGER, ipcountry TEXT, '
'given_displayname TEXT, availability INTEGER, lastonline_timestamp '
'INTEGER, capabilities BLOB, avatar_image BLOB, assigned_speeddial '
'TEXT, lastused_timestamp INTEGER, authrequest_count INTEGER, '
'assigned_comment TEXT, alertstring TEXT, avatar_timestamp INTEGER, '
'mood_timestamp INTEGER, rich_mood_text TEXT, synced_email BLOB, '
'set_availability INTEGER, options_change_future BLOB, '
'cbl_profile_blob BLOB, authorized_time INTEGER, sent_authrequest '
'TEXT, sent_authrequest_time INTEGER, sent_authrequest_serial '
'INTEGER, buddyblob BLOB, cbl_future BLOB, node_capabilities '
'INTEGER, node_capabilities_and INTEGER, revoked_auth INTEGER, '
'added_in_shared_group INTEGER, in_shared_group INTEGER, '
'authreq_history BLOB, profile_attachments BLOB, stack_version '
'INTEGER, offline_authreq_id INTEGER, verified_email BLOB, '
'verified_company BLOB, uses_jcs INTEGER)'),
'Alerts': (
'CREATE TABLE Alerts (id INTEGER NOT NULL PRIMARY KEY, is_permanent '
'INTEGER, timestamp INTEGER, partner_name TEXT, is_unseen INTEGER, '
'partner_id INTEGER, partner_event TEXT, partner_history TEXT, '
'partner_header TEXT, partner_logo TEXT, meta_expiry INTEGER, '
'message_header_caption TEXT, message_header_title TEXT, '
'message_header_subject TEXT, message_header_cancel TEXT, '
'message_header_later TEXT, message_content TEXT, message_footer '
'TEXT, message_button_caption TEXT, message_button_uri TEXT, '
'message_type INTEGER, window_size INTEGER, chatmsg_guid BLOB, '
'notification_id INTEGER, event_flags INTEGER, '
'extprop_hide_from_history INTEGER)'),
'AppSchemaVersion': (
'CREATE TABLE AppSchemaVersion (ClientVersion TEXT NOT NULL, '
'SQLiteSchemaVersion INTEGER NOT NULL, SchemaUpdateType INTEGER NOT '
'NULL)'),
'CallMembers': (
'CREATE TABLE CallMembers (id INTEGER NOT NULL PRIMARY KEY, '
'is_permanent INTEGER, identity TEXT, dispname TEXT, languages '
'TEXT, call_duration INTEGER, price_per_minute INTEGER, '
'price_precision INTEGER, price_currency TEXT, payment_category '
'TEXT, type INTEGER, status INTEGER, failurereason INTEGER, '
'sounderror_code INTEGER, soundlevel INTEGER, pstn_statustext TEXT, '
'pstn_feedback TEXT, forward_targets TEXT, forwarded_by TEXT, '
'debuginfo TEXT, videostatus INTEGER, target_identity TEXT, '
'mike_status INTEGER, is_read_only INTEGER, quality_status INTEGER, '
'call_name TEXT, transfer_status INTEGER, transfer_active INTEGER, '
'transferred_by TEXT, transferred_to TEXT, guid TEXT, '
'next_redial_time INTEGER, nrof_redials_done INTEGER, '
'nrof_redials_left INTEGER, transfer_topic TEXT, real_identity '
'TEXT, start_timestamp INTEGER, is_conference INTEGER, '
'quality_problems TEXT, identity_type INTEGER, country TEXT, '
'creation_timestamp INTEGER, stats_xml TEXT, '
'is_premium_video_sponsor INTEGER, is_multiparty_video_capable '
'INTEGER, recovery_in_progress INTEGER, nonse_word TEXT, '
'nr_of_delivered_push_notifications INTEGER, call_session_guid '
'TEXT, version_string TEXT, pk_status INTEGER, call_db_id INTEGER, '
'prime_status INTEGER)'),
'Calls': (
'CREATE TABLE Calls (id INTEGER NOT NULL PRIMARY KEY, is_permanent '
'INTEGER, begin_timestamp INTEGER, topic TEXT, is_muted INTEGER, '
'is_unseen_missed INTEGER, host_identity TEXT, mike_status INTEGER, '
'duration INTEGER, soundlevel INTEGER, access_token TEXT, '
'active_members INTEGER, is_active INTEGER, name TEXT, '
'video_disabled INTEGER, joined_existing INTEGER, server_identity '
'TEXT, vaa_input_status INTEGER, is_incoming INTEGER, is_conference '
'INTEGER, is_on_hold INTEGER, start_timestamp INTEGER, '
'quality_problems TEXT, current_video_audience TEXT, '
'premium_video_status INTEGER, premium_video_is_grace_period '
'INTEGER, is_premium_video_sponsor INTEGER, '
'premium_video_sponsor_list TEXT, old_members BLOB, partner_handle '
'TEXT, partner_dispname TEXT, type INTEGER, status INTEGER, '
'failurereason INTEGER, failurecode INTEGER, pstn_number TEXT, '
'old_duration INTEGER, conf_participants BLOB, pstn_status TEXT, '
'members BLOB, conv_dbid INTEGER)'),
'ChatMembers': (
'CREATE TABLE ChatMembers (id INTEGER NOT NULL PRIMARY KEY, '
'is_permanent INTEGER, chatname TEXT, identity TEXT, role INTEGER, '
'is_active INTEGER, cur_activities INTEGER, adder TEXT)'),
'Chats': (
'CREATE TABLE Chats (id INTEGER NOT NULL PRIMARY KEY, is_permanent '
'INTEGER, name TEXT, options INTEGER, friendlyname TEXT, '
'description TEXT, timestamp INTEGER, activity_timestamp INTEGER, '
'dialog_partner TEXT, adder TEXT, type INTEGER, mystatus INTEGER, '
'myrole INTEGER, posters TEXT, participants TEXT, applicants TEXT, '
'banned_users TEXT, name_text TEXT, topic TEXT, topic_xml TEXT, '
'guidelines TEXT, picture BLOB, alertstring TEXT, is_bookmarked '
'INTEGER, passwordhint TEXT, unconsumed_suppressed_msg INTEGER, '
'unconsumed_normal_msg INTEGER, unconsumed_elevated_msg INTEGER, '
'unconsumed_msg_voice INTEGER, activemembers TEXT, state_data BLOB, '
'lifesigns INTEGER, last_change INTEGER, first_unread_message '
'INTEGER, pk_type INTEGER, dbpath TEXT, split_friendlyname TEXT, '
'conv_dbid INTEGER)'),
'ContactGroups': (
'CREATE TABLE ContactGroups (id INTEGER NOT NULL PRIMARY KEY, '
'is_permanent INTEGER, type INTEGER, custom_group_id INTEGER, '
'given_displayname TEXT, nrofcontacts INTEGER, nrofcontacts_online '
'INTEGER, given_sortorder INTEGER, type_old INTEGER, proposer TEXT, '
'description TEXT, associated_chat TEXT, members TEXT, cbl_id '
'INTEGER, cbl_blob BLOB, fixed INTEGER, keep_sharedgroup_contacts '
'INTEGER, chats TEXT, extprop_is_hidden INTEGER, '
'extprop_sortorder_value INTEGER, extprop_is_expanded INTEGER)'),
'Contacts': (
'CREATE TABLE Contacts (id INTEGER NOT NULL PRIMARY KEY, '
'is_permanent INTEGER, type INTEGER, skypename TEXT, pstnnumber '
'TEXT, aliases TEXT, fullname TEXT, birthday INTEGER, gender '
'INTEGER, languages TEXT, country TEXT, province TEXT, city TEXT, '
'phone_home TEXT, phone_office TEXT, phone_mobile TEXT, emails '
'TEXT, hashed_emails TEXT, homepage TEXT, about TEXT, avatar_image '
'BLOB, mood_text TEXT, rich_mood_text TEXT, timezone INTEGER, '
'capabilities BLOB, profile_timestamp INTEGER, nrof_authed_buddies '
'INTEGER, ipcountry TEXT, avatar_timestamp INTEGER, mood_timestamp '
'INTEGER, received_authrequest TEXT, authreq_timestamp INTEGER, '
'lastonline_timestamp INTEGER, availability INTEGER, displayname '
'TEXT, refreshing INTEGER, given_authlevel INTEGER, '
'given_displayname TEXT, assigned_speeddial TEXT, assigned_comment '
'TEXT, alertstring TEXT, lastused_timestamp INTEGER, '
'authrequest_count INTEGER, assigned_phone1 TEXT, '
'assigned_phone1_label TEXT, assigned_phone2 TEXT, '
'assigned_phone2_label TEXT, assigned_phone3 TEXT, '
'assigned_phone3_label TEXT, buddystatus INTEGER, isauthorized '
'INTEGER, popularity_ord INTEGER, external_id TEXT, '
'external_system_id TEXT, isblocked INTEGER, '
'authorization_certificate BLOB, certificate_send_count INTEGER, '
'account_modification_serial_nr INTEGER, saved_directory_blob BLOB, '
'nr_of_buddies INTEGER, server_synced INTEGER, contactlist_track '
'INTEGER, last_used_networktime INTEGER, authorized_time INTEGER, '
'sent_authrequest TEXT, sent_authrequest_time INTEGER, '
'sent_authrequest_serial INTEGER, buddyblob BLOB, cbl_future BLOB, '
'node_capabilities INTEGER, revoked_auth INTEGER, '
'added_in_shared_group INTEGER, in_shared_group INTEGER, '
'authreq_history BLOB, profile_attachments BLOB, stack_version '
'INTEGER, offline_authreq_id INTEGER, node_capabilities_and '
'INTEGER, authreq_crc INTEGER, authreq_src INTEGER, pop_score '
'INTEGER, authreq_nodeinfo BLOB, main_phone TEXT, unified_servants '
'TEXT, phone_home_normalized TEXT, phone_office_normalized TEXT, '
'phone_mobile_normalized TEXT, sent_authrequest_initmethod INTEGER, '
'authreq_initmethod INTEGER, verified_email BLOB, verified_company '
'BLOB, sent_authrequest_extrasbitmask INTEGER, liveid_cid TEXT, '
'extprop_seen_birthday INTEGER, extprop_sms_target INTEGER, '
'extprop_external_data TEXT, extprop_must_hide_avatar INTEGER)'),
'Conversations': (
'CREATE TABLE Conversations (id INTEGER NOT NULL PRIMARY KEY, '
'is_permanent INTEGER, identity TEXT, type INTEGER, live_host TEXT, '
'live_start_timestamp INTEGER, live_is_muted INTEGER, alert_string '
'TEXT, is_bookmarked INTEGER, given_displayname TEXT, displayname '
'TEXT, local_livestatus INTEGER, inbox_timestamp INTEGER, '
'inbox_message_id INTEGER, unconsumed_suppressed_messages INTEGER, '
'unconsumed_normal_messages INTEGER, unconsumed_elevated_messages '
'INTEGER, unconsumed_messages_voice INTEGER, active_vm_id INTEGER, '
'context_horizon INTEGER, consumption_horizon INTEGER, '
'last_activity_timestamp INTEGER, active_invoice_message INTEGER, '
'spawned_from_convo_id INTEGER, pinned_order INTEGER, creator TEXT, '
'creation_timestamp INTEGER, my_status INTEGER, opt_joining_enabled '
'INTEGER, opt_access_token TEXT, opt_entry_level_rank INTEGER, '
'opt_disclose_history INTEGER, opt_history_limit_in_days INTEGER, '
'opt_admin_only_activities INTEGER, passwordhint TEXT, meta_name '
'TEXT, meta_topic TEXT, meta_guidelines TEXT, meta_picture BLOB, '
'picture TEXT, is_p2p_migrated INTEGER, premium_video_status '
'INTEGER, premium_video_is_grace_period INTEGER, guid TEXT, '
'dialog_partner TEXT, meta_description TEXT, '
'premium_video_sponsor_list TEXT, mcr_caller TEXT, chat_dbid '
'INTEGER, history_horizon INTEGER, history_sync_state TEXT, '
'thread_version TEXT, consumption_horizon_set_at INTEGER, '
'alt_identity TEXT, extprop_profile_height INTEGER, '
'extprop_chat_width INTEGER, extprop_chat_left_margin INTEGER, '
'extprop_chat_right_margin INTEGER, extprop_entry_height INTEGER, '
'extprop_windowpos_x INTEGER, extprop_windowpos_y INTEGER, '
'extprop_windowpos_w INTEGER, extprop_windowpos_h INTEGER, '
'extprop_window_maximized INTEGER, extprop_window_detached INTEGER, '
'extprop_pinned_order INTEGER, extprop_new_in_inbox INTEGER, '
'extprop_tab_order INTEGER, extprop_video_layout INTEGER, '
'extprop_video_chat_height INTEGER, extprop_chat_avatar INTEGER, '
'extprop_consumption_timestamp INTEGER, extprop_form_visible '
'INTEGER, extprop_recovery_mode INTEGER)'),
'DbMeta': (
'CREATE TABLE DbMeta (key TEXT NOT NULL PRIMARY KEY, value TEXT)'),
'LegacyMessages': (
'CREATE TABLE LegacyMessages (id INTEGER NOT NULL PRIMARY KEY, '
'is_permanent INTEGER)'),
'Messages': (
'CREATE TABLE Messages (id INTEGER NOT NULL PRIMARY KEY, '
'is_permanent INTEGER, convo_id INTEGER, chatname TEXT, author '
'TEXT, from_dispname TEXT, author_was_live INTEGER, guid BLOB, '
'dialog_partner TEXT, timestamp INTEGER, type INTEGER, '
'sending_status INTEGER, consumption_status INTEGER, edited_by '
'TEXT, edited_timestamp INTEGER, param_key INTEGER, param_value '
'INTEGER, body_xml TEXT, identities TEXT, reason TEXT, leavereason '
'INTEGER, participant_count INTEGER, error_code INTEGER, '
'chatmsg_type INTEGER, chatmsg_status INTEGER, body_is_rawxml '
'INTEGER, oldoptions INTEGER, newoptions INTEGER, newrole INTEGER, '
'pk_id INTEGER, crc INTEGER, remote_id INTEGER, call_guid TEXT, '
'extprop_contact_review_date TEXT, extprop_contact_received_stamp '
'INTEGER, extprop_contact_reviewed INTEGER)'),
'Participants': (
'CREATE TABLE Participants (id INTEGER NOT NULL PRIMARY KEY, '
'is_permanent INTEGER, convo_id INTEGER, identity TEXT, rank '
'INTEGER, requested_rank INTEGER, text_status INTEGER, voice_status '
'INTEGER, video_status INTEGER, live_identity TEXT, '
'live_price_for_me TEXT, live_fwd_identities TEXT, '
'live_start_timestamp INTEGER, sound_level INTEGER, debuginfo TEXT, '
'next_redial_time INTEGER, nrof_redials_left INTEGER, '
'last_voice_error TEXT, quality_problems TEXT, live_type INTEGER, '
'live_country TEXT, transferred_by TEXT, transferred_to TEXT, adder '
'TEXT, last_leavereason INTEGER, is_premium_video_sponsor INTEGER, '
'is_multiparty_video_capable INTEGER, live_identity_to_use TEXT, '
'livesession_recovery_in_progress INTEGER, '
'is_multiparty_video_updatable INTEGER, real_identity TEXT, '
'extprop_default_identity INTEGER)'),
'SMSes': (
'CREATE TABLE SMSes (id INTEGER NOT NULL PRIMARY KEY, is_permanent '
'INTEGER, type INTEGER, outgoing_reply_type INTEGER, status '
'INTEGER, failurereason INTEGER, is_failed_unseen INTEGER, '
'timestamp INTEGER, price INTEGER, price_precision INTEGER, '
'price_currency TEXT, reply_to_number TEXT, target_numbers TEXT, '
'target_statuses BLOB, body TEXT, chatmsg_id INTEGER, identity '
'TEXT, notification_id INTEGER, event_flags INTEGER, '
'reply_id_number TEXT, convo_name TEXT, extprop_hide_from_history '
'INTEGER, extprop_extended INTEGER)'),
'Transfers': (
'CREATE TABLE Transfers (id INTEGER NOT NULL PRIMARY KEY, '
'is_permanent INTEGER, type INTEGER, partner_handle TEXT, '
'partner_dispname TEXT, status INTEGER, failurereason INTEGER, '
'starttime INTEGER, finishtime INTEGER, filepath TEXT, filename '
'TEXT, filesize TEXT, bytestransferred TEXT, bytespersecond '
'INTEGER, chatmsg_guid BLOB, chatmsg_index INTEGER, convo_id '
'INTEGER, pk_id INTEGER, nodeid BLOB, last_activity INTEGER, flags '
'INTEGER, old_status INTEGER, old_filepath INTEGER, accepttime '
'INTEGER, parent_id INTEGER, offer_send_list TEXT, '
'extprop_localfilename TEXT, extprop_hide_from_history INTEGER, '
'extprop_window_visible INTEGER, extprop_handled_by_chat INTEGER)'),
'VideoMessages': (
'CREATE TABLE VideoMessages (id INTEGER NOT NULL PRIMARY KEY, '
'is_permanent INTEGER, qik_id BLOB, attached_msg_ids TEXT, '
'sharing_id TEXT, status INTEGER, vod_status INTEGER, vod_path '
'TEXT, local_path TEXT, public_link TEXT, progress INTEGER, title '
'TEXT, description TEXT, author TEXT, creation_timestamp INTEGER)'),
'Videos': (
'CREATE TABLE Videos (id INTEGER NOT NULL PRIMARY KEY, is_permanent '
'INTEGER, status INTEGER, error TEXT, debuginfo TEXT, dimensions '
'TEXT, media_type INTEGER, duration_1080 INTEGER, duration_720 '
'INTEGER, duration_hqv INTEGER, duration_vgad2 INTEGER, '
'duration_ltvgad2 INTEGER, timestamp INTEGER, hq_present INTEGER, '
'duration_ss INTEGER, ss_timestamp INTEGER, convo_id INTEGER, '
'device_path TEXT)'),
'Voicemails': (
'CREATE TABLE Voicemails (id INTEGER NOT NULL PRIMARY KEY, '
'is_permanent INTEGER, type INTEGER, partner_handle TEXT, '
'partner_dispname TEXT, status INTEGER, failurereason INTEGER, '
'subject TEXT, timestamp INTEGER, duration INTEGER, '
'allowed_duration INTEGER, playback_progress INTEGER, convo_id '
'INTEGER, chatmsg_guid BLOB, notification_id INTEGER, flags '
'INTEGER, size INTEGER, path TEXT, failures INTEGER, vflags '
'INTEGER, xmsg TEXT, extprop_hide_from_history INTEGER)')}]
def ParseAccountInformation(
    self, parser_mediator, query, row, **unused_kwargs):
  """Parses account information.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row with account information.
  """
  query_hash = hash(query)

  display_name = self._GetRowValue(query_hash, row, 'given_displayname')
  fullname = self._GetRowValue(query_hash, row, 'fullname')

  # TODO: Move this to the formatter, and ensure username is rendered
  # properly when fullname and/or display_name is None.
  username = '{0!s} <{1!s}>'.format(fullname, display_name)

  event_data = SkypeAccountEventData()
  event_data.country = self._GetRowValue(query_hash, row, 'country')
  event_data.display_name = display_name
  event_data.email = self._GetRowValue(query_hash, row, 'emails')
  event_data.offset = self._GetRowValue(query_hash, row, 'id')
  event_data.query = query
  event_data.username = username

  # Each timestamp column yields its own dated event; an event is only
  # produced when the column holds a non-empty value. The order below
  # matches the order events were produced previously.
  timestamp_descriptions = (
      ('profile_timestamp', 'Profile Changed'),
      ('authreq_timestamp', 'Authenticate Request'),
      ('lastonline_timestamp', 'Last Online'),
      ('mood_timestamp', 'Mood Event'),
      ('sent_authrequest_time', 'Auth Request Sent'),
      ('lastused_timestamp', 'Last Used'))

  for column_name, description in timestamp_descriptions:
    timestamp = self._GetRowValue(query_hash, row, column_name)
    if timestamp:
      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
      event = time_events.DateTimeValuesEvent(date_time, description)
      parser_mediator.ProduceEventWithEventData(event, event_data)
def ParseChat(self, parser_mediator, query, row, **unused_kwargs):
  """Parses a chat message.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row resulting from query.
  """
  query_hash = hash(query)

  author = self._GetRowValue(query_hash, row, 'author')
  dialog_partner = self._GetRowValue(query_hash, row, 'dialog_partner')
  from_displayname = self._GetRowValue(query_hash, row, 'from_displayname')
  participants = self._GetRowValue(query_hash, row, 'participants')

  # Every entry of the space-separated participant list, except the author,
  # is considered a recipient.
  recipients = [
      participant for participant in participants.split(' ')
      if participant != author]

  to_account = ', '.join(recipients)
  if not to_account:
    # Fall back to the dialog partner when no other participant is listed.
    to_account = dialog_partner or 'Unknown User'

  event_data = SkypeChatEventData()
  event_data.from_account = '{0:s} <{1:s}>'.format(from_displayname, author)
  event_data.query = query
  event_data.text = self._GetRowValue(query_hash, row, 'body_xml')
  event_data.title = self._GetRowValue(query_hash, row, 'title')
  event_data.to_account = to_account

  timestamp = self._GetRowValue(query_hash, row, 'timestamp')
  if timestamp:
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(date_time, 'Chat from Skype')
    parser_mediator.ProduceEventWithEventData(event, event_data)
def ParseSMS(self, parser_mediator, query, row, **unused_kwargs):
  """Parses an SMS.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row resulting from query.
  """
  query_hash = hash(query)

  raw_number = self._GetRowValue(query_hash, row, 'dstnum_sms')
  # Normalize the destination number by stripping spaces; keep a falsy
  # value (None or empty string) unchanged.
  destination_number = raw_number.replace(' ', '') if raw_number else raw_number

  event_data = SkypeSMSEventData()
  event_data.number = destination_number
  event_data.query = query
  event_data.text = self._GetRowValue(query_hash, row, 'msg_sms')

  timestamp = self._GetRowValue(query_hash, row, 'time_sms')
  if timestamp:
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(date_time, 'SMS from Skype')
    parser_mediator.ProduceEventWithEventData(event, event_data)
def ParseCall(self, parser_mediator, query, row, **unused_kwargs):
  """Parses a call.

  Produces a WAITING event for the call attempt, an ACCEPTED event when a
  valid accept time exists and a FINISHED event when both an accept time
  and a call duration are available.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row resulting from query.
  """
  query_hash = hash(query)
  guid = self._GetRowValue(query_hash, row, 'guid')
  is_incoming = self._GetRowValue(query_hash, row, 'is_incoming')
  videostatus = self._GetRowValue(query_hash, row, 'videostatus')

  # The GUID appears to have the form "<source>-<destination>-...";
  # fall back to placeholders when it is missing or malformed.
  try:
    aux = guid
    if aux:
      aux_list = aux.split('-')
      src_aux = aux_list[0]
      dst_aux = aux_list[1]
    else:
      src_aux = 'Unknown [no GUID]'
      dst_aux = 'Unknown [no GUID]'
  except IndexError:
    src_aux = 'Unknown [{0:s}]'.format(guid)
    dst_aux = 'Unknown [{0:s}]'.format(guid)

  if is_incoming == '0':
    # Outgoing call: the local user initiated it.
    user_start_call = True
    source = src_aux

    ip_address = self._GetRowValue(query_hash, row, 'ip_address')
    if ip_address:
      destination = '{0:s} <{1:s}>'.format(dst_aux, ip_address)
    else:
      destination = dst_aux
  else:
    user_start_call = False
    source = src_aux
    destination = dst_aux

  call_identifier = self._GetRowValue(query_hash, row, 'id')

  event_data = SkypeCallEventData()
  event_data.dst_call = destination
  event_data.offset = call_identifier
  event_data.query = query
  event_data.src_call = source
  event_data.user_start_call = user_start_call
  event_data.video_conference = videostatus == '3'

  timestamp = self._GetRowValue(query_hash, row, 'try_call')
  event_data.call_type = 'WAITING'
  date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
  event = time_events.DateTimeValuesEvent(date_time, 'Call from Skype')
  parser_mediator.ProduceEventWithEventData(event, event_data)

  try:
    timestamp = self._GetRowValue(query_hash, row, 'accept_call')
    timestamp = int(timestamp)
  except (ValueError, TypeError):
    timestamp = None

  if timestamp:
    event_data.call_type = 'ACCEPTED'
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(date_time, 'Call from Skype')
    parser_mediator.ProduceEventWithEventData(event, event_data)

  try:
    call_duration = self._GetRowValue(query_hash, row, 'call_duration')
    call_duration = int(call_duration)
  except (ValueError, TypeError):
    parser_mediator.ProduceExtractionWarning(
        'unable to determine when call: {0:s} was finished.'.format(
            call_identifier))
    call_duration = None

  if call_duration:
    if timestamp is None:
      # Without a valid accept time there is no base to add the duration
      # to; previously this crashed with "None += int" (TypeError).
      parser_mediator.ProduceExtractionWarning(
          'unable to determine when call: {0:s} was finished.'.format(
              call_identifier))
    else:
      timestamp += call_duration
      event_data.call_type = 'FINISHED'
      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
      event = time_events.DateTimeValuesEvent(date_time, 'Call from Skype')
      parser_mediator.ProduceEventWithEventData(event, event_data)
def ParseFileTransfer(
    self, parser_mediator, query, row, cache=None, database=None,
    **unused_kwargs):
  """Parses a file transfer.

  There is no direct relationship between who sends the file and
  who accepts the file.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    query (str): query that created the row.
    row (sqlite3.Row): row resulting from query.
    cache (Optional[SQLiteCache]): cache.
    database (Optional[SQLiteDatabase]): database.
  """
  query_hash = hash(query)

  # Lazily populate the cache of transfer source accounts, keyed by pk_id.
  source_dict = cache.GetResults('source')
  if not source_dict:
    results = database.Query(self.QUERY_SOURCE_FROM_TRANSFER)

    cache.CacheQueryResults(
        results, 'source', 'pk_id', ('skypeid', 'skypename'))
    source_dict = cache.GetResults('source')

  # Lazily populate the cache of transfer destination accounts, keyed by
  # parent_id.
  dest_dict = cache.GetResults('destination')
  if not dest_dict:
    results = database.Query(self.QUERY_DEST_FROM_TRANSFER)

    cache.CacheQueryResults(
        results, 'destination', 'parent_id', ('skypeid', 'skypename'))
    dest_dict = cache.GetResults('destination')

  source = 'Unknown'
  destination = 'Unknown'

  parent_id = self._GetRowValue(query_hash, row, 'parent_id')
  partner_dispname = self._GetRowValue(query_hash, row, 'partner_dispname')
  partner_handle = self._GetRowValue(query_hash, row, 'partner_handle')

  if parent_id:
    # A parent_id indicates the partner is the recipient and the cached
    # source account (if any) is the sender.
    destination = '{0:s} <{1:s}>'.format(partner_handle, partner_dispname)
    skype_id, skype_name = source_dict.get(parent_id, [None, None])
    if skype_name:
      source = '{0:s} <{1:s}>'.format(skype_id, skype_name)
  else:
    # Otherwise the partner is the sender and the cached destination
    # account (looked up by pk_id, if any) is the recipient.
    source = '{0:s} <{1:s}>'.format(partner_handle, partner_dispname)

    pk_id = self._GetRowValue(query_hash, row, 'pk_id')
    if pk_id:
      skype_id, skype_name = dest_dict.get(pk_id, [None, None])
      if skype_name:
        destination = '{0:s} <{1:s}>'.format(skype_id, skype_name)

  filename = self._GetRowValue(query_hash, row, 'filename')
  filesize = self._GetRowValue(query_hash, row, 'filesize')

  # The filesize column is stored as TEXT; fall back to 0 with a warning
  # when it cannot be converted.
  try:
    file_size = int(filesize, 10)
  except (ValueError, TypeError):
    parser_mediator.ProduceExtractionWarning(
        'unable to convert file size: {0!s} of file: {1:s}'.format(
            filesize, filename))
    file_size = 0

  event_data = SkypeTransferFileEventData()
  event_data.destination = destination
  event_data.offset = self._GetRowValue(query_hash, row, 'id')
  event_data.query = query
  event_data.source = source
  event_data.transferred_filename = filename
  event_data.transferred_filepath = self._GetRowValue(
      query_hash, row, 'filepath')
  event_data.transferred_filesize = file_size

  status = self._GetRowValue(query_hash, row, 'status')
  starttime = self._GetRowValue(query_hash, row, 'starttime')

  # Status 2 appears to mean an outgoing offer, status 8 an incoming
  # offer; only status 8 transfers get ACCEPTED/FINISHED events.
  if status == 2:
    if starttime:
      event_data.action_type = 'SENDSOLICITUDE'

      date_time = dfdatetime_posix_time.PosixTime(timestamp=starttime)
      event = time_events.DateTimeValuesEvent(
          date_time, 'File transfer from Skype')
      parser_mediator.ProduceEventWithEventData(event, event_data)

  elif status == 8:
    if starttime:
      event_data.action_type = 'GETSOLICITUDE'

      date_time = dfdatetime_posix_time.PosixTime(timestamp=starttime)
      event = time_events.DateTimeValuesEvent(
          date_time, 'File transfer from Skype')
      parser_mediator.ProduceEventWithEventData(event, event_data)

    accepttime = self._GetRowValue(query_hash, row, 'accepttime')
    if accepttime:
      event_data.action_type = 'ACCEPTED'

      date_time = dfdatetime_posix_time.PosixTime(timestamp=accepttime)
      event = time_events.DateTimeValuesEvent(
          date_time, 'File transfer from Skype')
      parser_mediator.ProduceEventWithEventData(event, event_data)

    finishtime = self._GetRowValue(query_hash, row, 'finishtime')
    if finishtime:
      event_data.action_type = 'FINISHED'

      date_time = dfdatetime_posix_time.PosixTime(timestamp=finishtime)
      event = time_events.DateTimeValuesEvent(
          date_time, 'File transfer from Skype')
      parser_mediator.ProduceEventWithEventData(event, event_data)
sqlite.SQLiteParser.RegisterPlugin(SkypePlugin)
| apache-2.0 |
mitsuhiko/sqlalchemy | examples/adjacency_list/adjacency_list.py | 32 | 3538 | from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import Session, relationship, backref,\
joinedload_all
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.collections import attribute_mapped_collection
Base = declarative_base()
class TreeNode(Base):
    """A node in a self-referential tree (adjacency list pattern).

    Each row stores a foreign key to its parent row in the same table;
    children are exposed as a dict keyed by the child's ``name``.
    """

    __tablename__ = 'tree'
    id = Column(Integer, primary_key=True)
    parent_id = Column(Integer, ForeignKey(id))
    name = Column(String(50), nullable=False)

    children = relationship("TreeNode",
                        # cascade deletions
                        cascade="all, delete-orphan",

                        # many to one + adjacency list - remote_side
                        # is required to reference the 'remote'
                        # column in the join condition.
                        backref=backref("parent", remote_side=id),

                        # children will be represented as a dictionary
                        # on the "name" attribute.
                        collection_class=attribute_mapped_collection('name'),
                    )

    def __init__(self, name, parent=None):
        # parent may be None for a root node; assigning it populates
        # parent_id through the relationship backref.
        self.name = name
        self.parent = parent

    def __repr__(self):
        return "TreeNode(name=%r, id=%r, parent_id=%r)" % (
            self.name,
            self.id,
            self.parent_id
        )

    def dump(self, _indent=0):
        """Return this node and its subtree as an indented multi-line string.

        _indent is internal recursion state; callers use the default.
        """
        return " " * _indent + repr(self) + \
            "\n" + \
            "".join([
                c.dump(_indent + 1)
                for c in self.children.values()]
            )
if __name__ == '__main__':
    # Demo script: builds a small tree in an in-memory SQLite database,
    # exercises cascade deletes and eager loading, and prints each step.
    engine = create_engine('sqlite://', echo=True)

    def msg(msg, *args):
        # Print a message framed by dashed rules sized to its first line.
        msg = msg % args
        print("\n\n\n" + "-" * len(msg.split("\n")[0]))
        print(msg)
        print("-" * len(msg.split("\n")[0]))

    msg("Creating Tree Table:")

    Base.metadata.create_all(engine)

    session = Session(engine)

    node = TreeNode('rootnode')
    TreeNode('node1', parent=node)
    TreeNode('node3', parent=node)

    node2 = TreeNode('node2')
    TreeNode('subnode1', parent=node2)
    # Assigning into the children dict sets the parent relationship.
    node.children['node2'] = node2
    TreeNode('subnode2', parent=node.children['node2'])

    msg("Created new tree structure:\n%s", node.dump())

    msg("flush + commit:")

    session.add(node)
    session.commit()

    msg("Tree After Save:\n %s", node.dump())

    TreeNode('node4', parent=node)
    TreeNode('subnode3', parent=node.children['node4'])
    TreeNode('subnode4', parent=node.children['node4'])
    TreeNode('subsubnode1', parent=node.children['node4'].children['subnode3'])

    # remove node1 from the parent, which will trigger a delete
    # via the delete-orphan cascade.
    del node.children['node1']

    msg("Removed node1.  flush + commit:")
    session.commit()

    msg("Tree after save:\n %s", node.dump())

    msg("Emptying out the session entirely, "
        "selecting tree on root, using eager loading to join four levels deep.")
    session.expunge_all()
    node = session.query(TreeNode).\
        options(joinedload_all("children", "children",
                               "children", "children")).\
        filter(TreeNode.name == "rootnode").\
        first()

    msg("Full Tree:\n%s", node.dump())

    msg("Marking root node as deleted, flush + commit:")

    session.delete(node)
    session.commit()
| mit |
Moriadry/tensorflow | tensorflow/contrib/boosted_trees/estimator_batch/custom_loss_head.py | 87 | 3030 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of `head.Head` with custom loss and link function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
class CustomLossHead(head_lib._RegressionHead):  # pylint: disable=protected-access
  """A Head object with custom loss function and link function."""

  def __init__(self,
               loss_fn,
               link_fn,
               logit_dimension,
               head_name=None,
               weight_column_name=None,
               metrics_fn=None):
    """`Head` for specifying arbitrary loss function.

    Args:
      loss_fn: Loss function.
      link_fn: Function that converts logits to prediction.
      logit_dimension: Number of dimensions for the logits.
      head_name: name of the head. Predictions, summary, metrics keys are
        suffixed by `"/" + head_name` and the default variable scope is
        `head_name`.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      metrics_fn: a function that takes predictions dict, labels and weights and
        returns a dictionary of metrics to be calculated.
    """

    def loss_wrapper(labels, logits, weight_tensor):
      # Default to unit weights, one per example, when no weight tensor is
      # provided.
      if weight_tensor is None:
        weight_tensor = array_ops.ones(
            shape=[array_ops.shape(labels)[0], 1], dtype=dtypes.float32)
      weighted_loss, _ = loss_fn(labels, weight_tensor, logits)
      average_loss = math_ops.reduce_mean(weighted_loss)
      # Returns (training loss, loss normalized by the mean weight).
      return average_loss, average_loss / math_ops.reduce_mean(weight_tensor)

    super(CustomLossHead, self).__init__(
        loss_fn=loss_wrapper,
        link_fn=link_fn,
        head_name=head_name,
        weight_column_name=weight_column_name,
        enable_centered_bias=False,
        label_dimension=logit_dimension)

    self._metrics_fn = metrics_fn

  def _metrics(self, eval_loss, predictions, labels, weights):
    # Delegates to the user-supplied metrics function when one was given;
    # eval_loss is unused in that case. Returns None otherwise.
    if self._metrics_fn is not None:
      return self._metrics_fn(predictions, labels, weights)
| apache-2.0 |
noodle-learns-programming/python-social-auth | social/backends/instagram.py | 61 | 1611 | """
Instagram OAuth2 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/instagram.html
"""
from social.backends.oauth import BaseOAuth2
class InstagramOAuth2(BaseOAuth2):
    """Instagram OAuth2 authentication backend."""
    name = 'instagram'
    AUTHORIZATION_URL = 'https://instagram.com/oauth/authorize'
    ACCESS_TOKEN_URL = 'https://instagram.com/oauth/access_token'
    ACCESS_TOKEN_METHOD = 'POST'

    def get_user_id(self, details, response):
        # Sometimes Instagram returns 'user', sometimes 'data', but API docs
        # says 'data' http://instagram.com/developer/endpoints/users/#get_users
        user = response.get('user') or response.get('data') or {}
        return user.get('id')

    def get_user_details(self, response):
        """Return user details from Instagram account"""
        # Sometimes Instagram returns 'user', sometimes 'data', but API docs
        # says 'data' http://instagram.com/developer/endpoints/users/#get_users
        user = response.get('user') or response.get('data') or {}
        fullname, first_name, last_name = self.get_user_names(
            user.get('full_name', '')
        )
        return {
            'username': user['username'],
            'fullname': fullname,
            'first_name': first_name,
            'last_name': last_name,
            'email': user.get('email', ''),
        }

    def user_data(self, access_token, *args, **kwargs):
        """Loads user data from service"""
        return self.get_json(
            'https://api.instagram.com/v1/users/self',
            params={'access_token': access_token}
        )
| bsd-3-clause |
owlabs/incubator-airflow | airflow/dag/base_dag.py | 4 | 2812 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod, abstractproperty
class BaseDag(object):
    """
    Base DAG object that both the SimpleDag and DAG inherit.

    Declares the read-only interface shared by both implementations; every
    member raises NotImplementedError and must be overridden.
    """

    # Python 2 style ABC declaration; ignored under Python 3, where
    # abstractness of this class is not enforced.
    __metaclass__ = ABCMeta

    @abstractproperty
    def dag_id(self):
        """
        :return: the DAG ID
        :rtype: unicode
        """
        raise NotImplementedError()

    @abstractproperty
    def task_ids(self):
        """
        :return: A list of task IDs that are in this DAG
        :rtype: List[unicode]
        """
        raise NotImplementedError()

    @abstractproperty
    def full_filepath(self):
        """
        :return: The absolute path to the file that contains this DAG's definition
        :rtype: unicode
        """
        raise NotImplementedError()

    @abstractmethod
    def concurrency(self):
        """
        :return: maximum number of tasks that can run simultaneously from this DAG
        :rtype: int
        """
        raise NotImplementedError()

    @abstractmethod
    def is_paused(self):
        """
        :return: whether this DAG is paused or not
        :rtype: bool
        """
        raise NotImplementedError()

    @abstractmethod
    def pickle_id(self):
        """
        :return: The pickle ID for this DAG, if it has one. Otherwise None.
        :rtype: unicode
        """
        # Instantiate the exception for consistency with the other abstract
        # members (previously raised the bare class).
        raise NotImplementedError()
class BaseDagBag(object):
    """
    Base object that both the SimpleDagBag and DagBag inherit.
    """

    # Declare the ABC metaclass for consistency with BaseDag; without it the
    # @abstractproperty/@abstractmethod decorators below were inert under
    # Python 2. (Ignored under Python 3, so runtime behavior is unchanged
    # there.)
    __metaclass__ = ABCMeta

    @abstractproperty
    def dag_ids(self):
        """
        :return: a list of DAG IDs in this bag
        :rtype: List[unicode]
        """
        raise NotImplementedError()

    @abstractmethod
    def get_dag(self, dag_id):
        """
        :return: whether the task exists in this bag
        :rtype: airflow.dag.base_dag.BaseDag
        """
        raise NotImplementedError()
| apache-2.0 |
mbox/django | django/contrib/gis/forms/fields.py | 74 | 4444 | from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
# While this couples the geographic forms to the GEOS library,
# it decouples from database (by not importing SpatialBackend).
from django.contrib.gis.geos import GEOSException, GEOSGeometry
from .widgets import OpenLayersWidget
class GeometryField(forms.Field):
    """
    This is the basic form field for a Geometry. Any textual input that is
    accepted by GEOSGeometry is accepted by this form. By default,
    this includes WKT, HEXEWKB, WKB (in a buffer), and GeoJSON.
    """
    widget = OpenLayersWidget
    geom_type = 'GEOMETRY'

    default_error_messages = {
        'required': _('No geometry value provided.'),
        'invalid_geom': _('Invalid geometry value.'),
        'invalid_geom_type': _('Invalid geometry type.'),
        'transform_error': _('An error occurred when transforming the geometry '
                             'to the SRID of the geometry form field.'),
    }

    def __init__(self, **kwargs):
        # Pop out attributes from the database field, or use sensible
        # defaults (e.g., allow None).
        self.srid = kwargs.pop('srid', None)
        self.geom_type = kwargs.pop('geom_type', self.geom_type)
        super(GeometryField, self).__init__(**kwargs)
        # Expose the geometry type to the widget so it can render the
        # matching map control.
        self.widget.attrs['geom_type'] = self.geom_type

    def to_python(self, value):
        """
        Transforms the value to a Geometry object.
        """
        if value in self.empty_values:
            return None

        if not isinstance(value, GEOSGeometry):
            try:
                value = GEOSGeometry(value)
            except (GEOSException, ValueError, TypeError):
                raise forms.ValidationError(self.error_messages['invalid_geom'], code='invalid_geom')

        # Try to set the srid: prefer the widget's map SRID when the widget
        # defines one, otherwise fall back to the field's own SRID.
        if not value.srid:
            try:
                value.srid = self.widget.map_srid
            except AttributeError:
                if self.srid:
                    value.srid = self.srid
        return value

    def clean(self, value):
        """
        Validates that the input value can be converted to a Geometry
        object (which is returned). A ValidationError is raised if
        the value cannot be instantiated as a Geometry.
        """
        geom = super(GeometryField, self).clean(value)
        if geom is None:
            return geom

        # Ensuring that the geometry is of the correct type (indicated
        # using the OGC string label). The generic 'GEOMETRY' field
        # accepts any type.
        if str(geom.geom_type).upper() != self.geom_type and not self.geom_type == 'GEOMETRY':
            raise forms.ValidationError(self.error_messages['invalid_geom_type'], code='invalid_geom_type')

        # Transforming the geometry if the SRID was set.
        if self.srid and self.srid != -1 and self.srid != geom.srid:
            try:
                geom.transform(self.srid)
            except GEOSException:
                raise forms.ValidationError(
                    self.error_messages['transform_error'], code='transform_error')

        return geom

    def _has_changed(self, initial, data):
        """ Compare geographic value of data with its initial value. """

        try:
            data = self.to_python(data)
            initial = self.to_python(initial)
        except forms.ValidationError:
            # Unparseable input counts as changed.
            return True

        # Only do a geographic comparison if both values are available
        if initial and data:
            data.transform(initial.srid)
            # If the initial value was not added by the browser, the geometry
            # provided may be slightly different, the first time it is saved.
            # The comparison is done with a very low tolerance.
            return not initial.equals_exact(data, tolerance=0.000001)
        else:
            # Check for change of state of existence
            return bool(initial) != bool(data)
# Concrete form fields for each OGC geometry type. Each subclass only
# narrows the geometry type label that GeometryField.clean() validates
# against.


class GeometryCollectionField(GeometryField):
    geom_type = 'GEOMETRYCOLLECTION'


class PointField(GeometryField):
    geom_type = 'POINT'


class MultiPointField(GeometryField):
    geom_type = 'MULTIPOINT'


class LineStringField(GeometryField):
    geom_type = 'LINESTRING'


class MultiLineStringField(GeometryField):
    geom_type = 'MULTILINESTRING'


class PolygonField(GeometryField):
    geom_type = 'POLYGON'


class MultiPolygonField(GeometryField):
    geom_type = 'MULTIPOLYGON'
| bsd-3-clause |
CasparLi/calibre | src/calibre/utils/chm/__init__.py | 24 | 1221 | ## Copyright (C) 2003-2006 Rubens Ramos <rubensr@users.sourceforge.net>
## pychm is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
## $Id: __init__.py,v 1.8 2006/06/18 10:50:43 rubensr Exp $
'''
chm - A package to manipulate CHM files
The chm package provides four modules: chm, chmlib, extra and
_chmlib. _chmlib and chmlib are very low level libraries generated
from SWIG interface files, and are simple wrappers around the API
defined by the C library chmlib.
The extra module adds full-text search support.
the chm module provides some higher level classes to simplify
access to the CHM files information.
'''
__all__ = ["chm", "chmlib", "_chmlib", "extra"]
__version__ = "0.8.4"
__revision__ = "$Id: __init__.py,v 1.8 2006/06/18 10:50:43 rubensr Exp $"
| gpl-3.0 |
brijeshkesariya/odoo | addons/note/note.py | 223 | 8893 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools import html2plaintext
class note_stage(osv.osv):
    """ Category of Note """

    _name = "note.stage"
    _description = "Note Stage"

    _columns = {
        'name': fields.char('Stage Name', translate=True, required=True),
        'sequence': fields.integer('Sequence', help="Used to order the note stages"),
        # Stages are per-user: each user owns their own set of columns.
        'user_id': fields.many2one('res.users', 'Owner', help="Owner of the note stage.", required=True, ondelete='cascade'),
        'fold': fields.boolean('Folded by Default'),
    }
    _order = 'sequence asc'
    _defaults = {
        'fold': 0,
        # Default owner is the user creating the stage.
        'user_id': lambda self, cr, uid, ctx: uid,
        'sequence' : 1,
    }
class note_tag(osv.osv):
    """ Tag that can be attached to notes (many2many from note.note). """

    _name = "note.tag"
    _description = "Note Tag"

    _columns = {
        'name' : fields.char('Tag Name', required=True),
    }
class note_note(osv.osv):
    """ Note """

    _name = 'note.note'
    _inherit = ['mail.thread']
    _description = "Note"

    #writing method (no modification of values)
    def name_create(self, cr, uid, name, context=None):
        # Quick-create a note whose content is the given name.
        rec_id = self.create(cr, uid, {'memo': name}, context=context)
        return self.name_get(cr, uid, [rec_id], context)[0]

    #read the first line (convert hml into text)
    def _get_note_first_line(self, cr, uid, ids, name="", args={}, context=None):
        # Function field: the note's summary is the first plain-text line of
        # its HTML memo, with '*' markers stripped.
        res = {}
        for note in self.browse(cr, uid, ids, context=context):
            res[note.id] = (note.memo and html2plaintext(note.memo) or "").strip().replace('*','').split("\n")[0]
        return res

    def onclick_note_is_done(self, cr, uid, ids, context=None):
        # Mark the notes as done (archived) and stamp today's date.
        return self.write(cr, uid, ids, {'open': False, 'date_done': fields.date.today()}, context=context)

    def onclick_note_not_done(self, cr, uid, ids, context=None):
        # Re-open the notes.
        return self.write(cr, uid, ids, {'open': True}, context=context)

    #return the default stage for the uid user
    def _get_default_stage_id(self,cr,uid,context=None):
        ids = self.pool.get('note.stage').search(cr,uid,[('user_id','=',uid)], context=context)
        return ids and ids[0] or False

    def _set_stage_per_user(self, cr, uid, id, name, value, args=None, context=None):
        # Inverse of the stage_id function field: replace the current user's
        # stage with `value` while keeping other users' stages untouched.
        note = self.browse(cr, uid, id, context=context)
        if not value: return False
        stage_ids = [value] + [stage.id for stage in note.stage_ids if stage.user_id.id != uid ]
        return self.write(cr, uid, [id], {'stage_ids': [(6, 0, set(stage_ids))]}, context=context)

    def _get_stage_per_user(self, cr, uid, ids, name, args, context=None):
        # Function field getter: a note's stage_id is the stage belonging to
        # the current user, or False if the user has none on the note.
        result = dict.fromkeys(ids, False)
        for record in self.browse(cr, uid, ids, context=context):
            for stage in record.stage_ids:
                if stage.user_id.id == uid:
                    result[record.id] = stage.id
        return result

    _columns = {
        'name': fields.function(_get_note_first_line,
            string='Note Summary',
            type='text', store=True),
        'user_id': fields.many2one('res.users', 'Owner'),
        'memo': fields.html('Note Content'),
        'sequence': fields.integer('Sequence'),
        # Per-user view over stage_ids (see _get/_set_stage_per_user).
        'stage_id': fields.function(_get_stage_per_user,
            fnct_inv=_set_stage_per_user,
            string='Stage',
            type='many2one',
            relation='note.stage'),
        'stage_ids': fields.many2many('note.stage','note_stage_rel','note_id','stage_id','Stages of Users'),
        'open': fields.boolean('Active', track_visibility='onchange'),
        'date_done': fields.date('Date done'),
        'color': fields.integer('Color Index'),
        'tag_ids' : fields.many2many('note.tag','note_tags_rel','note_id','tag_id','Tags'),
    }
    _defaults = {
        'user_id': lambda self, cr, uid, ctx=None: uid,
        'open' : 1,
        'stage_id' : _get_default_stage_id,
    }
    _order = 'sequence'

    def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
        # Custom grouping by stage: kanban columns are the current user's
        # stages; notes carrying no stage of this user are folded into the
        # first column (or form one synthetic column).
        if groupby and groupby[0]=="stage_id":

            #search all stages
            current_stage_ids = self.pool.get('note.stage').search(cr,uid,[('user_id','=',uid)], context=context)

            if current_stage_ids: #if the user have some stages

                stages = self.pool['note.stage'].browse(cr, uid, current_stage_ids, context=context)

                result = [{ #notes by stage for stages user
                    '__context': {'group_by': groupby[1:]},
                    '__domain': domain + [('stage_ids.id', '=', stage.id)],
                    'stage_id': (stage.id, stage.name),
                    'stage_id_count': self.search(cr,uid, domain+[('stage_ids', '=', stage.id)], context=context, count=True),
                    '__fold': stage.fold,
                } for stage in stages]

                #note without user's stage
                nb_notes_ws = self.search(cr,uid, domain+[('stage_ids', 'not in', current_stage_ids)], context=context, count=True)
                if nb_notes_ws:
                    # add note to the first column if it's the first stage
                    dom_not_in = ('stage_ids', 'not in', current_stage_ids)
                    if result and result[0]['stage_id'][0] == current_stage_ids[0]:
                        dom_in = result[0]['__domain'].pop()
                        result[0]['__domain'] = domain + ['|', dom_in, dom_not_in]
                        result[0]['stage_id_count'] += nb_notes_ws
                    else:
                        # add the first stage column
                        result = [{
                            '__context': {'group_by': groupby[1:]},
                            '__domain': domain + [dom_not_in],
                            'stage_id': (stages[0].id, stages[0].name),
                            'stage_id_count':nb_notes_ws,
                            '__fold': stages[0].name,
                        }] + result

            else: # if stage_ids is empty

                #note without user's stage
                nb_notes_ws = self.search(cr,uid, domain, context=context, count=True)
                if nb_notes_ws:
                    result = [{ #notes for unknown stage
                        '__context': {'group_by': groupby[1:]},
                        '__domain': domain,
                        'stage_id': False,
                        'stage_id_count':nb_notes_ws
                    }]
                else:
                    result = []
            return result

        else:
            # Any other grouping falls through to the standard behavior.
            return super(note_note, self).read_group(cr, uid, domain, fields, groupby,
                offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
# Upgrade the config settings page to configure the pad, fancy and tags modes
class note_base_config_settings(osv.osv_memory):
    """Settings wizard extension exposing the note module options.

    Adds two toggles to the general settings screen: a collaborative-pad
    option and the fancy-layout user group.
    """
    _inherit = 'base.config.settings'
    _columns = {
        # NOTE(review): by base.config.settings convention a ``module_*``
        # boolean triggers installation of that module (here ``note_pad``)
        # when checked -- confirm the module exists in this codebase.
        'module_note_pad': fields.boolean('Use collaborative pads (etherpad)'),
        # adds/removes the user from the implied ``note.group_note_fancy`` group
        'group_note_fancy': fields.boolean('Use fancy layouts for notes', implied_group='note.group_note_fancy'),
    }
class res_users(osv.Model):
    """Extend ``res.users`` to bootstrap personal note stages.

    When a new employee account is created, the five reference note stages
    shipped with this module are duplicated for that user, so the kanban
    board is pre-populated on first use.
    """
    _name = 'res.users'
    _inherit = ['res.users']

    def create(self, cr, uid, data, context=None):
        new_user_id = super(res_users, self).create(cr, uid, data, context=context)
        if self.has_group(cr, new_user_id, 'base.group_user'):
            stage_model = self.pool['note.stage']
            model_data = self.pool['ir.model.data']
            for index in range(5):
                xmlid = 'note_stage_%02d' % (index,)
                try:
                    _model, template_stage_id = model_data.get_object_reference(cr, SUPERUSER_ID, 'note', xmlid)
                except ValueError:
                    # the reference stage does not exist (e.g. data not
                    # installed): simply skip it
                    continue
                stage_model.copy(cr, SUPERUSER_ID, template_stage_id, default={'user_id': new_user_id}, context=context)
        return new_user_id
| agpl-3.0 |
KaranToor/MA450 | google-cloud-sdk/.install/.backup/lib/googlecloudsdk/calliope/display_info.py | 3 | 3141 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource display info for the Calliope display module."""
class DisplayInfo(object):
  """Accumulates resource display defaults for priming the Displayer.

  The "legacy" flag only supports commands not yet migrated to
  parser.display_info and will disappear once the incremental Command class
  refactor is finished.

  Attributes:
    _format: Default format string; args.format still takes precedence.
    _transforms: Symbol dict of filter/format transforms.
    _aliases: Dict mapping resource name aliases to their expansions.
    _legacy: True while legacy Command display-info methods are in effect.
  """

  def __init__(self):
    self._legacy = True
    self._format = None
    self._transforms = {}
    self._aliases = {}

  # pylint: disable=redefined-builtin, name matches args.format and --format
  def AddFormat(self, format):
    """Records a default format string; newer info takes precedence.

    Args:
      format: The default format string. args.format takes precedence.
    """
    self._legacy = False
    if format:
      self._format = format

  def AddTransforms(self, transforms):
    """Merges transforms into the accumulator; newer values win.

    Args:
      transforms: A filter/format transforms symbol dict.
    """
    self._legacy = False
    if transforms:
      self._transforms.update(transforms)

  def AddAliases(self, aliases):
    """Merges aliases into the accumulator; newer values win.

    Args:
      aliases: The resource name alias dict.
    """
    self._legacy = False
    if aliases:
      self._aliases.update(aliases)

  def AddLowerDisplayInfo(self, display_info):
    """Folds lower-precedence display_info into this object.

    Calliope calls this to propagate low-precedence CLI parent info down to
    its high-precedence children.

    Args:
      display_info: The low precedence DisplayInfo object to add.
    """
    self._format = self._format or display_info.format
    if display_info.transforms:
      merged_transforms = dict(display_info.transforms)
      merged_transforms.update(self._transforms)
      self._transforms = merged_transforms
    if display_info.aliases:
      merged_aliases = dict(display_info.aliases)
      merged_aliases.update(self._aliases)
      self._aliases = merged_aliases

  @property
  def format(self):
    return self._format

  @property
  def aliases(self):
    return self._aliases

  @property
  def transforms(self):
    return self._transforms

  @property
  def legacy(self):
    return self._legacy

  @legacy.setter
  def legacy(self, value):
    self._legacy = value
| apache-2.0 |
ThirdProject/android_external_chromium_org | chrome/common/extensions/docs/server2/branch_utility_test.py | 24 | 7646 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
from branch_utility import BranchUtility, ChannelInfo
from fake_url_fetcher import FakeUrlFetcher
from object_store_creator import ObjectStoreCreator
class BranchUtilityTest(unittest.TestCase):
  """Tests for BranchUtility's channel/version/branch mapping.

  Expected branch and version numbers below are pinned to the fixture files
  branch_utility/first.json and branch_utility/second.json served through
  FakeUrlFetcher from the test_data directory.

  NOTE: the deprecated unittest alias assertEquals has been replaced by
  assertEqual throughout (same behavior, non-deprecated API).
  """

  def setUp(self):
    self._branch_util = BranchUtility(
        os.path.join('branch_utility', 'first.json'),
        os.path.join('branch_utility', 'second.json'),
        FakeUrlFetcher(os.path.join(sys.path[0], 'test_data')),
        ObjectStoreCreator.ForTest())

  def testSplitChannelNameFromPath(self):
    # A recognized leading channel component splits into (channel, rest);
    # any other path yields (None, path) unchanged.
    self.assertEqual(('stable', 'extensions/stuff.html'),
                     self._branch_util.SplitChannelNameFromPath(
                         'stable/extensions/stuff.html'))
    self.assertEqual(('dev', 'extensions/stuff.html'),
                     self._branch_util.SplitChannelNameFromPath(
                         'dev/extensions/stuff.html'))
    self.assertEqual(('beta', 'extensions/stuff.html'),
                     self._branch_util.SplitChannelNameFromPath(
                         'beta/extensions/stuff.html'))
    self.assertEqual(('trunk', 'extensions/stuff.html'),
                     self._branch_util.SplitChannelNameFromPath(
                         'trunk/extensions/stuff.html'))
    self.assertEqual((None, 'extensions/stuff.html'),
                     self._branch_util.SplitChannelNameFromPath(
                         'extensions/stuff.html'))
    self.assertEqual((None, 'apps/stuff.html'),
                     self._branch_util.SplitChannelNameFromPath(
                         'apps/stuff.html'))
    self.assertEqual((None, 'extensions/dev/stuff.html'),
                     self._branch_util.SplitChannelNameFromPath(
                         'extensions/dev/stuff.html'))
    self.assertEqual((None, 'stuff.html'),
                     self._branch_util.SplitChannelNameFromPath(
                         'stuff.html'))

  def testNewestChannel(self):
    # NewestChannel picks the most-recent channel regardless of input order.
    self.assertEqual('trunk',
        self._branch_util.NewestChannel(('trunk', 'dev', 'beta', 'stable')))
    self.assertEqual('trunk',
        self._branch_util.NewestChannel(('stable', 'beta', 'dev', 'trunk')))
    self.assertEqual('dev',
        self._branch_util.NewestChannel(('stable', 'beta', 'dev')))
    self.assertEqual('dev',
        self._branch_util.NewestChannel(('dev', 'beta', 'stable')))
    self.assertEqual('beta',
        self._branch_util.NewestChannel(('beta', 'stable')))
    self.assertEqual('beta',
        self._branch_util.NewestChannel(('stable', 'beta')))
    self.assertEqual('stable', self._branch_util.NewestChannel(('stable',)))
    self.assertEqual('beta', self._branch_util.NewestChannel(('beta',)))
    self.assertEqual('dev', self._branch_util.NewestChannel(('dev',)))
    self.assertEqual('trunk', self._branch_util.NewestChannel(('trunk',)))

  def testNewer(self):
    oldest_stable_info = ChannelInfo('stable', '963', 17)
    older_stable_info = ChannelInfo('stable', '1025', 18)
    old_stable_info = ChannelInfo('stable', '1084', 19)
    sort_of_old_stable_info = ChannelInfo('stable', '1364', 25)
    stable_info = ChannelInfo('stable', '1410', 26)
    beta_info = ChannelInfo('beta', '1453', 27)
    dev_info = ChannelInfo('dev', '1500', 28)
    trunk_info = ChannelInfo('trunk', 'trunk', 'trunk')
    self.assertEqual(older_stable_info,
                     self._branch_util.Newer(oldest_stable_info))
    self.assertEqual(old_stable_info,
                     self._branch_util.Newer(older_stable_info))
    self.assertEqual(stable_info,
                     self._branch_util.Newer(sort_of_old_stable_info))
    self.assertEqual(beta_info, self._branch_util.Newer(stable_info))
    self.assertEqual(dev_info, self._branch_util.Newer(beta_info))
    self.assertEqual(trunk_info, self._branch_util.Newer(dev_info))
    # Test the upper limit.
    self.assertEqual(None, self._branch_util.Newer(trunk_info))

  def testOlder(self):
    trunk_info = ChannelInfo('trunk', 'trunk', 'trunk')
    dev_info = ChannelInfo('dev', '1500', 28)
    beta_info = ChannelInfo('beta', '1453', 27)
    stable_info = ChannelInfo('stable', '1410', 26)
    old_stable_info = ChannelInfo('stable', '1364', 25)
    older_stable_info = ChannelInfo('stable', '1312', 24)
    oldest_stable_info = ChannelInfo('stable', '396', 5)
    self.assertEqual(dev_info, self._branch_util.Older(trunk_info))
    self.assertEqual(beta_info, self._branch_util.Older(dev_info))
    self.assertEqual(stable_info, self._branch_util.Older(beta_info))
    self.assertEqual(old_stable_info, self._branch_util.Older(stable_info))
    self.assertEqual(older_stable_info,
                     self._branch_util.Older(old_stable_info))
    # Test the lower limit.
    self.assertEqual(None, self._branch_util.Older(oldest_stable_info))

  def testGetChannelInfo(self):
    trunk_info = ChannelInfo('trunk', 'trunk', 'trunk')
    self.assertEqual(trunk_info, self._branch_util.GetChannelInfo('trunk'))
    dev_info = ChannelInfo('dev', '1500', 28)
    self.assertEqual(dev_info, self._branch_util.GetChannelInfo('dev'))
    beta_info = ChannelInfo('beta', '1453', 27)
    self.assertEqual(beta_info, self._branch_util.GetChannelInfo('beta'))
    stable_info = ChannelInfo('stable', '1410', 26)
    self.assertEqual(stable_info, self._branch_util.GetChannelInfo('stable'))

  def testGetLatestVersionNumber(self):
    self.assertEqual(28, self._branch_util.GetLatestVersionNumber())

  def testGetBranchForVersion(self):
    # Version -> branch pairs straight from the fixture data.
    self.assertEqual('1500',
        self._branch_util.GetBranchForVersion(28))
    self.assertEqual('1453',
        self._branch_util.GetBranchForVersion(27))
    self.assertEqual('1410',
        self._branch_util.GetBranchForVersion(26))
    self.assertEqual('1364',
        self._branch_util.GetBranchForVersion(25))
    self.assertEqual('1312',
        self._branch_util.GetBranchForVersion(24))
    self.assertEqual('1271',
        self._branch_util.GetBranchForVersion(23))
    self.assertEqual('1229',
        self._branch_util.GetBranchForVersion(22))
    self.assertEqual('1180',
        self._branch_util.GetBranchForVersion(21))
    self.assertEqual('1132',
        self._branch_util.GetBranchForVersion(20))
    self.assertEqual('1084',
        self._branch_util.GetBranchForVersion(19))
    self.assertEqual('1025',
        self._branch_util.GetBranchForVersion(18))
    self.assertEqual('963',
        self._branch_util.GetBranchForVersion(17))
    self.assertEqual('696',
        self._branch_util.GetBranchForVersion(11))
    self.assertEqual('396',
        self._branch_util.GetBranchForVersion(5))

  def testGetChannelForVersion(self):
    self.assertEqual('trunk',
        self._branch_util.GetChannelForVersion('trunk'))
    self.assertEqual('dev',
        self._branch_util.GetChannelForVersion(28))
    self.assertEqual('beta',
        self._branch_util.GetChannelForVersion(27))
    self.assertEqual('stable',
        self._branch_util.GetChannelForVersion(26))
    self.assertEqual('stable',
        self._branch_util.GetChannelForVersion(22))
    self.assertEqual('stable',
        self._branch_util.GetChannelForVersion(18))
    self.assertEqual('stable',
        self._branch_util.GetChannelForVersion(14))
    # Versions newer than any known channel map to no channel.
    self.assertEqual(None,
        self._branch_util.GetChannelForVersion(30))
    self.assertEqual(None,
        self._branch_util.GetChannelForVersion(42))
# Allow running this suite directly: python branch_utility_test.py
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
elkingtonmcb/sympy | sympy/parsing/ast_parser.py | 122 | 2811 | """
This module implements the functionality to take any Python expression as a
string and fix all numbers and other things before evaluating it,
thus
1/2
returns
Integer(1)/Integer(2)
We use the Python ast module for that, which is in python2.6 and later. It is
well documented at docs.python.org.
Some tips to understand how this works: use dump() to get a nice
representation of any node. Then write a string of what you want to get,
e.g. "Integer(1)", parse it, dump it and you'll see that you need to do
"Call(Name('Integer', Load()), [node], [], None, None)". You don't need
to bother with lineno and col_offset, just call fix_missing_locations()
before returning the node.
"""
from __future__ import print_function, division
from sympy.core.basic import Basic
from sympy.core.compatibility import exec_
from sympy.core.sympify import SympifyError
from ast import parse, NodeTransformer, Call, Name, Load, \
fix_missing_locations, Str, Tuple
class Transform(NodeTransformer):
    """AST transformer that rewrites plain Python into SymPy calls.

    Numeric literals are wrapped in ``Integer``/``Float`` constructor calls,
    unknown names become ``Symbol(...)`` calls and lambdas become SymPy
    ``Lambda`` objects, so that e.g. ``1/2`` evaluates exactly instead of
    as a float.

    NOTE(review): the five-argument ``Call(...)`` form used below matches
    the pre-Python-3.5 AST signature (the trailing ``None, None``
    presumably fill the old starargs/kwargs slots) -- confirm against the
    Python versions this module targets.
    """
    def __init__(self, local_dict, global_dict):
        # local_dict: caller-provided names to leave untouched;
        # global_dict: the SymPy namespace built by parse_expr below.
        NodeTransformer.__init__(self)
        self.local_dict = local_dict
        self.global_dict = global_dict
    def visit_Num(self, node):
        """Wrap int/float literals in Integer()/Float() constructor calls."""
        if isinstance(node.n, int):
            return fix_missing_locations(Call(Name('Integer', Load()),
                [node], [], None, None))
        elif isinstance(node.n, float):
            return fix_missing_locations(Call(Name('Float', Load()),
                [node], [], None, None))
        return node
    def visit_Name(self, node):
        """Keep known names; turn anything unknown into a Symbol(...) call."""
        if node.id in self.local_dict:
            return node
        elif node.id in self.global_dict:
            name_obj = self.global_dict[node.id]
            # only keep names bound to SymPy objects, classes or callables
            if isinstance(name_obj, (Basic, type)) or callable(name_obj):
                return node
        elif node.id in ['True', 'False']:
            return node
        return fix_missing_locations(Call(Name('Symbol', Load()),
            [Str(node.id)], [], None, None))
    def visit_Lambda(self, node):
        """Rewrite ``lambda args: body`` into a SymPy Lambda(...) call."""
        args = [self.visit(arg) for arg in node.args.args]
        body = self.visit(node.body)
        n = Call(Name('Lambda', Load()),
            [Tuple(args, Load()), body], [], None, None)
        return fix_missing_locations(n)
def parse_expr(s, local_dict):
    """
    Converts the string "s" to a SymPy expression, in local_dict.
    It converts all numbers to Integers before feeding it to Python and
    automatically creates Symbols.

    Raises SympifyError when "s" is not valid Python syntax.
    """
    global_dict = {}
    # populate the evaluation namespace with every public SymPy name
    exec_('from sympy import *', global_dict)
    try:
        a = parse(s.strip(), mode="eval")
    except SyntaxError:
        raise SympifyError("Cannot parse %s." % repr(s))
    # rewrite the AST: numbers -> Integer/Float, unknown names -> Symbol
    a = Transform(local_dict, global_dict).visit(a)
    e = compile(a, "<string>", "eval")
    # local_dict shadows global_dict, so caller-supplied names win
    return eval(e, global_dict, local_dict)
| bsd-3-clause |
nbschool/ecommerce_api | models.py | 2 | 24442 | """
Application ORM Models built with Peewee
"""
import datetime
import os
from exceptions import (InsufficientAvailabilityException,
WrongQuantity, SearchAttributeMismatch)
from uuid import uuid4
from flask_login import UserMixin
from passlib.hash import pbkdf2_sha256
from peewee import (BooleanField, CharField, DateTimeField, DecimalField,
ForeignKeyField, IntegerField, PostgresqlDatabase,
TextField, UUIDField)
from playhouse.signals import Model, post_delete, pre_delete
from schemas import (AddressSchema, BaseSchema, FavoriteSchema, ItemSchema,
OrderItemSchema, OrderSchema, PictureSchema, UserSchema)
import search
from utils import remove_image
# Select the database backend from the ENVIRONMENT variable: anything other
# than 'dev' is treated as a deployed environment backed by PostgreSQL
# (connection data parsed from DATABASE_URL), while 'dev' (the default)
# falls back to a local SQLite file.
ENVIRONMENT = os.getenv('ENVIRONMENT', 'dev')
if ENVIRONMENT != 'dev':
    import urllib.parse
    # teach urlparse to split the netloc of postgres:// URLs
    urllib.parse.uses_netloc.append('postgres')
    url = urllib.parse.urlparse(os.getenv('DATABASE_URL'))
    database = PostgresqlDatabase(database=url.path[1:],
                                  user=url.username,
                                  password=url.password,
                                  host=url.hostname,
                                  port=url.port,
                                  )
else:
    from peewee import SqliteDatabase
    database = SqliteDatabase('database.db')
class BaseModel(Model):
    """
    BaseModel implements all the common logic for all the application models,
    Acting as interface for the ``_schema`` methods and implementing common
    fields and methods for each model.
    .. NOTE::
        All models **must** inherit from BaseModel to work properly.
    Attributes:
        created_at (:any:`datetime.datetime`): creation date of the resource.
        updated_at (:any:`datetime.datetime`): updated on every :any:`save` call.
        _schema (:mod:`schemas`): Private attribute that each class
        that extends ``BaseModel`` should override with a model-specifig schema
        that describe how the model is to be validated and parsed for output.
    """
    created_at = DateTimeField(default=datetime.datetime.now)
    updated_at = DateTimeField(default=datetime.datetime.now)
    _schema = BaseSchema
    #: Each model that needs to implement the search functionality `should`
    #: override this attribute with the fields that needs to be checked while
    #: searching.
    #: Attribute should be a list of names of class attributes (strings)
    _search_attributes = None
    #: Attributes weights can be specified with a list of numbers that will
    #: map each weight to attributes (:any:`BaseModel._search_attributes`)
    #: indexes.
    _search_weights = None

    def save(self, *args, **kwargs):
        """
        Overrides Peewee ``save`` method to automatically update
        ``updated_at`` time during save.
        """
        self.updated_at = datetime.datetime.now()
        return super(BaseModel, self).save(*args, **kwargs)

    class Meta:
        database = database

    @classmethod
    def json_list(cls, objs_list):
        """
        Transform a list of instances of callee class into a jsonapi string
        Args:
            objs_list (iterable): Model instances to serialize into a json list
        Return:
            string: jsonapi compliant list representation of all the given resources
        """
        return cls._schema.jsonapi_list(objs_list)

    def json(self, include_data=None):
        """
        Interface for the class defined ``_schema`` that returns a JSONAPI compliant
        string representing the resource.
        .. NOTE::
            If overridden (while developing or for other reason) the method
            should always return a ``string``.
        Args:
            include_data (list): List of attribute names to be included.
                Defaults to no included resources.
        Returns:
            string: JSONAPI representation of the resource, including optional
            included resources (if any requested and present)
        """
        # BUGFIX: the default used to be a mutable ``[]`` shared across all
        # calls; use None as sentinel instead (behavior is unchanged).
        if include_data is None:
            include_data = []
        # validation errors are intentionally discarded here: this method
        # serializes an already-persisted resource
        parsed, _ = self._schema.jsonapi(self, include_data)
        return parsed

    @classmethod
    def validate_input(cls, data, partial=False):
        """
        Validate any python structure against the defined ``_schema`` for the class.
        Args:
            data (dict|list): The data to validate against the ``class._schema``
            partial(bool): Allows to validate partial data structure (missing fields
                will be ignored, useful to validate ``PATCH`` requests.)
        Return:
            ``list``, with errors if any, empty if validation passed
        """
        return cls._schema.validate_input(data, partial=partial)

    @classmethod
    def search(cls, query, dataset, limit=-1,
               attributes=None, weights=None,
               threshold=search.config.THRESHOLD):
        """
        Search a list of resources with the callee class.
        Arguments:
            query (str): Query to lookup for
            dataset (iterable): sequence of resource objects to lookup into
            limit (int): maximum number of resources to return (default -1, all)
            attributes (list): model attribute names. Can be set as default
                inside the model definition or specified on the fly while
                searching.
            weights (list): attributes weights values,indexes should
                match the attribute position in the `attributes` argument.
                if length does not match it will be ignored.
            threshold (float): value between 0 and 1, identify the matching
                threshold for a result to be included.
        Returns:
            list: list of resources that may match the query.
        Raises:
            SearchAttributeMismatch:
                if ``attributes`` are missing, either as model
                default in ``<Model>._search_attributes`` or as param
                one of the object does not have one of the given attribute(s).
        Examples:
            .. code-block:: python
                results = Item.search('shoes', Item.select(), limit=20)
        """
        # explicit arguments take precedence over model-level defaults
        attributes = attributes or cls._search_attributes
        weights = weights or cls._search_weights
        if not attributes:
            raise SearchAttributeMismatch(
                'Attributes to look for not defined for {}. \
                Please update the Model or specify during search call.\
                '.format(cls.__name__))
        return search.search(query, attributes, dataset, limit, threshold, weights)
class Item(BaseModel):
    """A product sold on the e-commerce platform.

    Attributes:
        uuid (UUID): Item UUID
        name (str): Name for the product
        price (decimal.Decimal): Price for a single product
        description (str): Product description
        availability (int): Quantity of items available
        category (str): Category group of the item
    """
    uuid = UUIDField(unique=True)
    name = CharField()
    price = DecimalField(auto_round=True)
    description = TextField()
    availability = IntegerField()
    category = TextField()
    _schema = ItemSchema
    _search_attributes = ['name', 'category', 'description']

    def __str__(self):
        summary = (self.uuid, self.name, self.price, self.description)
        return '{}, {}, {}, {}'.format(*summary)

    def is_favorite(self, item):
        """Return True if a Favorite row linked to this resource references *item*."""
        return any(fav.item_id == item.id for fav in self.favorites)
# BUGFIX: decorator order swapped.  The signal decorator registers the exact
# function object it receives; previously ``@database.atomic()`` was the
# *outermost* decorator, so the transactional wrapper was created after
# registration and the connected receiver ran outside any transaction.
# With ``@pre_delete`` outermost, the atomic-wrapped function is what gets
# registered as the signal receiver.
@pre_delete(sender=Item)
@database.atomic()
def on_delete_item_handler(model_class, instance):
    """Delete the item's pictures in cascade before the item row goes away."""
    pictures = Picture.select().join(Item).where(
        Item.uuid == instance.uuid)
    for pic in pictures:
        pic.delete_instance()
class Picture(BaseModel):
    """Metadata for a stored image file attached to one :any:`Item`.

    Attributes:
        uuid (UUID): Picture's uuid, also the stored file's base name
        extension (str): Extension of the image file this model refers to
        item (:any:`Item`): Item the picture belongs to; reachable from the
            item side through the ``pictures`` reverse accessor
    """
    uuid = UUIDField(unique=True)
    extension = CharField()
    item = ForeignKeyField(Item, related_name='pictures')
    _schema = PictureSchema

    @property
    def filename(self):
        """Full name (uuid.ext) of the referenced image file."""
        return '{}.{}'.format(self.uuid, self.extension)

    def __str__(self):
        return '{}.{} -> item: {}'.format(self.uuid, self.extension, self.item.uuid)
@post_delete(sender=Picture)
def on_delete_picture_handler(model_class, instance):
    """Remove the image file from storage after its Picture row is deleted."""
    # TODO log eventual inconsistency
    # (the DB row is already gone; a failure here would leave an orphan file)
    remove_image(instance.uuid, instance.extension)
class User(BaseModel, UserMixin):
    """
    User represents an user for the application.
    Attributes:
        first_name (str): User's first name
        last_name (str): User's last name
        email (str): User's **valid** email
        password (str): User's password (stored hashed, see :any:`hash_password`)
        admin (bool): User's admin status. Defaults to ``False``
    .. NOTE::
        Each User resource must have an unique `email` field, meaning
        that there cannot be two user's registered with the same email.
        For this reason, when checking for user's existence, the server requires
        either the `uuid` of the user or its `email`.
    """
    uuid = UUIDField(unique=True)
    first_name = CharField()
    last_name = CharField()
    email = CharField(unique=True)
    password = CharField()
    admin = BooleanField(default=False)
    _schema = UserSchema

    @staticmethod
    def exists(email):
        """
        Check that an user exists by checking the email field.
        Args:
            email (str): User's email to check
        Returns:
            bool: True if a user with that email is registered
        """
        try:
            User.get(User.email == email)
        except User.DoesNotExist:
            return False
        return True

    @staticmethod
    def hash_password(password):
        """
        Use passlib to get a crypted password.
        Args:
            password (str): password to hash
        Returns:
            str: hashed password
        """
        return pbkdf2_sha256.hash(password)

    def verify_password(self, password):
        """
        Verify a clear password against the stored hashed password of the user
        using passlib.
        Args:
            password (str): Password to verify against the hashed stored password
        Returns:
            bool: whether the given password matches the stored hash
        """
        return pbkdf2_sha256.verify(password, self.password)

    # FIX: the first parameter was named ``user`` instead of the
    # conventional ``self``, hiding the fact that this is an instance
    # method; behavior is unchanged.
    def add_favorite(self, item):
        """Link the favorite item to this user."""
        return Favorite.create(
            uuid=uuid4(),
            item=item,
            user=self,
        )

    def delete_favorite(self, obj):
        """Delete the given :any:`Favorite` row, unlinking the item."""
        obj.delete_instance()
class Address(BaseModel):
    """
    The model Address represent a user address.
    Each address is releated to one user, but one user can have
    more addresses.
    Attributes:
        uuid (UUID): Address unique uuid
        user (:any:`User`): Foreign key pointing to the user `owner` of the address
        country (str): Country for the address, i.e. ``Italy``
        city (str): City name
        post_code (str): Postal code for the address
        address (str): Full address for the Address resource
        phone (str): Phone number for the Address
    """
    uuid = UUIDField(unique=True)
    # reverse accessor: ``user.addresses`` lists all of a user's addresses
    user = ForeignKeyField(User, related_name='addresses')
    country = CharField()
    city = CharField()
    post_code = CharField()
    address = CharField()
    phone = CharField()
    _schema = AddressSchema
class Order(BaseModel):
    """
    Orders represent an order placed by a `User`, containing one or more `Item`
    that have to be delivered to one of the user's `Address`.
    Attributes:
        uuid (UUID): Order's unique id
        total_price (:any:`decimal.Decimal`): Total price for the order
        delivery_address (:any:`Address`): Address specified for delivery
        user (:any:`User`): User that created the order
    """
    uuid = UUIDField(unique=True, default=uuid4)
    total_price = DecimalField(default=0)
    delivery_address = ForeignKeyField(Address, related_name="orders")
    user = ForeignKeyField(User, related_name="orders")
    _schema = OrderSchema

    class Meta:
        order_by = ('created_at',)

    @property
    def order_items(self):
        """
        Property that execute a cross-table query against :class:`models.OrderItem`
        to get a list of all OrderItem related to the callee order.
        Returns:
            list: :class:`models.OrderItem` related to the order.
        """
        query = (
            OrderItem
            .select(OrderItem, Order)
            .join(Order)
            .where(Order.uuid == self.uuid)
        )
        return [orderitem for orderitem in query]

    def empty_order(self):
        """
        Remove all the items from the order deleting all OrderItem related
        to this order and resetting the order's total_price value to 0.
        Returns:
            models.Order: The updated order
        """
        self.total_price = 0
        OrderItem.delete().where(OrderItem.order == self).execute()
        self.save()
        return self

    @staticmethod
    def create_order(user, address, items):
        """
        Create an Order and respective OrderItems. OrderItems are created
        in a single query as well as the Order. It also updates Items'
        availability.
        Args:
            user (models.User): order owner
            address (models.Address): order address
            items (dict): item updates entries as a dictionary, keys are
                items and values are new quantities to set. Example of
                argument:
                ..code-block:: python
                    items = {
                        Item.get(pk=1): 3,
                        Item.get(pk=2): 1,
                    }
        Returns:
            models.Order: The new order
        """
        total_price = sum(
            item.price * quantity for item, quantity in items.items())
        with database.atomic():
            order = Order.create(
                delivery_address=address,
                user=user,
                total_price=total_price,
            )
            # total was pre-computed above, so skip the incremental update
            order.update_items(items, update_total=False)
        return order

    def update_items(self, items, update_total=True, new_address=None):
        """
        Update Order and respective OrderItems by splitting in creation,
        deletion and updating queries, minimizing the interactions with the
        database. It also updates Items' availability.
        Args:
            items (dict): item updates entries as a dictionary, keys are
                items and values are new quantities to set. Example of
                argument:
                ..code-block:: python
                    items = {
                        Item.get(pk=1): 3,
                        Item.get(pk=2): 0,
                        Item.get(pk=3): 1,
                    }
            update_total (bool): if True the procedure updates order's
                total price. Default to True.
            new_address (models.Address): if not None the procedure updates
                the order with the given delivery address. Default to None.
        Returns:
            models.Order: The new/updated order
        Raises:
            InsufficientAvailabilityException: requested quantity exceeds
                the item's availability.
            WrongQuantity: a negative quantity was requested, or a positive
                one for an item not in the order.
        """
        to_create = {}
        to_remove = {}
        to_edit = {}
        total_price_difference = 0
        orderitems = self.order_items
        # split items in insert, delete and update sets
        for item, quantity in items.items():
            for orderitem in orderitems:
                if orderitem.item == item:
                    # quantities are absolute targets; convert to a delta
                    difference = quantity - orderitem.quantity
                    if quantity == 0:
                        to_remove[item] = orderitem.quantity
                    elif difference > item.availability:
                        raise InsufficientAvailabilityException(
                            item, quantity)
                    elif quantity < 0:
                        raise WrongQuantity()
                    else:
                        to_edit[item] = difference
                        total_price_difference += item.price * difference
                    break
            else:
                # item not yet in the order: must be a strictly positive add
                if quantity <= 0:
                    raise WrongQuantity()
                elif quantity > item.availability:
                    raise InsufficientAvailabilityException(
                        item, quantity)
                else:
                    to_create[item] = quantity
                    total_price_difference += item.price * quantity
        with database.atomic():
            self.edit_items_quantity(to_edit)
            self.create_items(to_create)
            self.delete_items(to_remove)
        if update_total:
            self.total_price += total_price_difference
        if new_address:
            # BUGFIX: this used to assign ``self.address``, which is not a
            # model field (the field is ``delivery_address``), so the new
            # address was silently never persisted.
            self.delivery_address = new_address
        if update_total or new_address:
            self.save()
        return self

    @database.atomic()
    def edit_items_quantity(self, items):
        """
        Update orderitems using a query for each item, and updates
        items' availability.
        Args:
            items (dict): item updates entries as a dictionary, keys are
                items and values are quantity *differences* to apply.
                Example of argument:
                ..code-block:: python
                    items = {
                        Item.get(pk=1): 3,
                        Item.get(pk=3): 1,
                    }
        Returns:
            Order: callee instance
        """
        if not items:
            return
        orderitems = OrderItem.select().where(
            OrderItem.item << [k for k in items.keys()],
            OrderItem.order == self)
        for orderitem in orderitems:
            for item, difference in items.items():
                if orderitem.item == item:
                    item.availability -= difference
                    item.save()
                    orderitem.quantity += difference
                    orderitem._calculate_subtotal()
                    orderitem.save()
                    break

    def delete_items(self, items):
        """
        Delete orderitems in a single query and updates items' availability.
        Args:
            items (dict): item entries as a dictionary, keys are
                items to delete and values are previously reserved quantities.
                Example of argument:
                ..code-block:: python
                    items = {
                        Item.get(pk=1): 3,
                        Item.get(pk=2): 2,
                    }
        """
        if not items:
            return
        with database.atomic():
            # give the reserved quantities back to stock
            for item, quantity in items.items():
                item.availability += quantity
                item.save()
            OrderItem.delete().where(
                OrderItem.order == self).where(
                OrderItem.item << [k for k in items.keys()]).execute()

    def create_items(self, items):
        """
        Creates orderitems in a single query and updates items' availability.
        Args:
            items (dict): item entries as a dictionary, keys are
                items to create and values are new quantities to set.
                Example of argument:
                ..code-block:: python
                    items = {
                        Item.get(pk=1): 3,
                        Item.get(pk=2): 1,
                    }
        """
        if not items:
            return
        with database.atomic():
            # reserve stock for the new order lines
            for item, quantity in items.items():
                item.availability -= quantity
                item.save()
            OrderItem.insert_many([
                {
                    'order': self,
                    'item': item,
                    'quantity': quantity,
                    'subtotal': item.price * quantity,
                } for item, quantity in items.items()]).execute()

    def add_item(self, item, quantity=1):
        """
        Add items to the order. It updates item availability.
        Args:
            item (models.Item): the Item to add
            quantity (int): how many items to add
        Returns:
            order (models.Order): the updated order
        """
        return self.update_items({item: quantity})

    def remove_item(self, item, quantity=1):
        """
        Remove the given item from the order, reducing quantity of the relative
        OrderItem entity or deleting it if removing the last item
        ``(OrderItem.quantity == 0)``.
        It also restores the item availability.
        Args:
            item (models.Item): the Item to remove
            quantity (int): how many items to remove
        Returns:
            order (models.Order): the updated order
        """
        return self.update_items({item: -quantity})
class OrderItem(BaseModel):
    """Cross table linking an :any:`Order` to the :any:`Item` rows it contains.

    One row exists per distinct item on an order, so a single order can
    generate several rows.

    Attributes:
        order (:any:`Order`): the order this line belongs to
        item (:any:`Item`): the product referenced by this line
        quantity (int): how many units of the item are on the order
        subtotal (:any:`decimal.Decimal`): price * quantity for this line
    """
    order = ForeignKeyField(Order)
    item = ForeignKeyField(Item)
    quantity = IntegerField()
    subtotal = DecimalField()
    _schema = OrderItemSchema

    def add_item(self, quantity=1):
        """Increase this line by ``quantity`` units and refresh the subtotal.

        Args:
            quantity (int): How many items to add

        Raises:
            InsufficientAvailabilityException: if ``quantity`` exceeds the
                item's current availability.
        """
        if quantity > self.item.availability:
            raise InsufficientAvailabilityException(self.item, quantity)
        # reserve the stock before touching the line
        self.item.availability -= quantity
        self.item.save()
        self.quantity += quantity
        self._calculate_subtotal()
        self.save()

    def remove_item(self, quantity=1):
        """Decrease this line by ``quantity`` units, deleting it when it
        reaches zero, and refresh the subtotal.

        Args:
            quantity (int): How many items to remove

        Returns:
            int: quantity of items actually removed

        Raises:
            WrongQuantity: if more units are requested than are on the line.
        """
        if quantity > self.quantity:
            raise WrongQuantity('Quantity of items to be removed ({}) higher than availability ({})'
                                .format(quantity, self.quantity))
        if quantity == self.quantity:
            # removing the last unit(s): drop the whole line
            removed = self.quantity
            self.delete_instance()
            return removed
        self.quantity -= quantity
        self._calculate_subtotal()
        self.save()
        return quantity

    def _calculate_subtotal(self):
        """Recompute ``subtotal`` from the current price and quantity."""
        self.subtotal = self.item.price * self.quantity
class Favorite(BaseModel):
    """ Many to many table to relate an item with a user."""
    uuid = UUIDField(unique=True)                           # public unique identifier of the favorite
    user = ForeignKeyField(User, related_name="favorites")  # owner of the favorite
    item = ForeignKeyField(Item, related_name="favorites")  # favorited item
    _schema = FavoriteSchema                                # schema used to (de)serialize this model
| gpl-3.0 |
cntnboys/cmput410-project | venv/lib/python2.7/site-packages/pip/_vendor/packaging/__about__.py | 257 | 1073 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
# Package metadata, grouped by concern and re-exported via __all__.
__all__ = [
    "__title__",
    "__summary__",
    "__uri__",
    "__version__",
    "__author__",
    "__email__",
    "__license__",
    "__copyright__",
]

__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"

__version__ = "15.0"

__author__ = "Donald Stufft"
__email__ = "donald@stufft.io"

__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2014 {}".format(__author__)
| apache-2.0 |
alex-docker/zerorpc-python | tests/test_channel.py | 76 | 3964 | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from zerorpc import zmq
import zerorpc
from testutils import teardown, random_ipc_endpoint
def test_events_channel_client_side():
    """Client opens a channel, emits one event; server answers it once."""
    endpoint = random_ipc_endpoint()
    server_events = zerorpc.Events(zmq.ROUTER)
    server_events.bind(endpoint)
    server = zerorpc.ChannelMultiplexer(server_events)

    client_events = zerorpc.Events(zmq.DEALER)
    client_events.connect(endpoint)
    client = zerorpc.ChannelMultiplexer(client_events)
    client_channel = client.channel()

    client_channel.emit('someevent', (42,))
    event = server.recv()
    # FIX: parenthesized print is valid on both Python 2 and 3
    # (the bare `print event` statement is a SyntaxError on Python 3).
    print(event)
    assert list(event.args) == [42]
    # ROUTER sockets tag incoming events with the peer identity.
    assert event.header.get('zmqid', None) is not None

    server.emit('someanswer', (21,),
                xheader=dict(response_to=event.header['message_id'],
                             zmqid=event.header['zmqid']))
    event = client_channel.recv()
    assert list(event.args) == [21]
    # NOTE(review): unlike test_events_channel_both_side, sockets/channels are
    # not closed here; cleanup presumably relies on testutils.teardown — confirm.
def test_events_channel_client_side_server_send_many():
    """Client asks for 10 answers; server streams them back on the channel."""
    endpoint = random_ipc_endpoint()
    server_events = zerorpc.Events(zmq.ROUTER)
    server_events.bind(endpoint)
    server = zerorpc.ChannelMultiplexer(server_events)

    client_events = zerorpc.Events(zmq.DEALER)
    client_events.connect(endpoint)
    client = zerorpc.ChannelMultiplexer(client_events)
    client_channel = client.channel()

    client_channel.emit('giveme', (10,))
    event = server.recv()
    # FIX: print() form works on both Python 2 and 3 (bare `print event`
    # is a SyntaxError on Python 3); likewise range replaces py2-only xrange.
    print(event)
    assert list(event.args) == [10]
    assert event.header.get('zmqid', None) is not None

    for x in range(10):
        server.emit('someanswer', (x,),
                    xheader=dict(response_to=event.header['message_id'],
                                 zmqid=event.header['zmqid']))
    for x in range(10):
        event = client_channel.recv()
        assert list(event.args) == [x]
def test_events_channel_both_side():
    """Client opens a channel; server attaches its own channel object and
    emits back on it twice; everything is closed at the end."""
    endpoint = random_ipc_endpoint()
    server_events = zerorpc.Events(zmq.ROUTER)
    server_events.bind(endpoint)
    server = zerorpc.ChannelMultiplexer(server_events)

    client_events = zerorpc.Events(zmq.DEALER)
    client_events.connect(endpoint)
    client = zerorpc.ChannelMultiplexer(client_events)
    client_channel = client.channel()

    client_channel.emit('openthat', (42,))
    event = server.recv()
    # FIX: parenthesized print is valid on both Python 2 and 3.
    print(event)
    assert list(event.args) == [42]
    assert event.name == 'openthat'

    # Server-side channel bound to the client's channel via the event.
    server_channel = server.channel(event)
    server_channel.emit('test', (21,))
    event = client_channel.recv()
    assert list(event.args) == [21]
    assert event.name == 'test'

    server_channel.emit('test', (22,))
    event = client_channel.recv()
    assert list(event.args) == [22]
    assert event.name == 'test'

    server_events.close()
    server_channel.close()
    client_channel.close()
    client_events.close()
| mit |
jerryhebert/django-rest-framework | rest_framework/authtoken/serializers.py | 80 | 1032 | from django.contrib.auth import authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import exceptions, serializers
class AuthTokenSerializer(serializers.Serializer):
    """Validates a username/password pair against Django's auth backends.

    On success the resolved user object is attached to the validated data
    under the ``'user'`` key; every failure raises a ValidationError.
    """
    username = serializers.CharField()
    password = serializers.CharField(style={'input_type': 'password'})

    def validate(self, attrs):
        username = attrs.get('username')
        password = attrs.get('password')

        # Guard clauses: bail out early on each failure mode.
        if not (username and password):
            msg = _('Must include "username" and "password".')
            raise exceptions.ValidationError(msg)

        user = authenticate(username=username, password=password)
        if not user:
            msg = _('Unable to log in with provided credentials.')
            raise exceptions.ValidationError(msg)

        if not user.is_active:
            msg = _('User account is disabled.')
            raise exceptions.ValidationError(msg)

        attrs['user'] = user
        return attrs
| bsd-2-clause |
Vutshi/qutip | examples/ex_floquet_modes.py | 1 | 4002 | #
# Example: Find the floquet modes and quasi energies for a driven system and
# plot the floquet states/quasienergies for one period of the driving.
#
from qutip import *
from pylab import *
import time
def hamiltonian_t(t, args):
    """Evaluate the driven Hamiltonian H0 + cos(w*t) * H1 at time t.

    Args:
        t: time at which to evaluate.
        args: sequence ``(H0, H1, w)`` — static part, drive operator and
            drive angular frequency.
    """
    H0, H1, w = args[0], args[1], args[2]
    return H0 + H1 * cos(w * t)
def H1coeff_t(t, args):
    """Time-dependent drive coefficient sin(w*t); `args` carries the
    angular frequency under the key ``'w'``."""
    return sin(args['w'] * t)
def qubit_integrate(delta, eps0, A, omega, psi0, tlist):
    """Compute the Floquet modes of a sinusoidally driven two-level system
    over one driving period.

    Returns the tuple (p_ex_0, p_ex_1, e_0, e_1, f_energies, p_00, p_01,
    p_10, p_11): excitation probabilities of the two Floquet modes, the
    instantaneous eigenenergies, the quasienergies, and the four complex
    amplitudes of the modes at each time in `tlist`.

    NOTE(review): `psi0` is accepted but never used in this function.
    """
    # Hamiltonian
    sx = sigmax()
    sz = sigmaz()
    sm = destroy(2)
    H0 = - delta/2.0 * sx - eps0/2.0 * sz
    H1 = A/2.0 * sz
    #H_args = (H0, H1, omega)
    H_args = {'w': omega}
    # String-coefficient form of the time-dependent Hamiltonian.
    H = [H0, [H1, 'sin(w * t)']]
    #H = [H0, [H1, H1coeff_t]]
    # find the propagator for one driving period
    T = 2*pi / omega
    f_modes_0,f_energies = floquet_modes(H, T, H_args)
    # Accumulators, one entry per time point in tlist.
    p_ex_0 = zeros(shape(tlist))
    p_ex_1 = zeros(shape(tlist))
    p_00 = zeros(shape(tlist), dtype=complex)
    p_01 = zeros(shape(tlist), dtype=complex)
    p_10 = zeros(shape(tlist), dtype=complex)
    p_11 = zeros(shape(tlist), dtype=complex)
    e_0 = zeros(shape(tlist))
    e_1 = zeros(shape(tlist))
    # Precompute the mode table once; cheaper than recomputing per time point.
    f_modes_table_t = floquet_modes_table(f_modes_0, f_energies, tlist, T, H, T, H_args) if False else floquet_modes_table(f_modes_0, f_energies, tlist, H, T, H_args)
    for idx, t in enumerate(tlist):
        #f_modes_t = floquet_modes_t(f_modes_0, f_energies, t, H, T, H_args)
        f_modes_t = floquet_modes_t_lookup(f_modes_table_t, t, T)
        # Excited-state population of each Floquet mode.
        p_ex_0[idx] = expect(sm.dag() * sm, f_modes_t[0])
        p_ex_1[idx] = expect(sm.dag() * sm, f_modes_t[1])
        # Complex amplitudes of both modes in the computational basis.
        p_00[idx] = f_modes_t[0].full()[0][0]
        p_01[idx] = f_modes_t[0].full()[1][0]
        p_10[idx] = f_modes_t[1].full()[0][0]
        p_11[idx] = f_modes_t[1].full()[1][0]
        #evals = hamiltonian_t(t, H_args).eigenenergies()
        evals = qobj_list_evaluate(H, t, H_args).eigenenergies()
        e_0[idx] = min(real(evals))
        e_1[idx] = max(real(evals))
    return p_ex_0, p_ex_1, e_0, e_1, f_energies, p_00, p_01, p_10, p_11
#
# set up the calculation: a strongly driven two-level system
# (repeated LZ transitions)
#
delta = 0.2 * 2 * pi  # qubit sigma_x coefficient
eps0 = 1.0 * 2 * pi  # qubit sigma_z coefficient
A = 2.5 * 2 * pi  # sweep rate
psi0 = basis(2,0)  # initial state
omega = 1.0 * 2 * pi  # driving frequency
T = (2*pi)/omega  # driving period
# Sample exactly one driving period.
tlist = linspace(0.0, T, 101)

start_time = time.time()
p_ex_0, p_ex_1, e_0, e_1, f_e, p_00, p_01, p_10, p_11 = qubit_integrate(delta, eps0, A, omega, psi0, tlist)
# NOTE: Python-2 print statement; this example predates Python 3 support.
print 'dynamics: time elapsed = ' + str(time.time() - start_time)

#
# plot the results
#
# Figure 1: mode populations (top) and eigen-/quasi-energies (bottom).
figure(figsize=[8,10])
subplot(2,1,1)
plot(tlist, real(p_ex_0), 'b', tlist, real(p_ex_1), 'r')
xlabel('Time ($T$)')
ylabel('Excitation probabilities')
title('Floquet modes')
legend(("Floquet mode 1", "Floquet mode 2"))
subplot(2,1,2)
plot(tlist, real(e_0), 'c', tlist, real(e_1), 'm')
# Quasienergies are time independent: draw them as horizontal lines.
plot(tlist, ones(shape(tlist)) * f_e[0], 'b', tlist, ones(shape(tlist)) * f_e[1], 'r')
xlabel('Time ($T$)')
ylabel('Energy [GHz]')
title('Eigen- and quasi-energies')
legend(("Ground state", "Excited state", "Quasienergy 1", "Quasienergy 2"))

#
# plot the results
#
# Figure 2: real part, imaginary part and magnitude of the mode amplitudes.
figure(figsize=[8,12])
subplot(3,1,1)
plot(tlist, real(p_00), 'b', tlist, real(p_01), 'r')
plot(tlist, real(p_10), 'c', tlist, real(p_11), 'm')
xlabel('Time ($T$)')
ylabel('real')
title('Floquet modes')
legend(("FM1 - gnd", "FM1 - exc", "FM2 - gnd", "FM2 - exc"))
subplot(3,1,2)
plot(tlist, imag(p_00), 'b', tlist, imag(p_01), 'r')
plot(tlist, imag(p_10), 'c', tlist, imag(p_11), 'm')
xlabel('Time ($T$)')
ylabel('imag')
legend(("FM1 - gnd", "FM1 - exc", "FM2 - gnd", "FM2 - exc"))
subplot(3,1,3)
plot(tlist, abs(p_00), 'b', tlist, abs(p_01), 'r.')
plot(tlist, abs(p_10), 'c', tlist, abs(p_11), 'm.')
xlabel('Time ($T$)')
ylabel('abs')
legend(("FM1 - gnd", "FM1 - exc", "FM2 - gnd", "FM2 - exc"))

#
# finish by displaying graph windows
#
show()
| gpl-3.0 |
sam-roth/Keypad | keypad/plugins/shell/fish_model.py | 1 | 2290 |
import subprocess
from keypad.api import AttributedString, Cursor
from .bourne_model import BourneCodeModel, ShellCompletionResults
import logging
class GetCompletionsTask:
    """Callable task that asks the `fish` shell for completions of a
    command-line prefix and returns (text, description) pairs of
    AttributedString objects."""

    def __init__(self, prefix):
        self.prefix = prefix

    def __call__(self, ns):
        # The completion script is fed on stdin and the prefix travels as a
        # plain argv element — safe argument passing, no shell quoting issues.
        with subprocess.Popen(['fish',
                               '/dev/stdin',
                               self.prefix],
                              stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE) as proc:
            out, _err = proc.communicate(b'complete --do-complete=$argv[1]\n')

        completions = []
        for raw_line in out.splitlines():
            # Each line is "completion[<TAB>description...]".
            parts = raw_line.decode().split('\t', 2)
            text = parts[0]
            desc = parts[1] if len(parts) > 1 else ''
            completions.append((AttributedString(text),
                                AttributedString(desc)))
        return completions
class FishShellCompletionResults(ShellCompletionResults):
    """Completion results whose display strings were already rendered by
    fish; they replace the base class's (empty) result list."""
    def __init__(self, token_start, results, prox):
        try:
            # Base class gets an empty list; the pre-rendered fish results
            # are then stored directly on the instance.
            super().__init__(token_start, [], prox)
            self.results = results
        except:
            # Log before re-raising so failures inside the async completion
            # machinery are not silently lost.
            logging.exception('error')
            raise
class FishCodeModel(BourneCodeModel):
    """Code model for the fish shell: delegates completion to fish itself
    via GetCompletionsTask."""
    def completions_async(self, pos):
        '''
        Return a future to the completions available at the given position in the document.

        Raise NotImplementedError if not implemented.
        '''
        try:
            y0, x0 = pos
            c = Cursor(self.buffer).move(pos)
            # Everything left of the cursor on this line is handed to fish.
            prefix = c.line.text[:c.x]
            # Walk left to the previous whitespace (or off this line) to
            # locate the start of the token being completed.
            for ch in c.walk(-1):
                if ch.isspace() or c.y != y0:
                    break
                c.advance()

            # Run the fish query off-thread; wrap the raw pairs in a
            # FishShellCompletionResults anchored at the token start.
            return self._prox.submit(GetCompletionsTask(prefix),
                                     transform=lambda r: FishShellCompletionResults(c.pos, r,
                                                                                    self._prox))
        except:
            # Log before re-raising; exceptions in async paths are easy to lose.
            logging.exception('')
            raise
| gpl-3.0 |
teltek/edx-platform | common/djangoapps/xblock_django/admin.py | 24 | 2419 | """
Django admin dashboard configuration.
"""
from config_models.admin import ConfigurationModelAdmin, KeyedConfigurationModelAdmin
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from xblock_django.models import XBlockConfiguration, XBlockStudioConfiguration, XBlockStudioConfigurationFlag
class XBlockConfigurationAdmin(KeyedConfigurationModelAdmin):
    """
    Admin for XBlockConfiguration.
    """
    # Declarative form layout: name, enable/disable toggle, deprecation flag.
    fieldsets = (
        ('XBlock Name', {
            'fields': ('name',)
        }),
        ('Enable/Disable XBlock', {
            'description': _('To disable the XBlock and prevent rendering in the LMS, leave "Enabled" deselected; '
                             'for clarity, update XBlockStudioConfiguration support state accordingly.'),
            'fields': ('enabled',)
        }),
        ('Deprecate XBlock', {
            'description': _("Only XBlocks listed in a course's Advanced Module List can be flagged as deprecated. "
                             "Remember to update XBlockStudioConfiguration support state accordingly, as deprecated "
                             "does not impact whether or not new XBlock instances can be created in Studio."),
            'fields': ('deprecated',)
        }),
    )
class XBlockStudioConfigurationAdmin(KeyedConfigurationModelAdmin):
    """
    Admin for XBlockStudioConfiguration.
    """
    # Declarative form layout: identity, Studio-authoring toggle, support level.
    fieldsets = (
        ('', {
            'fields': ('name', 'template')
        }),
        ('Enable Studio Authoring', {
            'description': _(
                'XBlock/template combinations that are disabled cannot be edited in Studio, regardless of support '
                'level. Remember to also check if all instances of the XBlock are disabled in XBlockConfiguration.'
            ),
            'fields': ('enabled',)
        }),
        ('Support Level', {
            'description': _(
                "Enabled XBlock/template combinations with full or provisional support can always be created "
                "in Studio. Unsupported XBlock/template combinations require course author opt-in."
            ),
            'fields': ('support_level',)
        }),
    )
# Expose the XBlock configuration models in the Django admin, pairing each
# model with its admin class (the flag uses the generic ConfigurationModelAdmin).
admin.site.register(XBlockConfiguration, XBlockConfigurationAdmin)
admin.site.register(XBlockStudioConfiguration, XBlockStudioConfigurationAdmin)
admin.site.register(XBlockStudioConfigurationFlag, ConfigurationModelAdmin)
| agpl-3.0 |
nodakai/git | contrib/svn-fe/svnrdump_sim.py | 328 | 2044 | #!/usr/bin/python
"""
Simulates svnrdump by replaying an existing dump from a file, taking care
of the specified revision range.
To simulate incremental imports the environment variable SVNRMAX can be set
to the highest revision that should be available.
"""
import sys
import os
if sys.hexversion < 0x02040000:
# The limiter is the ValueError() calls. This may be too conservative
sys.stderr.write("svnrdump-sim.py: requires Python 2.4 or later.\n")
sys.exit(1)
def getrevlimit():
    """Return the value of $SVNRMAX (highest revision that should be
    available), or None when the variable is unset."""
    return os.environ.get('SVNRMAX')
def writedump(url, lower, upper):
    """Replay revisions [lower, upper] of the dump file named by *url*.

    *url* must look like ``sim://<path-to-dumpfile>``; the dump header and
    the selected revision range are copied verbatim to stdout.

    Returns True when at least one revision line was written, else False.
    Raises ValueError for any non-``sim://`` url.
    """
    if url.startswith('sim://'):
        filename = url[6:]
        if filename[-1] == '/':
            filename = filename[:-1]  # remove terminating slash
    else:
        raise ValueError('sim:// url required')
    state = 'header'
    wroterev = False
    # State machine: 'header' (copy the preamble) -> 'prefix' (skip
    # revisions below *lower*) -> 'selection' (copy until *upper*).
    # FIX: use a context manager so the dump file is always closed
    # (the original leaked the file handle).
    with open(filename, 'r') as f:
        for l in f:
            if state == 'header' and l.startswith('Revision-number: '):
                state = 'prefix'
            if state == 'prefix' and l == 'Revision-number: %s\n' % lower:
                state = 'selection'
            if not upper == 'HEAD' and state == 'selection' and \
                    l == 'Revision-number: %s\n' % upper:
                break
            if state == 'header' or state == 'selection':
                if state == 'selection':
                    wroterev = True
                sys.stdout.write(l)
    return wroterev
if __name__ == "__main__":
    if not (len(sys.argv) in (3, 4, 5)):
        # FIX: the %s placeholder was never substituted with the program name.
        print("usage: %s dump URL -rLOWER:UPPER" % sys.argv[0])
        sys.exit(1)
    if not sys.argv[1] == 'dump':
        # FIX: typo "suppported" in the user-facing message.
        raise NotImplementedError('only "dump" is supported.')
    url = sys.argv[2]
    # FIX: must be a list — r[1] is assigned below when SVNRMAX is set,
    # which raised TypeError on the original immutable tuple default.
    r = ['0', 'HEAD']
    if len(sys.argv) == 4 and sys.argv[3][0:2] == '-r':
        r = sys.argv[3][2:].lstrip().split(':')
    if not getrevlimit() is None:
        r[1] = getrevlimit()
    # Exit status 0 when at least one revision was replayed, 1 otherwise.
    if writedump(url, r[0], r[1]):
        ret = 0
    else:
        ret = 1
    sys.exit(ret)
| gpl-2.0 |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_07_01/aio/operations/_express_route_service_providers_operations.py | 1 | 5138 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteServiceProvidersOperations:
    """ExpressRouteServiceProvidersOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_07_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        **kwargs
    ) -> AsyncIterable["_models.ExpressRouteServiceProviderListResult"]:
        """Gets all the available express route service providers.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRouteServiceProviderListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_07_01.models.ExpressRouteServiceProviderListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteServiceProviderListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Builds the GET request for the first page (service URL with
            # path/query parameters) or for a continuation `next_link`.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds all query parameters.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; return (continuation token, items).
            deserialized = self._deserialize('ExpressRouteServiceProviderListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page, mapping non-200 responses to ARM errors.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        # No request is sent until the returned pager is iterated.
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteServiceProviders'}  # type: ignore
| mit |
bbci/wyrm | examples/performance.py | 1 | 5907 | #!/usr/bin/env python
from __future__ import division
import time
import logging
import cPickle as pickle
import sys
import argparse
sys.path.append('../')
import numpy as np
from matplotlib import pyplot as plt
from wyrm import processing as proc
from wyrm.types import BlockBuffer, RingBuffer, Data
logger = logging.getLogger()
def online_erp(fs, n_channels, subsample):
    """Simulate an online ERP processing pipeline on random data and return
    the per-iteration processing times (in seconds) as an ndarray.

    Args:
        fs: sampling frequency of the simulated amplifier [Hz].
        n_channels: number of simulated channels.
        subsample: truthy to subsample the stream down to 100 Hz.
    """
    logger.debug('Running Online ERP with {fs}Hz, and {channels}channels'.format(fs=fs, channels=n_channels))

    target_fs = 100
    # blocklen in ms
    blocklen = 1000 * 1 / target_fs
    # blocksize given the original fs and blocklen
    blocksize = fs * (blocklen / 1000)

    MRK_DEF = {'target': 'm'}
    SEG_IVAL = [0, 700]
    JUMPING_MEANS_IVALS = [150, 220], [200, 260], [310, 360], [550, 660]
    RING_BUFFER_CAP = 1000

    # Placeholder classifier passed to lda_apply — only timing matters here.
    cfy = [0, 0]

    fs_n = fs / 2

    # Band-limiting filters: 30 Hz low-pass and 0.4 Hz high-pass.
    b_l, a_l = proc.signal.butter(5, [30 / fs_n], btype='low')
    b_h, a_h = proc.signal.butter(5, [.4 / fs_n], btype='high')

    zi_l = proc.lfilter_zi(b_l, a_l, n_channels)
    zi_h = proc.lfilter_zi(b_h, a_h, n_channels)

    ax_channels = np.array([str(i) for i in range(n_channels)])
    names = ['time', 'channel']
    units = ['ms', '#']

    blockbuf = BlockBuffer(blocksize)
    ringbuf = RingBuffer(RING_BUFFER_CAP)

    times = []

    # time since the last data was acquired
    t_last = time.time()
    # time since the last marker
    t_last_marker = time.time()
    # time since the experiment started
    t_start = time.time()

    full_iterations = 0
    while full_iterations < 500:

        t0 = time.time()

        dt = time.time() - t_last
        samples = int(dt * fs)
        if samples == 0:
            continue
        t_last = time.time()

        # get data
        data = np.random.random((samples, n_channels))
        ax_times = np.linspace(0, 1000 * (samples / fs), samples, endpoint=False)
        # Emit a synthetic 'm' marker at most every 10 ms.
        if t_last_marker + .01 < time.time():
            t_last_marker = time.time()
            markers = [[ax_times[-1], 'm']]
        else:
            markers = []
        cnt = Data(data, axes=[ax_times, ax_channels], names=names, units=units)
        cnt.fs = fs
        cnt.markers = markers

        # blockbuffer
        blockbuf.append(cnt)
        cnt = blockbuf.get()
        if not cnt:
            continue

        # filter
        cnt, zi_l = proc.lfilter(cnt, b_l, a_l, zi=zi_l)
        cnt, zi_h = proc.lfilter(cnt, b_h, a_h, zi=zi_h)

        # subsample
        if subsample:
            cnt = proc.subsample(cnt, target_fs)
        newsamples = cnt.data.shape[0]

        # ringbuffer
        ringbuf.append(cnt)
        cnt = ringbuf.get()

        # epoch
        epo = proc.segment_dat(cnt, MRK_DEF, SEG_IVAL, newsamples=newsamples)
        if not epo:
            continue

        # feature vectors
        fv = proc.jumping_means(epo, JUMPING_MEANS_IVALS)
        # NOTE(review): rv is never used afterwards; lda_apply is called on
        # fv, not rv — confirm whether that is intended.
        rv = proc.create_feature_vectors(fv)

        # classification
        proc.lda_apply(fv, cfy)

        # don't measure in the first second, where the ringbuffer is not
        # full yet.
        if time.time() - t_start < (RING_BUFFER_CAP / 1000):
            continue

        dt = time.time() - t0
        times.append(dt)
        full_iterations += 1
    return np.array(times)
def plot():
    """Load results.pickle (written by measure()) and render a log-scale
    boxplot of per-iteration execution times, grouped by subsampling mode,
    sampling rate and channel count."""
    # Solarized-style palette.
    BLUE = "#268bd2"
    RED = "#d33682"
    BLACK = "#002b36"
    LGRAY = "#eee8d5"
    DGRAY = "#93a1a1"

    plt.figure(figsize=(8, 4))

    with open('results.pickle', 'rb') as fh:
        results = pickle.load(fh)

    ranges = []
    x, y = [], []
    for s, t in results:
        ranges.append(t.max() - t.min())
        y.append(t)
        x.append(s[12:])
    # NOTE(review): the labels collected in the loop are discarded and
    # replaced by the fixed channel-count sequence below.
    x = [50, 100, 500] * 6
    bp = plt.boxplot(y, labels=x, whis='range', widths=0.7)
    ax = plt.gca()
    ax.spines['top'].set_visible(False)
    ax.xaxis.set_ticks_position('bottom')
    ax.set_ylim(bottom=1, top=1000)
    plt.setp(bp['whiskers'], lw=2, ls='solid', c=BLUE)
    plt.setp(bp['medians'], lw=2, c=RED)
    plt.setp(bp['boxes'], lw=2, c=BLUE)
    plt.setp(bp['caps'], lw=2, c=BLUE)
    plt.ylabel('execution time [ms]')
    plt.xlabel('number of channels')
    plt.yscale('log')
    plt.grid(which='major', axis='y', ls='--', color=DGRAY)
    plt.grid(which='minor', axis='y', ls='-.', color=LGRAY)
    # Vertical separators between the 6 (subsampling x fs) groups of 3 boxes.
    for i in range(5):
        plt.vlines((i+1)*3+.5, 0, 300, color=BLACK)
    plt.vlines(9.5, 0, 1000, color=BLACK, lw=3)
    plt.text(5, 600, 'with subsampling', color=BLACK, weight='bold', horizontalalignment='center')
    plt.text(14, 600, 'without subsampling', color=BLACK, weight='bold', horizontalalignment='center')
    for i, t in enumerate(['100Hz', '1kHz', '10kHz']):
        plt.text(i*3+2, 200, t, color=BLACK, horizontalalignment='center')
        plt.text(i*3+11, 200, t, color=BLACK, horizontalalignment='center')
    # Annotate each box with its min-max spread in ms.
    for i, r in enumerate(ranges):
        plt.text(i+1, 1.5, "{range:.1f}".format(range=r),
                 horizontalalignment='center', size='x-small', color=BLUE, weight='semibold')

    plt.tight_layout()
    plt.show()
def measure():
    """Benchmark online_erp over every (subsampling, sampling-rate,
    channel-count) combination and pickle the labelled timing arrays
    (converted to milliseconds) into results.pickle."""
    sampling_rates = (100, 1000, 10000)
    channel_counts = (50, 100, 500)
    results = []
    for use_subsampling in (1, 0):
        for rate in sampling_rates:
            for n_chan in channel_counts:
                timings = 1000 * online_erp(rate, n_chan, subsample=use_subsampling)
                label = "{ss}subsampling\n{fs}Hz\n{chan} channels".format(
                    ss=use_subsampling, fs=rate, chan=n_chan)
                results.append((label, timings))
    with open('results.pickle', 'wb') as fh:
        pickle.dump(results, fh)
if __name__ == '__main__':
    # CLI entry point: exactly one of --measure / --plot must be given.
    parser = argparse.ArgumentParser(description="Measure online performance.")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--measure', action='store_true')
    group.add_argument('--plot', action='store_true')
    args = parser.parse_args()
    if args.measure:
        measure()
    elif args.plot:
        plot()
| mit |
wangyum/spark | python/pyspark/tests/test_readwrite.py | 23 | 14386 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import tempfile
import unittest
from pyspark.testing.utils import ReusedPySparkTestCase, SPARK_HOME
class InputFormatTests(ReusedPySparkTestCase):
    """Read-path tests: consume SequenceFiles / Hadoop input formats written
    by the JVM-side WriteInputFormatTestDataGenerator, plus the binary file
    and fixed-width record readers."""

    @classmethod
    def setUpClass(cls):
        ReusedPySparkTestCase.setUpClass()
        # Only the (unlinked) temp file NAME is used; the JVM generator
        # creates the directory tree underneath it.
        cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
        os.unlink(cls.tempdir.name)
        cls.sc._jvm.WriteInputFormatTestDataGenerator.generateData(cls.tempdir.name, cls.sc._jsc)

    @classmethod
    def tearDownClass(cls):
        ReusedPySparkTestCase.tearDownClass()
        shutil.rmtree(cls.tempdir.name)

    def test_oldhadoop(self):
        # Old (mapred) API: hadoopFile on a SequenceFile, then hadoopRDD
        # driven purely by a conf dict.
        basepath = self.tempdir.name
        ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/",
                                         "org.apache.hadoop.mapred.SequenceFileInputFormat",
                                         "org.apache.hadoop.io.IntWritable",
                                         "org.apache.hadoop.io.Text").collect())
        ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
        self.assertEqual(ints, ei)

        hellopath = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
        oldconf = {"mapreduce.input.fileinputformat.inputdir": hellopath}
        hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
                                  "org.apache.hadoop.io.LongWritable",
                                  "org.apache.hadoop.io.Text",
                                  conf=oldconf).collect()
        result = [(0, u'Hello World!')]
        self.assertEqual(hello, result)

    def test_newhadoop(self):
        # New (mapreduce) API equivalent of test_oldhadoop.
        basepath = self.tempdir.name
        ints = sorted(self.sc.newAPIHadoopFile(
            basepath + "/sftestdata/sfint/",
            "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.Text").collect())
        ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
        self.assertEqual(ints, ei)

        hellopath = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
        newconf = {"mapreduce.input.fileinputformat.inputdir": hellopath}
        hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
                                        "org.apache.hadoop.io.LongWritable",
                                        "org.apache.hadoop.io.Text",
                                        conf=newconf).collect()
        result = [(0, u'Hello World!')]
        self.assertEqual(hello, result)

    def test_newolderror(self):
        # Mixing an old-API entry point with a new-API input format (and
        # vice versa) must raise.
        basepath = self.tempdir.name
        self.assertRaises(Exception, lambda: self.sc.hadoopFile(
            basepath + "/sftestdata/sfint/",
            "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.Text"))

        self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
            basepath + "/sftestdata/sfint/",
            "org.apache.hadoop.mapred.SequenceFileInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.Text"))

    def test_bad_inputs(self):
        # Nonexistent writable / input-format class names must raise.
        basepath = self.tempdir.name
        self.assertRaises(Exception, lambda: self.sc.sequenceFile(
            basepath + "/sftestdata/sfint/",
            "org.apache.hadoop.io.NotValidWritable",
            "org.apache.hadoop.io.Text"))
        self.assertRaises(Exception, lambda: self.sc.hadoopFile(
            basepath + "/sftestdata/sfint/",
            "org.apache.hadoop.mapred.NotValidInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.Text"))
        self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
            basepath + "/sftestdata/sfint/",
            "org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.Text"))

    def test_converters(self):
        # use of custom converters
        basepath = self.tempdir.name
        maps = sorted(self.sc.sequenceFile(
            basepath + "/sftestdata/sfmap/",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.MapWritable",
            keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
            valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
        em = [(u'\x01', []),
              (u'\x01', [3.0]),
              (u'\x02', [1.0]),
              (u'\x02', [1.0]),
              (u'\x03', [2.0])]
        self.assertEqual(maps, em)

    def test_binary_files(self):
        # binaryFiles yields (path, whole-file-bytes) pairs.
        path = os.path.join(self.tempdir.name, "binaryfiles")
        os.mkdir(path)
        data = b"short binary data"
        with open(os.path.join(path, "part-0000"), 'wb') as f:
            f.write(data)
        [(p, d)] = self.sc.binaryFiles(path).collect()
        self.assertTrue(p.endswith("part-0000"))
        self.assertEqual(d, data)

    def test_binary_records(self):
        # binaryRecords splits the file into fixed-width (4-byte) records.
        path = os.path.join(self.tempdir.name, "binaryrecords")
        os.mkdir(path)
        with open(os.path.join(path, "part-0000"), 'w') as f:
            for i in range(100):
                f.write('%04d' % i)
        result = self.sc.binaryRecords(path, 4).map(int).collect()
        self.assertEqual(list(range(100)), result)
class OutputFormatTests(ReusedPySparkTestCase):
def setUp(self):
    # Fresh per-test temp path; only the (unlinked) NAME is used as the
    # base directory for the output the tests write.
    self.tempdir = tempfile.NamedTemporaryFile(delete=False)
    os.unlink(self.tempdir.name)
def tearDown(self):
    # Remove whatever the test wrote under the temp path; ignore_errors
    # covers tests that never created the directory.
    shutil.rmtree(self.tempdir.name, ignore_errors=True)
def test_oldhadoop(self):
    # Old (mapred) API round-trip: saveAsHadoopFile/hadoopFile, then the
    # conf-dict driven saveAsHadoopDataset/hadoopRDD variants.
    basepath = self.tempdir.name
    dict_data = [(1, {}),
                 (1, {"row1": 1.0}),
                 (2, {"row2": 2.0})]
    self.sc.parallelize(dict_data).saveAsHadoopFile(
        basepath + "/oldhadoop/",
        "org.apache.hadoop.mapred.SequenceFileOutputFormat",
        "org.apache.hadoop.io.IntWritable",
        "org.apache.hadoop.io.MapWritable")
    result = self.sc.hadoopFile(
        basepath + "/oldhadoop/",
        "org.apache.hadoop.mapred.SequenceFileInputFormat",
        "org.apache.hadoop.io.IntWritable",
        "org.apache.hadoop.io.MapWritable").collect()
    for v in result:
        # NOTE(review): assertTrue's 2nd arg is a message — this only checks
        # truthiness of v, not membership in dict_data; likely meant assertIn.
        self.assertTrue(v, dict_data)

    conf = {
        "mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
        "mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
        "mapreduce.job.output.value.class": "org.apache.hadoop.io.MapWritable",
        "mapreduce.output.fileoutputformat.outputdir": basepath + "/olddataset/"
    }
    self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
    input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/olddataset/"}
    result = self.sc.hadoopRDD(
        "org.apache.hadoop.mapred.SequenceFileInputFormat",
        "org.apache.hadoop.io.IntWritable",
        "org.apache.hadoop.io.MapWritable",
        conf=input_conf).collect()
    for v in result:
        self.assertTrue(v, dict_data)
def test_newhadoop(self):
    # New (mapreduce) API round-trip: saveAsNewAPIHadoopFile /
    # newAPIHadoopFile, then the conf-dict driven dataset variants.
    basepath = self.tempdir.name
    data = [(1, ""),
            (1, "a"),
            (2, "bcdf")]
    self.sc.parallelize(data).saveAsNewAPIHadoopFile(
        basepath + "/newhadoop/",
        "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
        "org.apache.hadoop.io.IntWritable",
        "org.apache.hadoop.io.Text")
    result = sorted(self.sc.newAPIHadoopFile(
        basepath + "/newhadoop/",
        "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
        "org.apache.hadoop.io.IntWritable",
        "org.apache.hadoop.io.Text").collect())
    self.assertEqual(result, data)

    conf = {
        "mapreduce.job.outputformat.class":
            "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
        "mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
        "mapreduce.job.output.value.class": "org.apache.hadoop.io.Text",
        "mapreduce.output.fileoutputformat.outputdir": basepath + "/newdataset/"
    }
    self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf)
    input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/newdataset/"}
    new_dataset = sorted(self.sc.newAPIHadoopRDD(
        "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
        "org.apache.hadoop.io.IntWritable",
        "org.apache.hadoop.io.Text",
        conf=input_conf).collect())
    self.assertEqual(new_dataset, data)
    def test_newolderror(self):
        # Mixing APIs must fail: a new-API (mapreduce.*) OutputFormat passed
        # to the old-API save call, and vice versa, should raise rather than
        # silently write bad output.
        basepath = self.tempdir.name
        rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
        self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
            basepath + "/newolderror/saveAsHadoopFile/",
            "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"))
        self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
            basepath + "/newolderror/saveAsNewAPIHadoopFile/",
            "org.apache.hadoop.mapred.SequenceFileOutputFormat"))
    def test_bad_inputs(self):
        # A non-existent OutputFormat class name must raise for both the old
        # and the new Hadoop API save paths.
        basepath = self.tempdir.name
        rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
        self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
            basepath + "/badinputs/saveAsHadoopFile/",
            "org.apache.hadoop.mapred.NotValidOutputFormat"))
        self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
            basepath + "/badinputs/saveAsNewAPIHadoopFile/",
            "org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))
    def test_converters(self):
        # use of custom converters
        # The Scala-side TestOutput*Converter classes transform keys/values on
        # write, so reading the file back yields (str(key), first-dict-key).
        basepath = self.tempdir.name
        data = [(1, {3.0: u'bb'}),
                (2, {1.0: u'aa'}),
                (3, {2.0: u'dd'})]
        self.sc.parallelize(data).saveAsNewAPIHadoopFile(
            basepath + "/converters/",
            "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
            keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
            valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
        converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect())
        expected = [(u'1', 3.0),
                    (u'2', 1.0),
                    (u'3', 2.0)]
        self.assertEqual(converted, expected)
    def test_reserialization(self):
        # The same RDD of (int, int) pairs must survive every save path:
        # saveAsSequenceFile, old/new API HadoopFile, and old/new API Dataset.
        basepath = self.tempdir.name
        x = range(1, 5)
        y = range(1001, 1005)
        data = list(zip(x, y))
        rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))
        rdd.saveAsSequenceFile(basepath + "/reserialize/sequence")
        result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect())
        self.assertEqual(result1, data)
        rdd.saveAsHadoopFile(
            basepath + "/reserialize/hadoop",
            "org.apache.hadoop.mapred.SequenceFileOutputFormat")
        result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect())
        self.assertEqual(result2, data)
        rdd.saveAsNewAPIHadoopFile(
            basepath + "/reserialize/newhadoop",
            "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
        result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect())
        self.assertEqual(result3, data)
        # Old-API Dataset conf deliberately mixes the deprecated
        # "mapred.output.format.class" key with current "mapreduce.*" keys.
        conf4 = {
            "mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
            "mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
            "mapreduce.job.output.value.class": "org.apache.hadoop.io.IntWritable",
            "mapreduce.output.fileoutputformat.outputdir": basepath + "/reserialize/dataset"}
        rdd.saveAsHadoopDataset(conf4)
        result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect())
        self.assertEqual(result4, data)
        conf5 = {"mapreduce.job.outputformat.class":
                     "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
                 "mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
                 "mapreduce.job.output.value.class": "org.apache.hadoop.io.IntWritable",
                 "mapreduce.output.fileoutputformat.outputdir": basepath + "/reserialize/newdataset"
                 }
        rdd.saveAsNewAPIHadoopDataset(conf5)
        result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect())
        self.assertEqual(result5, data)
    def test_malformed_RDD(self):
        basepath = self.tempdir.name
        # non-batch-serialized RDD[[(K, V)]] should be rejected
        # (elements are lists of pairs, not pairs, so the writer must raise).
        data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]]
        rdd = self.sc.parallelize(data, len(data))
        self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
            basepath + "/malformed/sequence"))
if __name__ == "__main__":
    # Re-import the module's names so unittest discovery sees the test
    # classes at top level, then prefer the XML-emitting runner used by CI
    # when xmlrunner is installed.
    from pyspark.tests.test_readwrite import *  # noqa: F401
    try:
        import xmlrunner  # type: ignore[import]
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
mergecoin-project/Mergecoin-master | contrib/devtools/update-translations.py | 2 | 8105 | #!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Copyright (c) 2017 Mitchell Cash
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'ion_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
    """Abort with exit status 1 unless the cwd is the repository root.

    The script renames files under LOCALE_DIR relative to the cwd, so running
    it anywhere else would corrupt nothing but also do nothing useful.
    """
    if not os.path.exists('.git'):
        # Both diagnostics go to stderr (previously the first line leaked to
        # stdout, splitting the message when output was piped).
        print('No .git directory found', file=sys.stderr)
        print('Execute this script at the root of the repository', file=sys.stderr)
        exit(1)
def fetch_all_translations():
    # Shell out to the Transifex client: -f forces overwriting local files,
    # -a pulls every language.  Any non-zero exit aborts the whole run.
    if subprocess.call([TX, 'pull', '-f', '-a']):
        print('Error while fetching translations', file=sys.stderr)
        exit(1)
def find_format_specifiers(s):
    '''Find all format specifiers in a string.

    Returns the single character following each '%'.  A lone '%' at the very
    end of the string raises IndexError; the caller relies on that to detect
    unparseable translations.
    '''
    found = []
    start = s.find('%')
    while start >= 0:
        found.append(s[start + 1])
        # Skip past the specifier so '%%' is consumed as one specifier.
        start = s.find('%', start + 2)
    return found
def split_format_specifiers(specifiers):
    '''Split format specifiers between numeric (Qt) and others (strprintf)'''
    numeric = [c for c in specifiers if c in '123456789']
    other = [c for c in specifiers if c not in '123456789']
    # If both numeric format specifiers and "others" are used, assume we're dealing
    # with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
    # only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
    # any kind of escaping that would be necessary for strprintf. Without this, this function
    # would wrongly detect '%)' as a printf format specifier.
    if numeric:
        other = []
    # numeric (Qt) can be present in any order, others (strprintf) must be in specified order
    return set(numeric), other
def sanitize_string(s):
    '''Sanitize string for printing: flatten newlines to single spaces.'''
    return ' '.join(s.split('\n'))
def check_format_specifiers(source, translation, errors, numerus):
    '''Return True if the translation's format specifiers match the source's.

    On failure a human-readable message is appended to *errors*.  For numerus
    (plural) messages a translation may legitimately omit a lone %n specifier.
    '''
    src = split_format_specifiers(find_format_specifiers(source))
    # assert that no source messages contain both Qt and strprintf format specifiers
    # if this fails, go change the source as this is hacky and confusing!
    assert not (src[0] and src[1])
    try:
        trans = split_format_specifiers(find_format_specifiers(translation))
    except IndexError:
        errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
        return False
    if trans == src:
        return True
    # Allow numerus translations to omit %n specifier (usually when it only has one possible value)
    if numerus and src == (set(), ['n']) and trans == (set(), []) and '%' not in translation:
        return True
    errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
    return False
def all_ts_files(suffix=''):
    '''Yield (filename, filepath) for every language file in LOCALE_DIR.

    Only '.ts' files (with an optional extra *suffix*) are considered, and
    the source-language file itself is always skipped.  A given suffix is
    stripped from the yielded filename before the path is built.
    '''
    for candidate in os.listdir(LOCALE_DIR):
        # process only language files, and do not process source language
        if not candidate.endswith('.ts' + suffix) or candidate == SOURCE_LANG + suffix:
            continue
        if suffix:  # remove provided suffix
            candidate = candidate[:-len(suffix)]
        yield (candidate, os.path.join(LOCALE_DIR, candidate))
# Every ASCII control byte except LF (\x0a) and CR (\x0d), the only control
# characters permitted in the XML output.
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')

def remove_invalid_characters(s):
    '''Remove invalid control characters from a translation byte string.'''
    return re.sub(FIX_RE, b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None

def escape_cdata(text):
    '''Escape via ElementTree first, then also encode quotes as Qt does.'''
    text = _orig_escape_cdata(text)
    for plain, entity in (("'", '&apos;'), ('"', '&quot;')):
        text = text.replace(plain, entity)
    return text
def postprocess_translations(reduce_diff_hacks=False):
    """Validate and clean every fetched .ts file in place.

    For each language file: strip invalid control characters, clear
    translations whose format specifiers don't match the source, remove
    location tags, drop unfinished messages, and skip (remove) files with
    fewer than MIN_NUM_MESSAGES messages.  Returns True if any format
    specifier errors were found.
    """
    print('Checking and postprocessing...')
    if reduce_diff_hacks:
        global _orig_escape_cdata
        _orig_escape_cdata = ET._escape_cdata
        ET._escape_cdata = escape_cdata
    # Rename everything to *.orig first so a crash mid-run never leaves a
    # half-written file under the original name.
    for (filename,filepath) in all_ts_files():
        os.rename(filepath, filepath+'.orig')
    have_errors = False
    for (filename,filepath) in all_ts_files('.orig'):
        # pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
        with open(filepath + '.orig', 'rb') as f:
            data = f.read()
        # remove control characters; this must be done over the entire file otherwise the XML parser will fail
        data = remove_invalid_characters(data)
        tree = ET.parse(io.BytesIO(data), parser=parser)
        # iterate over all messages in file
        root = tree.getroot()
        for context in root.findall('context'):
            for message in context.findall('message'):
                numerus = message.get('numerus') == 'yes'
                source = message.find('source').text
                translation_node = message.find('translation')
                # pick all numerusforms
                if numerus:
                    translations = [i.text for i in translation_node.findall('numerusform')]
                else:
                    translations = [translation_node.text]
                for translation in translations:
                    if translation is None:
                        continue
                    errors = []
                    valid = check_format_specifiers(source, translation, errors, numerus)
                    for error in errors:
                        print('%s: %s' % (filename, error))
                    if not valid: # set type to unfinished and clear string if invalid
                        translation_node.clear()
                        translation_node.set('type', 'unfinished')
                        have_errors = True
                # Remove location tags (makes diffs less noisy)
                for location in message.findall('location'):
                    message.remove(location)
                # Remove entire message if it is an unfinished translation
                if translation_node.get('type') == 'unfinished':
                    context.remove(message)
        # check if document is (virtually) empty, and remove it if so
        # (the 'continue' skips the write, leaving only the .orig copy)
        num_messages = 0
        for context in root.findall('context'):
            for message in context.findall('message'):
                num_messages += 1
        if num_messages < MIN_NUM_MESSAGES:
            print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
            continue
        # write fixed-up tree
        # if diff reduction requested, replace some XML to 'sanitize' to qt formatting
        if reduce_diff_hacks:
            out = io.BytesIO()
            tree.write(out, encoding='utf-8')
            out = out.getvalue()
            out = out.replace(b' />', b'/>')
            with open(filepath, 'wb') as f:
                f.write(out)
        else:
            tree.write(filepath, encoding='utf-8')
    return have_errors
if __name__ == '__main__':
    # Order matters: validate the working directory before hitting the network.
    check_at_repository_root()
    fetch_all_translations()
    postprocess_translations()
| mit |
liorvh/infernal-twin | build/pillow/selftest.py | 11 | 6176 | # minimal sanity check
from __future__ import print_function
import sys
import os
# With --installed, temporarily drop the script's own directory from sys.path
# so "from PIL import ..." resolves to the *installed* package rather than the
# source tree being tested; the path entry is restored right after the import.
if "--installed" in sys.argv:
    sys_path_0 = sys.path[0]
    del sys.path[0]
from PIL import Image, ImageDraw, ImageFilter, ImageMath
from PIL import features
if "--installed" in sys.argv:
    sys.path.insert(0, sys_path_0)
ROOT = "."
# Sanity-ping the C core; a missing/broken binary module aborts the selftest.
try:
    Image.core.ping
except ImportError as v:
    print("***", v)
    sys.exit()
except AttributeError:
    pass
def _info(im):
im.load()
return im.format, im.mode, im.size
def testimage():
    # All behavioral checks of this selftest live in the doctest below; it is
    # executed by doctest.testmod(selftest) from the __main__ block.  Expected
    # output must match exactly -- do not edit the examples casually.
    """
    PIL lets you create in-memory images with various pixel types:
    >>> im = Image.new("1", (128, 128)) # monochrome
    >>> _info(im)
    (None, '1', (128, 128))
    >>> _info(Image.new("L", (128, 128))) # grayscale (luminance)
    (None, 'L', (128, 128))
    >>> _info(Image.new("P", (128, 128))) # palette
    (None, 'P', (128, 128))
    >>> _info(Image.new("RGB", (128, 128))) # truecolor
    (None, 'RGB', (128, 128))
    >>> _info(Image.new("I", (128, 128))) # 32-bit integer
    (None, 'I', (128, 128))
    >>> _info(Image.new("F", (128, 128))) # 32-bit floating point
    (None, 'F', (128, 128))
    Or open existing files:
    >>> im = Image.open(os.path.join(ROOT, "Tests/images/hopper.gif"))
    >>> _info(im)
    ('GIF', 'P', (128, 128))
    >>> _info(Image.open(os.path.join(ROOT, "Tests/images/hopper.ppm")))
    ('PPM', 'RGB', (128, 128))
    >>> try:
    ...  _info(Image.open(os.path.join(ROOT, "Tests/images/hopper.jpg")))
    ... except IOError as v:
    ...  print(v)
    ('JPEG', 'RGB', (128, 128))
    PIL doesn't actually load the image data until it's needed,
    or you call the "load" method:
    >>> im = Image.open(os.path.join(ROOT, "Tests/images/hopper.ppm"))
    >>> print(im.im) # internal image attribute
    None
    >>> a = im.load()
    >>> type(im.im) # doctest: +ELLIPSIS
    <... '...ImagingCore'>
    You can apply many different operations on images. Most
    operations return a new image:
    >>> im = Image.open(os.path.join(ROOT, "Tests/images/hopper.ppm"))
    >>> _info(im.convert("L"))
    (None, 'L', (128, 128))
    >>> _info(im.copy())
    (None, 'RGB', (128, 128))
    >>> _info(im.crop((32, 32, 96, 96)))
    (None, 'RGB', (64, 64))
    >>> _info(im.filter(ImageFilter.BLUR))
    (None, 'RGB', (128, 128))
    >>> im.getbands()
    ('R', 'G', 'B')
    >>> im.getbbox()
    (0, 0, 128, 128)
    >>> len(im.getdata())
    16384
    >>> im.getextrema()
    ((0, 255), (0, 255), (0, 255))
    >>> im.getpixel((0, 0))
    (20, 20, 70)
    >>> len(im.getprojection())
    2
    >>> len(im.histogram())
    768
    >>> _info(im.point(list(range(256))*3))
    (None, 'RGB', (128, 128))
    >>> _info(im.resize((64, 64)))
    (None, 'RGB', (64, 64))
    >>> _info(im.rotate(45))
    (None, 'RGB', (128, 128))
    >>> [_info(ch) for ch in im.split()]
    [(None, 'L', (128, 128)), (None, 'L', (128, 128)), (None, 'L', (128, 128))]
    >>> len(im.convert("1").tobitmap())
    10456
    >>> len(im.tobytes())
    49152
    >>> _info(im.transform((512, 512), Image.AFFINE, (1,0,0,0,1,0)))
    (None, 'RGB', (512, 512))
    >>> _info(im.transform((512, 512), Image.EXTENT, (32,32,96,96)))
    (None, 'RGB', (512, 512))
    The ImageDraw module lets you draw stuff in raster images:
    >>> im = Image.new("L", (128, 128), 64)
    >>> d = ImageDraw.ImageDraw(im)
    >>> d.line((0, 0, 128, 128), fill=128)
    >>> d.line((0, 128, 128, 0), fill=128)
    >>> im.getextrema()
    (64, 128)
    In 1.1.4, you can specify colors in a number of ways:
    >>> xy = 0, 0, 128, 128
    >>> im = Image.new("RGB", (128, 128), 0)
    >>> d = ImageDraw.ImageDraw(im)
    >>> d.rectangle(xy, "#f00")
    >>> im.getpixel((0, 0))
    (255, 0, 0)
    >>> d.rectangle(xy, "#ff0000")
    >>> im.getpixel((0, 0))
    (255, 0, 0)
    >>> d.rectangle(xy, "rgb(255,0,0)")
    >>> im.getpixel((0, 0))
    (255, 0, 0)
    >>> d.rectangle(xy, "rgb(100%,0%,0%)")
    >>> im.getpixel((0, 0))
    (255, 0, 0)
    >>> d.rectangle(xy, "hsl(0, 100%, 50%)")
    >>> im.getpixel((0, 0))
    (255, 0, 0)
    >>> d.rectangle(xy, "red")
    >>> im.getpixel((0, 0))
    (255, 0, 0)
    In 1.1.6, you can use the ImageMath module to do image
    calculations.
    >>> im = ImageMath.eval("float(im + 20)", im=im.convert("L"))
    >>> im.mode, im.size
    ('F', (128, 128))
    PIL can do many other things, but I'll leave that for another
    day. If you're curious, check the handbook, available from:
    http://www.pythonware.com
    Cheers /F
    """
if __name__ == "__main__":
    # check build sanity
    exit_status = 0
    print("-"*68)
    print("Pillow", Image.PILLOW_VERSION, "TEST SUMMARY ")
    print("-"*68)
    print("Python modules loaded from", os.path.dirname(Image.__file__))
    print("Binary modules loaded from", os.path.dirname(Image.core.__file__))
    print("-"*68)
    # Optional feature modules: "support not installed" is informational only
    # and does not change the exit status.
    for name, feature in [
        ("pil", "PIL CORE"),
        ("tkinter", "TKINTER"),
        ("freetype2", "FREETYPE2"),
        ("littlecms2", "LITTLECMS2"),
        ("webp", "WEBP"),
        ("transp_webp", "Transparent WEBP")
    ]:
        supported = features.check_module(name)
        if supported is None:
            # A method was being tested, but the module required
            # for the method could not be correctly imported
            pass
        elif supported:
            print("---", feature, "support ok")
        else:
            print("***", feature, "support not installed")
    for name, feature in [
        ("jpg", "JPEG"),
        ("jpg_2000", "OPENJPEG (JPEG2000)"),
        ("zlib", "ZLIB (PNG/ZIP)"),
        ("libtiff", "LIBTIFF")
    ]:
        if features.check_codec(name):
            print("---", feature, "support ok")
        else:
            print("***", feature, "support not installed")
    print("-"*68)
    # use doctest to make sure the test program behaves as documented!
    # Only a doctest failure makes the script exit non-zero.
    import doctest
    import selftest
    print("Running selftest:")
    status = doctest.testmod(selftest)
    if status[0]:
        print("*** %s tests of %d failed." % status)
        exit_status = 1
    else:
        print("--- %s tests passed." % status[1])
    sys.exit(exit_status)
| gpl-3.0 |
xgfone/snippet | snippet/example/python/pool.py | 1 | 6109 | from __future__ import absolute_import, unicode_literals, print_function, division
import time
from queue import Queue, Empty
from threading import Lock
class _ResourcePoolSession(object):
def __init__(self, pool, obj, close_on_exc=False):
self.__pool = pool
self.__obj = obj
self.__close_on_exc = close_on_exc
self.__closed = False
def __repr__(self):
return "ResourcePoolSession(obj={0})".format(self.__obj)
def __getattr__(self, name):
if self.__closed:
raise RuntimeError("The session has been closed.")
return getattr(self.__obj, name)
def __del__(self):
self.release_to_pool()
def __enter__(self):
return self
def __exit__(self, ex_type, ex_value, traceback):
self.release_to_pool(ex_type is not None)
def release_to_pool(self, close=False):
"""Release the obj into the resource pool.
If close is True, close it at first, then create a new obj and put it
into the resource pool.
"""
if self.__closed:
return
self.__closed = True
if close:
try:
self.__obj.close()
except Exception:
pass
self.__obj = None
self.__pool._put_from_session(self.__obj)
def close(self):
self.release_to_pool(close=True)
class ResourcePool(object):
    """A thread-safe pool of reusable objects created from a factory class.

    Objects are built lazily via ``cls(*args, **kwargs)`` and recycled
    through an internal queue.  With capacity == 0 the pool is unbounded;
    otherwise at most ``capacity`` objects circulate.
    """

    def __init__(self, cls, *args, capacity=0, idle_timeout=None, autowrap=False,
                 close_on_exc=False, **kwargs):
        """Create a new pool object.

        @param cls(object): The object class to be managed.
        @param args(tuple): The positional parameters of cls.
        @param kwargs(dict): The keyword parameters of cls.
        @param capacity(int): The maximum capacity of the pool.
                              If 0, the capacity is infinite.
        @param idle_timeout(int): The idle timeout in seconds.
                                  If None or 0, never time out.
        @param autowrap(bool): If True, wrap each object in a
                               _ResourcePoolSession, which releases the object
                               back into the pool when the session is closed
                               or deleted.
        @param close_on_exc(bool): If True and autowrap is True, a ``with``
                                   block that raises closes the object and a
                                   fresh slot is returned to the pool.
        """
        capacity = capacity if capacity >= 0 else 0
        self._cls = cls
        self._args = args
        self._kwargs = kwargs
        self._closed = False
        self._lock = Lock()
        self._capacity = capacity
        self._timeout = idle_timeout
        self._pools = Queue(capacity)
        self._autowrap = autowrap
        self._close_on_exc = close_on_exc

        # Pre-fill a bounded pool with empty slots so get() can block on the
        # queue like a counting semaphore; real objects are created lazily.
        while capacity > 0:
            self.put(None)
            capacity -= 1

    def __del__(self):
        self.close()

    def _get_now(self):
        # Wall-clock seconds, used to stamp idle times on queue entries.
        return int(time.time())

    def _close_obj(self, obj):
        # Best-effort close; errors raised by obj.close() are ignored.
        if obj:
            try:
                obj.close()
            except Exception:
                pass

    def close(self):
        """Close the pool and close every pooled object.

        After closing, get() raises RuntimeError and put() closes the given
        object instead of pooling it.
        """
        with self._lock:
            if self._closed:
                return
            self._closed = True
            while True:
                try:
                    self._close_obj(self._pools.get_nowait()[0])
                    self._pools.task_done()
                except Empty:
                    return

    def get(self, timeout=None):
        """Get an object from the pool.

        Raises RuntimeError if the pool has been closed.  An object that sat
        idle for longer than idle_timeout is closed and replaced by a freshly
        created one.
        """
        with self._lock:
            if self._closed:
                raise RuntimeError("The pool has been closed.")

            if not self._capacity:
                # Unbounded pool: reuse an idle object if one is available,
                # otherwise fall through and create a new one immediately.
                try:
                    entry = self._pools.get_nowait()
                    self._pools.task_done()
                except Empty:
                    entry = None
            else:
                # Bounded pool: block until a slot becomes available.
                entry = self._pools.get(timeout=timeout)
                self._pools.task_done()

            obj = entry[0] if entry else None
            if (obj is not None and self._timeout and
                    self._get_now() - entry[1] > self._timeout):
                # Stale object: close it and create a replacement in-line.
                # (The previous implementation recursed into get() here,
                # which deadlocks because the Lock is not reentrant, and
                # when autowrap was on it also wrapped a session in a
                # second session.)
                self._close_obj(obj)
                obj = None
            if obj is None:
                obj = self._cls(*self._args, **self._kwargs)

            if obj and self._autowrap:
                return _ResourcePoolSession(self, obj, self._close_on_exc)
            return obj

    def put(self, obj):
        """Put an object into the pool.

        If the pool has been closed, the object is closed instead of being
        pooled.
        """
        with self._lock:
            if self._closed:
                self._close_obj(obj)
                return

            if isinstance(obj, _ResourcePoolSession):
                # Sessions hand their wrapped object back themselves.
                obj.release_to_pool()
            else:
                self._pools.put_nowait((obj, self._get_now()))

    def put_with_close(self, obj):
        # Close the object and return an empty slot to the pool.
        self._close_obj(obj)
        self.put(None)

    def _put_from_session(self, obj):
        # Called by _ResourcePoolSession on release; Queue operations are
        # themselves thread-safe, so no pool lock is taken here.
        self._pools.put_nowait((obj, self._get_now()))
def main(pool):
    # Manual smoke test: grab two objects, let a worker thread block on a
    # third until one is released, then return everything to the pool.
    # NOTE(review): relies on Thread, which the __main__ block below imports
    # into the module namespace before calling this function -- confirm
    # before reusing main() elsewhere.
    o1 = pool.get()
    print(o1)
    o2 = pool.get()
    print(o2)
    def f():
        o3 = pool.get()
        print(o3)
        pool.put(o3)
    t = Thread(target=f)
    t.start()
    time.sleep(3)
    pool.put(o1)
    pool.put(o2)
    return t
if __name__ == "__main__":
    import time
    from threading import Thread
    # Demo resource type: records its creation time and logs close() calls.
    class Obj(object):
        def __init__(self):
            self.time = time.time()
            time.sleep(0.1)
        def close(self):
            print("close {0}".format(self.time))
        def __repr__(self):
            return "Obj(time={0})".format(self.time)
    # Bounded pool of 2 with a 1-second idle timeout and auto-wrap sessions.
    pool = ResourcePool(Obj, capacity=2, idle_timeout=1, autowrap=True)
    task = main(pool)
    time.sleep(1)
    task.join()
| mit |
zhiyzhan/micolog | app/gmemsess.py | 14 | 3217 | # gmemsess.py - memcache-backed session Class for Google Appengine
# Version 1.4
# Copyright 2008 Greg Fawcett <greg@vig.co.nz>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import random
from google.appengine.api import memcache
_sidChars='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
_defaultTimeout=30*60 # 30 min
_defaultCookieName='gsid'
#----------------------------------------------------------------------
class Session(dict):
    """A secure lightweight memcache-backed session Class for Google Appengine."""
    #----------------------------------------------------------
    def __init__(self,rh,name=_defaultCookieName,timeout=_defaultTimeout):
        """Create a session object.
        Keyword arguments:
            rh -- the parent's request handler (usually self)
            name -- the cookie name (defaults to "gsid")
            timeout -- the number of seconds the session will last between
                       requests (defaults to 1800 secs - 30 minutes)
        """
        self.rh=rh # request handler
        self._timeout=timeout
        self._name=name
        self._new=True
        self._invalid=False
        dict.__init__(self)
        # Resume an existing session if the browser sent our cookie and the
        # data is still in memcache.
        if name in rh.request.str_cookies:
            self._sid=rh.request.str_cookies[name]
            data=memcache.get(self._sid)
            if data!=None:
                self.update(data)
                # memcache timeout is absolute, so we need to reset it on each access
                memcache.set(self._sid,data,self._timeout)
                self._new=False
                return
        # Create a new session ID
        # There are about 10^14 combinations, so guessing won't work
        # NOTE(review): the `random` module is not a CSPRNG; for a hardened
        # deployment consider random.SystemRandom for session ids.
        self._sid=random.choice(_sidChars)+random.choice(_sidChars)+\
            random.choice(_sidChars)+random.choice(_sidChars)+\
            random.choice(_sidChars)+random.choice(_sidChars)+\
            random.choice(_sidChars)+random.choice(_sidChars)
        # Added path so session works with any path
        rh.response.headers.add_header('Set-Cookie','%s=%s; path=/;'%(name,self._sid))
    #----------------------------------------------------------
    def save(self):
        """Save session data."""
        # No-op after invalidate(), so a deleted session cannot resurrect.
        if not self._invalid:
            memcache.set(self._sid,self.copy(),self._timeout)
    #----------------------------------------------------------
    def is_new(self):
        """Returns True if session was created during this request."""
        return self._new
    #----------------------------------------------------------
    def get_id(self):
        """Returns session id string."""
        return self._sid
    #----------------------------------------------------------
    def invalidate(self):
        """Delete session data and cookie."""
        # Expire the cookie client-side and drop the memcache entry.
        self.rh.response.headers.add_header('Set-Cookie',
            '%s=; expires=Sat, 1-Jan-2000 00:00:00 GMT;'%(self._name))
        memcache.delete(self._sid)
        self.clear()
        self._invalid=True
| mit |
matrumz/RPi_Custom_Files | Printing/hplip-3.15.2/fab.py | 1 | 24917 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (c) Copyright 2003-2009 Hewlett-Packard Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: Don Welch
#
__version__ = '6.0'
__title__ = "Fax Address Book"
__mod__ = 'hp-fab'
__doc__ = "A simple fax address book for HPLIP."
# Std Lib
import cmd
import getopt
import os
# Local
from base.g import *
from base import utils, tui, module
from base.sixext.moves import input
# Console class (from ASPN Python Cookbook)
# Author: James Thiele
# Date: 27 April 2004
# Version: 1.0
# Location: http://www.eskimo.com/~jet/python/examples/cmd/
# Copyright (c) 2004, James Thiele
class Console(cmd.Cmd):
    def __init__(self):
        # Interactive command interpreter built on cmd.Cmd; one shared fax
        # address-book database instance for the whole console session.
        # NOTE(review): `fax` is not imported explicitly in this file's
        # visible imports -- presumably it arrives via `from base.g import *`;
        # confirm before refactoring the imports.
        cmd.Cmd.__init__(self)
        self.intro = "Type 'help' for a list of commands. Type 'exit' or 'quit' to quit."
        self.db = fax.FaxAddressBook() # database instance
        self.prompt = log.bold("hp-fab > ")
# Command definitions
    # Simple command handlers; cmd.Cmd treats a truthy return as "stop loop".
    def do_hist(self, args):
        """Print a list of commands that have been entered"""
        print(self._hist)
    def do_exit(self, args):
        """Exits from the console"""
        return -1
    def do_quit(self, args):
        """Exits from the console"""
        return -1
# Command definitions to support Cmd object functionality
    def do_EOF(self, args):
        """Exit on system end of file character"""
        # Ctrl-D behaves exactly like the 'exit' command.
        return self.do_exit(args)
    def do_help(self, args):
        """Get help on commands
        'help' or '?' with no arguments prints a list of commands for which help is available
        'help <command>' or '? <command>' gives help on <command>
        """
        # The only reason to define this method is for the help text in the doc string
        cmd.Cmd.do_help(self, args)
# Override methods in Cmd object
    def preloop(self):
        """Initialization before prompting user for commands.
        Despite the claims in the Cmd documentation, Cmd.preloop() is not a stub.
        """
        cmd.Cmd.preloop(self)   # sets up command completion
        self._hist = []         # No history yet
        self._locals = {}       # Initialize execution namespace for user
        self._globals = {}
        # Show the current address book as soon as the console starts.
        self.do_list('')
    def postloop(self):
        """Take care of any unfinished business.
        Despite the claims in the Cmd documentation, Cmd.postloop() is not a stub.
        """
        cmd.Cmd.postloop(self)  # Clean up command completion
        print("Exiting...")
    def precmd(self, line):
        """ This method is called after the line has been input but before
            it has been interpreted. If you want to modify the input line
            before execution (for example, variable substitution) do it here.
        """
        self._hist += [line.strip()]
        return line
    def postcmd(self, stop, line):
        """If you want to stop the console, return something that evaluates to true.
        If you want to do some post command processing, do it here.
        """
        return stop
    def emptyline(self):
        """Do nothing on empty input line"""
        pass
    def default(self, line):
        # Unknown commands are reported, never executed.
        log.error("Unrecognized command. Use 'help' to list commands.")
    def get_nickname(self, args, fail_if_match=True, alt_text=False):
        # Resolve an entry name from `args` (non-interactive) or by prompting.
        #   fail_if_match=True  -> name must NOT already exist (adding)
        #   fail_if_match=False -> name MUST exist (editing/removing)
        #   alt_text=True       -> group-membership wording; an empty answer
        #                          means "done" instead of being an error.
        # Returns '' on cancel or validation failure.
        if not args:
            while True:
                if alt_text:
                    nickname = input(log.bold("Enter the name to add to the group (<enter>=done*, c=cancel) ? ")).strip()
                else:
                    nickname = input(log.bold("Enter name (c=cancel) ? ")).strip()
                if nickname.lower() == 'c':
                    print(log.red("Canceled"))
                    return ''
                if not nickname:
                    if alt_text:
                        return ''
                    else:
                        log.error("Name must not be blank.")
                        continue
                if fail_if_match:
                    if self.db.get(nickname) is not None:
                        log.error("Name already exists. Please choose a different name.")
                        continue
                else:
                    if self.db.get(nickname) is None:
                        log.error("Name not found. Please enter a different name.")
                        continue
                break
        else:
            # Non-interactive: a single validation pass, no re-prompting.
            nickname = args.strip()
            if fail_if_match:
                if self.db.get(nickname) is not None:
                    log.error("Name already exists. Please choose a different name.")
                    return ''
            else:
                if self.db.get(nickname) is None:
                    log.error("Name not found. Please enter a different name.")
                    return ''
        return nickname
    def get_groupname(self, args, fail_if_match=True, alt_text=False):
        # Resolve a group name from `args` (non-interactive) or by prompting;
        # same flag semantics as get_nickname().  Returns '' on cancel or
        # validation failure.
        all_groups = self.db.get_all_groups()
        if not args:
            while True:
                if alt_text:
                    groupname = input(log.bold("Enter the group to join (<enter>=done*, c=cancel) ? ")).strip()
                else:
                    groupname = input(log.bold("Enter the group (c=cancel) ? ")).strip()
                if groupname.lower() == 'c':
                    print(log.red("Canceled"))
                    return ''
                if not groupname:
                    if alt_text:
                        return ''
                    else:
                        log.error("The group name must not be blank.")
                        continue
                # 'All' is the implicit everyone-group and cannot be targeted.
                if groupname == 'All':
                    print("Cannot specify group 'All'. Please choose a different group.")
                    return ''
                if fail_if_match:
                    if groupname in all_groups:
                        log.error("Group already exists. Please choose a different group.")
                        continue
                else:
                    if groupname not in all_groups:
                        log.error("Group not found. Please enter a different group.")
                        continue
                break
        else:
            # NOTE(review): unlike the interactive path, this branch does not
            # reject the reserved group 'All' -- looks like an oversight;
            # confirm intended behavior before changing.
            groupname = args.strip()
            if fail_if_match:
                if groupname in all_groups:
                    log.error("Group already exists. Please choose a different group.")
                    return ''
            else:
                if groupname not in all_groups:
                    log.error("Group not found. Please enter a different group.")
                    return ''
        return groupname
def do_list(self, args):
"""
List names and/or groups.
list [names|groups|all|]
dir [names|groups|all|]
"""
if args:
scope = args.strip().split()[0]
if args.startswith('nam'):
self.do_names('')
return
elif args.startswith('gro'):
self.do_groups('')
return
self.do_names('')
self.do_groups('')
do_dir = do_list
    def do_names(self, args):
        """
        List names.
        names
        """
        all_entries = self.db.get_all_records()
        log.debug(all_entries)
        print(log.bold("\nNames:\n"))
        if len(all_entries) > 0:
            f = tui.Formatter()
            f.header = ("Name", "Fax Number", "Notes", "Member of Group(s)")
            for name, e in list(all_entries.items()):
                # Names beginning with '__' are internal/reserved records and
                # are never shown to the user.
                if not name.startswith('__'):
                    f.add((name, e['fax'], e['notes'], ', '.join(e['groups'])))
            f.output()
        else:
            print("(None)")
        print()
    def do_groups(self, args):
        """
        List groups.
        groups
        """
        all_groups = self.db.get_all_groups()
        log.debug(all_groups)
        print(log.bold("\nGroups:\n"))
        if len(all_groups):
            f = tui.Formatter()
            f.header = ("Group", "Members")
            for group in all_groups:
                # Internal '__'-prefixed members are filtered out of the
                # displayed membership list.
                f.add((group, ', '.join([x for x in self.db.group_members(group) if not x.startswith('__')])))
            f.output()
        else:
            print("(None)")
        print()
def do_edit(self, args):
"""
Edit an name.
edit [name]
modify [name]
"""
nickname = self.get_nickname(args, fail_if_match=False)
if not nickname: return
e = self.db.get(nickname)
log.debug(e)
print(log.bold("\nEdit/modify information for %s:\n" % nickname))
# save_title = e['title']
# title = raw_input(log.bold("Title (<enter>='%s', c=cancel) ? " % save_title)).strip()
#
# if title.lower() == 'c':
# print log.red("Canceled")
# return
#
# if not title:
# title = save_title
#
# save_firstname = e['firstname']
# firstname = raw_input(log.bold("First name (<enter>='%s', c=cancel) ? " % save_firstname)).strip()
#
# if firstname.lower() == 'c':
# print log.red("Canceled")
# return
#
# if not firstname:
# firstname = save_firstname
#
# save_lastname = e['lastname']
# lastname = raw_input(log.bold("Last name (<enter>='%s', c=cancel) ? " % save_lastname)).strip()
#
# if lastname.lower() == 'c':
# print log.red("Canceled")
# return
#
# if not lastname:
# lastname = save_lastname
lastname = ''
firstname = ''
title = ''
save_faxnum = e['fax']
while True:
faxnum = input(log.bold("Fax Number (<enter>='%s', c=cancel) ? " % save_faxnum)).strip()
if faxnum.lower() == 'c':
print(log.red("Canceled"))
return
if not faxnum and not save_faxnum:
log.error("Fax number must not be empty.")
continue
if not faxnum:
faxnum = save_faxnum
ok = True
for c in faxnum:
if c not in '0123456789-(+) *#':
log.error("Invalid characters in fax number. Fax number may only contain '0123456789-(+) '")
ok = False
break
if ok: break
save_notes = e['notes']
notes = input(log.bold("Notes (<enter>='%s', c=cancel) ? " % save_notes)).strip()
if notes.lower() == 'c':
print(log.red("Canceled"))
return
if not notes:
notes = save_notes
if e['groups']:
print("\nLeave or Stay in a Group:\n")
new_groups = []
for g in e['groups']:
if g == 'All':
continue
ok, ans = tui.enter_yes_no("Stay in group %s " % g,
choice_prompt="(y=yes* (stay), n=no (leave), c=cancel) ? ")
if not ok:
print(log.red("Canceled"))
return
if ans:
new_groups.append(g)
print("\nJoin New Group(s):\n")
while True:
add_group = self.get_groupname('', fail_if_match=False, alt_text=True)
if add_group.lower() == 'c':
print(log.red("Canceled"))
return
if not add_group:
break
all_groups = self.db.get_all_groups()
if add_group not in all_groups:
log.warn("Group not found.")
ok, ans = tui.enter_yes_no("Is this a new group",
choice_prompt="(y=yes* (new), n=no, c=cancel) ? ")
if not ok:
print(log.red("Canceled"))
return
if not ans:
continue
if add_group in e['groups']:
log.error("Group already specified. Choose a different group name or press <enter> to continue.")
continue
new_groups.append(add_group)
self.db.set(nickname, title, firstname, lastname, faxnum, new_groups, notes)
self.do_show(nickname)
print()
do_modify = do_edit
def do_editgrp(self, args):
    """
    Edit a group.
    editgrp [group]
    modifygrp [group]
    """
    # Resolve the group name; silently bail out when none is given/found.
    group = self.get_groupname(args, fail_if_match=False)
    if not group:
        return

    kept = []
    print("\nExisting Names in Group:\n")
    for member in self.db.group_members(group):
        # Internal bookkeeping entries (double-underscore prefix) are hidden.
        if member.startswith('__'):
            continue
        ok, stay = tui.enter_yes_no(
            "Should '%s' stay in this group " % member,
            choice_prompt="(y=yes* (stay), n=no (leave), c=cancel) ? ")
        if not ok:
            print(log.red("Canceled"))
            return
        if stay:
            kept.append(member)

    print("\nAdd New Names to Group:\n")
    while True:
        nickname = self.get_nickname('', fail_if_match=False, alt_text=True)
        if nickname.lower() == 'c':
            print(log.red("Canceled"))
            return
        if not nickname:
            break  # blank entry ends the add loop
        kept.append(nickname)

    self.db.update_groups(group, kept)
    print()

do_modifygrp = do_editgrp
def do_add(self, args):
    """
    Add a name.
    add [name]
    new [name]
    """
    # BUGFIX: help text read "Add an name." -- this docstring is shown by
    # the cmd module's 'help add' command, so the typo was user-visible.

    # Resolve/prompt for a nickname that does not exist yet.
    nickname = self.get_nickname(args, fail_if_match=True)
    if not nickname:
        return
    print(log.bold("\nEnter information for %s:\n" % nickname))

    # Title/first/last name entry is currently disabled upstream; the
    # database schema still stores the (empty) fields.
    title = ''
    firstname = ''
    lastname = ''

    # Prompt until a non-empty, well-formed fax number is entered.
    while True:
        faxnum = input(log.bold("Fax Number (c=cancel) ? ")).strip()
        if faxnum.lower() == 'c':
            print(log.red("Canceled"))
            return
        if not faxnum:
            log.error("Fax number must not be empty.")
            continue
        ok = True
        for c in faxnum:
            if c not in '0123456789-(+) *#':
                log.error("Invalid characters in fax number. Fax number may only contain '0123456789-(+) *#'")
                ok = False
                break
        if ok:
            break

    notes = input(log.bold("Notes (c=cancel) ? ")).strip()
    if notes.strip().lower() == 'c':
        print(log.red("Canceled"))
        return

    # Collect group memberships; 'All' is implicit and may not be typed.
    groups = []
    all_groups = self.db.get_all_groups()
    while True:
        add_group = input(log.bold("Member of group (<enter>=done*, c=cancel) ? ")).strip()
        if add_group.lower() == 'c':
            print(log.red("Canceled"))
            return
        if not add_group:
            break
        if add_group == 'All':
            print(log.red("Cannot specify 'All'."))
            continue
        if add_group not in all_groups:
            log.warn("Group not found.")
            # Confirm that an unknown group really is meant as a new one.
            while True:
                user_input = input(log.bold("Is this a new group (y=yes*, n=no) ? ")).lower().strip()
                if user_input not in ['', 'n', 'y']:
                    log.error("Please enter 'y', 'n' or press <enter> for 'yes'.")
                    continue
                break
            if user_input == 'n':
                continue
        if add_group in groups:
            log.error("Group already specified. Choose a different group name or press <enter> to continue.")
            continue
        groups.append(add_group)

    groups.append('All')  # every entry belongs to the implicit 'All' group
    self.db.set(nickname, title, firstname, lastname, faxnum, groups, notes)
    self.do_show(nickname)

do_new = do_add
def do_addgrp(self, args):
    """
    Add a group.
    addgrp [group]
    newgrp [group]
    """
    # Require a group name that is not already in use.
    group = self.get_groupname(args, fail_if_match=True)
    if not group:
        return

    members = []
    while True:
        nickname = self.get_nickname('', fail_if_match=False, alt_text=True)
        if nickname.lower() == 'c':
            print(log.red("Canceled"))
            return
        if not nickname:
            break  # blank entry ends input
        members.append(nickname)

    self.db.update_groups(group, members)
    print()

do_newgrp = do_addgrp
def do_view(self, args):
    """
    View all name data.
    view
    """
    records = self.db.get_all_records()
    log.debug(records)
    print(log.bold("\nView all Data:\n"))
    if records:
        table = tui.Formatter()
        table.header = ("Name", "Fax", "Notes", "Member of Group(s)")
        for name, rec in records.items():
            # Internal bookkeeping entries (double-underscore prefix) are hidden.
            if name.startswith('__'):
                continue
            table.add((name, rec['fax'], rec['notes'], ', '.join(rec['groups'])))
        table.output()
    print()
def do_show(self, args):
    """
    Show a name (all details).
    show [name]
    details [name]
    """
    name = self.get_nickname(args, fail_if_match=False)
    if not name:
        return
    entry = self.db.get(name)
    if not entry:
        log.error("Name not found. Use the 'names' command to view all names.")
        print()
        return
    # Render the entry as a two-column key/value table.
    # (Title/first/last name fields exist in the DB but are not shown.)
    table = tui.Formatter()
    table.header = ("Key", "Value")
    for row in (("Name:", name),
                ("Fax Number:", entry['fax']),
                ("Notes:", entry['notes']),
                ("Member of Group(s):", ', '.join(entry['groups']))):
        table.add(row)
    table.output()
    print()

do_details = do_show
def do_rm(self, args):
    """
    Remove a name.
    rm [name]
    del [name]
    """
    # Only delete when the nickname resolves to an existing entry.
    nickname = self.get_nickname(args, fail_if_match=False)
    if nickname:
        self.db.delete(nickname)
        print()

do_del = do_rm
def do_rmgrp(self, args):
    """
    Remove a group.
    rmgrp [group]
    delgrp [group]
    """
    # Only delete when the group name resolves to an existing group.
    group = self.get_groupname(args, fail_if_match=False)
    if group:
        self.db.delete_group(group)
        print()

do_delgrp = do_rmgrp
def do_about(self, args):
    """About fab."""
    # Print the program banner (title and version) via the shared logger.
    utils.log_title(__title__, __version__)
def do_import(self, args):
    """
    Import LDIF
    import <filename> [type]
    [type] = vcf|ldif|auto
    """
    # First token is the filename; optional second token is the format.
    args = args.strip().split()
    if not args:
        log.error("You must specify a filename to import from.")
        return
    filename = args[0]
    if len(args) > 1:
        typ = args[1].lower()
    else:
        typ = 'auto'
    if typ not in ('auto', 'ldif', 'vcf', 'vcard'):
        log.error("Invalid type: %s" % typ)
        return
    if not os.path.exists(filename):
        log.error("File %s not found." % filename)
        return

    if typ == 'auto':
        # Guess the format: first by extension, then by sniffing the
        # start of the file for a vCard marker.
        ext = os.path.splitext(filename)[1].lower()
        if ext == '.vcf':
            typ = 'vcf'
        elif ext == '.ldif':
            typ = 'ldif'
        else:
            # BUGFIX: the old code did open(filename).read(1024) and
            # leaked the file handle; close it deterministically.
            with open(filename, 'r') as f:
                head = f.read(1024).lower()
            if 'begin:vcard' in head:
                typ = 'vcf'
            else:
                typ = 'ldif'

    if typ == 'ldif':
        print("Importing from LDIF file %s..." % filename)
        ok, error_str = self.db.import_ldif(filename)
    else:  # 'vcf' or 'vcard'
        print("Importing from VCF file %s..." % filename)
        ok, error_str = self.db.import_vcard(filename)

    if not ok:
        log.error(error_str)
    else:
        self.do_list('')
    print()
# ---------------------------------------------------------------------------
# Module entry point: parse HPLIP standard options, then start either the
# Qt3/Qt4 GUI or the interactive console.
# NOTE(review): indentation was lost in this copy; the nesting below was
# reconstructed from the visible control-flow keywords -- confirm against
# the upstream hp-fab source before relying on it.
# ---------------------------------------------------------------------------
mod = module.Module(__mod__, __title__, __version__, __doc__, None,
                    (GUI_MODE, INTERACTIVE_MODE),
                    (UI_TOOLKIT_QT3, UI_TOOLKIT_QT4))
mod.setUsage(module.USAGE_FLAG_NONE)

# This tool takes no device/printer argument.
opts, device_uri, printer_name, mode, ui_toolkit, loc = \
    mod.parseStdOpts(handle_device_printer=False)

# Fall back to interactive mode when the requested Qt toolkit is unusable.
if ui_toolkit == 'qt3':
    if not utils.canEnterGUIMode():
        log.error("%s GUI mode requires GUI support (try running with --qt4). Entering interactive mode." % __mod__)
        mode = INTERACTIVE_MODE
else:
    if not utils.canEnterGUIMode4():
        log.error("%s GUI mode requires GUI support (try running with --qt3). Entering interactive mode." % __mod__)
        mode = INTERACTIVE_MODE

if mode == GUI_MODE:
    if ui_toolkit == 'qt3':
        log.set_module("hp-fab(qt3)")

        try:
            from qt import *
            from ui.faxaddrbookform import FaxAddrBookForm
        except ImportError:
            log.error("Unable to load Qt3 support. Is it installed?")
            sys.exit(1)

        app = None
        addrbook = None

        # create the main application object
        app = QApplication(sys.argv)

        # Resolve the UI locale: command line > user config > system.
        if loc is None:
            loc = user_conf.get('ui', 'loc', 'system')

        if loc.lower() == 'system':
            loc = str(QTextCodec.locale())
            log.debug("Using system locale: %s" % loc)

        if loc.lower() != 'c':
            # Normalize the codeset to utf8 and try to install the
            # matching Qt translation (.qm); fall back to 'c' on failure.
            e = 'utf8'
            try:
                l, x = loc.split('.')
                loc = '.'.join([l, e])
            except ValueError:
                l = loc
                loc = '.'.join([loc, e])

            log.debug("Trying to load .qm file for %s locale." % loc)
            trans = QTranslator(None)
            qm_file = 'hplip_%s.qm' % l
            log.debug("Name of .qm file: %s" % qm_file)
            loaded = trans.load(qm_file, prop.localization_dir)

            if loaded:
                app.installTranslator(trans)
            else:
                loc = 'c'

        if loc == 'c':
            log.debug("Using default 'C' locale")
        else:
            log.debug("Using locale: %s" % loc)
            QLocale.setDefault(QLocale(loc))
            prop.locale = loc
            try:
                locale.setlocale(locale.LC_ALL, locale.normalize(loc))
            except locale.Error:
                pass  # unknown locale on this system; keep Qt-only setting

        addrbook = FaxAddrBookForm()
        addrbook.show()
        app.setMainWidget(addrbook)

        try:
            log.debug("Starting GUI loop...")
            app.exec_loop()
        except KeyboardInterrupt:
            pass

        sys.exit(0)

    else:  # qt4
        try:
            from PyQt4.QtGui import QApplication
            from ui4.fabwindow import FABWindow
        except ImportError:
            log.error("Unable to load Qt4 support. Is it installed?")
            sys.exit(1)

        log.set_module("hp-fab(qt4)")

        if 1:  # historical guard left in the upstream source
            app = QApplication(sys.argv)
            fab = FABWindow(None)
            fab.show()

            try:
                log.debug("Starting GUI loop...")
                app.exec_()
            except KeyboardInterrupt:
                sys.exit(0)

else:  # INTERACTIVE_MODE
    try:
        from fax import fax
    except ImportError:
        # This can fail on Python < 2.3 due to the datetime module
        log.error("Fax address book disabled - Python 2.3+ required.")
        sys.exit(1)

    console = Console()
    try:
        console.cmdloop()
    except KeyboardInterrupt:
        log.error("User exit.")

    log.info("")
    log.info("Done.")
| gpl-2.0 |
guc-cs/Campus-Vision | opencv-1.1.0/samples/python/morphology.py | 6 | 1894 | #!/usr/bin/python
import sys
from opencv.cv import *
from opencv.highgui import *
# Module-level state shared by the trackbar callbacks below.
src = 0;             # source image, loaded in __main__
image = 0;           # intermediate buffer (first pass of open/close)
dest = 0;            # result buffer shown in the windows
element = 0;         # unused placeholder; callbacks build their own kernels
element_shape = CV_SHAPE_RECT;   # structuring-element shape for all operations
global_pos = 0;      # initial trackbar position
def Opening(pos):
    """Trackbar callback: morphological opening (erode, then dilate)
    with a rectangular kernel of radius *pos*."""
    kernel = cvCreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1,
                                          pos, pos, element_shape, None)
    cvErode(src, image, kernel, 1)
    cvDilate(image, dest, kernel, 1)
    cvShowImage("Opening&Closing window", dest)
def Closing(pos):
    """Trackbar callback: morphological closing (dilate, then erode)
    with a rectangular kernel of radius *pos*."""
    kernel = cvCreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1,
                                          pos, pos, element_shape, None)
    cvDilate(src, image, kernel, 1)
    cvErode(image, dest, kernel, 1)
    cvShowImage("Opening&Closing window", dest)
def Erosion(pos):
    """Trackbar callback: erode the source with a kernel of radius *pos*."""
    kernel = cvCreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1,
                                          pos, pos, element_shape, None)
    cvErode(src, dest, kernel, 1)
    cvShowImage("Erosion&Dilation window", dest)
def Dilation(pos):
    """Trackbar callback: dilate the source with a kernel of radius *pos*."""
    kernel = cvCreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1,
                                          pos, pos, element_shape, None)
    cvDilate(src, dest, kernel, 1)
    cvShowImage("Erosion&Dilation window", dest)
if __name__ == "__main__":
    # Load the demo image, or the one given on the command line.
    filename = "../c/baboon.jpg"
    if len(sys.argv)==2:
        filename = sys.argv[1]
    src = cvLoadImage(filename,1)
    if not src:
        sys.exit(-1)
    # Working copies used as intermediate/result buffers by the callbacks.
    image = cvCloneImage(src);
    dest = cvCloneImage(src);
    cvNamedWindow("Opening&Closing window",1);
    cvNamedWindow("Erosion&Dilation window",1);
    cvShowImage("Opening&Closing window",src);
    cvShowImage("Erosion&Dilation window",src);
    # Each trackbar (range 0..10 = kernel radius) drives one morphology op.
    cvCreateTrackbar("Open","Opening&Closing window",global_pos,10,Opening);
    cvCreateTrackbar("Close","Opening&Closing window",global_pos,10,Closing);
    cvCreateTrackbar("Dilate","Erosion&Dilation window",global_pos,10,Dilation);
    cvCreateTrackbar("Erode","Erosion&Dilation window",global_pos,10,Erosion);
    # Block until any key is pressed, then tear down the windows.
    cvWaitKey(0);
    cvDestroyWindow("Opening&Closing window");
    cvDestroyWindow("Erosion&Dilation window");
| mit |
ContinuumIO/numpy | numpy/lib/_datasource.py | 46 | 21267 | """A file interface for handling local and remote data files.
The goal of datasource is to abstract some of the file system operations
when dealing with data files so the researcher doesn't have to know all the
low-level details. Through datasource, a researcher can obtain and use a
file with one function call, regardless of location of the file.
DataSource is meant to augment standard python libraries, not replace them.
It should work seamlessly with standard file IO operations and the os
module.
DataSource files can originate locally or remotely:
- local files : '/home/guido/src/local/data.txt'
- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'
DataSource files can also be compressed or uncompressed. Currently only
gzip and bz2 are supported.
Example::
>>> # Create a DataSource, use os.curdir (default) for local storage.
>>> ds = datasource.DataSource()
>>>
>>> # Open a remote file.
>>> # DataSource downloads the file, stores it locally in:
>>> # './www.google.com/index.html'
>>> # opens the file and returns a file object.
>>> fp = ds.open('http://www.google.com/index.html')
>>>
>>> # Use the file as you normally would
>>> fp.read()
>>> fp.close()
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import shutil
# Keep a reference to the builtin ``open``: this module defines its own
# module-level ``open`` function below, and internal code (e.g. _cache)
# still needs the builtin.
_open = open
# Using a class instead of a module-level dictionary
# to reduce the inital 'import numpy' overhead by
# deferring the import of bz2 and gzip until needed
# TODO: .zip support, .tar support?
class _FileOpeners(object):
"""
Container for different methods to open (un-)compressed files.
`_FileOpeners` contains a dictionary that holds one method for each
supported file format. Attribute lookup is implemented in such a way
that an instance of `_FileOpeners` itself can be indexed with the keys
of that dictionary. Currently uncompressed files as well as files
compressed with ``gzip`` or ``bz2`` compression are supported.
Notes
-----
`_file_openers`, an instance of `_FileOpeners`, is made available for
use in the `_datasource` module.
Examples
--------
>>> np.lib._datasource._file_openers.keys()
[None, '.bz2', '.gz']
>>> np.lib._datasource._file_openers['.gz'] is gzip.open
True
"""
def __init__(self):
self._loaded = False
self._file_openers = {None: open}
def _load(self):
if self._loaded:
return
try:
import bz2
self._file_openers[".bz2"] = bz2.BZ2File
except ImportError:
pass
try:
import gzip
self._file_openers[".gz"] = gzip.open
except ImportError:
pass
self._loaded = True
def keys(self):
"""
Return the keys of currently supported file openers.
Parameters
----------
None
Returns
-------
keys : list
The keys are None for uncompressed files and the file extension
strings (i.e. ``'.gz'``, ``'.bz2'``) for supported compression
methods.
"""
self._load()
return list(self._file_openers.keys())
def __getitem__(self, key):
self._load()
return self._file_openers[key]
# Module-wide singleton: one shared, lazily-populated opener table.
_file_openers = _FileOpeners()
def open(path, mode='r', destpath=os.curdir):
    """
    Open `path` with `mode` and return the file object.

    Convenience wrapper: instantiates a `DataSource` rooted at `destpath`
    and returns ``DataSource.open(path, mode)``.  If `path` is a URL, it
    is downloaded into the `destpath` directory and opened from there.

    Parameters
    ----------
    path : str
        Local file path or URL to open.
    mode : str, optional
        Mode to open `path`: 'r' to read (default), 'w' to write, 'a' to
        append.  Available modes depend on the type of object `path`
        names.
    destpath : str, optional
        Directory where a downloaded source file is stored for use.  If
        None, a temporary directory is created.  Defaults to the current
        directory.

    Returns
    -------
    out : file object
        The opened file.
    """
    return DataSource(destpath).open(path, mode)
class DataSource (object):
    """
    DataSource(destpath='.')

    A generic data source file (file, http, ftp, ...).

    DataSources can be local files or remote files/URLs.  The files may
    also be compressed or uncompressed (gzip and bz2 are supported).
    DataSource hides some of the low-level details of downloading the
    file, allowing you to simply pass in a valid file path (or URL) and
    obtain a file object.

    Parameters
    ----------
    destpath : str or None, optional
        Path to the directory where the source file gets downloaded to for
        use.  If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.

    Notes
    -----
    URLs require a scheme string (``http://``) to be used, without it they
    will fail::

        >>> repos = DataSource()
        >>> repos.exists('www.google.com/index.html')
        False
        >>> repos.exists('http://www.google.com/index.html')
        True

    Temporary directories are deleted when the DataSource is deleted.

    Examples
    --------
    ::

        >>> ds = DataSource('/home/guido')
        >>> urlname = 'http://www.google.com/index.html'
        >>> gfile = ds.open('http://www.google.com/index.html') # remote file
        >>> ds.abspath(urlname)
        '/home/guido/www.google.com/site/index.html'

        >>> ds = DataSource(None)  # use with temporary file
        >>> ds.open('/home/guido/foobar.txt')
        <open file '/home/guido.foobar.txt', mode 'r' at 0x91d4430>
        >>> ds.abspath('/home/guido/foobar.txt')
        '/tmp/tmpy4pgsP/home/guido/foobar.txt'

    """

    def __init__(self, destpath=os.curdir):
        """Create a DataSource with a local path at destpath."""
        if destpath:
            self._destpath = os.path.abspath(destpath)
            self._istmpdest = False
        else:
            import tempfile  # deferring import to improve startup time
            self._destpath = tempfile.mkdtemp()
            self._istmpdest = True

    def __del__(self):
        # Remove the temporary directory, if we created one.  Guard against
        # a partially constructed instance: if __init__ raised before
        # assigning _istmpdest, attribute access here would raise again
        # during garbage collection.
        try:
            if self._istmpdest:
                shutil.rmtree(self._destpath)
        except AttributeError:
            pass

    def _iszip(self, filename):
        """Test if the filename is a zip file by looking at the file
        extension (one of the keys of `_file_openers`, e.g. '.gz')."""
        fname, ext = os.path.splitext(filename)
        return ext in _file_openers.keys()

    def _iswritemode(self, mode):
        """Test if the given mode will open a file for writing."""
        # Currently only used to reject writing to URLs and to detect
        # '+' modes that bz2 cannot handle.
        _writemodes = ("w", "+")
        for c in mode:
            if c in _writemodes:
                return True
        return False

    def _splitzipext(self, filename):
        """Split a recognized compression extension from filename.

        Returns ``(base, zip_ext)`` where ``zip_ext`` keeps its leading
        dot (e.g. '.bz2'), or ``(filename, None)`` when the name has no
        recognized compression extension.
        """
        if self._iszip(filename):
            return os.path.splitext(filename)
        else:
            return filename, None

    def _possible_names(self, filename):
        """Return a list containing compressed filename variations."""
        names = [filename]
        if not self._iszip(filename):
            for zipext in _file_openers.keys():
                if zipext:
                    names.append(filename + zipext)
        return names

    def _isurl(self, path):
        """Test if path is a net location.  Tests the scheme and netloc."""
        # We do this here to reduce the 'import numpy' initial import time.
        if sys.version_info[0] >= 3:
            from urllib.parse import urlparse
        else:
            from urlparse import urlparse

        # BUG : URLs require a scheme string ('http://') to be used.
        #       www.google.com will fail.
        #       Should we prepend the scheme for those that don't have it
        #       and test that also?  Similar to the way we append .gz and
        #       test for compressed versions of files.

        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
        return bool(scheme and netloc)

    def _cache(self, path):
        """Cache the file specified by path.

        Creates a copy of the file in the datasource cache and returns
        the path to that copy.
        """
        # We import these here because importing urllib2 is slow and
        # a significant fraction of numpy's total import time.
        if sys.version_info[0] >= 3:
            from urllib.request import urlopen
            from urllib.error import URLError
        else:
            from urllib2 import urlopen
            from urllib2 import URLError

        upath = self.abspath(path)

        # ensure directory exists
        if not os.path.exists(os.path.dirname(upath)):
            os.makedirs(os.path.dirname(upath))

        # TODO: Doesn't handle compressed files!
        if self._isurl(path):
            try:
                openedurl = urlopen(path)
                f = _open(upath, 'wb')
                try:
                    shutil.copyfileobj(openedurl, f)
                finally:
                    f.close()
                    openedurl.close()
            except URLError:
                raise URLError("URL not found: %s" % path)
        else:
            shutil.copyfile(path, upath)
        return upath

    def _findfile(self, path):
        """Searches for ``path`` and returns full path if found.

        If path is an URL, _findfile will cache a local copy and return
        the path to the cached file.  If path is a local file, _findfile
        will return a path to that local file.

        The search will include possible compressed versions of the file
        and return the first occurrence found.
        """
        # Build list of possible local file paths
        if not self._isurl(path):
            # Valid local paths
            filelist = self._possible_names(path)
            # Paths in self._destpath
            filelist += self._possible_names(self.abspath(path))
        else:
            # Cached URLs in self._destpath
            filelist = self._possible_names(self.abspath(path))
            # Remote URLs
            filelist = filelist + self._possible_names(path)

        for name in filelist:
            if self.exists(name):
                if self._isurl(name):
                    name = self._cache(name)
                return name
        return None

    def abspath(self, path):
        """
        Return absolute path of file in the DataSource directory.

        If `path` is an URL, then `abspath` will return either the
        location the file exists locally or the location it would exist
        when opened using the `open` method.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.

        Returns
        -------
        out : str
            Complete path, including the `DataSource` destination
            directory.

        Notes
        -----
        The functionality is based on `os.path.abspath`.
        """
        # We do this here to reduce the 'import numpy' initial import time.
        if sys.version_info[0] >= 3:
            from urllib.parse import urlparse
        else:
            from urlparse import urlparse

        # TODO: This should be more robust.  Handles case where path
        #       includes the destpath, but not other sub-paths.
        #       Failing case:
        #         path = /home/guido/datafile.txt
        #         destpath = /home/alex/
        #         upath = self.abspath(path)
        #         upath == '/home/alex/home/guido/datafile.txt'

        # handle case where path includes self._destpath
        splitpath = path.split(self._destpath, 2)
        if len(splitpath) > 1:
            path = splitpath[1]
        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
        netloc = self._sanitize_relative_path(netloc)
        upath = self._sanitize_relative_path(upath)
        return os.path.join(self._destpath, netloc, upath)

    def _sanitize_relative_path(self, path):
        """Return a sanitised relative path for which

        os.path.abspath(os.path.join(base, path)).startswith(base)
        """
        last = None
        path = os.path.normpath(path)
        while path != last:
            last = path
            # Note: os.path.join treats '/' as os.sep on Windows
            path = path.lstrip(os.sep).lstrip('/')
            path = path.lstrip(os.pardir).lstrip('..')
            drive, path = os.path.splitdrive(path)  # for Windows
        return path

    def exists(self, path):
        """
        Test if path exists.

        Test if `path` exists as (and in this order):

        - a local file.
        - a remote URL that has been downloaded and stored locally in
          the `DataSource` directory.
        - a remote URL that has not been downloaded, but is valid and
          accessible.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.

        Returns
        -------
        out : bool
            True if `path` exists.

        Notes
        -----
        When `path` is an URL, `exists` will return True if it's either
        stored locally in the `DataSource` directory, or is a valid
        remote URL.  `DataSource` does not discriminate between the two,
        the file is accessible if it exists in either location.
        """
        # We import this here because importing urllib2 is slow and
        # a significant fraction of numpy's total import time.
        if sys.version_info[0] >= 3:
            from urllib.request import urlopen
            from urllib.error import URLError
        else:
            from urllib2 import urlopen
            from urllib2 import URLError

        # Test local path
        if os.path.exists(path):
            return True

        # Test cached url
        upath = self.abspath(path)
        if os.path.exists(upath):
            return True

        # Test remote url
        if self._isurl(path):
            try:
                netfile = urlopen(path)
                netfile.close()
                del netfile
                return True
            except URLError:
                return False
        return False

    def open(self, path, mode='r'):
        """
        Open and return file-like object.

        If `path` is an URL, it will be downloaded, stored in the
        `DataSource` directory and opened from there.

        Parameters
        ----------
        path : str
            Local file path or URL to open.
        mode : {'r', 'w', 'a'}, optional
            Mode to open `path`.  Mode 'r' for reading, 'w' for writing,
            'a' to append.  Available modes depend on the type of object
            specified by `path`.  Default is 'r'.

        Returns
        -------
        out : file object
            File object.
        """
        # TODO: There is no support for opening a file for writing which
        #       doesn't exist yet (creating a file).  Should there be?

        # TODO: Add a ``subdir`` parameter for specifying the subdirectory
        #       used to store URLs in self._destpath.

        if self._isurl(path) and self._iswritemode(mode):
            raise ValueError("URLs are not writeable")

        # NOTE: _findfile will fail on a new file opened for writing.
        found = self._findfile(path)
        if found:
            _fname, ext = self._splitzipext(found)
            if ext == '.bz2':
                # bz2 does not support '+' modes, so strip any '+'.
                # BUGFIX: the old code compared against 'bz2' (without
                # the leading dot returned by _splitzipext) and discarded
                # the result of str.replace, so the fix-up never happened.
                mode = mode.replace("+", "")
            return _file_openers[ext](found, mode=mode)
        else:
            raise IOError("%s not found." % path)
class Repository (DataSource):
    """
    Repository(baseurl, destpath='.')

    A data repository where multiple DataSource's share a base
    URL/directory.

    `Repository` extends `DataSource` by prepending a base URL (or
    directory) to all the files it handles, so that after initializing
    the repository with its base location, each file can be referred to
    by filename only.

    Parameters
    ----------
    baseurl : str
        Path to the local directory or remote location that contains the
        data files.
    destpath : str or None, optional
        Path to the directory where downloaded source files are stored.
        If `destpath` is None, a temporary directory is created.  The
        default path is the current directory.

    Examples
    --------
    To analyze all files in the repository, do something like this
    (note: this is not self-contained code)::

        >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
        >>> for filename in filelist:
        ...     fp = repos.open(filename)
        ...     fp.analyze()
        ...     fp.close()

    Similarly you could use a URL for a repository::

        >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')

    """

    def __init__(self, baseurl, destpath=os.curdir):
        """Create a Repository with a shared url or directory of baseurl."""
        DataSource.__init__(self, destpath=destpath)
        self._baseurl = baseurl

    def __del__(self):
        DataSource.__del__(self)

    def _fullpath(self, path):
        """Return complete path for path.  Prepends baseurl if necessary."""
        if len(path.split(self._baseurl, 2)) == 1:
            # baseurl does not occur in path: prepend it.
            return os.path.join(self._baseurl, path)
        # path already contains baseurl; use it as-is.
        return path

    def _findfile(self, path):
        """Extend DataSource method to prepend baseurl to ``path``."""
        return DataSource._findfile(self, self._fullpath(path))

    def abspath(self, path):
        """
        Return absolute path of file in the Repository directory.

        If `path` is an URL, then `abspath` will return either the
        location the file exists locally or the location it would exist
        when opened using the `open` method.  `path` may, but does not
        have to, include the `baseurl` with which the `Repository` was
        initialized.

        Returns the complete path, including the `DataSource`
        destination directory.
        """
        return DataSource.abspath(self, self._fullpath(path))

    def exists(self, path):
        """
        Test if path exists, prepending the Repository base URL to path.

        True when `path` exists as (in this order): a local file; a
        remote URL already downloaded into the `DataSource` directory;
        or a valid, accessible remote URL that has not been downloaded.
        `path` may, but does not have to, include the `baseurl`.
        """
        return DataSource.exists(self, self._fullpath(path))

    def open(self, path, mode='r'):
        """
        Open and return a file-like object, prepending the Repository
        base URL.

        If `path` is an URL, it will be downloaded, stored in the
        DataSource directory and opened from there.  `mode` is 'r' to
        read (default), 'w' to write, or 'a' to append; availability
        depends on the kind of object `path` names.
        """
        return DataSource.open(self, self._fullpath(path), mode)

    def listdir(self):
        """
        List files in the source Repository.

        Returns the list of file names (without any directory part).
        Does not currently work for remote repositories.
        """
        if self._isurl(self._baseurl):
            raise NotImplementedError(
                "Directory listing of URLs, not supported yet.")
        return os.listdir(self._baseurl)
| bsd-3-clause |
lightcn/odoo | addons/account_asset/__openerp__.py | 314 | 2182 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo addon manifest: this dict is evaluated by the module
# loader, not imported as Python code.
{
    'name': 'Assets Management',
    'version': '1.0',
    'depends': ['account'],          # extends the core accounting module
    'author': 'OpenERP S.A.',
    'description': """
Financial and accounting asset management.
==========================================
This Module manages the assets owned by a company or an individual. It will keep
track of depreciation's occurred on those assets. And it allows to create Move's
of the depreciation lines.
""",
    'website': 'https://www.odoo.com/page/accounting',
    'category': 'Accounting & Finance',
    'sequence': 32,                  # ordering weight in the apps list
    'demo': [ 'account_asset_demo.xml'],
    # YAML test scenarios run by the legacy test framework.
    'test': [
        'test/account_asset_demo.yml',
        'test/account_asset.yml',
        'test/account_asset_wizard.yml',
    ],
    # Data files loaded at install/update time (security rules first).
    'data': [
        'security/account_asset_security.xml',
        'security/ir.model.access.csv',
        'wizard/account_asset_change_duration_view.xml',
        'wizard/wizard_asset_compute_view.xml',
        'account_asset_view.xml',
        'account_asset_invoice_view.xml',
        'report/account_asset_report_view.xml',
    ],
    'auto_install': False,           # only installed when explicitly selected
    'installable': True,
    'application': False,            # an accounting extension, not a standalone app
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
simone/django-gb | django/conf/locale/lt/formats.py | 82 | 1834 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Backslash-escaped letters in the raw strings (e.g. r'\m', r'\d') are
# emitted literally -- Lithuanian abbreviations "m." (year) and "d." (day) --
# not interpreted as format specifiers.
DATE_FORMAT = r'Y \m. E j \d.'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = r'Y \m. E j \d., H:i:s'
YEAR_MONTH_FORMAT = r'Y \m. F'
MONTH_DAY_FORMAT = r'E j \d.'
SHORT_DATE_FORMAT = 'Y-m-d'
SHORT_DATETIME_FORMAT = 'Y-m-d H:i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Formats are tried in order; the first that parses wins.
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y',  # '2006-10-25', '25.10.2006', '25.10.06'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S',     # '14:30:59'
    '%H:%M:%S.%f',  # '14:30:59.000200'
    '%H:%M',        # '14:30'
    '%H.%M.%S',     # '14.30.59'
    '%H.%M.%S.%f',  # '14.30.59.000200'
    '%H.%M',        # '14.30'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%d.%m.%Y %H:%M:%S',     # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',        # '25.10.2006 14:30'
    '%d.%m.%Y',              # '25.10.2006'
    '%d.%m.%y %H:%M:%S',     # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f',  # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M',        # '25.10.06 14:30'
    '%d.%m.%y %H.%M.%S',     # '25.10.06 14.30.59'
    '%d.%m.%y %H.%M.%S.%f',  # '25.10.06 14.30.59.000200'
    '%d.%m.%y %H.%M',        # '25.10.06 14.30'
    '%d.%m.%y',              # '25.10.06'
)
# Lithuanian number formatting: comma decimal mark, dot thousands
# separator, digits grouped in threes.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
jptomo/rpython-lang-scheme | rpython/translator/exceptiontransform.py | 2 | 22163 | from rpython.translator.simplify import join_blocks, cleanup_graph
from rpython.translator.unsimplify import varoftype
from rpython.translator.unsimplify import insert_empty_block, split_block
from rpython.translator.backendopt import canraise, inline
from rpython.flowspace.model import Block, Constant, Variable, Link, \
SpaceOperation, FunctionGraph, mkentrymap
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.rtyper.lltypesystem import lloperation
from rpython.rtyper.rclass import ll_inst_type
from rpython.rtyper import rtyper
from rpython.rtyper.rmodel import inputconst
from rpython.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong
from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat
from rpython.rlib.debug import ll_assert
from rpython.rtyper.llannotation import lltype_to_annotation
from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator
from rpython.tool.sourcetools import func_with_new_name
# Sentinel "error" return values, one per primitive low-level type.  A
# transformed function that has stored an exception into the global ExcData
# struct returns this value; callers test for it (or for a set exc_type)
# instead of relying on native unwinding.  See error_value() below and
# ExceptionTransformer.gen_exc_check().
PrimitiveErrorValue = {lltype.Signed: -1,
                       lltype.Unsigned: r_uint(-1),
                       lltype.SignedLongLong: r_longlong(-1),
                       lltype.UnsignedLongLong: r_ulonglong(-1),
                       lltype.Float: -1.0,
                       lltype.SingleFloat: r_singlefloat(-1.0),
                       lltype.LongFloat: r_longfloat(-1.0),
                       lltype.Char: chr(255),
                       lltype.UniChar: unichr(0xFFFF), # XXX is this always right?
                       lltype.Bool: True,
                       llmemory.Address: llmemory.NULL,
                       lltype.Void: None}

# Every fixed-size C number type from rffi also gets -1 (cast to the type)
# as its sentinel.
for TYPE in rffi.NUMBER_TYPES:
    PrimitiveErrorValue[TYPE] = lltype.cast_primitive(TYPE, -1)
del TYPE
def error_value(T):
    """Return the sentinel 'exception pending' value for low-level type T.

    Primitives map through the PrimitiveErrorValue table; pointer types
    use the NULL pointer of their target type.
    """
    if isinstance(T, lltype.Primitive):
        return PrimitiveErrorValue[T]
    if isinstance(T, lltype.Ptr):
        return lltype.nullptr(T.TO)
    assert 0, "not implemented yet"
def error_constant(T):
    """Wrap T's sentinel error value into a flow-graph Constant of type T."""
    sentinel = error_value(T)
    return Constant(sentinel, T)
def constant_value(llvalue):
    """Build a Constant for llvalue, inferring its low-level type."""
    T = lltype.typeOf(llvalue)
    return Constant(llvalue, T)
class ExceptionTransformer(object):
    """Rewrite flow graphs so that exceptions propagate explicitly.

    After the transformation, a function signals a raised exception by
    storing its type/value into a global ExcData struct and returning a
    per-return-type sentinel value (see error_value()); callers insert a
    check after every can-raise operation and re-raise by returning their
    own sentinel, instead of relying on native stack unwinding.
    """

    def __init__(self, translator):
        self.translator = translator
        self.raise_analyzer = canraise.RaiseAnalyzer(translator)
        edata = translator.rtyper.exceptiondata
        self.lltype_of_exception_value = edata.lltype_of_exception_value
        self.lltype_of_exception_type = edata.lltype_of_exception_type
        self.mixlevelannotator = MixLevelHelperAnnotator(translator.rtyper)
        exc_data, null_type, null_value = self.setup_excdata()

        (assertion_error_ll_exc_type,
         assertion_error_ll_exc) = self.get_builtin_exception(AssertionError)
        (n_i_error_ll_exc_type,
         n_i_error_ll_exc) = self.get_builtin_exception(NotImplementedError)

        self.c_assertion_error_ll_exc_type = constant_value(
            assertion_error_ll_exc_type)
        self.c_n_i_error_ll_exc_type = constant_value(n_i_error_ll_exc_type)

        # The closures below capture exc_data and are turned into plain
        # function pointers via build_func() so generated code can call them.

        def rpyexc_occured():
            exc_type = exc_data.exc_type
            return bool(exc_type)

        def rpyexc_fetch_type():
            return exc_data.exc_type

        def rpyexc_fetch_value():
            return exc_data.exc_value

        def rpyexc_clear():
            exc_data.exc_type = null_type
            exc_data.exc_value = null_value

        def rpyexc_raise(etype, evalue):
            # When compiling in debug mode, the following ll_asserts will
            # crash the program as soon as it raises AssertionError or
            # NotImplementedError.  Useful when you are in a debugger.
            # When compiling in release mode, AssertionErrors and
            # NotImplementedErrors are raised normally, and only later
            # caught by debug_catch_exception and printed, which allows
            # us to see at least part of the traceback for them.
            ll_assert(etype != assertion_error_ll_exc_type, "AssertionError")
            ll_assert(etype != n_i_error_ll_exc_type, "NotImplementedError")
            exc_data.exc_type = etype
            exc_data.exc_value = evalue
            lloperation.llop.debug_start_traceback(lltype.Void, etype)

        def rpyexc_reraise(etype, evalue):
            exc_data.exc_type = etype
            exc_data.exc_value = evalue
            lloperation.llop.debug_reraise_traceback(lltype.Void, etype)

        def rpyexc_fetch_exception():
            evalue = rpyexc_fetch_value()
            rpyexc_clear()
            return evalue

        def rpyexc_restore_exception(evalue):
            if evalue:
                exc_data.exc_type = ll_inst_type(evalue)
                exc_data.exc_value = evalue

        self.rpyexc_occured_ptr = self.build_func(
            "RPyExceptionOccurred",
            rpyexc_occured,
            [], lltype.Bool)

        self.rpyexc_fetch_type_ptr = self.build_func(
            "RPyFetchExceptionType",
            rpyexc_fetch_type,
            [], self.lltype_of_exception_type)

        self.rpyexc_fetch_value_ptr = self.build_func(
            "RPyFetchExceptionValue",
            rpyexc_fetch_value,
            [], self.lltype_of_exception_value)

        self.rpyexc_clear_ptr = self.build_func(
            "RPyClearException",
            rpyexc_clear,
            [], lltype.Void)

        self.rpyexc_raise_ptr = self.build_func(
            "RPyRaiseException",
            self.noinline(rpyexc_raise),
            [self.lltype_of_exception_type, self.lltype_of_exception_value],
            lltype.Void,
            jitcallkind='rpyexc_raise') # for the JIT

        self.rpyexc_reraise_ptr = self.build_func(
            "RPyReRaiseException",
            rpyexc_reraise,
            [self.lltype_of_exception_type, self.lltype_of_exception_value],
            lltype.Void,
            jitcallkind='rpyexc_raise') # for the JIT

        self.rpyexc_fetch_exception_ptr = self.build_func(
            "RPyFetchException",
            rpyexc_fetch_exception,
            [], self.lltype_of_exception_value)

        self.rpyexc_restore_exception_ptr = self.build_func(
            "RPyRestoreException",
            self.noinline(rpyexc_restore_exception),
            [self.lltype_of_exception_value], lltype.Void)

        self.build_extra_funcs()

        self.mixlevelannotator.finish()
        self.lltype_to_classdef = translator.rtyper.lltype_to_classdef_mapping()

    def noinline(self, fn):
        """Return a renamed copy of fn marked so the inliner will skip it."""
        fn = func_with_new_name(fn, fn.__name__)
        fn._dont_inline_ = True
        return fn

    def build_func(self, name, fn, inputtypes, rettype, **kwds):
        """Annotate helper fn and wrap its graph into a Constant func pointer."""
        l2a = lltype_to_annotation
        graph = self.mixlevelannotator.getgraph(fn, map(l2a, inputtypes), l2a(rettype))
        return self.constant_func(name, inputtypes, rettype, graph,
                                  exception_policy="exc_helper", **kwds)

    def get_builtin_exception(self, Class):
        """Return (ll exception type, ll exception instance) for builtin Class."""
        edata = self.translator.rtyper.exceptiondata
        bk = self.translator.annotator.bookkeeper
        error_def = bk.getuniqueclassdef(Class)
        error_ll_exc = edata.get_standard_ll_exc_instance(
            self.translator.rtyper, error_def)
        error_ll_exc_type = ll_inst_type(error_ll_exc)
        return error_ll_exc_type, error_ll_exc

    def transform_completely(self):
        # Transform every graph known to the translator.
        for graph in self.translator.graphs:
            self.create_exception_handling(graph)

    def create_exception_handling(self, graph):
        """After an exception in a direct_call (or indirect_call), that is not caught
        by an explicit
        except statement, we need to reraise the exception. So after this
        direct_call we need to test if an exception had occurred. If so, we return
        from the current graph with a special value (False/-1/-1.0/null).
        Because of the added exitswitch we need an additional block.
        """
        if hasattr(graph, 'exceptiontransformed'):
            # Already transformed: only sanity-check it was by this transformer.
            assert self.same_obj(self.exc_data_ptr, graph.exceptiontransformed)
            return
        else:
            self.raise_analyzer.analyze_direct_call(graph)
            graph.exceptiontransformed = self.exc_data_ptr

        join_blocks(graph)
        # collect the blocks before changing them
        n_need_exc_matching_blocks = 0
        n_gen_exc_checks = 0
        #
        entrymap = mkentrymap(graph)
        if graph.exceptblock in entrymap:
            for link in entrymap[graph.exceptblock]:
                self.transform_jump_to_except_block(graph, entrymap, link)
        #
        for block in list(graph.iterblocks()):
            self.replace_fetch_restore_operations(block)
            need_exc_matching, gen_exc_checks = self.transform_block(graph, block)
            n_need_exc_matching_blocks += need_exc_matching
            n_gen_exc_checks += gen_exc_checks
        cleanup_graph(graph)
        return n_need_exc_matching_blocks, n_gen_exc_checks

    def replace_fetch_restore_operations(self, block):
        # the gctransformer will create these operations. It looks as if the
        # order of transformations is important - but the gctransformer will
        # put them in a new graph, so all transformations will run again.
        for i in range(len(block.operations)):
            opname = block.operations[i].opname
            if opname == 'gc_fetch_exception':
                block.operations[i].opname = "direct_call"
                block.operations[i].args = [self.rpyexc_fetch_exception_ptr]
            elif opname == 'gc_restore_exception':
                block.operations[i].opname = "direct_call"
                block.operations[i].args.insert(0, self.rpyexc_restore_exception_ptr)
            elif opname == 'get_exception_addr': # only for lltype
                block.operations[i].opname = "direct_call"
                block.operations[i].args.insert(0, self.rpyexc_get_exception_addr_ptr)
            elif opname == 'get_exc_value_addr': # only for lltype
                block.operations[i].opname = "direct_call"
                block.operations[i].args.insert(0, self.rpyexc_get_exc_value_addr_ptr)

    def transform_block(self, graph, block):
        """Insert exception checks after each can-raise operation of block.

        Returns (need_exc_matching, n_gen_exc_checks).
        """
        need_exc_matching = False
        n_gen_exc_checks = 0
        if block is graph.exceptblock:
            return need_exc_matching, n_gen_exc_checks
        elif block is graph.returnblock:
            return need_exc_matching, n_gen_exc_checks
        last_operation = len(block.operations) - 1
        if block.canraise:
            need_exc_matching = True
            last_operation -= 1
        elif (len(block.exits) == 1 and
              block.exits[0].target is graph.returnblock and
              len(block.operations) and
              (block.exits[0].args[0].concretetype is lltype.Void or
               block.exits[0].args[0] is block.operations[-1].result) and
              block.operations[-1].opname not in ('malloc', 'malloc_varsize')): # special cases
            last_operation -= 1
        lastblock = block
        # Walk backwards so block splits do not disturb earlier indices.
        for i in range(last_operation, -1, -1):
            op = block.operations[i]
            if not self.raise_analyzer.can_raise(op):
                continue

            splitlink = split_block(block, i+1)
            afterblock = splitlink.target
            if lastblock is block:
                lastblock = afterblock

            self.gen_exc_check(block, graph.returnblock, afterblock)
            n_gen_exc_checks += 1
        if need_exc_matching:
            assert lastblock.canraise
            if not self.raise_analyzer.can_raise(lastblock.operations[-1]):
                lastblock.exitswitch = None
                lastblock.recloseblock(lastblock.exits[0])
                lastblock.exits[0].exitcase = None
            else:
                self.insert_matching(lastblock, graph)
        return need_exc_matching, n_gen_exc_checks

    def comes_from_last_exception(self, entrymap, link):
        """True if link.args[1] can be traced back to a 'last_exc_value'."""
        seen = set()
        pending = [(link, link.args[1])]
        while pending:
            link, v = pending.pop()
            if (link, v) in seen:
                continue
            seen.add((link, v))
            if link.last_exc_value is not None and v is link.last_exc_value:
                return True
            block = link.prevblock
            if block is None:
                continue
            # Walk backwards through cast_pointer chains to the source var.
            for op in block.operations[::-1]:
                if v is op.result:
                    if op.opname == 'cast_pointer':
                        v = op.args[0]
                    else:
                        break
            for link in entrymap.get(block, ()):
                for v1, v2 in zip(link.args, block.inputargs):
                    if v2 is v:
                        pending.append((link, v1))
        return False

    def transform_jump_to_except_block(self, graph, entrymap, link):
        """Replace a jump to the except block with a raise/reraise helper call
        followed by a return of the sentinel error value."""
        reraise = self.comes_from_last_exception(entrymap, link)
        result = Variable()
        result.concretetype = lltype.Void
        block = Block([v.copy() for v in graph.exceptblock.inputargs])
        if reraise:
            block.operations = [
                SpaceOperation("direct_call",
                               [self.rpyexc_reraise_ptr] + block.inputargs,
                               result),
                ]
        else:
            block.operations = [
                SpaceOperation("direct_call",
                               [self.rpyexc_raise_ptr] + block.inputargs,
                               result),
                SpaceOperation('debug_record_traceback', [],
                               varoftype(lltype.Void)),
                ]
        link.target = block
        RETTYPE = graph.returnblock.inputargs[0].concretetype
        l = Link([error_constant(RETTYPE)], graph.returnblock)
        block.recloseblock(l)

    def insert_matching(self, block, graph):
        proxygraph, op = self.create_proxy_graph(block.operations[-1])
        block.operations[-1] = op
        #non-exception case
        block.exits[0].exitcase = block.exits[0].llexitcase = None
        # use the dangerous second True flag :-)
        inliner = inline.OneShotInliner(
            self.translator, graph, self.lltype_to_classdef,
            inline_guarded_calls=True, inline_guarded_calls_no_matter_what=True,
            raise_analyzer=self.raise_analyzer)
        inliner.inline_once(block, len(block.operations)-1)
        #block.exits[0].exitcase = block.exits[0].llexitcase = False

    def create_proxy_graph(self, op):
        """ creates a graph which calls the original function, checks for
        raised exceptions, fetches and then raises them again. If this graph is
        inlined, the correct exception matching blocks are produced."""
        # XXX slightly annoying: construct a graph by hand
        # but better than the alternative
        result = op.result.copy()
        opargs = []
        inputargs = []
        callargs = []
        ARGTYPES = []
        for var in op.args:
            if isinstance(var, Variable):
                v = Variable()
                v.concretetype = var.concretetype
                inputargs.append(v)
                opargs.append(v)
                callargs.append(var)
                ARGTYPES.append(var.concretetype)
            else:
                opargs.append(var)
        newop = SpaceOperation(op.opname, opargs, result)
        startblock = Block(inputargs)
        startblock.operations.append(newop)
        newgraph = FunctionGraph("dummy_exc1", startblock)
        startblock.closeblock(Link([result], newgraph.returnblock))
        newgraph.returnblock.inputargs[0].concretetype = op.result.concretetype
        self.gen_exc_check(startblock, newgraph.returnblock)
        excblock = Block([])

        llops = rtyper.LowLevelOpList(None)
        var_value = self.gen_getfield('exc_value', llops)
        var_type = self.gen_getfield('exc_type' , llops)
        #
        c_check1 = self.c_assertion_error_ll_exc_type
        c_check2 = self.c_n_i_error_ll_exc_type
        llops.genop('debug_catch_exception', [var_type, c_check1, c_check2])
        #
        self.gen_setfield('exc_value', self.c_null_evalue, llops)
        self.gen_setfield('exc_type', self.c_null_etype, llops)
        excblock.operations[:] = llops
        newgraph.exceptblock.inputargs[0].concretetype = self.lltype_of_exception_type
        newgraph.exceptblock.inputargs[1].concretetype = self.lltype_of_exception_value
        excblock.closeblock(Link([var_type, var_value], newgraph.exceptblock))
        startblock.exits[True].target = excblock
        startblock.exits[True].args = []
        fptr = self.constant_func("dummy_exc1", ARGTYPES, op.result.concretetype, newgraph)
        return newgraph, SpaceOperation("direct_call", [fptr] + callargs, op.result)

    def gen_exc_check(self, block, returnblock, normalafterblock=None):
        """Append an exitswitch to block that tests for a pending exception
        and, if one is set, returns the sentinel error value."""
        llops = rtyper.LowLevelOpList(None)

        spaceop = block.operations[-1]
        alloc_shortcut = self.check_for_alloc_shortcut(spaceop)

        if alloc_shortcut:
            # For mallocs, a non-null result already proves "no exception".
            var_no_exc = self.gen_nonnull(spaceop.result, llops)
        else:
            v_exc_type = self.gen_getfield('exc_type', llops)
            var_no_exc = self.gen_isnull(v_exc_type, llops)
        #
        # We could add a "var_no_exc is likely true" hint, but it seems
        # not to help, so it was commented out again.
        #var_no_exc = llops.genop('likely', [var_no_exc], lltype.Bool)

        block.operations.extend(llops)

        block.exitswitch = var_no_exc
        #exception occurred case
        b = Block([])
        b.operations = [SpaceOperation('debug_record_traceback', [],
                                       varoftype(lltype.Void))]
        l = Link([error_constant(returnblock.inputargs[0].concretetype)], returnblock)
        b.closeblock(l)
        l = Link([], b)
        l.exitcase = l.llexitcase = False

        #non-exception case
        l0 = block.exits[0]
        l0.exitcase = l0.llexitcase = True

        block.recloseblock(l0, l)

        insert_zeroing_op = False
        if spaceop.opname in ['malloc','malloc_varsize']:
            flavor = spaceop.args[1].value['flavor']
            if flavor == 'gc':
                insert_zeroing_op = True
            true_zero = spaceop.args[1].value.get('zero', False)
        # NB. when inserting more special-cases here, keep in mind that
        # you also need to list the opnames in transform_block()
        # (see "special cases")

        if insert_zeroing_op:
            if normalafterblock is None:
                normalafterblock = insert_empty_block(l0)
            v_result = spaceop.result
            if v_result in l0.args:
                result_i = l0.args.index(v_result)
                v_result_after = normalafterblock.inputargs[result_i]
            else:
                v_result_after = v_result.copy()
                l0.args.append(v_result)
                normalafterblock.inputargs.append(v_result_after)
            if true_zero:
                opname = "zero_everything_inside"
            else:
                opname = "zero_gc_pointers_inside"
            normalafterblock.operations.insert(
                0, SpaceOperation(opname, [v_result_after],
                                  varoftype(lltype.Void)))

    def setup_excdata(self):
        """Allocate the immortal global ExcData struct and NULL constants."""
        EXCDATA = lltype.Struct('ExcData',
            ('exc_type', self.lltype_of_exception_type),
            ('exc_value', self.lltype_of_exception_value))
        self.EXCDATA = EXCDATA

        exc_data = lltype.malloc(EXCDATA, immortal=True)
        null_type = lltype.nullptr(self.lltype_of_exception_type.TO)
        null_value = lltype.nullptr(self.lltype_of_exception_value.TO)

        self.exc_data_ptr = exc_data
        self.cexcdata = Constant(exc_data, lltype.Ptr(self.EXCDATA))

        self.c_null_etype = Constant(null_type, self.lltype_of_exception_type)
        self.c_null_evalue = Constant(null_value, self.lltype_of_exception_value)

        return exc_data, null_type, null_value

    def constant_func(self, name, inputtypes, rettype, graph, **kwds):
        FUNC_TYPE = lltype.FuncType(inputtypes, rettype)
        fn_ptr = lltype.functionptr(FUNC_TYPE, name, graph=graph, **kwds)
        return Constant(fn_ptr, lltype.Ptr(FUNC_TYPE))

    def gen_getfield(self, name, llops):
        c_name = inputconst(lltype.Void, name)
        return llops.genop('getfield', [self.cexcdata, c_name],
                           resulttype = getattr(self.EXCDATA, name))

    def gen_setfield(self, name, v_value, llops):
        c_name = inputconst(lltype.Void, name)
        llops.genop('setfield', [self.cexcdata, c_name, v_value])

    def gen_isnull(self, v, llops):
        return llops.genop('ptr_iszero', [v], lltype.Bool)

    def gen_nonnull(self, v, llops):
        return llops.genop('ptr_nonzero', [v], lltype.Bool)

    def same_obj(self, ptr1, ptr2):
        return ptr1._same_obj(ptr2)

    def check_for_alloc_shortcut(self, spaceop):
        """True if spaceop can only raise by returning a NULL result
        (a GC allocation or a call with the newlist oopspec)."""
        if spaceop.opname in ('malloc', 'malloc_varsize'):
            return True
        elif spaceop.opname == 'direct_call':
            fnobj = spaceop.args[0].value._obj
            if hasattr(fnobj, '_callable'):
                oopspec = getattr(fnobj._callable, 'oopspec', None)
                if oopspec and oopspec == 'newlist(length)':
                    return True
        return False

    def build_extra_funcs(self):
        """Build the address-of-exc-field helpers used by the gctransformer."""
        EXCDATA = self.EXCDATA
        exc_data = self.exc_data_ptr

        def rpyexc_get_exception_addr():
            return (llmemory.cast_ptr_to_adr(exc_data) +
                    llmemory.offsetof(EXCDATA, 'exc_type'))

        def rpyexc_get_exc_value_addr():
            return (llmemory.cast_ptr_to_adr(exc_data) +
                    llmemory.offsetof(EXCDATA, 'exc_value'))

        self.rpyexc_get_exception_addr_ptr = self.build_func(
            "RPyGetExceptionAddr",
            rpyexc_get_exception_addr,
            [], llmemory.Address)

        self.rpyexc_get_exc_value_addr_ptr = self.build_func(
            "RPyGetExcValueAddr",
            rpyexc_get_exc_value_addr,
            [], llmemory.Address)
| mit |
dkubiak789/odoo | addons/hr_recruitment/wizard/__init__.py | 381 | 1095 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-Today OpenERP (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_recruitment_create_partner_job
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Cybernetic1/bookcam | crop-fixed.py | 1 | 5190 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
## Automatically crop book margins
## ================================================================
## Use SimpleCV
from SimpleCV import *
from math import sqrt
import sys
import numpy as np
from subprocess import call
import cv
import cv2
# Python 2 script (print statements, raw_input).  Interactive loop: show each
# image with a crop frame, adjust the frame with the keyboard, then save the
# cropped + resized result under a new name.
standard_height = 1600.0 #1430.0
standard_width = 1200.0 #970.0
print "Standard width = ", standard_width
print "Standard height = ", standard_height

# Create preview window
cv2.namedWindow('preview', 0)
# cv2.resizeWindow('preview', standard_width, standard_height)

if len(sys.argv) == 1:
    print "\nUsage:"
    print "To crop one file use: (default out_file = 'test.png')"
    print " crop -1 image_file [out_file]"
    print "To crop a list of images:"
    print " crop files_list"
    # print "where L/R specifies which side is the 'book' side"
    print "\nDuring preview: press 'f' to record failure"
    print " 'd' to delete current file"
    print " any other key to accept\n"
    # print "where 'left/right' specifies which side is the 'variable' side."
    exit()

# Append-mode logs: skipped files and recorded failures.
f1 = open('remainder_list', 'a')
f2 = open('failure_list', 'a')

## !!!!!!!!!!! Notice that right = 0 and left = 1 !!!!!!!!!!!

# *********** This part is not working **************
#if sys.argv[1] == "-1":
#print "This function is not working"
#exit()
#img0 = Image(sys.argv[2])
#while True:
#old_width = img0.width
#old_height = img0.height
#img1 = crop(img0)
#img0 = img1
#if old_width == img0.width and old_height == img0.height:
#break
#img1 = resize(img0, bookside)
#if sys.argv[3] is None:
#img1.save("test.png")
#else:
#img1.save(sys.argv[3])
## img1 = Image("test.jpg")
## raw_input("Press Enter to continue...")
#exit()

############################ New Workflow ############################
# -- allow users to choose margins and try again
# -- backspace to re-examine previous files?

# right left top bottom
crop = [20, 20, 20, 20]
print "crop right/left/top/bottom = ", crop

with open(sys.argv[1]) as f:
    files = f.readlines()

skip = False
i = 0
while i < len(files):
    fname = files[i]
    i += 1
    fname1 = fname.rstrip() # '\n' is included in fname
    if skip:
        f1.write(fname1 + '\n')
        print "skipping: " + fname1
        continue
    print "***** Processing: " + fname1
    print " backspace = previous, d = delete, f = record failure, esc = skip rest"
    print " arrows = shrink, shift arrows = expand, ctrl arrows = fast shrink"
    print " any other key = accept"
    while True:
        #img0 = Image(fname1)
        # bookside = find_bookside(img0)
        img0 = cv2.imread(fname1, 0)
        old_height, old_width = img0.shape
        # preview original image with red frame
        # draw red frame
        # NOTE(review): the frame is drawn onto img0 itself, and the accept
        # branch below crops img0 -- the frame's edge pixels end up inside
        # the saved image; draw on a copy instead.
        cv2.rectangle(img0, (crop[1], crop[2]), (old_width - crop[0], old_height - crop[3]), (0,0,0), 1)
        cv2.imshow('preview', img0)
        # ask for key and possibly redraw red frame
        # NOTE(review): the 6536x key codes look like X11 keysyms returned by
        # cv2.waitKey on Linux/GTK; they are platform-dependent -- confirm.
        key = cv2.waitKey(0)
        if key == 65363: # right
            crop[0] += 1
        elif key == 65361: # left
            crop[1] += 1
        elif key == 65362: # top
            crop[2] += 1
        elif key == 65364: # bottom
            crop[3] += 1
        elif key == 65363 + 262144: # right
            crop[0] += 10
        elif key == 65361 + 262144: # left
            crop[1] += 10
        elif key == 65362 + 262144: # top
            crop[2] += 10
        elif key == 65364 + 262144: # bottom
            crop[3] += 10
        elif key == 65363 + 65536: # Right
            crop[0] -= 1
        elif key == 65361 + 65536: # Left
            crop[1] -= 1
        elif key == 65362 + 65536: # Top
            crop[2] -= 1
        elif key == 65364 + 65536: # Bottom
            crop[3] -= 1
        elif key == ord('0'): # full view crop
            crop = [0, 0, 0, 0]
        elif key == ord('f'): # record failure
            f2.write(fname)
            print "failed: " + fname
            call(["beep", "-f300"])
            # delete file?
            break
        elif key == ord('d'): # delete
            os.remove(fname1)
            print " deleted: " + fname
            call(["beep", "-f300"])
            break
        elif key == 65288: # Backspace = go to previous image
            # NOTE(review): with i == 1 (first image) this makes i == -1 and
            # files[-1]/files[-2] wrap to the end of the list -- guard needed.
            i -= 2
            fname = files[i]
            fname1 = fname.rstrip() # '\n' is included in fname
            print " new image index = " + str(i)
        elif key == ord('o'): # set options
            # NOTE(review): left_percent/right_percent/top_percent/
            # bottom_percent/sum_threshold are never initialized anywhere in
            # this script, so the first 'o' keypress raises NameError on the
            # print below -- leftover from the removed auto-crop workflow.
            print " Examining book sides (left, right, top, bottom) = ", \
                [left_percent, right_percent, top_percent, bottom_percent]
            print " Color sum threshold = ", sum_threshold
            ans = raw_input(" input new parameters (threshold, L, R, T, B): ")
            ans2 = ans.split(',')
            sum_threshold = int(ans2[0])
            left_percent = float(ans2[1])
            right_percent = float(ans2[2])
            top_percent = float(ans2[3])
            bottom_percent = float(ans2[4])
            i -= 1 # remain at current index
        elif key == 27 or key == ord('q'): # quit
            skip = True # set flag to skip remainders
            break
        elif key < 256: #accept
            # do the real cropping and resize
            img1 = img0[crop[2] : old_height - crop[3], crop[1] : old_width - crop[0]]
            img0 = cv2.resize(img1, (int(standard_width), int(standard_height)))
            new_name = "3" + fname1[1:] # add filename prefix with '1'
            cv2.imwrite(new_name, img0)
            print " saved image: " + new_name
            call(["beep", "-l30", "-f2500"])
            break

f1.close()
f2.close()
exit()
| apache-2.0 |
MakeHer/edx-platform | lms/djangoapps/mobile_api/test_milestones.py | 80 | 5550 | """
Milestone related tests for the mobile_api
"""
from mock import patch
from courseware.access_response import MilestoneError
from courseware.tests.helpers import get_request_for_user
from courseware.tests.test_entrance_exam import answer_entrance_exam_problem, add_entrance_exam_milestone
from util.milestones_helpers import (
add_prerequisite_course,
fulfill_course_milestone,
seed_milestone_relationship_types,
)
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
class MobileAPIMilestonesMixin(object):
    """
    Tests the Mobile API decorators for milestones.

    The two milestones currently supported in these tests are entrance exams and
    pre-requisite courses. If either of these milestones are unfulfilled,
    the mobile api will appropriately block content until the milestone is
    fulfilled.

    NOTE(review): this is a mixin -- it relies on the host test class to
    provide self.course, self.user, init_course_access() and api_response().
    """
    # When True, endpoints are expected to serve content even with
    # unfulfilled milestones (e.g. the enrollments list).
    ALLOW_ACCESS_TO_MILESTONE_COURSE = False # pylint: disable=invalid-name

    @patch.dict('django.conf.settings.FEATURES', {'ENABLE_PREREQUISITE_COURSES': True, 'MILESTONES_APP': True})
    def test_unfulfilled_prerequisite_course(self):
        """ Tests the case for an unfulfilled pre-requisite course """
        self._add_prerequisite_course()
        self.init_course_access()
        self._verify_unfulfilled_milestone_response()

    @patch.dict('django.conf.settings.FEATURES', {'ENABLE_PREREQUISITE_COURSES': True, 'MILESTONES_APP': True})
    def test_unfulfilled_prerequisite_course_for_staff(self):
        """ Staff users are expected to bypass the prerequisite milestone. """
        self._add_prerequisite_course()
        self.user.is_staff = True
        self.user.save()
        self.init_course_access()
        self.api_response()

    @patch.dict('django.conf.settings.FEATURES', {'ENABLE_PREREQUISITE_COURSES': True, 'MILESTONES_APP': True})
    def test_fulfilled_prerequisite_course(self):
        """
        Tests the case when a user fulfills existing pre-requisite course
        """
        self._add_prerequisite_course()
        add_prerequisite_course(self.course.id, self.prereq_course.id)
        fulfill_course_milestone(self.prereq_course.id, self.user)
        self.init_course_access()
        self.api_response()

    @patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': True, 'MILESTONES_APP': True})
    def test_unpassed_entrance_exam(self):
        """
        Tests the case where the user has not passed the entrance exam
        """
        self._add_entrance_exam()
        self.init_course_access()
        self._verify_unfulfilled_milestone_response()

    @patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': True, 'MILESTONES_APP': True})
    def test_unpassed_entrance_exam_for_staff(self):
        """ Staff users are expected to bypass the entrance-exam milestone. """
        self._add_entrance_exam()
        self.user.is_staff = True
        self.user.save()
        self.init_course_access()
        self.api_response()

    @patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': True, 'MILESTONES_APP': True})
    def test_passed_entrance_exam(self):
        """
        Tests access when user has passed the entrance exam
        """
        self._add_entrance_exam()
        self._pass_entrance_exam()
        self.init_course_access()
        self.api_response()

    def _add_entrance_exam(self):
        """ Sets up entrance exam """
        seed_milestone_relationship_types()
        self.course.entrance_exam_enabled = True

        self.entrance_exam = ItemFactory.create( # pylint: disable=attribute-defined-outside-init
            parent=self.course,
            category="chapter",
            display_name="Entrance Exam Chapter",
            is_entrance_exam=True,
            in_entrance_exam=True
        )
        self.problem_1 = ItemFactory.create( # pylint: disable=attribute-defined-outside-init
            parent=self.entrance_exam,
            category='problem',
            display_name="The Only Exam Problem",
            graded=True,
            in_entrance_exam=True
        )
        add_entrance_exam_milestone(self.course, self.entrance_exam)

        # Passing threshold is 50% of the single exam problem.
        self.course.entrance_exam_minimum_score_pct = 0.50
        self.course.entrance_exam_id = unicode(self.entrance_exam.location)
        modulestore().update_item(self.course, self.user.id)

    def _add_prerequisite_course(self):
        """ Helper method to set up the prerequisite course """
        seed_milestone_relationship_types()
        self.prereq_course = CourseFactory.create() # pylint: disable=attribute-defined-outside-init
        add_prerequisite_course(self.course.id, self.prereq_course.id)

    def _pass_entrance_exam(self):
        """ Helper function to pass the entrance exam """
        request = get_request_for_user(self.user)
        answer_entrance_exam_problem(self.course, request, self.problem_1)

    def _verify_unfulfilled_milestone_response(self):
        """
        Verifies the response depending on ALLOW_ACCESS_TO_MILESTONE_COURSE

        Since different endpoints will have different behaviours towards milestones,
        setting ALLOW_ACCESS_TO_MILESTONE_COURSE (default is False) to True, will
        not return a 404. For example, when getting a list of courses a user is
        enrolled in, although a user may have unfulfilled milestones, the course
        should still show up in the course enrollments list.
        """
        if self.ALLOW_ACCESS_TO_MILESTONE_COURSE:
            self.api_response()
        else:
            response = self.api_response(expected_response_code=404)
            self.assertEqual(response.data, MilestoneError().to_json())
| agpl-3.0 |
kaideyi/KDYSample | kYPython/Scrapy/zhihu/zhihu/items.py | 1 | 1897 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy import Item, Field
class ZhihuItem(scrapy.Item):
    """Scrapy item holding one crawled Zhihu user profile.

    One Field per attribute; field names appear to mirror the JSON keys of
    the member API payload (TODO: confirm against the spider).
    Fix: the original declared ``type = Field()`` twice; the duplicate
    declaration has been removed.
    """
    id = Field()
    name = Field()
    account_status = Field()
    allow_message = Field()
    answer_count = Field()
    articles_count = Field()
    avatar_hue = Field()
    avatar_url = Field()
    avatar_url_template = Field()
    badge = Field()
    business = Field()
    employments = Field()
    columns_count = Field()
    commercial_question_count = Field()
    cover_url = Field()
    description = Field()
    educations = Field()
    favorite_count = Field()
    favorited_count = Field()
    follower_count = Field()
    following_columns_count = Field()
    following_favlists_count = Field()
    following_question_count = Field()
    following_topic_count = Field()
    gender = Field()
    headline = Field()
    hosted_live_count = Field()
    is_active = Field()
    is_bind_sina = Field()
    is_blocked = Field()
    is_advertiser = Field()
    is_blocking = Field()
    is_followed = Field()
    is_following = Field()
    is_force_renamed = Field()
    is_privacy_protected = Field()
    locations = Field()
    is_org = Field()
    type = Field()
    url = Field()
    url_token = Field()
    user_type = Field()
    logs_count = Field()
    marked_answers_count = Field()
    marked_answers_text = Field()
    message_thread_token = Field()
    mutual_followees_count = Field()
    participated_live_count = Field()
    pins_count = Field()
    question_count = Field()
    show_sina_weibo = Field()
    thank_from_count = Field()
    thank_to_count = Field()
    thanked_count = Field()
    vote_from_count = Field()
    vote_to_count = Field()
    voteup_count = Field()
| mit |
sg00dwin/origin | vendor/github.com/google/certificate-transparency/python/ct/cert_analysis/observation.py | 24 | 1620 | import logging
class Observation(object):
    """Describes certificate observation.

    NOTE(review): this is Python 2 code -- it uses the `unicode` builtin
    and returns '' (str) from details_to_proto when there are no details.
    """
    def __init__(self, description, reason=None, details=None):
        self.description = description
        self.reason = reason
        self.details = details

    def __repr__(self):
        return "%s(%s, %s, %s)" % (self.__class__.__name__,
                                   repr(self.description),
                                   repr(self.reason),
                                   repr(self.details))

    def __str__(self):
        """Returns observation representation as: description (reason) [details]
        (if some field is unavailable then even brackets aren't returned)
        """
        ret = self.description
        if self.reason:
            ret = "%s (%s)" % (ret, self.reason)
        if self.details:
            ret = "%s [%s]" % (ret, self._format_details())
        return ret

    def _format_details(self):
        """Convenience method, so it's easy to override how details have to be
        printed without overriding whole __str__.
        """
        try:
            if isinstance(self.details, str):
                return unicode(self.details, 'utf-8')
            else:
                return unicode(self.details)
        except Exception as e:
            # Best-effort: a broken details object must never break reporting.
            logging.warning("Unprintable observation %r" % self.details)
            return "UNPRINTABLE " + str(e)

    def details_to_proto(self):
        """Specifies how details should be written to protobuf."""
        if self.details:
            return self._format_details().encode('utf-8')
        else:
            return ''
| apache-2.0 |
jml/flocker | flocker/acceptance/test_moving_applications.py | 3 | 2010 | # Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Tests for moving applications between nodes.
"""
from twisted.trial.unittest import TestCase
from .testtools import (require_cluster,
MONGO_APPLICATION, MONGO_IMAGE,
get_mongo_application, require_flocker_cli)
class MovingApplicationTests(TestCase):
    """
    Tests for moving applications between nodes.

    Similar to http://doc-dev.clusterhq.com/gettingstarted/tutorial/
    moving-applications.html#moving-an-application
    """
    @require_flocker_cli
    @require_cluster(2)
    def test_moving_application(self, cluster):
        """
        After deploying an application to one node and then moving it onto
        another node, it is only on the second node. This only tests that the
        application is present with the given name and image on a second node
        after it has been moved from the first.
        """
        node_1, node_2 = cluster.nodes
        # First deployment: the mongo application lives on node_1 only.
        minimal_deployment = {
            u"version": 1,
            u"nodes": {
                node_1.reported_hostname: [MONGO_APPLICATION],
                node_2.reported_hostname: [],
            },
        }

        minimal_application = {
            u"version": 1,
            u"applications": {
                MONGO_APPLICATION: {
                    u"image": MONGO_IMAGE,
                },
            },
        }

        cluster.flocker_deploy(self, minimal_deployment, minimal_application)

        # Second deployment: same application config, but now on node_2.
        minimal_deployment_moved = {
            u"version": 1,
            u"nodes": {
                node_1.reported_hostname: [],
                node_2.reported_hostname: [MONGO_APPLICATION],
            },
        }

        cluster.flocker_deploy(
            self, minimal_deployment_moved, minimal_application)

        # node_1 must end up empty; node_2 must run the mongo application.
        return cluster.assert_expected_deployment(self, {
            node_1.reported_hostname: set([]),
            node_2.reported_hostname: set([get_mongo_application()])
        })
| apache-2.0 |
dyninst/toolchain-origin | script/FeatGen.py | 1 | 3014 | import sys
import argparse
import os
from subprocess import *
compilerList = ["GCC", "ICC", "LLVM", "PGI"]
def getParameters():
    """Parse the command line for the feature-extraction driver.

    Returns an argparse.Namespace carrying the binary list file, the
    output directory, optional idiom/graphlet size specifications
    (colon-separated integers, parsed later by ParseFeatureSize), the
    path to the external extraction binary, and a thread count.
    """
    parser = argparse.ArgumentParser(description='Extract function level code features for toolchain identification')
    parser.add_argument("--filelist", help="A list of binaries to extract features", required=True)
    parser.add_argument("--outputdir", help="The directory to store extracted features", required = True)
    parser.add_argument("--idiom", help="Extract instruction idioms with specified sizes.")
    parser.add_argument("--graphlet", help="Extract graphlets for functions")
    parser.add_argument("--path_to_extract_bin", help="The installed binary for extracting features", required=True)
    parser.add_argument("--thread", help="The number of threads for feature extraction", type=int, default=1)
    args = parser.parse_args()
    return args
def ParseFeatureSize(param):
    """Parse a colon-separated size spec such as "2:3:4" into [2, 3, 4].

    param: the raw command-line value, or None when the feature type
        was not requested.
    Returns None for None input; otherwise a list of ints.  A component
    that is not an integer raises ValueError (same as the original).
    """
    # 'is None' is the idiomatic identity test; a comprehension replaces
    # the manual append loop.
    if param is None:
        return None
    return [int(size) for size in param.split(":")]
def Execute(featType, featSize, path, filename):
    """Run the external feature extractor once for (featType, featSize).

    The extractor's output is written to
    <featureDir>/<filename>.<featType>.<featSize>, where featureDir is
    a module-level global assigned by the driver loop below.
    """
    global featureDir
    out = os.path.join(featureDir, "{0}.{1}.{2}".format(filename, featType, featSize))
    # OMP_NUM_THREADS throttles the extractor's OpenMP parallelism.
    cmd = "OMP_NUM_THREADS={0} ".format(args.thread)
    # HACK: hard-coded, user-specific LD_PRELOAD path -- this only works
    # on the original author's machine and should come from configuration.
    cmd += "LD_PRELOAD=/home/xm13/dyninst-pp/install/lib/libtbbmalloc_proxy.so "
    cmd += "{0} {1} {2} {3} {4}".format(args.path_to_extract_bin, path, featType, featSize, out)
    print cmd
    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    msg, err = p.communicate()
    # Anything on stderr is treated as an extraction error (non-fatal).
    if (len(err) > 0):
        print "Error message:", err
def GenerateFeatures(featType, featSizes, path, filename):
    """Extract one feature type for a single binary.

    featSizes of None means the feature type was not requested on the
    command line, so nothing is done.  "libcall" and "insns_freq"
    features have no size dimension and are extracted exactly once with
    size 1; every other feature type is extracted once per listed size.
    """
    if featSizes is None:
        return
    sizes = [1] if featType in ("libcall", "insns_freq") else featSizes
    for sz in sizes:
        Execute(featType, sz, path, filename)
def BuildDirStructure(featRootDir, dirs):
    """Create the nested chain dirs[0]/dirs[1]/... under featRootDir.

    Each entry of dirs becomes a child of the previous one; directories
    that already exist are reused.  The caller's working directory is
    restored even when directory creation fails (the original left the
    process stranded inside featRootDir on error).
    """
    curDir = os.getcwd()
    try:
        os.chdir(featRootDir)
        for d in dirs:
            if not os.path.exists(d):
                os.makedirs(d)
            os.chdir(d)
    finally:
        # Robustness fix: always restore the original working directory.
        os.chdir(curDir)
def GenerateDirPath(parts):
    # the output directory should have the following structure
    # featureRootDir/proj/compiler/version/opt
    #
    # parts is the input binary path split on "/".  The compiler name is
    # located anywhere in the path, and the neighbouring components are
    # assumed to be laid out as .../<proj>/<compiler>/<version>/<opt>/...
    # NOTE(review): relies on the module-level globals 'args' and
    # 'compilerList' and on BuildDirStructure defined above; an
    # IndexError is possible if the compiler is the last path component.
    compiler = "None"
    for i in range(len(parts)):
        if parts[i] in compilerList:
            compiler = parts[i]
            version = parts[i+1]
            opt = parts[i+2]
            proj = parts[i-1]
            break
    # A path without a recognized compiler component is a fatal input error.
    assert(compiler != "None")
    BuildDirStructure(args.outputdir, [proj, compiler, version, opt])
    return os.path.join(args.outputdir, proj, compiler, version, opt)
# ---- Script entry point: extract features for every listed binary ----
args = getParameters()
idiomSizes = ParseFeatureSize(args.idiom)
graphletSizes = ParseFeatureSize(args.graphlet)
for line in open(args.filelist, "r"):
    path = line[:-1]  # drop the trailing newline
    parts = path.split("/")
    # featureDir is a module global consumed by Execute().
    featureDir = GenerateDirPath(parts)
    filename = path.split("/")[-1]
    GenerateFeatures("idiom", idiomSizes, path, filename)
    GenerateFeatures("graphlet", graphletSizes, path, filename)
| lgpl-2.1 |
eblot/miscripts | Python/embeddev/btparse.py | 1 | 5918 | #!/usr/bin/env python2.7
# Parse and decode an ARM-EABI MAP file
from optparse import OptionParser
from subprocess import Popen, PIPE, STDOUT
import binascii
import re
import struct
import sys
demangle_cache = {}
def parse_map_file(mapf):
    """Build an {address: symbol_name} map from an ARM-EABI linker map.

    Only entries inside .text output sections are collected.  mapf is
    an open file object; xreadlines() makes this Python 2 only.
    """
    symbols = {}
    text_section = False
    # Matches "  0xADDRESS  symbol_name" lines.
    symcre = re.compile(r'^\s+(0x[0-9a-f]+)\s+([a-z_]\w+)', re.IGNORECASE)
    # Matches "  .text...  0xADDRESS  0xSIZE  object(member)" lines.
    loccre = re.compile(r'^\s+\.text.*(0x[0-9a-f]+)\s+(0x[0-9a-f]+)\s+'
                        r'([^\(]*)(\(.*\))?', re.IGNORECASE)
    for n, l in enumerate(mapf.xreadlines()):
        n = n+1  # 1-based line number (used by the tracing comments below)
        l = l.strip('\n').strip('\r')
        # State machine: track whether the current line belongs to a
        # .text output section; any other section header or an
        # input-section glob line ends the .text region.
        if l.startswith(' .text'):
            # if not text_section:
            #     print "TEXT @ %d" % n
            text_section = True
            if len(l) > 16 and l[len(' .text')+1] != ' ':
                continue
        elif l.startswith(' .'):
            # if text_section:
            #     print "END TEXT @ %d" % n
            text_section = False
        elif l.startswith(' *('):
            # if text_section:
            #     print "END TEXT @ %d" % n
            text_section = False
        if not text_section:
            continue
        lmo = loccre.search(l)
        smo = symcre.match(l)
        # if not lmo and not smo:
        #     pass
        #     #print "Unknown line @ %d:\n\t'%s'" % (n+1, l)
        if lmo:
            # Section placement line: record the object file name.
            address = int(lmo.group(1), 16)
            symbols[address] = lmo.group(4)
            # print 'LMO %d %s' % (n, symbols[address])
        if smo:
            # Plain symbol line: record (and possibly overwrite with)
            # the symbol name at this address.
            address = int(smo.group(1), 16)
            symbols[address] = smo.group(2)
            # print "@ %08x : %s" % (address, symbols[address])
    return symbols
def parse_abort_trace(tracef):
    """Parse an abort/crash dump into (stack_words, registers).

    Stack lines look like "ADDRESS: <16 hex byte pairs>"; register
    values appear as comma-separated "RR: xxxxxxxx" pairs.  Stack words
    are decoded as little-endian 32-bit values and returned as a
    reversed iterator (deepest frame first); registers maps lowercase
    register names to integer values.
    """
    stkcre = re.compile(r'^([0-9a-f]{8}):\s+((?:[0-9a-f]{2}\s+){16})',
                        re.IGNORECASE)
    regcre = re.compile(r'([A-Z]{2,3}):\s([0-9a-f]{8})', re.IGNORECASE)
    stack = []
    registers = {}
    for n, l in enumerate(tracef.readlines()):
        mo = stkcre.match(l)
        if mo:
            address = int(mo.group(1), 16)
            # NOTE(review): replace(' ', ' ') is a no-op as written --
            # it probably was meant to collapse double spaces; confirm
            # against the original source.
            data = mo.group(2).strip(' ').replace(' ', ' ')
            bytes = data.split(' ')  # NOTE: shadows the 'bytes' builtin
            # Consume four hex byte pairs at a time as one 32-bit word.
            while len(bytes):
                word = binascii.unhexlify(''.join(bytes[0:4]))
                bytes[:] = bytes[4:]
                (value, ) = struct.unpack('<I', word)
                stack.append(value)
        if ',' in l:
            # Register dump line: split on commas and match each pair.
            for reg in l.strip('\n').strip('\r').split(','):
                mo = regcre.match(reg.strip(' '))
                if mo:
                    registers[mo.group(1).lower()] = \
                        int('0x%s' % mo.group(2), 16)
    return reversed(stack), registers
def find_symbol(symbols, address):
    """Resolve an address to the name of the symbol containing it.

    symbols maps symbol start addresses to names; the containing symbol
    is the one with the greatest start address <= address.  Returns
    None when address lies before the first symbol.

    Bug fix: the original fell through to 'return None' after the loop,
    so any address at or beyond the last symbol's start was reported as
    unknown instead of belonging to the last symbol.
    """
    symbol = None
    for addr in sorted(symbols.keys()):
        if addr > address:
            break
        # addr <= address: this symbol is the best candidate so far.
        symbol = symbols[addr]
    return symbol
def demangle(symbol):
    """Demangle a C++ symbol via arm-eabi-c++filt, with memoization.

    Falsy input (None, '') is returned unchanged.  If the external
    c++filt tool is missing or fails, the mangled name is returned
    as-is -- the bare except below deliberately swallows every error.
    Results are cached in the module-level demangle_cache dict.
    """
    if not symbol:
        return symbol
    global demangle_cache
    if symbol in demangle_cache:
        return demangle_cache[symbol]
    args = ['arm-eabi-c++filt', symbol]
    # NOTE: local 'demangle' shadows this function's own name; it holds
    # the fallback (the mangled symbol) until c++filt succeeds.
    demangle = symbol
    try:
        cppfilter = Popen(args, stdout=PIPE)
        demangle = cppfilter.stdout.readlines()[0].strip('\n')
        cppfilter.wait()
    except:
        pass
    demangle_cache[symbol] = demangle
    return demangle
def decode_stack(symbols, stack):
    """Print each stack word that resolves to a known .text symbol.

    symbols: address -> name map from parse_map_file.
    stack: iterable of 32-bit words from parse_abort_trace.
    Words that do not fall inside any symbol are silently skipped.
    """
    print "Decoded stack:"
    for address in stack:
        symbol = find_symbol(symbols, address)
        if symbol:
            print " 0x%08x: %s" % (address, demangle(symbol))
def decode_registers(symbols, registers):
print "Decoded registers:"
for reg in regs:
symbol = find_symbol(symbols, registers[reg])
if symbol:
print " %s: %s" % (reg, demangle(symbol))
# -- Main -------------------------------------------------------------------
if __name__ == "__main__":
    usage = 'Usage: %prog [options]\n' \
            ' Parse an arm-eabi MAP file'
    optparser = OptionParser(usage=usage)
    optparser.add_option('-m', '--map', dest='map',
                         help='input ELF32 map file')
    optparser.add_option('-a', '--address', dest='address',
                         help='symbol address to look up')
    optparser.add_option('-t', '--trace', dest='trace',
                         help='frame trace to analyse')
    optparser.add_option('-i', '--input', dest='input',
                         help='read symbol address from the specified file')
    (options, args) = optparser.parse_args(sys.argv[1:])
    try:
        if not options.map:
            raise AssertionError('Missing map file')
        # Load the symbol table once; all lookup modes below reuse it.
        with open(options.map, 'rt') as mapf:
            symbols = parse_map_file(mapf)
            mapf.close()  # redundant: the with-block closes mapf anyway
        if options.address:
            # Single-address lookup mode.
            address = int(options.address, 16)
            symbol = demangle(find_symbol(symbols, address))
            if not symbol:
                # NOTE(review): missing 'raise' -- this constructs the
                # exception and silently discards it.
                AssertionError('No symbol @ 0x%x' % address)
            print "Symbol %s @ 0x%x" % (symbol, address)
        if options.input:
            # Batch lookup mode: one hex address per line, '-' = stdin.
            # (Dated and/or trick selecting between a file and stdin.)
            with options.input != '-' and open(options.input, 'r') or \
                    sys.stdin as inp:
                for line in inp:
                    line = line.strip('\r\n\t ')
                    address = int(line, 16)
                    symbol = demangle(find_symbol(symbols, address))
                    if not symbol:
                        # NOTE(review): missing 'raise' here as well.
                        AssertionError('No symbol @ 0x%x' % address)
                    print "@ 0x%08x: %s" % (address, symbol)
        if options.trace:
            # Crash-dump decoding mode: resolve stack words and registers.
            with (options.trace == '-') and sys.stdin or \
                    open(options.trace, 'rt') as tracef:
                (stack, regs) = parse_abort_trace(tracef)
                decode_stack(symbols, stack)
                decode_registers(symbols, regs)
    except AssertionError, e:
        print >> sys.stderr, "Error: %s" % e
        sys.exit(1)
| mit |
klmitch/python-keystoneclient | keystoneclient/tests/unit/auth/test_token.py | 8 | 1876 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystoneclient.auth.identity.generic import token
from keystoneclient.auth.identity import v2
from keystoneclient.auth.identity import v3
from keystoneclient.auth.identity.v3 import token as v3_token
from keystoneclient.tests.unit.auth import utils
class TokenTests(utils.GenericPluginTestCase):
    """Tests for the generic token auth plugin's options and v3 aliases."""

    PLUGIN_CLASS = token.Token
    V2_PLUGIN_CLASS = v2.Token
    V3_PLUGIN_CLASS = v3.Token

    def new_plugin(self, **kwargs):
        # Every plugin instance needs a token; default to a random one.
        kwargs.setdefault('token', uuid.uuid4().hex)
        return super(TokenTests, self).new_plugin(**kwargs)

    def test_options(self):
        """The plugin exposes exactly the expected option set."""
        opts = [o.name for o in self.PLUGIN_CLASS.get_options()]
        allowed_opts = ['token',
                        'domain-id',
                        'domain-name',
                        'tenant-id',
                        'tenant-name',
                        'project-id',
                        'project-name',
                        'project-domain-id',
                        'project-domain-name',
                        'trust-id',
                        'auth-url']
        # Set equality checks membership; length equality catches
        # duplicate option names that a set comparison would hide.
        self.assertEqual(set(allowed_opts), set(opts))
        self.assertEqual(len(allowed_opts), len(opts))

    def test_symbols(self):
        """The v3 package re-exports the token module's classes unchanged."""
        self.assertIs(v3.Token, v3_token.Token)
        self.assertIs(v3.TokenMethod, v3_token.TokenMethod)
| apache-2.0 |
pigeonflight/strider-plone | docker/appengine/lib/django-1.4/django/contrib/comments/admin.py | 361 | 3299 | from django.contrib import admin
from django.contrib.comments.models import Comment
from django.utils.translation import ugettext_lazy as _, ungettext
from django.contrib.comments import get_model
from django.contrib.comments.views.moderation import perform_flag, perform_approve, perform_delete
class CommentsAdmin(admin.ModelAdmin):
    """Admin for django.contrib.comments with moderation bulk actions."""

    fieldsets = (
        (None,
           {'fields': ('content_type', 'object_pk', 'site')}
        ),
        (_('Content'),
           {'fields': ('user', 'user_name', 'user_email', 'user_url', 'comment')}
        ),
        (_('Metadata'),
           {'fields': ('submit_date', 'ip_address', 'is_public', 'is_removed')}
        ),
    )
    list_display = ('name', 'content_type', 'object_pk', 'ip_address', 'submit_date', 'is_public', 'is_removed')
    list_filter = ('submit_date', 'site', 'is_public', 'is_removed')
    date_hierarchy = 'submit_date'
    ordering = ('-submit_date',)
    raw_id_fields = ('user',)
    search_fields = ('comment', 'user__username', 'user_name', 'user_email', 'user_url', 'ip_address')
    actions = ["flag_comments", "approve_comments", "remove_comments"]

    def get_actions(self, request):
        """Strip bulk actions the requesting user may not run."""
        actions = super(CommentsAdmin, self).get_actions(request)
        # Only superusers should be able to delete the comments from the DB.
        if not request.user.is_superuser and 'delete_selected' in actions:
            actions.pop('delete_selected')
        # Moderation actions require the explicit can_moderate permission.
        if not request.user.has_perm('comments.can_moderate'):
            if 'approve_comments' in actions:
                actions.pop('approve_comments')
            if 'remove_comments' in actions:
                actions.pop('remove_comments')
        return actions

    def flag_comments(self, request, queryset):
        """Bulk action: flag the selected comments."""
        self._bulk_flag(request, queryset, perform_flag,
                        lambda n: ungettext('flagged', 'flagged', n))
    flag_comments.short_description = _("Flag selected comments")

    def approve_comments(self, request, queryset):
        """Bulk action: approve the selected comments."""
        self._bulk_flag(request, queryset, perform_approve,
                        lambda n: ungettext('approved', 'approved', n))
    approve_comments.short_description = _("Approve selected comments")

    def remove_comments(self, request, queryset):
        """Bulk action: remove (soft-delete) the selected comments."""
        self._bulk_flag(request, queryset, perform_delete,
                        lambda n: ungettext('removed', 'removed', n))
    remove_comments.short_description = _("Remove selected comments")

    def _bulk_flag(self, request, queryset, action, done_message):
        """
        Flag, approve, or remove some comments from an admin action. Actually
        calls the `action` argument to perform the heavy lifting.
        """
        n_comments = 0
        for comment in queryset:
            action(request, comment)
            n_comments += 1
        # Pluralized, translated success message shown in the admin UI.
        msg = ungettext(u'1 comment was successfully %(action)s.',
                        u'%(count)s comments were successfully %(action)s.',
                        n_comments)
        self.message_user(request, msg % {'count': n_comments, 'action': done_message(n_comments)})
# Only register the default admin if the model is the built-in comment model
# (this won't be true if there's a custom comment app).
if get_model() is Comment:
admin.site.register(Comment, CommentsAdmin)
| mit |
afifnz/django-material | tests/integration/tests/test_fileinput.py | 10 | 4507 | import json
from django import forms
from django_webtest import WebTest
from . import build_test_urls
class FileInputForm(forms.Form):
    """Minimal form exercising a required FileField in the widget tests."""
    test_field = forms.FileField()
    # Hidden boolean so an "empty" submission still produces POST data.
    data_field = forms.BooleanField(required=False, widget=forms.HiddenInput,
                                    help_text='To produce non empty POST for empty test_field')
class Test(WebTest):
default_form = FileInputForm
urls = 'tests.integration.tests.test_fileinput'
def test_default_usecase(self):
page = self.app.get(self.test_default_usecase.url)
self.assertIn('id="id_test_field_container"', page.body.decode('utf-8'))
self.assertIn('id="id_test_field"', page.body.decode('utf-8'))
form = page.form
self.assertIn('test_field', form.fields)
response = form.submit(upload_files=[('test_field', __file__)])
response = json.loads(response.body.decode('utf-8'))
self.assertIn('cleaned_data', response)
self.assertIn('test_field', response['cleaned_data'])
self.assertEquals('InMemoryUploadedFile', response['cleaned_data']['test_field'])
def test_invalid_value(self):
form = self.app.get(self.test_invalid_value.url).form
form['data_field'] = '1'
response = form.submit()
self.assertIn('This field is required.', response.body.decode('utf-8'))
def test_part_group_class(self):
page = self.app.get(self.test_part_group_class.url)
self.assertIn('class="input-field file-field col s12 required yellow"', page.body.decode('utf-8'))
test_part_group_class.template = '''
{% form %}
{% part form.test_field group_class %}input-field file-field col s12 required yellow{% endpart %}
{% endform %}
'''
def test_part_add_group_class(self):
page = self.app.get(self.test_part_add_group_class.url)
self.assertIn('class="input-field file-field col s12 required deep-purple lighten-5"', page.body.decode('utf-8'))
test_part_add_group_class.template = '''
{% form %}
{% part form.test_field add_group_class %}deep-purple lighten-5{% endpart %}
{% endform %}
'''
def test_part_prefix(self):
response = self.app.get(self.test_part_prefix.url)
self.assertIn('<span>DATA</span>', response.body.decode('utf-8'))
test_part_prefix.template = '''
{% form %}
{% part form.test_field prefix %}<span>DATA</span>{% endpart %}
{% endform %}
'''
def test_part_add_control_class(self):
response = self.app.get(self.test_part_add_control_class.url)
self.assertIn('class="file-path orange"', response.body.decode('utf-8'))
test_part_add_control_class.template = '''
{% form %}
{% part form.test_field add_control_class %}orange{% endpart %}
{% endform %}
'''
def test_part_label(self):
response = self.app.get(self.test_part_label.url)
self.assertIn('<label for="id_test_field">My label</label>', response.body.decode('utf-8'))
test_part_label.template = '''
{% form %}
{% part form.test_field label %}<label for="id_test_field">My label</label>{% endpart %}
{% endform %}
'''
def test_part_add_label_class(self):
response = self.app.get(self.test_part_add_label_class.url)
self.assertIn('<label for="id_test_field" class="green-text">Test field</label>', response.body.decode('utf-8'))
test_part_add_label_class.template = '''
{% form %}
{% part form.test_field add_label_class %}green-text{% endpart %}
{% endform %}
'''
def test_part_help_text(self):
response = self.app.get(self.test_part_help_text.url)
self.assertIn('<small class="help-block">My help</small>', response.body.decode('utf-8'))
test_part_help_text.template = '''
{% form %}
{% part form.test_field help_text %}<small class="help-block">My help</small>{% endpart %}
{% endform %}
'''
def test_part_errors(self):
response = self.app.get(self.test_part_errors.url)
self.assertIn('<div class="errors"><small class="error">My Error</small></div>', response.body.decode('utf-8'))
test_part_errors.template = '''
{% form %}
{% part form.test_field errors%}<div class="errors"><small class="error">My Error</small></div>{% endpart %}
{% endform %}
'''
urlpatterns = build_test_urls(Test)
| bsd-3-clause |
MarcosCommunity/odoo | openerp/addons/base/tests/test_ir_actions.py | 291 | 20121 | import unittest2
from openerp.osv.orm import except_orm
import openerp.tests.common as common
from openerp.tools import mute_logger
class TestServerActionsBase(common.TransactionCase):
    """Shared fixture for server-action tests.

    Sets up registry handles, a sample country + partner to run actions
    against, cached field/model ids, and one basic 'code'-type server
    action (self.act_id) that concrete test cases reconfigure.
    """

    def setUp(self):
        super(TestServerActionsBase, self).setUp()
        cr, uid = self.cr, self.uid
        # Models
        self.ir_actions_server = self.registry('ir.actions.server')
        self.ir_actions_client = self.registry('ir.actions.client')
        self.ir_values = self.registry('ir.values')
        self.ir_model = self.registry('ir.model')
        self.ir_model_fields = self.registry('ir.model.fields')
        self.res_partner = self.registry('res.partner')
        self.res_country = self.registry('res.country')
        # Data on which we will run the server action
        self.test_country_id = self.res_country.create(cr, uid, {
            'name': 'TestingCountry',
            'code': 'TY',
            'address_format': 'SuperFormat',
        })
        self.test_country = self.res_country.browse(cr, uid, self.test_country_id)
        self.test_partner_id = self.res_partner.create(cr, uid, {
            'name': 'TestingPartner',
            'city': 'OrigCity',
            'country_id': self.test_country_id,
        })
        self.test_partner = self.res_partner.browse(cr, uid, self.test_partner_id)
        # Context mimicking running the action from the partner form view.
        self.context = {
            'active_id': self.test_partner_id,
            'active_model': 'res.partner',
        }
        # Model data: cache ir.model / ir.model.fields ids used by the tests.
        self.res_partner_model_id = self.ir_model.search(cr, uid, [('model', '=', 'res.partner')])[0]
        self.res_partner_name_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.partner'), ('name', '=', 'name')])[0]
        self.res_partner_city_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.partner'), ('name', '=', 'city')])[0]
        self.res_partner_country_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.partner'), ('name', '=', 'country_id')])[0]
        self.res_partner_parent_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.partner'), ('name', '=', 'parent_id')])[0]
        self.res_country_model_id = self.ir_model.search(cr, uid, [('model', '=', 'res.country')])[0]
        self.res_country_name_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.country'), ('name', '=', 'name')])[0]
        self.res_country_code_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.country'), ('name', '=', 'code')])[0]
        # create server action to
        self.act_id = self.ir_actions_server.create(cr, uid, {
            'name': 'TestAction',
            'condition': 'True',
            'model_id': self.res_partner_model_id,
            'state': 'code',
            'code': 'obj.write({"comment": "MyComment"})',
        })
class TestServerActions(TestServerActionsBase):
def test_00_action(self):
cr, uid = self.cr, self.uid
# Do: eval 'True' condition
self.ir_actions_server.run(cr, uid, [self.act_id], self.context)
self.test_partner.refresh()
self.assertEqual(self.test_partner.comment, 'MyComment', 'ir_actions_server: invalid condition check')
self.test_partner.write({'comment': False})
# Do: eval False condition, that should be considered as True (void = True)
self.ir_actions_server.write(cr, uid, [self.act_id], {'condition': False})
self.ir_actions_server.run(cr, uid, [self.act_id], self.context)
self.test_partner.refresh()
self.assertEqual(self.test_partner.comment, 'MyComment', 'ir_actions_server: invalid condition check')
# Do: create contextual action
self.ir_actions_server.create_action(cr, uid, [self.act_id])
# Test: ir_values created
ir_values_ids = self.ir_values.search(cr, uid, [('name', '=', 'Run TestAction')])
self.assertEqual(len(ir_values_ids), 1, 'ir_actions_server: create_action should have created an entry in ir_values')
ir_value = self.ir_values.browse(cr, uid, ir_values_ids[0])
self.assertEqual(ir_value.value, 'ir.actions.server,%s' % self.act_id, 'ir_actions_server: created ir_values should reference the server action')
self.assertEqual(ir_value.model, 'res.partner', 'ir_actions_server: created ir_values should be linked to the action base model')
# Do: remove contextual action
self.ir_actions_server.unlink_action(cr, uid, [self.act_id])
# Test: ir_values removed
ir_values_ids = self.ir_values.search(cr, uid, [('name', '=', 'Run TestAction')])
self.assertEqual(len(ir_values_ids), 0, 'ir_actions_server: unlink_action should remove the ir_values record')
def test_10_code(self):
cr, uid = self.cr, self.uid
self.ir_actions_server.write(cr, uid, self.act_id, {
'state': 'code',
'code': """partner_name = obj.name + '_code'
self.pool["res.partner"].create(cr, uid, {"name": partner_name}, context=context)
workflow"""
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: code server action correctly finished should return False')
pids = self.res_partner.search(cr, uid, [('name', 'ilike', 'TestingPartner_code')])
self.assertEqual(len(pids), 1, 'ir_actions_server: 1 new partner should have been created')
def test_20_trigger(self):
cr, uid = self.cr, self.uid
# Data: code server action (at this point code-based actions should work)
act_id2 = self.ir_actions_server.create(cr, uid, {
'name': 'TestAction2',
'type': 'ir.actions.server',
'condition': 'True',
'model_id': self.res_partner_model_id,
'state': 'code',
'code': 'obj.write({"comment": "MyComment"})',
})
act_id3 = self.ir_actions_server.create(cr, uid, {
'name': 'TestAction3',
'type': 'ir.actions.server',
'condition': 'True',
'model_id': self.res_country_model_id,
'state': 'code',
'code': 'obj.write({"code": "ZZ"})',
})
# Data: create workflows
partner_wf_id = self.registry('workflow').create(cr, uid, {
'name': 'TestWorkflow',
'osv': 'res.partner',
'on_create': True,
})
partner_act1_id = self.registry('workflow.activity').create(cr, uid, {
'name': 'PartnerStart',
'wkf_id': partner_wf_id,
'flow_start': True
})
partner_act2_id = self.registry('workflow.activity').create(cr, uid, {
'name': 'PartnerTwo',
'wkf_id': partner_wf_id,
'kind': 'function',
'action': 'True',
'action_id': act_id2,
})
partner_trs1_id = self.registry('workflow.transition').create(cr, uid, {
'signal': 'partner_trans',
'act_from': partner_act1_id,
'act_to': partner_act2_id
})
country_wf_id = self.registry('workflow').create(cr, uid, {
'name': 'TestWorkflow',
'osv': 'res.country',
'on_create': True,
})
country_act1_id = self.registry('workflow.activity').create(cr, uid, {
'name': 'CountryStart',
'wkf_id': country_wf_id,
'flow_start': True
})
country_act2_id = self.registry('workflow.activity').create(cr, uid, {
'name': 'CountryTwo',
'wkf_id': country_wf_id,
'kind': 'function',
'action': 'True',
'action_id': act_id3,
})
country_trs1_id = self.registry('workflow.transition').create(cr, uid, {
'signal': 'country_trans',
'act_from': country_act1_id,
'act_to': country_act2_id
})
# Data: re-create country and partner to benefit from the workflows
self.test_country_id = self.res_country.create(cr, uid, {
'name': 'TestingCountry2',
'code': 'T2',
})
self.test_country = self.res_country.browse(cr, uid, self.test_country_id)
self.test_partner_id = self.res_partner.create(cr, uid, {
'name': 'TestingPartner2',
'country_id': self.test_country_id,
})
self.test_partner = self.res_partner.browse(cr, uid, self.test_partner_id)
self.context = {
'active_id': self.test_partner_id,
'active_model': 'res.partner',
}
# Run the action on partner object itself ('base')
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'trigger',
'use_relational_model': 'base',
'wkf_model_id': self.res_partner_model_id,
'wkf_model_name': 'res.partner',
'wkf_transition_id': partner_trs1_id,
})
self.ir_actions_server.run(cr, uid, [self.act_id], self.context)
self.test_partner.refresh()
self.assertEqual(self.test_partner.comment, 'MyComment', 'ir_actions_server: incorrect signal trigger')
# Run the action on related country object ('relational')
self.ir_actions_server.write(cr, uid, [self.act_id], {
'use_relational_model': 'relational',
'wkf_model_id': self.res_country_model_id,
'wkf_model_name': 'res.country',
'wkf_field_id': self.res_partner_country_field_id,
'wkf_transition_id': country_trs1_id,
})
self.ir_actions_server.run(cr, uid, [self.act_id], self.context)
self.test_country.refresh()
self.assertEqual(self.test_country.code, 'ZZ', 'ir_actions_server: incorrect signal trigger')
# Clear workflow cache, otherwise openerp will try to create workflows even if it has been deleted
from openerp.workflow import clear_cache
clear_cache(cr, uid)
def test_30_client(self):
cr, uid = self.cr, self.uid
client_action_id = self.registry('ir.actions.client').create(cr, uid, {
'name': 'TestAction2',
'tag': 'Test',
})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'client_action',
'action_id': client_action_id,
})
res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertEqual(res['name'], 'TestAction2', 'ir_actions_server: incorrect return result for a client action')
def test_40_crud_create(self):
cr, uid = self.cr, self.uid
_city = 'TestCity'
_name = 'TestNew'
# Do: create a new record in the same model and link it
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'object_create',
'use_create': 'new',
'link_new_record': True,
'link_field_id': self.res_partner_parent_field_id,
'fields_lines': [(0, 0, {'col1': self.res_partner_name_field_id, 'value': _name}),
(0, 0, {'col1': self.res_partner_city_field_id, 'value': _city})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new partner created
pids = self.res_partner.search(cr, uid, [('name', 'ilike', _name)])
self.assertEqual(len(pids), 1, 'ir_actions_server: TODO')
partner = self.res_partner.browse(cr, uid, pids[0])
self.assertEqual(partner.city, _city, 'ir_actions_server: TODO')
# Test: new partner linked
self.test_partner.refresh()
self.assertEqual(self.test_partner.parent_id.id, pids[0], 'ir_actions_server: TODO')
# Do: copy current record
self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'object_create',
'use_create': 'copy_current',
'link_new_record': False,
'fields_lines': [(0, 0, {'col1': self.res_partner_name_field_id, 'value': 'TestCopyCurrent'}),
(0, 0, {'col1': self.res_partner_city_field_id, 'value': 'TestCity'})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new partner created
pids = self.res_partner.search(cr, uid, [('name', 'ilike', 'TestingPartner (copy)')]) # currently res_partner overrides default['name'] whatever its value
self.assertEqual(len(pids), 1, 'ir_actions_server: TODO')
partner = self.res_partner.browse(cr, uid, pids[0])
self.assertEqual(partner.city, 'TestCity', 'ir_actions_server: TODO')
self.assertEqual(partner.country_id.id, self.test_partner.country_id.id, 'ir_actions_server: TODO')
# Do: create a new record in another model
self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'object_create',
'use_create': 'new_other',
'crud_model_id': self.res_country_model_id,
'link_new_record': False,
'fields_lines': [(0, 0, {'col1': self.res_country_name_field_id, 'value': 'obj.name', 'type': 'equation'}),
(0, 0, {'col1': self.res_country_code_field_id, 'value': 'obj.name[0:2]', 'type': 'equation'})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new country created
cids = self.res_country.search(cr, uid, [('name', 'ilike', 'TestingPartner')])
self.assertEqual(len(cids), 1, 'ir_actions_server: TODO')
country = self.res_country.browse(cr, uid, cids[0])
self.assertEqual(country.code, 'TE', 'ir_actions_server: TODO')
# Do: copy a record in another model
self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'object_create',
'use_create': 'copy_other',
'crud_model_id': self.res_country_model_id,
'link_new_record': False,
'ref_object': 'res.country,%s' % self.test_country_id,
'fields_lines': [(0, 0, {'col1': self.res_country_name_field_id, 'value': 'NewCountry', 'type': 'value'}),
(0, 0, {'col1': self.res_country_code_field_id, 'value': 'NY', 'type': 'value'})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new country created
cids = self.res_country.search(cr, uid, [('name', 'ilike', 'NewCountry')])
self.assertEqual(len(cids), 1, 'ir_actions_server: TODO')
country = self.res_country.browse(cr, uid, cids[0])
self.assertEqual(country.code, 'NY', 'ir_actions_server: TODO')
self.assertEqual(country.address_format, 'SuperFormat', 'ir_actions_server: TODO')
def test_50_crud_write(self):
cr, uid = self.cr, self.uid
_name = 'TestNew'
# Do: create a new record in the same model and link it
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'object_write',
'use_write': 'current',
'fields_lines': [(0, 0, {'col1': self.res_partner_name_field_id, 'value': _name})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new partner created
pids = self.res_partner.search(cr, uid, [('name', 'ilike', _name)])
self.assertEqual(len(pids), 1, 'ir_actions_server: TODO')
partner = self.res_partner.browse(cr, uid, pids[0])
self.assertEqual(partner.city, 'OrigCity', 'ir_actions_server: TODO')
# Do: copy current record
self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'use_write': 'other',
'crud_model_id': self.res_country_model_id,
'ref_object': 'res.country,%s' % self.test_country_id,
'fields_lines': [(0, 0, {'col1': self.res_country_name_field_id, 'value': 'obj.name', 'type': 'equation'})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new country created
cids = self.res_country.search(cr, uid, [('name', 'ilike', 'TestNew')])
self.assertEqual(len(cids), 1, 'ir_actions_server: TODO')
# Do: copy a record in another model
self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'use_write': 'expression',
'crud_model_id': self.res_country_model_id,
'write_expression': 'object.country_id',
'fields_lines': [(0, 0, {'col1': self.res_country_name_field_id, 'value': 'NewCountry', 'type': 'value'})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new country created
cids = self.res_country.search(cr, uid, [('name', 'ilike', 'NewCountry')])
self.assertEqual(len(cids), 1, 'ir_actions_server: TODO')
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_60_multi(self):
cr, uid = self.cr, self.uid
# Data: 2 server actions that will be nested
act1_id = self.ir_actions_server.create(cr, uid, {
'name': 'Subaction1',
'sequence': 1,
'model_id': self.res_partner_model_id,
'state': 'code',
'code': 'action = {"type": "ir.actions.act_window"}',
})
act2_id = self.ir_actions_server.create(cr, uid, {
'name': 'Subaction2',
'sequence': 2,
'model_id': self.res_partner_model_id,
'state': 'object_create',
'use_create': 'copy_current',
})
act3_id = self.ir_actions_server.create(cr, uid, {
'name': 'Subaction3',
'sequence': 3,
'model_id': self.res_partner_model_id,
'state': 'code',
'code': 'action = {"type": "ir.actions.act_url"}',
})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'multi',
'child_ids': [(6, 0, [act1_id, act2_id, act3_id])],
})
# Do: run the action
res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
# Test: new partner created
pids = self.res_partner.search(cr, uid, [('name', 'ilike', 'TestingPartner (copy)')]) # currently res_partner overrides default['name'] whatever its value
self.assertEqual(len(pids), 1, 'ir_actions_server: TODO')
# Test: action returned
self.assertEqual(res.get('type'), 'ir.actions.act_url')
# Test loops
with self.assertRaises(except_orm):
self.ir_actions_server.write(cr, uid, [self.act_id], {
'child_ids': [(6, 0, [self.act_id])]
})
if __name__ == '__main__':
unittest2.main()
| agpl-3.0 |
glewis17/cvxpy | cvxpy/tests/test_quad_form.py | 11 | 3160 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from scipy import linalg
import cvxopt
import cvxpy
from cvxpy.tests.base_test import BaseTest
class TestNonOptimal(BaseTest):
    """Tests for cvxpy.quad_form with singular, sparse, and invalid P."""

    def test_singular_quad_form(self):
        """Test quad form with a singular matrix.
        """
        # Solve a quadratic program.
        np.random.seed(1234)
        for n in (3, 4, 5):
            for i in range(5):
                # construct a random 1d finite distribution
                v = np.exp(np.random.randn(n))
                v = v / np.sum(v)
                # construct a random positive definite matrix
                A = np.random.randn(n, n)
                Q = np.dot(A, A.T)
                # Project onto the orthogonal complement of v.
                # This turns Q into a singular matrix with a known nullspace.
                E = np.identity(n) - np.outer(v, v) / np.inner(v, v)
                Q = np.dot(E, np.dot(Q, E.T))
                observed_rank = np.linalg.matrix_rank(Q)
                desired_rank = n-1
                # BUG FIX: this used to be ``yield assert_equal, ...``, which
                # turned the test method into a generator.  unittest calls the
                # method, receives a generator object, and never iterates it,
                # so the entire test body silently never executed.  Assert
                # directly instead.
                assert_equal(observed_rank, desired_rank)
                for action in 'minimize', 'maximize':
                    # Look for the extremum of the quadratic form
                    # under the simplex constraint.
                    x = cvxpy.Variable(n)
                    if action == 'minimize':
                        q = cvxpy.quad_form(x, Q)
                        objective = cvxpy.Minimize(q)
                    elif action == 'maximize':
                        q = cvxpy.quad_form(x, -Q)
                        objective = cvxpy.Maximize(q)
                    constraints = [0 <= x, cvxpy.sum_entries(x) == 1]
                    p = cvxpy.Problem(objective, constraints)
                    p.solve()
                    # check that cvxpy found the right answer: the optimum is
                    # 0, attained at v (the known nullspace direction).
                    xopt = x.value.A.flatten()
                    yopt = np.dot(xopt, np.dot(Q, xopt))
                    assert_allclose(yopt, 0, atol=1e-3)
                    assert_allclose(xopt, v, atol=1e-3)

    def test_sparse_quad_form(self):
        """Test quad form with a sparse matrix.
        """
        Q = cvxopt.spdiag([1, 1])
        x = cvxpy.Variable(2, 1)
        cost = cvxpy.quad_form(x, Q)
        prob = cvxpy.Problem(cvxpy.Minimize(cost), [x == [1, 2]])
        self.assertAlmostEqual(prob.solve(), 5)

    def test_non_symmetric(self):
        """Test error when P is constant and not symmetric.
        """
        P = np.array([[1, 2], [3, 4]])
        x = cvxpy.Variable(2, 1)
        with self.assertRaises(Exception) as cm:
            cvxpy.quad_form(x, P)
        self.assertEqual(str(cm.exception),
                         "P is not symmetric.")

    def test_non_psd(self):
        """Test error when P is symmetric but not definite.
        """
        P = np.array([[1, 0], [0, -1]])
        x = cvxpy.Variable(2, 1)
        with self.assertRaises(Exception) as cm:
            cvxpy.quad_form(x, P)
        self.assertEqual(str(cm.exception),
                         "P has both positive and negative eigenvalues.")
| gpl-3.0 |
fitermay/intellij-community | python/helpers/py2only/docutils/parsers/rst/directives/misc.py | 106 | 22888 | # $Id: misc.py 7487 2012-07-22 21:20:28Z milde $
# Authors: David Goodger <goodger@python.org>; Dethe Elza
# Copyright: This module has been placed in the public domain.
"""Miscellaneous directives."""
__docformat__ = 'reStructuredText'
import sys
import os.path
import re
import time
from docutils import io, nodes, statemachine, utils
from docutils.utils.error_reporting import SafeString, ErrorString
from docutils.utils.error_reporting import locale_encoding
from docutils.parsers.rst import Directive, convert_directive_function
from docutils.parsers.rst import directives, roles, states
from docutils.parsers.rst.directives.body import CodeBlock, NumberLines
from docutils.parsers.rst.roles import set_classes
from docutils.transforms import misc
class Include(Directive):

    """
    Include content read from a separate source file.

    Content may be parsed by the parser, or included as a literal
    block. The encoding of the included file can be specified. Only
    a part of the given file argument may be included by specifying
    start and end line or text to match before and/or after the text
    to be used.
    """

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'literal': directives.flag,
                   'code': directives.unchanged,
                   'encoding': directives.encoding,
                   'tab-width': int,
                   'start-line': int,
                   'end-line': int,
                   'start-after': directives.unchanged_required,
                   'end-before': directives.unchanged_required,
                   # ignored except for 'literal' or 'code':
                   'number-lines': directives.unchanged, # integer or None
                   'class': directives.class_option,
                   'name': directives.unchanged}

    # Directory holding the standard "<name>" include files shipped with
    # docutils (resolved relative to the states module).
    standard_include_path = os.path.join(os.path.dirname(states.__file__),
                                         'include')

    def run(self):
        """Include a file as part of the content of this reST file."""
        if not self.state.document.settings.file_insertion_enabled:
            raise self.warning('"%s" directive disabled.' % self.name)
        source = self.state_machine.input_lines.source(
            self.lineno - self.state_machine.input_offset - 1)
        source_dir = os.path.dirname(os.path.abspath(source))
        path = directives.path(self.arguments[0])
        if path.startswith('<') and path.endswith('>'):
            # "<name>" refers to a file in the standard include directory.
            path = os.path.join(self.standard_include_path, path[1:-1])
        path = os.path.normpath(os.path.join(source_dir, path))
        path = utils.relative_path(None, path)
        path = nodes.reprunicode(path)
        encoding = self.options.get(
            'encoding', self.state.document.settings.input_encoding)
        e_handler=self.state.document.settings.input_encoding_error_handler
        tab_width = self.options.get(
            'tab-width', self.state.document.settings.tab_width)
        try:
            self.state.document.settings.record_dependencies.add(path)
            include_file = io.FileInput(source_path=path,
                                        encoding=encoding,
                                        error_handler=e_handler)
        except UnicodeEncodeError, error:
            raise self.severe(u'Problems with "%s" directive path:\n'
                              'Cannot encode input file path "%s" '
                              '(wrong locale?).' %
                              (self.name, SafeString(path)))
        except IOError, error:
            raise self.severe(u'Problems with "%s" directive path:\n%s.' %
                              (self.name, ErrorString(error)))
        # Optional line-range restriction of the included text.
        startline = self.options.get('start-line', None)
        endline = self.options.get('end-line', None)
        try:
            if startline or (endline is not None):
                lines = include_file.readlines()
                rawtext = ''.join(lines[startline:endline])
            else:
                rawtext = include_file.read()
        except UnicodeError, error:
            raise self.severe(u'Problem with "%s" directive:\n%s' %
                              (self.name, ErrorString(error)))
        # start-after/end-before: no restrictions on newlines in match-text,
        # and no restrictions on matching inside lines vs. line boundaries
        after_text = self.options.get('start-after', None)
        if after_text:
            # skip content in rawtext before *and incl.* a matching text
            after_index = rawtext.find(after_text)
            if after_index < 0:
                raise self.severe('Problem with "start-after" option of "%s" '
                                  'directive:\nText not found.' % self.name)
            rawtext = rawtext[after_index + len(after_text):]
        before_text = self.options.get('end-before', None)
        if before_text:
            # skip content in rawtext after *and incl.* a matching text
            before_index = rawtext.find(before_text)
            if before_index < 0:
                raise self.severe('Problem with "end-before" option of "%s" '
                                  'directive:\nText not found.' % self.name)
            rawtext = rawtext[:before_index]
        include_lines = statemachine.string2lines(rawtext, tab_width,
                                                  convert_whitespace=True)
        if 'literal' in self.options:
            # Convert tabs to spaces, if `tab_width` is positive.
            if tab_width >= 0:
                text = rawtext.expandtabs(tab_width)
            else:
                text = rawtext
            literal_block = nodes.literal_block(rawtext, source=path,
                                    classes=self.options.get('class', []))
            literal_block.line = 1
            self.add_name(literal_block)
            if 'number-lines' in self.options:
                try:
                    startline = int(self.options['number-lines'] or 1)
                except ValueError:
                    raise self.error(':number-lines: with non-integer '
                                     'start value')
                endline = startline + len(include_lines)
                if text.endswith('\n'):
                    text = text[:-1]
                tokens = NumberLines([([], text)], startline, endline)
                for classes, value in tokens:
                    if classes:
                        literal_block += nodes.inline(value, value,
                                                      classes=classes)
                    else:
                        literal_block += nodes.Text(value, value)
            else:
                literal_block += nodes.Text(text, text)
            return [literal_block]
        if 'code' in self.options:
            # Delegate to the "code" directive for syntax highlighting.
            self.options['source'] = path
            codeblock = CodeBlock(self.name,
                                  [self.options.pop('code')], # arguments
                                  self.options,
                                  include_lines, # content
                                  self.lineno,
                                  self.content_offset,
                                  self.block_text,
                                  self.state,
                                  self.state_machine)
            return codeblock.run()
        # Default: feed the included lines back into the parser in place.
        self.state_machine.insert_input(include_lines, path)
        return []
class Raw(Directive):

    """
    Pass through content unchanged

    Content is included in output based on type argument

    Content may be included inline (content section of directive) or
    imported from a file or url.
    """

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'file': directives.path,
                   'url': directives.uri,
                   'encoding': directives.encoding}
    has_content = True

    def run(self):
        # Honor the security settings: raw passthrough and file/URL insertion
        # can each be disabled independently.
        if (not self.state.document.settings.raw_enabled
            or (not self.state.document.settings.file_insertion_enabled
                and ('file' in self.options
                     or 'url' in self.options))):
            raise self.warning('"%s" directive disabled.' % self.name)
        attributes = {'format': ' '.join(self.arguments[0].lower().split())}
        encoding = self.options.get(
            'encoding', self.state.document.settings.input_encoding)
        e_handler=self.state.document.settings.input_encoding_error_handler
        # Exactly one source is allowed: inline content, :file:, or :url:.
        if self.content:
            if 'file' in self.options or 'url' in self.options:
                raise self.error(
                    '"%s" directive may not both specify an external file '
                    'and have content.' % self.name)
            text = '\n'.join(self.content)
        elif 'file' in self.options:
            if 'url' in self.options:
                raise self.error(
                    'The "file" and "url" options may not be simultaneously '
                    'specified for the "%s" directive.' % self.name)
            source_dir = os.path.dirname(
                os.path.abspath(self.state.document.current_source))
            path = os.path.normpath(os.path.join(source_dir,
                                                 self.options['file']))
            path = utils.relative_path(None, path)
            try:
                raw_file = io.FileInput(source_path=path,
                                        encoding=encoding,
                                        error_handler=e_handler)
                # TODO: currently, raw input files are recorded as
                # dependencies even if not used for the chosen output format.
                self.state.document.settings.record_dependencies.add(path)
            except IOError, error:
                raise self.severe(u'Problems with "%s" directive path:\n%s.'
                                  % (self.name, ErrorString(error)))
            try:
                text = raw_file.read()
            except UnicodeError, error:
                raise self.severe(u'Problem with "%s" directive:\n%s'
                                  % (self.name, ErrorString(error)))
            attributes['source'] = path
        elif 'url' in self.options:
            source = self.options['url']
            # Do not import urllib2 at the top of the module because
            # it may fail due to broken SSL dependencies, and it takes
            # about 0.15 seconds to load.
            import urllib2
            try:
                raw_text = urllib2.urlopen(source).read()
            except (urllib2.URLError, IOError, OSError), error:
                raise self.severe(u'Problems with "%s" directive URL "%s":\n%s.'
                                  % (self.name, self.options['url'], ErrorString(error)))
            raw_file = io.StringInput(source=raw_text, source_path=source,
                                      encoding=encoding,
                                      error_handler=e_handler)
            try:
                text = raw_file.read()
            except UnicodeError, error:
                raise self.severe(u'Problem with "%s" directive:\n%s'
                                  % (self.name, ErrorString(error)))
            attributes['source'] = source
        else:
            # This will always fail because there is no content.
            self.assert_has_content()
        raw_node = nodes.raw('', text, **attributes)
        (raw_node.source,
         raw_node.line) = self.state_machine.get_source_and_line(self.lineno)
        return [raw_node]
class Replace(Directive):

    """Implement the "replace" substitution-definition directive: the parsed
    content (a single paragraph) becomes the substitution text."""

    has_content = True

    def run(self):
        # Only valid inside a substitution definition.
        if not isinstance(self.state, states.SubstitutionDef):
            raise self.error(
                'Invalid context: the "%s" directive can only be used within '
                'a substitution definition.' % self.name)
        self.assert_has_content()
        text = '\n'.join(self.content)
        element = nodes.Element(text)
        self.state.nested_parse(self.content, self.content_offset,
                                element)
        # element might contain [paragraph] + system_message(s)
        node = None
        messages = []
        for elem in element:
            if not node and isinstance(elem, nodes.paragraph):
                node = elem
            elif isinstance(elem, nodes.system_message):
                # Detach backrefs: the referenced nodes are discarded below.
                elem['backrefs'] = []
                messages.append(elem)
            else:
                # Anything beyond one paragraph (+ messages) is an error.
                return [
                    self.state_machine.reporter.error(
                        'Error in "%s" directive: may contain a single paragraph '
                        'only.' % (self.name), line=self.lineno) ]
        if node:
            # Return the paragraph's children (inline nodes), not the
            # paragraph itself, so the substitution stays inline.
            return messages + node.children
        return messages
class Unicode(Directive):

    r"""
    Convert Unicode character codes (numbers) to characters.  Codes may be
    decimal numbers, hexadecimal numbers (prefixed by ``0x``, ``x``, ``\x``,
    ``U+``, ``u``, or ``\u``; e.g. ``U+262E``), or XML-style numeric character
    entities (e.g. ``&#x262E;``).  Text following ".." is a comment and is
    ignored.  Spaces are ignored, and any other text remains as-is.
    """

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'trim': directives.flag,
                   'ltrim': directives.flag,
                   'rtrim': directives.flag}
    # Matches the ".." comment introducer (at start or after space/newline).
    comment_pattern = re.compile(r'( |\n|^)\.\. ')

    def run(self):
        # Only valid inside a substitution definition.
        if not isinstance(self.state, states.SubstitutionDef):
            raise self.error(
                'Invalid context: the "%s" directive can only be used within '
                'a substitution definition.' % self.name)
        substitution_definition = self.state_machine.node
        # 'trim' implies both 'ltrim' and 'rtrim'.
        if 'trim' in self.options:
            substitution_definition.attributes['ltrim'] = 1
            substitution_definition.attributes['rtrim'] = 1
        if 'ltrim' in self.options:
            substitution_definition.attributes['ltrim'] = 1
        if 'rtrim' in self.options:
            substitution_definition.attributes['rtrim'] = 1
        # Strip the trailing comment, then convert each whitespace-separated
        # token to its character (unconvertible tokens raise).
        codes = self.comment_pattern.split(self.arguments[0])[0].split()
        element = nodes.Element()
        for code in codes:
            try:
                decoded = directives.unicode_code(code)
            except ValueError, error:
                raise self.error(u'Invalid character code: %s\n%s'
                                 % (code, ErrorString(error)))
            element += nodes.Text(decoded)
        return element.children
class Class(Directive):

    """
    Set a "class" attribute on the directive content or the next element.
    When applied to the next element, a "pending" element is inserted, and a
    transform does the work later.
    """

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    has_content = True

    def run(self):
        try:
            class_value = directives.class_option(self.arguments[0])
        except ValueError:
            raise self.error(
                'Invalid class attribute value for "%s" directive: "%s".'
                % (self.name, self.arguments[0]))
        node_list = []
        if self.content:
            # With content: parse it and tag every resulting node directly.
            container = nodes.Element()
            self.state.nested_parse(self.content, self.content_offset,
                                    container)
            for node in container:
                node['classes'].extend(class_value)
            node_list.extend(container.children)
        else:
            # Without content: defer to the ClassAttribute transform, which
            # will tag the next suitable element.
            pending = nodes.pending(
                misc.ClassAttribute,
                {'class': class_value, 'directive': self.name},
                self.block_text)
            self.state_machine.document.note_pending(pending)
            node_list.append(pending)
        return node_list
class Role(Directive):

    """Define a new interpreted-text role, optionally based on an existing
    one (``.. role:: name(base)``)."""

    has_content = True

    # "name" or "name(base)" where both are valid role simple-names.
    argument_pattern = re.compile(r'(%s)\s*(\(\s*(%s)\s*\)\s*)?$'
                                  % ((states.Inliner.simplename,) * 2))

    def run(self):
        """Dynamically create and register a custom interpreted text role."""
        # The role name must appear on the directive's first content line.
        if self.content_offset > self.lineno or not self.content:
            raise self.error('"%s" directive requires arguments on the first '
                             'line.' % self.name)
        args = self.content[0]
        match = self.argument_pattern.match(args)
        if not match:
            raise self.error('"%s" directive arguments not valid role names: '
                             '"%s".' % (self.name, args))
        new_role_name = match.group(1)
        base_role_name = match.group(3)
        messages = []
        if base_role_name:
            base_role, messages = roles.role(
                base_role_name, self.state_machine.language, self.lineno,
                self.state.reporter)
            if base_role is None:
                error = self.state.reporter.error(
                    'Unknown interpreted text role "%s".' % base_role_name,
                    nodes.literal_block(self.block_text, self.block_text),
                    line=self.lineno)
                return messages + [error]
        else:
            base_role = roles.generic_custom_role
        assert not hasattr(base_role, 'arguments'), (
            'Supplemental directive arguments for "%s" directive not '
            'supported (specified by "%r" role).' % (self.name, base_role))
        try:
            # Parse the remaining lines as the base role's option block.
            converted_role = convert_directive_function(base_role)
            (arguments, options, content, content_offset) = (
                self.state.parse_directive_block(
                    self.content[1:], self.content_offset, converted_role,
                    option_presets={}))
        except states.MarkupError, detail:
            error = self.state_machine.reporter.error(
                'Error in "%s" directive:\n%s.' % (self.name, detail),
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            return messages + [error]
        if 'class' not in options:
            # Default the generated class attribute to the new role's name.
            try:
                options['class'] = directives.class_option(new_role_name)
            except ValueError, detail:
                error = self.state_machine.reporter.error(
                    u'Invalid argument for "%s" directive:\n%s.'
                    % (self.name, SafeString(detail)), nodes.literal_block(
                    self.block_text, self.block_text), line=self.lineno)
                return messages + [error]
        role = roles.CustomRole(new_role_name, base_role, options, content)
        roles.register_local_role(new_role_name, role)
        return messages
class DefaultRole(Directive):

    """Set the default interpreted text role."""

    optional_arguments = 1
    final_argument_whitespace = False

    def run(self):
        if not self.arguments:
            if '' in roles._roles:
                # restore the "default" default role
                del roles._roles['']
            return []
        role_name = self.arguments[0]
        role, messages = roles.role(role_name, self.state_machine.language,
                                    self.lineno, self.state.reporter)
        if role is None:
            error = self.state.reporter.error(
                'Unknown interpreted text role "%s".' % role_name,
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            return messages + [error]
        # The empty-string key is the default role used for `text` markup.
        roles._roles[''] = role
        # @@@ should this be local to the document, not the parser?
        return messages
class Title(Directive):

    """Override the document title with the directive's sole argument."""

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True

    def run(self):
        # Record the title on the document node; writers pick it up later.
        document = self.state_machine.document
        document['title'] = self.arguments[0]
        return []
class Date(Directive):

    """Substitution-definition directive that expands to the current date,
    formatted per the directive content (a ``time.strftime`` format)."""

    has_content = True

    def run(self):
        # Only valid inside a substitution definition.
        if not isinstance(self.state, states.SubstitutionDef):
            raise self.error(
                'Invalid context: the "%s" directive can only be used within '
                'a substitution definition.' % self.name)
        format_str = '\n'.join(self.content) or '%Y-%m-%d'
        if sys.version_info< (3, 0):
            # Python 2 strftime wants a byte string format.
            try:
                format_str = format_str.encode(locale_encoding or 'utf-8')
            except UnicodeEncodeError:
                raise self.warning(u'Cannot encode date format string '
                    u'with locale encoding "%s".' % locale_encoding)
        text = time.strftime(format_str)
        if sys.version_info< (3, 0):
            # `text` is a byte string that may contain non-ASCII characters:
            try:
                text = text.decode(locale_encoding or 'utf-8')
            except UnicodeDecodeError:
                text = text.decode(locale_encoding or 'utf-8', 'replace')
                raise self.warning(u'Error decoding "%s"'
                    u'with locale encoding "%s".' % (text, locale_encoding))
        return [nodes.Text(text)]
class TestDirective(Directive):

    """This directive is useful only for testing purposes."""

    optional_arguments = 1
    final_argument_whitespace = True
    option_spec = {'option': directives.unchanged_required}
    has_content = True

    def run(self):
        # Emit an "info" system message echoing how the directive was parsed.
        if self.content:
            text = '\n'.join(self.content)
            info = self.state_machine.reporter.info(
                'Directive processed. Type="%s", arguments=%r, options=%r, '
                'content:' % (self.name, self.arguments, self.options),
                nodes.literal_block(text, text), line=self.lineno)
        else:
            info = self.state_machine.reporter.info(
                'Directive processed. Type="%s", arguments=%r, options=%r, '
                'content: None' % (self.name, self.arguments, self.options),
                line=self.lineno)
        return [info]
# Old-style, functional definition:
#
# def directive_test_function(name, arguments, options, content, lineno,
# content_offset, block_text, state, state_machine):
# """This directive is useful only for testing purposes."""
# if content:
# text = '\n'.join(content)
# info = state_machine.reporter.info(
# 'Directive processed. Type="%s", arguments=%r, options=%r, '
# 'content:' % (name, arguments, options),
# nodes.literal_block(text, text), line=lineno)
# else:
# info = state_machine.reporter.info(
# 'Directive processed. Type="%s", arguments=%r, options=%r, '
# 'content: None' % (name, arguments, options), line=lineno)
# return [info]
#
# directive_test_function.arguments = (0, 1, 1)
# directive_test_function.options = {'option': directives.unchanged_required}
# directive_test_function.content = 1
| apache-2.0 |
cowlicks/odo | odo/utils.py | 3 | 9847 | from __future__ import absolute_import, division, print_function
import inspect
import datetime
import tempfile
import os
import shutil
import numpy as np
from contextlib import contextmanager
from multiprocessing.pool import ThreadPool
from multipledispatch import Dispatcher
from datashape import dshape, Record
from toolz import pluck, get, curry, keyfilter
from .compatibility import unicode
sample = Dispatcher('sample')
def iter_except(func, exception, first=None):
    """Yield ``func()`` results until `exception` is raised.

    Parameters
    ----------
    func : callable
        Repeatedly call this until `exception` is raised.
    exception : Exception
        Stop calling `func` when this is raised.
    first : callable, optional, default ``None``
        Call this first if it isn't ``None``.

    Examples
    --------
    >>> x = {'a': 1, 'b': 2}
    >>> def iterate():
    ...     yield 'a'
    ...     yield 'b'
    ...     yield 'c'
    ...
    >>> keys = iterate()
    >>> diter = iter_except(lambda: x[next(keys)], KeyError)
    >>> list(diter)
    [1, 2]

    Notes
    -----
    * Taken from https://docs.python.org/2/library/itertools.html#recipes
    """
    try:
        if first is not None:
            yield first()
        while True:
            yield func()
    except exception:
        # The designated exception terminates iteration cleanly.
        return
def ext(filename):
    """Return the extension of *filename* without its leading separator."""
    return os.path.splitext(filename)[1].lstrip(os.extsep)
def raises(err, lamda):
    """Return True if calling *lamda* raises *err*, False if it succeeds.

    Exceptions of other types propagate to the caller.
    """
    try:
        lamda()
    except err:
        return True
    return False
def expand_tuples(L):
    """Expand tuple-valued elements of *L* into every flat combination.

    >>> expand_tuples([1, (2, 3)])
    [(1, 2), (1, 3)]

    >>> expand_tuples([1, 2])
    [(1, 2)]
    """
    if not L:
        return [()]
    head = L[0]
    tails = expand_tuples(L[1:])
    if isinstance(head, tuple):
        # Tail-major order: matches the original enumeration order.
        return [(item,) + tail for tail in tails for item in head]
    return [(head,) + tail for tail in tails]
@contextmanager
def tmpfile(extension=''):
    """Context manager yielding a unique temporary path that does not exist.

    The name is reserved via ``tempfile.mkstemp`` and the file immediately
    removed, so the caller may create either a file or a directory at the
    path.  Whatever exists at the path on exit is removed.

    Parameters
    ----------
    extension : str, optional
        Filename extension; a leading dot is added if absent.
    """
    extension = '.' + extension.lstrip('.')
    handle, filename = tempfile.mkstemp(extension)
    os.close(handle)
    os.remove(filename)

    try:
        yield filename
    finally:
        # BUG FIX: cleanup previously ran only on normal exit; if the body
        # of the ``with`` block raised, the temporary file/directory leaked.
        if os.path.exists(filename):
            if os.path.isdir(filename):
                shutil.rmtree(filename)
            else:
                try:
                    os.remove(filename)
                except OSError:  # sometimes we can't remove a generated temp file
                    pass
def keywords(func):
    """Get the argument names of a function (or of a class's ``__init__``).

    >>> def f(x, y=2):
    ...     pass

    >>> keywords(f)
    ['x', 'y']
    """
    if isinstance(func, type):
        return keywords(func.__init__)
    # BUG FIX: ``inspect.getargspec`` was deprecated in Python 3 and removed
    # in 3.11; prefer ``getfullargspec`` when available, falling back for
    # old interpreters.  ``.args`` has the same meaning in both.
    getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    return getspec(func).args
def cls_name(cls):
    """Short display name for a class: bare name for builtins, otherwise
    prefixed with the class's top-level package."""
    module = cls.__module__
    if 'builtin' in module:
        return cls.__name__
    package = module.split('.')[0]
    return '%s.%s' % (package, cls.__name__)
@contextmanager
def filetext(text, extension='', open=open, mode='w'):
    """Context manager for a temporary file pre-filled with *text*.

    The file is created inside a ``tmpfile`` context, so it is cleaned up
    after the block exits.
    """
    with tmpfile(extension=extension) as filename:
        handle = open(filename, mode=mode)
        try:
            handle.write(text)
        finally:
            # Some file-like objects (e.g. test doubles) may lack close().
            try:
                handle.close()
            except AttributeError:
                pass
        yield filename
@contextmanager
def filetexts(d, open=open):
    """Dump a number of textfiles to disk and yield their names.

    d - dict
        a mapping from filename to text like {'a.csv': '1,1\n2,2'}

    The files are removed again after the managed block exits normally.
    """
    for filename, text in d.items():
        handle = open(filename, 'wt')
        try:
            handle.write(text)
        finally:
            # Some file-like objects (e.g. test doubles) may lack close().
            try:
                handle.close()
            except AttributeError:
                pass

    yield list(d)

    for filename in d:
        if os.path.exists(filename):
            os.remove(filename)
def normalize_to_date(dt):
if isinstance(dt, datetime.datetime) and not dt.time():
return dt.date()
else:
return dt
def assert_allclose(lhs, rhs):
for tb in map(zip, lhs, rhs):
for left, right in tb:
if isinstance(left, (np.floating, float)):
# account for nans
assert np.all(np.isclose(left, right, equal_nan=True))
continue
if isinstance(left, datetime.datetime):
left = normalize_to_date(left)
if isinstance(right, datetime.datetime):
right = normalize_to_date(right)
assert left == right
def records_to_tuples(ds, data):
    """Transform records into tuples

    Examples
    --------
    >>> seq = [{'a': 1, 'b': 10}, {'a': 2, 'b': 20}]
    >>> list(records_to_tuples('var * {a: int, b: int}', seq))
    [(1, 10), (2, 20)]

    >>> records_to_tuples('{a: int, b: int}', seq[0])  # single elements
    (1, 10)

    >>> records_to_tuples('var * int', [1, 2, 3])  # pass through on non-records
    [1, 2, 3]

    See Also
    --------
    tuples_to_records
    """
    if isinstance(ds, (str, unicode)):
        ds = dshape(ds)
    if not isinstance(ds.measure, Record):
        # Non-record measures pass through untouched.
        return data
    ndim = len(ds.shape)
    if ndim == 1:
        return pluck(ds.measure.names, data, default=None)
    if ndim == 0:
        return get(ds.measure.names, data)
    raise NotImplementedError()
def tuples_to_records(ds, data):
    """Transform tuples into records

    Examples
    --------
    >>> seq = [(1, 10), (2, 20)]
    >>> list(tuples_to_records('var * {a: int, b: int}', seq))  # doctest: +SKIP
    [{'a': 1, 'b': 10}, {'a': 2, 'b': 20}]

    >>> tuples_to_records('{a: int, b: int}', seq[0])  # doctest: +SKIP
    {'a': 1, 'b': 10}

    >>> tuples_to_records('var * int', [1, 2, 3])  # pass through on non-records
    [1, 2, 3]

    See Also
    --------
    records_to_tuples
    """
    if isinstance(ds, (str, unicode)):
        ds = dshape(ds)
    if not isinstance(ds.measure, Record):
        # Non-record measures pass through untouched.
        return data
    names = ds.measure.names
    ndim = len(ds.shape)
    if ndim == 1:
        return (dict(zip(names, tup)) for tup in data)
    if ndim == 0:
        return dict(zip(names, data))
    raise NotImplementedError()
@contextmanager
def ignoring(*exceptions):
    """Context manager that silently swallows the given exception types."""
    try:
        yield
    except exceptions:
        return
def into_path(*path):
    """Path to file in into directory

    >>> into_path('backends', 'tests', 'myfile.csv')  # doctest: +SKIP
    '/home/user/odo/odo/backends/tests/myfile.csv'
    """
    # Imported lazily to avoid a circular import at module load time.
    import odo
    package_dir = os.path.dirname(odo.__file__)
    return os.path.join(package_dir, *path)
@curry
def pmap(f, iterable):
    """Map `f` over `iterable` in parallel using a ``ThreadPool``.

    The pool is always terminated, even if mapping raises.
    """
    pool = ThreadPool()
    try:
        results = pool.map(f, iterable)
    finally:
        pool.terminate()
    return results
@curry
def write(triple, writer):
    """Write a file using the input from `gentemp` using `writer` and return
    its index and filename.

    Parameters
    ----------
    triple : tuple of int, str, str
        The first element is the index in the set of chunks of a file, the
        second element is the path to write to, the third element is the data
        to write.

    Returns
    -------
    i, filename : int, str
        File's index and filename.  This is used to return the index and
        filename after splitting files.

    Notes
    -----
    This could be adapted to write to an already open handle, which would
    allow, e.g., multipart gzip uploads.  Currently we open write a new file
    every time.
    """
    index, filename, data = triple
    with writer(filename, mode='wb') as handle:
        handle.write(data)
    return index, filename
def gentemp(it, suffix=None, start=0):
    """Yield an index, a temp file, and data for each element in `it`.

    Parameters
    ----------
    it : Iterable
    suffix : str or ``None``, optional
        Suffix to add to each temporary file's name
    start : int, optional
        A integer indicating where to start the numbering of chunks in `it`.
    """
    # aws needs parts to start at 1
    for index, data in enumerate(it, start=start):
        with tmpfile('.into') as filename:
            yield index, filename, data
@curry
def split(filename, nbytes, suffix=None, writer=open, start=0):
    """Split a file into chunks of size `nbytes` with each filename containing
    a suffix specified by `suffix`. The file will be written with the ``write``
    method of an instance of `writer`.

    Parameters
    ----------
    filename : str
        The file to split
    nbytes : int
        Split `filename` into chunks of this size
    suffix : str, optional
    writer : callable, optional
        Callable object to use to write the chunks of `filename`
    start : int, optional
        Index at which to start numbering the chunks.
    """
    with open(filename, mode='rb') as f:
        # BUG FIX: the file is opened in binary mode, so ``f.read`` returns
        # ``b''`` at EOF.  The previous sentinel ``''`` never compares equal
        # to ``b''`` on Python 3, which made this iterator infinite.  On
        # Python 2 ``b''`` and ``''`` are the same object, so this is
        # backward compatible.
        byte_chunks = iter(curry(f.read, nbytes), b'')
        return pmap(write(writer=writer),
                    gentemp(byte_chunks, suffix=suffix, start=start))
def filter_kwargs(f, kwargs):
    """Return a dict of valid kwargs for `f` from a subset of `kwargs`

    Examples
    --------
    >>> def f(a, b=1, c=2):
    ...     return a + b + c
    ...
    >>> raw_kwargs = dict(a=1, b=3, d=4)
    >>> f(**raw_kwargs)
    Traceback (most recent call last):
        ...
    TypeError: f() got an unexpected keyword argument 'd'
    >>> kwargs = filter_kwargs(f, raw_kwargs)
    >>> f(**kwargs)
    6
    """
    valid = set(keywords(f))
    return {k: v for k, v in kwargs.items() if k in valid}
@curry
def copydoc(from_, to):
    """Copy the docstring from one object to another.

    Parameters
    ----------
    from_ : any
        The object to copy the docstring from.
    to : any
        The object to copy the docstring to.

    Returns
    -------
    to : any
        ``to`` with the docstring from ``from_``
    """
    to.__doc__ = from_.__doc__
    return to
| bsd-3-clause |
gnrfan/chichafortunes | fortunes/views.py | 1 | 2195 | #-*- coding: utf-8 -*-
# Create your views here.
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic.simple import direct_to_template
from django.core.urlresolvers import reverse
from django.db import connection, transaction
from django.shortcuts import get_object_or_404
from helpers.shortcuts import set_message
from forms import FortuneForm
from models import Fortune
import strings
def index(request, template='fortunes/index.html'):
    """Main view of fortunes app.

    GET renders the submission form plus a random fortune; POST validates
    and saves a new fortune (recording the submitter's IP), sets a
    confirmation message, and redirects to the homepage.
    """
    form = FortuneForm()
    fortune = None
    if request.method == 'POST':
        form = FortuneForm(data=request.POST)
        if form.is_valid():
            # Record the client IP alongside the new fortune.
            remote_addr = request.META.get('REMOTE_ADDR', None)
            fortune = form.save(remote_addr=remote_addr)
            set_message(strings.FORTUNE_CREATED_MSG, request)
            return HttpResponseRedirect(reverse('homepage'))
    # Exclude the just-created fortune (None on GET) from the random pick.
    random_fortune = Fortune.objects.random(exclude=fortune)
    return direct_to_template(
        request,
        template,
        {'form': form,
         'random_fortune': random_fortune
        }
    )
def fortune_detail(request, url_id, format='text', template='fortunes/fortune_detail.html'):
    """Renders a single fortune: plain text when format == 'text', HTML
    otherwise.  Only accepted and moderated fortunes are visible (404 else).
    """
    fortune = get_object_or_404(Fortune, url_id=url_id, accepted=True, moderated=True)
    if format == 'text':
        return HttpResponse(fortune.as_text(), content_type='text/plain; charset="utf-8"')
    else:
        return direct_to_template(
            request,
            template,
            {'fortune': fortune}
        )
def fortunes_as_text(request):
    """Renders all accepted fortunes as a single plain-text response."""
    fortunes = Fortune.objects.accepted()
    return HttpResponse('\n'.join([f.as_text() for f in fortunes]),
                        content_type='text/plain; charset="utf-8"')
def fortunes_as_html(request, template='fortunes/fortunes.html'):
    """Renders all accepted fortunes as an HTML page."""
    fortunes = Fortune.objects.accepted()
    return direct_to_template(
        request,
        template,
        {'fortunes': fortunes}
    )
| bsd-3-clause |
endlessm/chromium-browser | content/test/gpu/gpu_tests/pixel_integration_test.py | 1 | 11760 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
import time
from gpu_tests import gpu_integration_test
from gpu_tests import pixel_test_pages
from gpu_tests import skia_gold_integration_test_base
from telemetry.util import image_util
test_harness_script = r"""
var domAutomationController = {};
domAutomationController._proceed = false;
domAutomationController._readyForActions = false;
domAutomationController._succeeded = undefined;
domAutomationController._finished = false;
domAutomationController._originalLog = window.console.log;
domAutomationController._messages = '';
domAutomationController.log = function(msg) {
domAutomationController._messages += msg + "\n";
domAutomationController._originalLog.apply(window.console, [msg]);
}
domAutomationController.send = function(msg) {
domAutomationController._proceed = true;
let lmsg = msg.toLowerCase();
if (lmsg == "ready") {
domAutomationController._readyForActions = true;
} else {
domAutomationController._finished = true;
// Do not squelch any previous failures. Show any new ones.
if (domAutomationController._succeeded === undefined ||
domAutomationController._succeeded)
domAutomationController._succeeded = (lmsg == "success");
}
}
window.domAutomationController = domAutomationController;
"""
class PixelIntegrationTest(
skia_gold_integration_test_base.SkiaGoldIntegrationTestBase):
"""GPU pixel tests backed by Skia Gold and Telemetry."""
test_base_name = 'Pixel'
    @classmethod
    def Name(cls):
        """The name by which this test is invoked on the command line."""
        # Keep in sync with the test harness' suite registration.
        return 'pixel'
    @classmethod
    def GenerateGpuTests(cls, options):
        """Yield (test_name, url, page) tuples for every pixel test page.

        Platform-specific page sets are appended based on sys.platform;
        pages that cannot run on the current configuration are expected to
        be skipped via test expectations.
        """
        cls.SetParsedCommandLineOptions(options)
        namespace = pixel_test_pages.PixelTestPages
        pages = namespace.DefaultPages(cls.test_base_name)
        pages += namespace.GpuRasterizationPages(cls.test_base_name)
        pages += namespace.ExperimentalCanvasFeaturesPages(cls.test_base_name)
        pages += namespace.PaintWorkletPages(cls.test_base_name)
        # pages += namespace.NoGpuProcessPages(cls.test_base_name)
        # The following pages should run only on platforms where SwiftShader is
        # enabled. They are skipped on other platforms through test expectations.
        # pages += namespace.SwiftShaderPages(cls.test_base_name)
        if sys.platform.startswith('darwin'):
            pages += namespace.MacSpecificPages(cls.test_base_name)
            # Unfortunately we don't have a browser instance here so can't tell
            # whether we should really run these tests. They're short-circuited to a
            # certain degree on the other platforms.
            pages += namespace.DualGPUMacSpecificPages(cls.test_base_name)
        if sys.platform.startswith('win'):
            pages += namespace.DirectCompositionPages(cls.test_base_name)
            pages += namespace.LowLatencySwapChainPages(cls.test_base_name)
            pages += namespace.HdrTestPages(cls.test_base_name)
        for p in pages:
            yield (p.name, skia_gold_integration_test_base.GPU_RELATIVE_PATH + p.url,
                   (p))
def RunActualGpuTest(self, test_path, *args):
page = args[0]
# Some pixel tests require non-standard browser arguments. Need to
# check before running each page that it can run in the current
# browser instance.
self.RestartBrowserIfNecessaryWithArgs(
self._AddDefaultArgs(page.browser_args))
url = self.UrlOfStaticFilePath(test_path)
# This property actually comes off the class, not 'self'.
tab = self.tab
tab.Navigate(url, script_to_evaluate_on_commit=test_harness_script)
tab.action_runner.WaitForJavaScriptCondition(
'domAutomationController._proceed', timeout=300)
do_page_action = tab.EvaluateJavaScript(
'domAutomationController._readyForActions')
try:
if do_page_action:
# The page action may itself signal test failure via self.fail().
self._DoPageAction(tab, page)
self._RunSkiaGoldBasedPixelTest(page)
finally:
test_messages = _TestHarnessMessages(tab)
if test_messages:
logging.info('Logging messages from the test:\n' + test_messages)
if do_page_action or page.restart_browser_after_test:
self._RestartBrowser(
'Must restart after page actions or if required by test')
if do_page_action and self._IsDualGPUMacLaptop():
# Give the system a few seconds to reliably indicate that the
# low-power GPU is active again, to avoid race conditions if the next
# test makes assertions about the active GPU.
time.sleep(4)
def GetExpectedCrashes(self, args):
"""Returns which crashes, per process type, to expect for the current test.
Args:
args: The list passed to _RunGpuTest()
Returns:
A dictionary mapping crash types as strings to the number of expected
crashes of that type. Examples include 'gpu' for the GPU process,
'renderer' for the renderer process, and 'browser' for the browser
process.
"""
# args[0] is the PixelTestPage for the current test.
return args[0].expected_per_process_crashes
def _RunSkiaGoldBasedPixelTest(self, page):
"""Captures and compares a test image using Skia Gold.
Raises an Exception if the comparison fails.
Args:
page: the GPU PixelTestPage object for the test.
"""
tab = self.tab
# Actually run the test and capture the screenshot.
if not tab.EvaluateJavaScript('domAutomationController._succeeded'):
self.fail('page indicated test failure')
screenshot = tab.Screenshot(5)
if screenshot is None:
self.fail('Could not capture screenshot')
dpr = tab.EvaluateJavaScript('window.devicePixelRatio')
if page.test_rect:
screenshot = image_util.Crop(screenshot, int(page.test_rect[0] * dpr),
int(page.test_rect[1] * dpr),
int(page.test_rect[2] * dpr),
int(page.test_rect[3] * dpr))
build_id_args = self._GetBuildIdArgs()
# Compare images against approved images/colors.
if page.expected_colors:
# Use expected colors instead of hash comparison for validation.
self._ValidateScreenshotSamplesWithSkiaGold(tab, page, screenshot, dpr,
build_id_args)
return
image_name = self._UrlToImageName(page.name)
self._UploadTestResultToSkiaGold(
image_name, screenshot, page, build_id_args=build_id_args)
def _DoPageAction(self, tab, page):
getattr(self, '_' + page.optional_action)(tab, page)
# Now that we've done the page's specific action, wait for it to
# report completion.
tab.action_runner.WaitForJavaScriptCondition(
'domAutomationController._finished', timeout=300)
def _AssertLowPowerGPU(self):
if self._IsDualGPUMacLaptop():
if not self._IsIntelGPUActive():
self.fail('Low power GPU should have been active but wasn\'t')
def _AssertHighPerformanceGPU(self):
if self._IsDualGPUMacLaptop():
if self._IsIntelGPUActive():
self.fail('High performance GPU should have been active but wasn\'t')
#
# Optional actions pages can take.
# These are specified as methods taking the tab and the page as
# arguments.
#
def _CrashGpuProcess(self, tab, page): # pylint: disable=no-self-use
# Crash the GPU process.
#
# This used to create a new tab and navigate it to
# chrome://gpucrash, but there was enough unreliability
# navigating between these tabs (one of which was created solely
# in order to navigate to chrome://gpucrash) that the simpler
# solution of provoking the GPU process crash from this renderer
# process was chosen.
del page # Unused in this particular action.
tab.EvaluateJavaScript('chrome.gpuBenchmarking.crashGpuProcess()')
def _SwitchTabs(self, tab, page):
del page # Unused in this particular action.
if not tab.browser.supports_tab_control:
self.fail('Browser must support tab control')
dummy_tab = tab.browser.tabs.New()
dummy_tab.Activate()
# Wait for 2 seconds so that new tab becomes visible.
dummy_tab.action_runner.Wait(2)
tab.Activate()
def _RunTestWithHighPerformanceTab(self, tab, page):
del page # Unused in this particular action.
if not self._IsDualGPUMacLaptop():
# Short-circuit this test.
logging.info('Short-circuiting test because not running on dual-GPU Mac '
'laptop')
tab.EvaluateJavaScript('initialize(false)')
tab.action_runner.WaitForJavaScriptCondition(
'domAutomationController._readyForActions', timeout=30)
tab.EvaluateJavaScript('runToCompletion()')
return
# Reset the ready state of the harness.
tab.EvaluateJavaScript('domAutomationController._readyForActions = false')
high_performance_tab = tab.browser.tabs.New()
high_performance_tab.Navigate(
self.
UrlOfStaticFilePath(skia_gold_integration_test_base.GPU_RELATIVE_PATH +
'functional_webgl_high_performance.html'),
script_to_evaluate_on_commit=test_harness_script)
high_performance_tab.action_runner.WaitForJavaScriptCondition(
'domAutomationController._finished', timeout=30)
# Wait a few seconds for the GPU switched notification to propagate
# throughout the system.
time.sleep(5)
# Switch back to the main tab and quickly start its rendering, while the
# high-power GPU is still active.
tab.Activate()
tab.EvaluateJavaScript('initialize(true)')
tab.action_runner.WaitForJavaScriptCondition(
'domAutomationController._readyForActions', timeout=30)
# Close the high-performance tab.
high_performance_tab.Close()
# Wait for ~15 seconds for the system to switch back to the
# integrated GPU.
time.sleep(15)
# Run the page to completion.
tab.EvaluateJavaScript('runToCompletion()')
def _RunLowToHighPowerTest(self, tab, page):
del page # Unused in this particular action.
is_dual_gpu = self._IsDualGPUMacLaptop()
tab.EvaluateJavaScript('initialize(' +
('true' if is_dual_gpu else 'false') + ')')
# The harness above will take care of waiting for the test to
# complete with either a success or failure.
def _RunOffscreenCanvasIBRCWebGLTest(self, tab, page):
del page # Unused in this particular action.
self._AssertLowPowerGPU()
tab.EvaluateJavaScript('setup()')
# Wait a few seconds for any (incorrect) GPU switched
# notifications to propagate throughout the system.
time.sleep(5)
self._AssertLowPowerGPU()
tab.EvaluateJavaScript('render()')
def _RunOffscreenCanvasIBRCWebGLHighPerfTest(self, tab, page):
del page # Unused in this particular action.
self._AssertLowPowerGPU()
tab.EvaluateJavaScript('setup(true)')
# Wait a few seconds for any (incorrect) GPU switched
# notifications to propagate throughout the system.
time.sleep(5)
self._AssertHighPerformanceGPU()
tab.EvaluateJavaScript('render()')
@classmethod
def ExpectationsFiles(cls):
return [
os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'test_expectations',
'pixel_expectations.txt')
]
def _TestHarnessMessages(tab):
return tab.EvaluateJavaScript('domAutomationController._messages')
def load_tests(loader, tests, pattern):
  # unittest discovery hook: defer to the GPU integration test loader so all
  # tests in this module are collected uniformly.
  del loader, tests, pattern # Unused.
  return gpu_integration_test.LoadAllTestsInModule(sys.modules[__name__])
| bsd-3-clause |
glwu/python-for-android | python3-alpha/python3-src/Lib/unittest/util.py | 794 | 4157 | """Various utility functions."""
from collections import namedtuple, OrderedDict
__unittest = True
_MAX_LENGTH = 80
def safe_repr(obj, short=False):
    """Return repr(obj), tolerating a broken __repr__ and, when *short* is
    true, truncating output longer than _MAX_LENGTH characters."""
    try:
        rendered = repr(obj)
    except Exception:
        # Fall back to the default object repr if the object's own fails.
        rendered = object.__repr__(obj)
    if short and len(rendered) >= _MAX_LENGTH:
        return rendered[:_MAX_LENGTH] + ' [truncated]...'
    return rendered
def strclass(cls):
    """Return the fully qualified name of *cls* as 'module.ClassName'."""
    return "{0}.{1}".format(cls.__module__, cls.__name__)
def sorted_list_difference(expected, actual):
    """Finds elements in only one or the other of two, sorted input lists.
    Returns a two-element tuple of lists.    The first list contains those
    elements in the "expected" list but not in the "actual" list, and the
    second contains those elements in the "actual" list but not in the
    "expected" list.    Duplicate elements in either input list are ignored.
    """
    # Classic merge-style walk over two sorted lists with indices i and j.
    i = j = 0
    missing = []
    unexpected = []
    while True:
        try:
            e = expected[i]
            a = actual[j]
            if e < a:
                # e is absent from actual; record it and skip its duplicates.
                missing.append(e)
                i += 1
                while expected[i] == e:
                    i += 1
            elif e > a:
                # a is absent from expected; record it and skip its duplicates.
                unexpected.append(a)
                j += 1
                while actual[j] == a:
                    j += 1
            else:
                # Present in both: advance past duplicates on both sides.  The
                # finally block guarantees j advances even when the duplicate
                # scan on expected runs off the end and raises IndexError.
                i += 1
                try:
                    while expected[i] == e:
                        i += 1
                finally:
                    j += 1
                    while actual[j] == a:
                        j += 1
        except IndexError:
            # One list is exhausted; whatever remains in the other list
            # belongs wholesale to its difference bucket.
            missing.extend(expected[i:])
            unexpected.extend(actual[j:])
            break
    return missing, unexpected
def unorderable_list_difference(expected, actual):
    """Same behavior as sorted_list_difference but
    for lists of unorderable items (like dicts).
    As it does a linear search per item (remove) it
    has O(n*n) performance."""
    missing = []
    # Drain *expected* in place; matched items are also removed from *actual*,
    # so whatever survives in *actual* is by definition unexpected.
    while expected:
        candidate = expected.pop()
        if candidate in actual:
            actual.remove(candidate)
        else:
            missing.append(candidate)
    return missing, actual
def three_way_cmp(x, y):
    """Return -1 if x < y, 0 if x == y and 1 if x > y"""
    if x < y:
        return -1
    return 1 if x > y else 0
_Mismatch = namedtuple('Mismatch', 'actual expected value')
def _count_diff_all_purpose(actual, expected):
    'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ'
    # elements need not be hashable
    # Work on copies; matched slots are overwritten with the NULL sentinel so
    # each distinct element is counted exactly once (O(n*m) equality scans).
    s, t = list(actual), list(expected)
    m, n = len(s), len(t)
    NULL = object()
    result = []
    for i, elem in enumerate(s):
        if elem is NULL:
            continue
        cnt_s = cnt_t = 0
        # Count elem in the remainder of s, consuming matches.
        for j in range(i, m):
            if s[j] == elem:
                cnt_s += 1
                s[j] = NULL
        # Count elem in t, consuming matches there too.
        for j, other_elem in enumerate(t):
            if other_elem == elem:
                cnt_t += 1
                t[j] = NULL
        if cnt_s != cnt_t:
            diff = _Mismatch(cnt_s, cnt_t, elem)
            result.append(diff)
    # Anything left unconsumed in t never appeared in s at all.
    for i, elem in enumerate(t):
        if elem is NULL:
            continue
        cnt_t = 0
        for j in range(i, n):
            if t[j] == elem:
                cnt_t += 1
                t[j] = NULL
        diff = _Mismatch(0, cnt_t, elem)
        result.append(diff)
    return result
def _ordered_count(iterable):
'Return dict of element counts, in the order they were first seen'
c = OrderedDict()
for elem in iterable:
c[elem] = c.get(elem, 0) + 1
return c
def _count_diff_hashable(actual, expected):
    'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ'
    # elements must be hashable
    actual_counts = _ordered_count(actual)
    expected_counts = _ordered_count(expected)
    diffs = []
    # First pass: elements seen in actual whose counts disagree.
    for elem, have in actual_counts.items():
        want = expected_counts.get(elem, 0)
        if have != want:
            diffs.append(_Mismatch(have, want, elem))
    # Second pass: elements that only ever appear in expected.
    for elem, want in expected_counts.items():
        if elem not in actual_counts:
            diffs.append(_Mismatch(0, want, elem))
    return diffs
| apache-2.0 |
cjqian/incubator-airflow | tests/contrib/operators/test_emr_terminate_job_flow_operator.py | 56 | 1676 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mock import MagicMock, patch
from airflow import configuration
from airflow.contrib.operators.emr_terminate_job_flow_operator import EmrTerminateJobFlowOperator
TERMINATE_SUCCESS_RETURN = {
'ResponseMetadata': {
'HTTPStatusCode': 200
}
}
class TestEmrTerminateJobFlowOperator(unittest.TestCase):
    # Unit test for EmrTerminateJobFlowOperator.  boto3 is fully mocked, so
    # no AWS credentials or network access are required.
    def setUp(self):
        configuration.load_test_config()
        # Mock out the emr_client (moto has incorrect response)
        mock_emr_client = MagicMock()
        mock_emr_client.terminate_job_flows.return_value = TERMINATE_SUCCESS_RETURN
        # Mock out the emr_client creator
        self.boto3_client_mock = MagicMock(return_value=mock_emr_client)
    def test_execute_terminates_the_job_flow_and_does_not_error(self):
        # execute() should route through the patched boto3 client and return
        # without raising when the API reports HTTP 200.
        with patch('boto3.client', self.boto3_client_mock):
            operator = EmrTerminateJobFlowOperator(
                task_id='test_task',
                job_flow_id='j-8989898989',
                aws_conn_id='aws_default'
            )
            operator.execute(None)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
GodBlessPP/W17test_2nd_1 | static/Brython3.1.1-20150328-091302/Lib/ui/widget.py | 706 | 1774 | import random
from browser import doc
def getMousePosition(e):
    """Return the page-relative mouse position of event *e* as {'x': ..., 'y': ...}.

    Falls back to the global window event when *e* is None, and to client
    coordinates plus the document scroll offsets on browsers that do not
    provide pageX/pageY.  Returns {'x': 0, 'y': 0} when no position
    information is available.
    """
    if e is None:
        # Bug fix: 'win' was referenced here without ever being imported,
        # which raised a NameError; it is now imported from browser above.
        e = win.event
    if e.pageX or e.pageY:
        return {'x': e.pageX, 'y': e.pageY}
    if e.clientX or e.clientY:
        _posx = e.clientX + doc.body.scrollLeft + doc.documentElement.scrollLeft
        _posy = e.clientY + doc.body.scrollTop + doc.documentElement.scrollTop
        return {'x': _posx, 'y': _posy}
    return {'x': 0, 'y': 0}
class Widget:
    """Base wrapper for a DOM element, with a generated or explicit id."""

    def __init__(self, element, type, id=None):
        self._element = element
        if id is not None:
            self._element.id = id
        else:
            # Generate a reasonably unique id such as "dialog_73125".
            self._element.id = '%s_%s' % (type, int(100000 * random.random()))

    def get_id(self):
        """Return the DOM id assigned to the wrapped element."""
        return self._element.id

    def attach(self, element_id):
        """Append this component to the DOM element whose id is *element_id*."""
        # Brython overloads '<=' on DOM nodes to mean "append child".
        doc[element_id] <= self._element

    def show(self):
        """Make the element visible."""
        self._element.display = 'block'

    def hide(self):
        """Hide the element."""
        self._element.display = 'none'
class DraggableWidget(Widget):
    # Widget that can be moved with the mouse: dragging starts on mousedown,
    # the element follows the pointer, and dragging stops on mouseup.
    def __init__(self, element, type, id=None):
        Widget.__init__(self, element, type, id)
        def drag(e):
            # Reposition the element so the grab point stays under the cursor.
            self._element.style.top='%spx' % (e.clientY - self._deltaY)
            self._element.style.left='%spx' % (e.clientX - self._deltaX)
        def mouseDown(e):
            # Remember the offset between cursor and element corner, then
            # track mouse movement over the whole document.
            self._element.style.position='absolute'
            self._deltaX=e.clientX - self._element.offsetLeft
            self._deltaY=e.clientY - self._element.offsetTop
            doc.bind('mousemove', drag)
        def mouseUp(e):
            # Stop tracking once the button is released.
            doc.unbind('mousemove')
        self._element.bind('mousedown', mouseDown)
        self._element.bind('mouseup', mouseUp)
| gpl-3.0 |
firepick1/jansson | doc/ext/refcounting.py | 17 | 1715 | """
refcounting
~~~~~~~~~~~
Reference count annotations for C API functions. Has the same
result as the sphinx.ext.refcounting extension but works for all
functions regardless of the signature, and the reference counting
information is written inline with the documentation instead of a
separate file.
Adds a new directive "refcounting". The directive has no content
and one required positional parameter:: "new" or "borrow".
Example:
.. cfunction:: json_t *json_object(void)
.. refcounting:: new
<description of the json_object function>
:copyright: Copyright (c) 2009-2014 Petri Lehtinen <petri@digip.org>
:license: MIT, see LICENSE for details.
"""
from docutils import nodes
# Inline doctree node carrying a reference-count annotation; subclassing
# nodes.emphasis gives unknown writers a sensible fallback rendering.
class refcounting(nodes.emphasis): pass
def visit(self, node):
    # Generic writers (latex/text) render the annotation like emphasis.
    self.visit_emphasis(node)
def depart(self, node):
    # Counterpart of visit() for generic writers.
    self.depart_emphasis(node)
def html_visit(self, node):
    # HTML writer: emit <em class="refcount"> so the annotation can be styled.
    self.body.append(self.starttag(node, 'em', '', CLASS='refcount'))
def html_depart(self, node):
    # Close the tag opened by html_visit().
    self.body.append('</em>')
def refcounting_directive(name, arguments, options, content, lineno,
                          content_offset, block_text, state, state_machine):
    """Handle the ``refcounting`` directive.

    The single required argument must be 'new' or 'borrow'; the directive
    expands to an inline refcounting node with the corresponding text.
    """
    if arguments[0] == 'borrow':
        text = 'Return value: Borrowed reference.'
    elif arguments[0] == 'new':
        text = 'Return value: New reference.'
    else:
        # Bug fix: this previously raised the undefined name 'Error', which
        # produced a confusing NameError instead of the intended message.
        raise ValueError('Valid arguments: new, borrow')
    return [refcounting(text, text)]
def setup(app):
    # Sphinx extension entry point: register the custom node with its
    # writer-specific render functions, and the directive that produces it
    # (no content, one required positional argument).
    app.add_node(refcounting,
                 html=(html_visit, html_depart),
                 latex=(visit, depart),
                 text=(visit, depart))
    app.add_directive('refcounting', refcounting_directive, 0, (1, 0, 0))
| mit |
OPM/ResInsight | ThirdParty/Ert/python/ecl/well/well_segment.py | 2 | 2422 | from cwrap import BaseCClass
from ecl import EclPrototype
class WellSegment(BaseCClass):
    """Read-only wrapper around a single well segment held in native code.

    The EclPrototype attributes below bind the C accessor functions used by
    the methods; instances are created by the native layer, never directly.
    """
    TYPE_NAME = "well_segment"
    _active = EclPrototype("bool well_segment_active(well_segment)")
    _main_stem = EclPrototype("bool well_segment_main_stem(well_segment)")
    _nearest_wellhead = EclPrototype("bool well_segment_nearest_wellhead(well_segment)")
    _id = EclPrototype("int well_segment_get_id(well_segment)")
    _link_count = EclPrototype("int well_segment_get_link_count(well_segment)")
    _branch_id = EclPrototype("int well_segment_get_branch_id(well_segment)")
    _outlet_id = EclPrototype("int well_segment_get_outlet_id(well_segment)")
    _depth = EclPrototype("double well_segment_get_depth(well_segment)")
    _length = EclPrototype("double well_segment_get_length(well_segment)")
    _total_length = EclPrototype("double well_segment_get_total_length(well_segment)")
    _diameter = EclPrototype("double well_segment_get_diameter(well_segment)")
    def __init__(self):
        raise NotImplementedError("Class can not be instantiated directly")
    def free(self):
        # Memory is owned by the enclosing well state; nothing to release here.
        pass
    def __repr__(self):
        return 'WellSegment(%s) at 0x%x' % (str(self), self._address())
    def __str__(self):
        return "{Segment ID:%d BranchID:%d Length:%g}" % (self.id() , self.branchId() , self.length())
    def id(self):
        """ @rtype: int """
        return self._id()
    def linkCount(self):
        """ @rtype: int """
        return self._link_count()
    def branchId(self):
        """ @rtype: int """
        return self._branch_id()
    def outletId(self):
        """ @rtype: int """
        return self._outlet_id()
    def isActive(self):
        """ @rtype: bool """
        return self._active()
    def isMainStem(self):
        """ @rtype: bool """
        return self._main_stem()
    def isNearestWellHead(self):
        """ @rtype: bool """
        return self._nearest_wellhead()
    def depth(self):
        """ @rtype: float """
        return self._depth()
    def __len__(self):
        # NOTE(review): __len__ must return an int, but length() returns a
        # float, so len(segment) raises TypeError — confirm whether this is
        # ever called or should return int(self.length()).
        return self.length()
    def length(self):
        """ @rtype: float """
        return self._length()
    def totalLength(self):
        """ @rtype: float """
        return self._total_length()
    def diameter(self):
        """ @rtype: float """
        return self._diameter()
| gpl-3.0 |
coder-han/hugula | Client/tools/site-packages/pyExcelerator/Row.py | 15 | 8282 | #!/usr/bin/env python
# -*- coding: windows-1251 -*-
# Copyright (C) 2005 Roman V. Kiseliov
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# 3. All advertising materials mentioning features or use of this
# software must display the following acknowledgment:
# "This product includes software developed by
# Roman V. Kiseliov <roman@kiseliov.ru>."
#
# 4. Redistributions of any form whatsoever must retain the following
# acknowledgment:
# "This product includes software developed by
# Roman V. Kiseliov <roman@kiseliov.ru>."
#
# THIS SOFTWARE IS PROVIDED BY Roman V. Kiseliov ``AS IS'' AND ANY
# EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Roman V. Kiseliov OR
# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
__rev_id__ = """$Id: Row.py,v 1.6 2005/08/11 08:53:48 rvk Exp $"""
import BIFFRecords
from Deco import *
from Worksheet import Worksheet
import Style
import Cell
import ExcelFormula
import datetime as dt
class Row(object):
    """A single worksheet row (Python 2 / pyExcelerator).

    Accumulates the cells written into this row, tracks row-level state
    (height, outline level, default style, bounding columns) and serialises
    both the ROW record and the contained cell records to BIFF data.
    """
    __slots__ = ["__init__",
                 "__adjust_height",
                 "__adjust_bound_col_idx",
                 "__excel_date_dt",
                 "get_height_in_pixels",
                 "set_style",
                 "get_xf_index",
                 "get_cells_count",
                 "get_min_col",
                 "get_max_col",
                 "get_str_count",
                 "get_row_biff_data",
                 "get_cells_biff_data",
                 "get_index",
                 "write",
                 "write_blanks",
                 # private variables
                 "__idx",
                 "__parent",
                 "__parent_wb",
                 "__cells",
                 "__min_col_idx",
                 "__max_col_idx",
                 "__total_str",
                 "__xf_index",
                 "__has_default_format",
                 "__height_in_pixels",
                 # public variables
                 "height",
                 "has_default_height",
                 "level",
                 "collapse",
                 "hidden",
                 "space_above",
                 "space_below"]
    #################################################################
    ## Constructor
    #################################################################
    def __init__(self, index, parent_sheet):
        """Create row *index* belonging to *parent_sheet*."""
        self.__idx = index
        self.__parent = parent_sheet
        self.__parent_wb = parent_sheet.get_parent()
        self.__cells = []
        self.__min_col_idx = 0
        self.__max_col_idx = 0
        self.__total_str = 0
        # 0x0F is the default XF (style) index in BIFF.
        self.__xf_index = 0x0F
        self.__has_default_format = 0
        self.__height_in_pixels = 0x11
        self.height = 0x00FF
        self.has_default_height = 0x00
        self.level = 0
        self.collapse = 0
        self.hidden = 0
        self.space_above = 0
        self.space_below = 0
    def __adjust_height(self, style):
        """Grow the cached pixel height to fit *style*'s font, if needed."""
        twips = style.font.height
        points = float(twips)/20.0
        # Cell height in pixels can be calculated by following approx. formula:
        # cell height in pixels = font height in points * 83/50 + 2/5
        # It works when screen resolution is 96 dpi
        pix = int(round(points*83.0/50.0 + 2.0/5.0))
        if pix > self.__height_in_pixels:
            self.__height_in_pixels = pix
    def __adjust_bound_col_idx(self, *args):
        """Widen the [min_col, max_col] bounds to include every index given."""
        for arg in args:
            if arg < self.__min_col_idx:
                self.__min_col_idx = arg
            elif arg > self.__max_col_idx:
                self.__max_col_idx = arg
    def __excel_date_dt(self, date):
        """Convert a date/time/datetime into an Excel serial date number."""
        if isinstance(date, dt.date) and (not isinstance(date, dt.datetime)):
            epoch = dt.date(1899, 12, 31)
        elif isinstance(date, dt.time):
            date = dt.datetime.combine(dt.datetime(1900, 1, 1), date)
            epoch = dt.datetime(1900, 1, 1, 0, 0, 0)
        else:
            epoch = dt.datetime(1899, 12, 31, 0, 0, 0)
        delta = date - epoch
        xldate = delta.days + float(delta.seconds) / (24*60*60)
        # Add a day for Excel's missing leap day in 1900
        if xldate > 59:
            xldate += 1
        return xldate
    def get_height_in_pixels(self):
        return self.__height_in_pixels
    @accepts(object, Style.XFStyle)
    def set_style(self, style):
        """Set the row's default style, registering it with the workbook."""
        self.__adjust_height(style)
        self.__xf_index = self.__parent_wb.add_style(style)
    def get_xf_index(self):
        return self.__xf_index
    def get_cells_count(self):
        return len(self.__cells)
    def get_min_col(self):
        return self.__min_col_idx
    def get_max_col(self):
        # Bug fix: this previously returned self.__min_col_idx, so callers
        # always saw the row's minimum column instead of its maximum.
        return self.__max_col_idx
    def get_str_count(self):
        return self.__total_str
    def get_row_biff_data(self):
        """Serialise this row's ROW record (height, outline, style flags)."""
        height_options = (self.height & 0x07FFF)
        height_options |= (self.has_default_height & 0x01) << 15
        options = (self.level & 0x07) << 0
        options |= (self.collapse & 0x01) << 4
        options |= (self.hidden & 0x01) << 5
        options |= (0x00 & 0x01) << 6
        options |= (0x01 & 0x01) << 8
        # Bit 7 signals that the row carries an explicit (non-default) style.
        if self.__xf_index != 0x0F:
            options |= (0x01 & 0x01) << 7
        else:
            options |= (0x00 & 0x01) << 7
        options |= (self.__xf_index & 0x0FFF) << 16
        options |= (0x00 & self.space_above) << 28
        options |= (0x00 & self.space_below) << 29
        return BIFFRecords.RowRecord(self.__idx, self.__min_col_idx, self.__max_col_idx, height_options, options).get()
    def get_cells_biff_data(self):
        """Concatenate the BIFF records of all cells in this row."""
        return ''.join([ cell.get_biff_data() for cell in self.__cells ])
    def get_index(self):
        return self.__idx
    @accepts(object, int, (str, unicode, int, float, dt.datetime, dt.time, dt.date, ExcelFormula.Formula), Style.XFStyle)
    def write(self, col, label, style):
        """Write *label* into column *col*, choosing the cell type from it."""
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(col)
        if isinstance(label, (str, unicode)):
            if len(label) > 0:
                # Non-empty strings go into the shared string table.
                self.__cells.extend([ Cell.StrCell(self, col, self.__parent_wb.add_style(style), self.__parent_wb.add_str(label)) ])
                self.__total_str += 1
            else:
                self.__cells.extend([ Cell.BlankCell(self, col, self.__parent_wb.add_style(style)) ])
        elif isinstance(label, (int, float)):
            self.__cells.extend([ Cell.NumberCell(self, col, self.__parent_wb.add_style(style), label) ])
        elif isinstance(label, (dt.datetime, dt.time)):
            # Dates are stored as Excel serial numbers with a date format.
            self.__cells.extend([ Cell.NumberCell(self, col, self.__parent_wb.add_style(style), self.__excel_date_dt(label)) ])
        else:
            self.__cells.extend([ Cell.FormulaCell(self, col, self.__parent_wb.add_style(style), label) ])
    @accepts(object, int, int, Style.XFStyle)
    def write_blanks(self, c1, c2, style):
        """Write a run of styled blank cells covering columns c1..c2."""
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(c1, c2)
        self.__cells.extend([ Cell.MulBlankCell(self, c1, c2, self.__parent_wb.add_style(style)) ])
| mit |
tjcsl/ion | intranet/apps/eighth/management/commands/update_counselors.py | 1 | 2090 | #!/usr/bin/env python3
import csv
import sys
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    help = "Update counselor information"
    def add_arguments(self, parser):
        """Register the CSV path argument and the --run safety flag."""
        parser.add_argument("filename", type=str, help="Path to SIS import CSV with a Student ID and Counselor column")
        parser.add_argument("--run", action="store_true", dest="run", default=False, help="Actually modifies the DB")
    def handle(self, *args, **kwargs):
        """Sync each student's counselor from the SIS export.

        Runs in pretend (dry-run) mode unless --run is passed: planned
        changes are printed either way, but only saved with --run.
        """
        data = []
        # We assume that the provided file has up-to-date information.
        # DO NOT RUN IF YOU DON'T HAVE UP-TO-DATE INFORMATION
        filename = kwargs["filename"]
        to_run = kwargs["run"]
        if not to_run:
            sys.stdout.write("This script is running in pretend mode.\n")
            sys.stdout.write("Pass --run to actually run this script.\n")
            sys.stdout.write("Please MAKE SURE you have updated info before running this script.\n")
            sys.stdout.write("Actually running is a destructive operation.\n")
        with open(filename) as f:
            contents = csv.DictReader(f)
            data = list(contents)
        counselors = get_user_model().objects.filter(user_type="counselor")
        for row in data:
            sid = row["Student ID"].strip()
            # We assume that every single counselor has a unique last name
            # If this is not true, please edit this file
            counselor = row["Counselor"].split(",")[0].strip()
            counselor = counselors.get(last_name=counselor)
            u = get_user_model().objects.user_with_student_id(sid)
            if u is None:
                sys.stdout.write("There is no Ion account found for SID {}\n".format(sid))
                continue
            if counselor != u.counselor:
                sys.stdout.write("Switching counselor for SID {} from {} to {}\n".format(sid, u.counselor, counselor))
                if to_run:
                    u.counselor = counselor
                    u.save()
| gpl-2.0 |
shanot/imp | modules/multifit/test/test_surface_sample_protein.py | 2 | 1150 | import IMP
import IMP.test
import sys
import IMP.em
import IMP.multifit
import os
class Tests(IMP.test.TestCase):
    """Tests for sampled density maps"""
    def setUp(self):
        """initialize IMP environment create particles"""
        IMP.test.TestCase.setUp(self)
        # init IMP model ( the environment)
        self.mdl = IMP.Model()
        # Load only the C-alpha atoms of chain A and give them radii.
        self.mh = IMP.atom.read_pdb(self.get_input_file_name("1z5s_A.pdb"),
                                    self.mdl, IMP.atom.CAlphaPDBSelector())
        IMP.atom.add_radii(self.mh)
        self.particles = IMP.core.get_leaves(self.mh)
    def test_add_surface_attribute(self):
        """Check that reading a map back in preserves the stdevs"""
        IMP.set_log_level(IMP.VERBOSE)
        voxel_size = 1.
        IMP.multifit.add_surface_index(self.mh, voxel_size)
        shell_key = IMP.FloatKey("surf_ind")
        # Every leaf particle should now carry a surface index above 3.
        for p in self.particles:
            # print p.get_particle().get_value(shell_key)
            self.assertGreater(p.get_particle().get_value(shell_key), 3.,
                               "map was not sampled correctly")
| gpl-3.0 |
milchakov/omim | tools/python/maps_generator/utils/file.py | 2 | 5544 | import errno
import functools
import glob
import logging
import os
import shutil
from functools import partial
from multiprocessing.pool import ThreadPool
from typing import AnyStr
from typing import Dict
from typing import List
from typing import Optional
from urllib.parse import unquote
from urllib.parse import urljoin
from urllib.parse import urlparse
import requests
from bs4 import BeautifulSoup
from requests_file import FileAdapter
from maps_generator.utils.md5 import check_md5
from maps_generator.utils.md5 import md5_ext
logger = logging.getLogger("maps_generator")
def is_executable(fpath: AnyStr) -> bool:
    """Tell whether *fpath* names a regular file with the execute bit set."""
    if not os.path.isfile(fpath):
        return False
    return os.access(fpath, os.X_OK)
@functools.lru_cache()
def find_executable(path: AnyStr, exe: Optional[AnyStr] = None) -> AnyStr:
    """Locate an executable file.

    With *exe* unset, *path* itself must be executable and is returned.
    Otherwise the directory tree under *path* is searched recursively for
    an executable named *exe*.  Raises FileNotFoundError on failure.
    """
    def _runnable(candidate: AnyStr) -> bool:
        # Same predicate as the module-level is_executable helper.
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    if exe is None:
        if _runnable(path):
            return path
        raise FileNotFoundError(path)

    for candidate in glob.iglob(f"{path}/**/{exe}", recursive=True):
        if _runnable(candidate):
            return candidate
    raise FileNotFoundError(f"{exe} not found in {path}")
def download_file(url: AnyStr, name: AnyStr, download_if_exists: bool = True):
    """Download *url* to the local path *name*, resuming on interruptions.

    The payload is first written to "<name>__" and only moved into place
    once the full Content-Length has arrived, so a partial download never
    masquerades as a finished file.
    """
    logger.info(f"Trying to download {name} from {url}.")
    if not download_if_exists and os.path.exists(name):
        logger.info(f"File {name} already exists.")
        return
    tmp_name = f"{name}__"
    os.makedirs(os.path.dirname(tmp_name), exist_ok=True)
    with requests.Session() as session:
        # file:// support lets local mirrors be used with the same code path.
        session.mount("file://", FileAdapter())
        with open(tmp_name, "wb") as handle:
            response = session.get(url, stream=True)
            # NOTE(review): assumes the server always sends Content-Length;
            # a missing header raises KeyError here — confirm acceptable.
            file_length = int(response.headers["Content-Length"])
            current = 0
            max_attempts = 32
            attempts = max_attempts
            while attempts:
                for data in response.iter_content(chunk_size=4096):
                    current += len(data)
                    handle.write(data)
                if file_length == current:
                    break
                logger.warning(
                    f"Download interrupted. Resuming download from {url}: {current}/{file_length}."
                )
                # Ask for the remaining byte range and keep appending.
                headers = {"Range": f"bytes={current}-"}
                response = session.get(url, headers=headers, stream=True)
                attempts -= 1
            assert (
                attempts > 0
            ), f"Maximum failed resuming download attempts of {max_attempts} is exceeded."
    shutil.move(tmp_name, name)
    logger.info(f"File {name} was downloaded from {url}.")
def is_dir(url) -> bool:
    """A URL is treated as a directory exactly when it ends with '/'."""
    return url[-1:] == "/"
def find_files(url) -> List[AnyStr]:
    """Recursively list files under *url* (file://, http:// or https://),
    returned as paths relative to *url*."""
    def files_list_file_scheme(path, results=None):
        # Walk a local directory tree, accumulating absolute file paths.
        if results is None:
            results = []
        for p in os.listdir(path):
            new_path = os.path.join(path, p)
            if os.path.isdir(new_path):
                files_list_file_scheme(new_path, results)
            else:
                results.append(new_path)
        return results
    def files_list_http_scheme(url, results=None):
        # Scrape an auto-index HTML listing, recursing into subdirectories.
        if results is None:
            results = []
        page = requests.get(url).content
        bs = BeautifulSoup(page, "html.parser")
        links = bs.findAll("a", href=True)
        for link in links:
            href = link["href"]
            # Skip self and parent links emitted by directory indexes.
            if href == "./" or href == "../":
                continue
            new_url = urljoin(url, href)
            if is_dir(new_url):
                files_list_http_scheme(new_url, results)
            else:
                results.append(new_url)
        return results
    parse_result = urlparse(url)
    if parse_result.scheme == "file":
        # Strip the base directory prefix to make the paths relative.
        return [
            f.replace(parse_result.path, "")
            for f in files_list_file_scheme(parse_result.path)
        ]
    if parse_result.scheme == "http" or parse_result.scheme == "https":
        return [f.replace(url, "") for f in files_list_http_scheme(url)]
    assert False, parse_result
def normalize_url_to_path_dict(
    url_to_path: Dict[AnyStr, AnyStr]
) -> Dict[AnyStr, AnyStr]:
    """Expand directory URLs in *url_to_path* into per-file entries, in place.

    Every key ending in '/' is removed and replaced by one entry per file
    found beneath it, with the local path joined and percent-decoded.
    Returns the mutated dictionary.
    """
    for url in list(url_to_path.keys()):
        if is_dir(url):
            path = url_to_path[url]
            del url_to_path[url]
            for rel_path in find_files(url):
                abs_url = urljoin(url, rel_path)
                url_to_path[abs_url] = unquote(os.path.join(path, rel_path))
    return url_to_path
def download_files(url_to_path: Dict[AnyStr, AnyStr], download_if_exists: bool = True):
    """Download every url -> local-path pair concurrently via a thread pool."""
    with ThreadPool() as pool:
        pool.starmap(
            partial(download_file, download_if_exists=download_if_exists),
            url_to_path.items(),
        )
def is_exists_file_and_md5(name: AnyStr) -> bool:
    """True when both *name* and its companion .md5 checksum file exist."""
    return os.path.isfile(name) and os.path.isfile(md5_ext(name))
def is_verified(name: AnyStr) -> bool:
    """True when *name* exists and matches the checksum in its .md5 file."""
    return is_exists_file_and_md5(name) and check_md5(name, md5_ext(name))
def copy_overwrite(from_path: AnyStr, to_path: AnyStr):
    """Recursively copy *from_path* over *to_path*, replacing any existing tree."""
    if os.path.exists(to_path):
        # copytree refuses to write into an existing directory, so drop it.
        shutil.rmtree(to_path)
    shutil.copytree(from_path, to_path)
def make_symlink(target: AnyStr, link_name: AnyStr):
    """Create *link_name* pointing at *target*, tolerating an equivalent link.

    An existing symlink that already resolves to *target* is accepted
    silently; any other pre-existing file, or a symlink to a different
    target, re-raises the original OSError.
    """
    try:
        os.symlink(target, link_name)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise
        if not os.path.islink(link_name):
            raise
        if os.path.abspath(os.readlink(link_name)) != os.path.abspath(target):
            raise
| apache-2.0 |
grodrigues3/test-infra | scenarios/kubernetes_verify.py | 5 | 3718 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Need to figure out why this only fails on travis
# pylint: disable=bad-continuation
"""Runs verify/test-go checks for kubernetes/kubernetes."""
import argparse
import os
import re
import subprocess
import sys
# Maps a kubernetes release branch (short version or 'master') to the
# kubekins image series used to test it.
BRANCH_VERSION = {
    '1.2': '1.4',
    '1.3': '1.4',
    'master': '1.7',
}
# Maps an image series to the fully pinned kubekins-test docker tag.
VERSION_TAG = {
    '1.4': '1.4-v20161130-8958f82',
    '1.5': '1.5-v20161205-d664d14',
    '1.6': '1.6-v20161205-ad918bc',
    '1.7': '1.7-v20170713-c28e0556',
}
def check_output(*cmd):
    """Log and run the command, return output, raising on errors."""
    # Fix: ``print >>sys.stderr, 'Run:', cmd`` is Python-2-only syntax and is a
    # SyntaxError under Python 3. sys.stderr.write produces byte-identical
    # output ("Run: " + str(cmd) + newline) and parses on both versions.
    sys.stderr.write('Run: %s\n' % (cmd,))
    return subprocess.check_output(cmd)
def check(*cmd):
    """Log and run the command, raising on errors."""
    # Fix: replace the Python-2-only ``print >>`` statement with
    # sys.stderr.write (identical output, valid on Python 2 and 3).
    sys.stderr.write('Run: %s\n' % (cmd,))
    subprocess.check_call(cmd)
def main(branch, script, force):
    """Test branch using script, optionally forcing verify checks."""
    # If branch has 3-part version, only take first 2 parts.
    verify_branch = re.match(r'master|release-(\d+\.\d+)', branch)
    if not verify_branch:
        raise ValueError(branch)
    # Extract version if any.
    ver = verify_branch.group(1) or verify_branch.group(0)
    # Resolve the branch to its pinned kubekins-test image tag; a KeyError
    # here means the branch is not configured in BRANCH_VERSION/VERSION_TAG.
    tag = VERSION_TAG[BRANCH_VERSION.get(ver, ver)]
    force = 'y' if force else 'n'
    artifacts = '%s/_artifacts' % os.environ['WORKSPACE']
    k8s = os.getcwd()
    # Sanity check: must be run from a checkout named 'kubernetes'.
    if not os.path.basename(k8s) == 'kubernetes':
        raise ValueError(k8s)
    check('rm', '-rf', '.gsutil')
    remote = 'bootstrap-upstream'
    uri = 'https://github.com/kubernetes/kubernetes.git'
    # Recreate the upstream remote from scratch so its URL is always current,
    # and disable pushes to it.
    current_remotes = check_output('git', 'remote')
    if re.search('^%s$' % remote, current_remotes, flags=re.MULTILINE):
        check('git', 'remote', 'remove', remote)
    check('git', 'remote', 'add', remote, uri)
    check('git', 'remote', 'set-url', '--push', remote, 'no_push')
    # If .git is cached between runs this data may be stale
    check('git', 'fetch', remote)
    if not os.path.isdir(artifacts):
        os.makedirs(artifacts)
    # Run the verify script inside the pinned kubekins-test container,
    # mounting the repo, the artifacts dir, and the host docker socket
    # (the checks build images via docker-in-docker).
    check(
        'docker', 'run', '--rm=true', '--privileged=true',
        '-v', '/var/run/docker.sock:/var/run/docker.sock',
        '-v', '/etc/localtime:/etc/localtime:ro',
        '-v', '%s:/go/src/k8s.io/kubernetes' % k8s,
        '-v', '%s:/workspace/artifacts' % artifacts,
        '-e', 'KUBE_FORCE_VERIFY_CHECKS=%s' % force,
        '-e', 'KUBE_VERIFY_GIT_BRANCH=%s' % branch,
        '-e', 'REPO_DIR=%s' % k8s,  # hack/lib/swagger.sh depends on this
        'gcr.io/k8s-testimages/kubekins-test:%s' % tag,
        'bash', '-c', 'cd kubernetes && %s' % script,
    )
if __name__ == '__main__':
    # Command-line entry point: parse flags and launch the dockerized checks.
    PARSER = argparse.ArgumentParser(
        'Runs verification checks on the kubernetes repo')
    PARSER.add_argument(
        '--branch', default='master', help='Upstream target repo')
    PARSER.add_argument(
        '--force', action='store_true', help='Force all verify checks')
    PARSER.add_argument(
        '--script',
        default='./hack/jenkins/test-dockerized.sh',
        help='Script in kubernetes/kubernetes that runs checks')
    ARGS = PARSER.parse_args()
    main(ARGS.branch, ARGS.script, ARGS.force)
| apache-2.0 |
nghia-huynh/gem5-stable | configs/topologies/Pt2Pt.py | 47 | 2741 | # Copyright (c) 2011 Advanced Micro Devices, Inc.
# 2011 Massachusetts Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
# Tushar Krishna
from m5.params import *
from m5.objects import *
from BaseTopology import SimpleTopology
class Pt2Pt(SimpleTopology):
    # Fully-connected (point-to-point) topology: one router per controller,
    # plus a dedicated unidirectional internal link for every ordered pair
    # of distinct routers. Python 2 code (note ``xrange``), as used by gem5
    # config scripts of this era.
    description='Pt2Pt'

    def __init__(self, controllers):
        # controllers: list of controller SimObjects; one router is created
        # per controller in makeTopology.
        self.nodes = controllers

    def makeTopology(self, options, network, IntLink, ExtLink, Router):
        nodes = self.nodes

        # Create an individual router for each controller, and connect all to all.
        routers = [Router(router_id=i) for i in range(len(nodes))]
        network.routers = routers

        # External links: controller i <-> router i, link ids 0..N-1.
        ext_links = [ExtLink(link_id=i, ext_node=n, int_node=routers[i])
                     for (i, n) in enumerate(nodes)]
        network.ext_links = ext_links

        # Internal links: one per ordered (i, j) pair, i != j. Ids continue
        # after the external links (first internal id is len(nodes) + 1, so
        # id len(nodes) itself is intentionally unused).
        link_count = len(nodes)
        int_links = []
        for i in xrange(len(nodes)):
            for j in xrange(len(nodes)):
                if (i != j):
                    link_count += 1
                    int_links.append(IntLink(link_id=link_count,
                                             node_a=routers[i],
                                             node_b=routers[j]))
        network.int_links = int_links
| bsd-3-clause |
gwq5210/litlib | thirdparty/sources/boost_1_60_0/tools/build/test/chain.py | 44 | 1155 | #!/usr/bin/python
# Copyright 2003 Dave Abrahams
# Copyright 2002, 2003 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# This tests that :
# 1) the 'make' correctly assigns types to produced targets
# 2) if 'make' creates targets of type CPP, they are correctly used.
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
# In order to correctly link this app, 'b.cpp', created by a 'make' rule, should
# be compiled.
t.write("jamroot.jam", "import gcc ;")
t.write("jamfile.jam", r'''
import os ;
if [ os.name ] = NT
{
actions create
{
echo int main() {} > $(<)
}
}
else
{
actions create
{
echo "int main() {}" > $(<)
}
}
IMPORT $(__name__) : create : : create ;
exe a : l dummy.cpp ;
# Needs to be a static lib for Windows - main() cannot appear in DLL.
static-lib l : a.cpp b.cpp ;
make b.cpp : : create ;
''')
t.write("a.cpp", "")
t.write("dummy.cpp", "// msvc needs at least one object file\n")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/a.exe")
t.cleanup()
| gpl-3.0 |
HaebinShin/tensorflow | tensorflow/examples/skflow/language_model.py | 14 | 3299 | # encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn
### Training data
# Corpus file expected in the working directory (Europarl English side).
CORPUS_FILENAME = "europarl-v6.fr-en.en"
# Fixed window length (in byte ids) for model inputs/targets.
MAX_DOC_LENGTH = 10
def training_data(filename):
    """Yield raw lines from the corpus file.

    Fix: the previous version opened the file without ever closing it,
    leaking the handle. A ``with`` block ties the handle's lifetime to the
    generator (closed when the generator is exhausted or garbage-collected).
    """
    with open(filename) as f:
        for line in f:
            yield line
def iter_docs(docs, max_doc_length=None):
    """Split documents into fixed-length (input, target) id windows.

    Args:
        docs: iterable of integer-id sequences (e.g. byte-encoded lines).
        max_doc_length: window size; defaults to the module-level
            MAX_DOC_LENGTH, preserving the original behavior. Exposed as a
            parameter so the chunking is reusable for other window sizes.

    Yields:
        (inp, out) pairs of int32 arrays of length ``max_doc_length``:
        ``out`` is ``inp`` shifted left by one position (next-symbol
        targets); both are zero-padded past the end of the document.
    """
    if max_doc_length is None:
        max_doc_length = MAX_DOC_LENGTH
    for doc in docs:
        n_parts = int(math.ceil(float(len(doc)) / max_doc_length))
        for part in range(n_parts):
            offset_begin = part * max_doc_length
            offset_end = offset_begin + max_doc_length
            inp = np.zeros(max_doc_length, dtype=np.int32)
            out = np.zeros(max_doc_length, dtype=np.int32)
            # Slices shorter than the window leave trailing zeros (padding).
            inp[:min(offset_end - offset_begin, len(doc) - offset_begin)] = doc[offset_begin:offset_end]
            out[:min(offset_end - offset_begin, len(doc) - offset_begin - 1)] = doc[offset_begin + 1:offset_end + 1]
            yield inp, out
def unpack_xy(iter_obj):
    """Split an iterable of (X, y) pairs into two aligned generators."""
    first_copy, second_copy = itertools.tee(iter_obj)
    xs = (pair[0] for pair in first_copy)
    ys = (pair[1] for pair in second_copy)
    return xs, ys
# Import-time data pipeline: byte-encode the corpus and build the
# (input, target) window streams.
# NOTE(review): this runs on import and assumes CORPUS_FILENAME exists in
# the working directory — confirm before reuse.
byte_processor = learn.preprocessing.ByteProcessor(
    max_document_length=MAX_DOC_LENGTH)
data = training_data(CORPUS_FILENAME)
data = byte_processor.transform(data)
X, y = unpack_xy(iter_docs(data))
### Model
# RNN hidden state size shared by both model builders below.
HIDDEN_SIZE = 10
def seq_autoencoder(X, y):
    """Sequence auto-encoder with RNN."""
    # Legacy TF 0.x contrib.learn API: one-hot byte ids, GRU encoder, GRU
    # decoder projected back to the 256-way byte vocabulary.
    inputs = learn.ops.one_hot_matrix(X, 256)
    in_X, in_y, out_y = learn.ops.seq2seq_inputs(inputs, y, MAX_DOC_LENGTH, MAX_DOC_LENGTH)
    encoder_cell = tf.nn.rnn_cell.GRUCell(HIDDEN_SIZE)
    decoder_cell = tf.nn.rnn_cell.OutputProjectionWrapper(tf.nn.rnn_cell.GRUCell(HIDDEN_SIZE), 256)
    decoding, _, sampling_decoding, _ = learn.ops.rnn_seq2seq(in_X, in_y, encoder_cell, decoder_cell)
    return learn.ops.sequence_classifier(decoding, out_y, sampling_decoding)
def get_language_model(hidden_size):
    """Returns a language model with given hidden size."""
    def language_model(X, y):
        # One-hot the byte ids, unroll along the time axis, and run a GRU
        # projected to the 256-way byte vocabulary (legacy TF 0.x API).
        inputs = learn.ops.one_hot_matrix(X, 256)
        inputs = tf.unpack(inputs, axis=1)
        target = tf.unpack(y, axis=1)
        encoder_cell = tf.nn.rnn_cell.OutputProjectionWrapper(tf.nn.rnn_cell.GRUCell(hidden_size),256)
        output, _ = tf.nn.rnn(encoder_cell, inputs, dtype=tf.float32)
        return learn.ops.sequence_classifier(output, target)
    return language_model
### Training model.
# Import-time training loop: fits the byte-level language model on the
# streaming (X, y) windows built above (legacy contrib.learn estimator).
estimator = learn.TensorFlowEstimator(model_fn=get_language_model(HIDDEN_SIZE),
                                      n_classes=256, optimizer='Adam',
                                      learning_rate=0.01, steps=1000,
                                      batch_size=64, continue_training=True)
estimator.fit(X, y)
| apache-2.0 |
oceanobservatories/mi-instrument | mi/dataset/parser/test/test_zplsc_c_dcl.py | 5 | 5641 | #!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_zplsc_c_dcl
@file mi-dataset/mi/dataset/parser/test/test_zplsc_c_dcl.py
@author Richard Han (Raytheon), Ronald Ronquillo (Raytheon)
@brief Test code for a zplsc_c_dcl data parser
"""
import os
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.zplsc_c.dcl.resource import RESOURCE_PATH
from mi.dataset.parser.utilities import particle_to_yml
from mi.dataset.parser.zplsc_c_dcl import ZplscCDclParser
from mi.dataset.test.test_parser import ParserUnitTestCase
log = get_logger()

# Parser module and particle class handed to the parser driver config.
MODULE_NAME = 'mi.dataset.parser.zplsc_c_dcl'
CLASS_NAME = 'ZplscCInstrumentDataParticle'
# Stream name of the particles the parser emits (not referenced below).
PARTICLE_TYPE = 'zplsc_c_instrument'
@attr('UNIT', group='mi')
class ZplscCDclParserUnitTestCase(ParserUnitTestCase):
    """
    Zplsc_c_dcl Parser unit test suite
    """

    def create_zplsc_c_dcl_parser(self, file_handle):
        """
        This function creates a ZplscCDCL parser for recovered data.
        """
        return ZplscCDclParser(self.config, file_handle, self.rec_exception_callback)

    def file_path(self, filename):
        # Resolve a test fixture name against the driver's resource directory.
        log.debug('resource path = %s, file name = %s', RESOURCE_PATH, filename)
        return os.path.join(RESOURCE_PATH, filename)

    def create_yml(self, particles, filename):
        # Helper used when (re)generating expected-results YAML fixtures.
        particle_to_yml(particles, os.path.join(RESOURCE_PATH, filename))

    def rec_exception_callback(self, exception):
        """
        Call back method to watch what comes in via the exception callback
        """
        self.exception_callback_value.append(exception)
        self.exceptions_detected += 1

    def setUp(self):
        ParserUnitTestCase.setUp(self)
        # Minimal parser config: module + particle class under test.
        self.config = {
            DataSetDriverConfigKeys.PARTICLE_MODULE: MODULE_NAME,
            DataSetDriverConfigKeys.PARTICLE_CLASS: CLASS_NAME
        }
        self.exception_callback_value = []
        self.exceptions_detected = 0

    def test_zplsc_c_dcl_parser(self):
        """
        Test Zplsc C DCL parser
        Just test that it is able to parse the file and records are generated.
        """
        log.debug('===== START TEST ZPLSC_C_DCL Parser =====')
        with open(self.file_path('20150406.zplsc.log')) as in_file:
            parser = self.create_zplsc_c_dcl_parser(in_file)
            # In a single read, get all particles in this file.
            result = parser.get_records(15)
            # This fixture contains a single record.
            self.assertEqual(len(result), 1)
            self.assertListEqual(self.exception_callback_value, [])
        log.debug('===== END TEST ZPLSC_C_DCL Parser =====')

    def test_telem(self):
        """
        Read a file and pull out a data particle.
        Verify that the results are those we expected.
        """
        log.debug('===== START TEST TELEM =====')
        with open(self.file_path('20150407.zplsc.log')) as in_file:
            parser = self.create_zplsc_c_dcl_parser(in_file)
            # In a single read, get all particles for this file.
            result = parser.get_records(15)
            self.assertEqual(len(result), 15)
            # Compare against the pre-generated expected-results YAML.
            self.assert_particles(result, '20150407.zplsc.yml', RESOURCE_PATH)
            self.assertListEqual(self.exception_callback_value, [])
        log.debug('===== END TEST TELEM =====')

    def test_variable_num_of_channels(self):
        """
        Read a file and pull out a data particle.
        Verify that the results are those we expected.
        All test log files usually contain 4 channels with 19 bins each.
        This tests a manually edited log file to exercise the logic for handling a variable
        number of channels and number of bins.
        """
        log.debug('===== START TEST VARIABLE NUM OF CHANNELS =====')
        with open(self.file_path('20150407.zplsc_var_channels.log')) as in_file:
            parser = self.create_zplsc_c_dcl_parser(in_file)
            # In a single read, get all particles for this file.
            result = parser.get_records(15)
            self.assertEqual(len(result), 15)
            self.assert_particles(result, '20150407.zplsc_var_channels.yml', RESOURCE_PATH)
            self.assertListEqual(self.exception_callback_value, [])
        log.debug('===== END TEST VARIABLE NUM OF CHANNELS =====')

    def test_bad_data(self):
        """
        Ensure that bad data is skipped when it exists.
        See '20150407.zplsc_corrupt.log' file for line by line details of expected errors.
        """
        log.debug('===== START TEST BAD DATA =====')
        with open(self.file_path('20150407.zplsc_corrupt.log')) as in_file:
            parser = self.create_zplsc_c_dcl_parser(in_file)
            # In a single read, get all particles for this file.
            result = parser.get_records(100)
            # One good record survives; six corrupt lines report exceptions.
            self.assertEqual(len(result), 1)
            self.assertEqual(len(self.exception_callback_value), 6)
            for i in range(len(self.exception_callback_value)):
                log.debug('Exception: %s', self.exception_callback_value[i])
        log.debug('===== END TEST BAD DATA =====')

    def test_bug_9692(self):
        """
        Test to verify change made works with DCL
        timestamps containing seconds >59
        """
        with open(self.file_path('20150407A.zplsc.log')) as in_file:
            parser = self.create_zplsc_c_dcl_parser(in_file)
            # In a single read, get all particles for this file.
            result = parser.get_records(5)
            self.assertEqual(len(result), 3)
            self.assertListEqual(self.exception_callback_value, [])
| bsd-2-clause |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/scipy/optimize/_lsq/lsq_linear.py | 40 | 12643 | """Linear least squares with bound constraints on independent variables."""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import norm, lstsq
from scipy.sparse import issparse, csr_matrix
from scipy.sparse.linalg import LinearOperator, lsmr
from scipy.optimize import OptimizeResult
from .common import in_bounds, compute_grad
from .trf_linear import trf_linear
from .bvls import bvls
def prepare_bounds(bounds, n):
    """Broadcast scalar lower/upper bounds to length-``n`` float arrays."""
    lower = np.asarray(bounds[0], dtype=float)
    upper = np.asarray(bounds[1], dtype=float)
    if lower.ndim == 0:
        lower = np.resize(lower, n)
    if upper.ndim == 0:
        upper = np.resize(upper, n)
    return lower, upper
# Human-readable descriptions keyed by the status codes stored in
# OptimizeResult.status by `lsq_linear` (status > 0 means success).
TERMINATION_MESSAGES = {
    -1: "The algorithm was not able to make progress on the last iteration.",
    0: "The maximum number of iterations is exceeded.",
    1: "The first-order optimality measure is less than `tol`.",
    2: "The relative change of the cost function is less than `tol`.",
    3: "The unconstrained solution is optimal."
}
def lsq_linear(A, b, bounds=(-np.inf, np.inf), method='trf', tol=1e-10,
               lsq_solver=None, lsmr_tol=None, max_iter=None, verbose=0):
    r"""Solve a linear least-squares problem with bounds on the variables.

    Given a m-by-n design matrix A and a target vector b with m elements,
    `lsq_linear` solves the following optimization problem::

        minimize 0.5 * ||A x - b||**2
        subject to lb <= x <= ub

    This optimization problem is convex, hence a found minimum (if iterations
    have converged) is guaranteed to be global.

    Parameters
    ----------
    A : array_like, sparse matrix of LinearOperator, shape (m, n)
        Design matrix. Can be `scipy.sparse.linalg.LinearOperator`.
    b : array_like, shape (m,)
        Target vector.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on independent variables. Defaults to no bounds.
        Each array must have shape (n,) or be a scalar, in the latter
        case a bound will be the same for all variables. Use ``np.inf`` with
        an appropriate sign to disable bounds on all or some variables.
    method : 'trf' or 'bvls', optional
        Method to perform minimization.

        * 'trf' : Trust Region Reflective algorithm adapted for a linear
          least-squares problem. This is an interior-point-like method
          and the required number of iterations is weakly correlated with
          the number of variables.
        * 'bvls' : Bounded-Variable Least-Squares algorithm. This is
          an active set method, which requires the number of iterations
          comparable to the number of variables. Can't be used when `A` is
          sparse or LinearOperator.

        Default is 'trf'.
    tol : float, optional
        Tolerance parameter. The algorithm terminates if a relative change
        of the cost function is less than `tol` on the last iteration.
        Additionally the first-order optimality measure is considered:

        * ``method='trf'`` terminates if the uniform norm of the gradient,
          scaled to account for the presence of the bounds, is less than
          `tol`.
        * ``method='bvls'`` terminates if Karush-Kuhn-Tucker conditions
          are satisfied within `tol` tolerance.
    lsq_solver : {None, 'exact', 'lsmr'}, optional
        Method of solving unbounded least-squares problems throughout
        iterations:

        * 'exact' : Use dense QR or SVD decomposition approach. Can't be
          used when `A` is sparse or LinearOperator.
        * 'lsmr' : Use `scipy.sparse.linalg.lsmr` iterative procedure
          which requires only matrix-vector product evaluations. Can't
          be used with ``method='bvls'``.

        If None (default) the solver is chosen based on type of `A`.
    lsmr_tol : None, float or 'auto', optional
        Tolerance parameters 'atol' and 'btol' for `scipy.sparse.linalg.lsmr`
        If None (default), it is set to ``1e-2 * tol``. If 'auto', the
        tolerance will be adjusted based on the optimality of the current
        iterate, which can speed up the optimization process, but is not always
        reliable.
    max_iter : None or int, optional
        Maximum number of iterations before termination. If None (default), it
        is set to 100 for ``method='trf'`` or to the number of variables for
        ``method='bvls'`` (not counting iterations for 'bvls' initialization).
    verbose : {0, 1, 2}, optional
        Level of algorithm's verbosity:

        * 0 : work silently (default).
        * 1 : display a termination report.
        * 2 : display progress during iterations.

    Returns
    -------
    OptimizeResult with the following fields defined:
    x : ndarray, shape (n,)
        Solution found.
    cost : float
        Value of the cost function at the solution.
    fun : ndarray, shape (m,)
        Vector of residuals at the solution.
    optimality : float
        First-order optimality measure. The exact meaning depends on `method`,
        refer to the description of `tol` parameter.
    active_mask : ndarray of int, shape (n,)
        Each component shows whether a corresponding constraint is active
        (that is, whether a variable is at the bound):

        * 0 : a constraint is not active.
        * -1 : a lower bound is active.
        * 1 : an upper bound is active.

        Might be somewhat arbitrary for the `trf` method as it generates a
        sequence of strictly feasible iterates and active_mask is determined
        within a tolerance threshold.
    nit : int
        Number of iterations. Zero if the unconstrained solution is optimal.
    status : int
        Reason for algorithm termination:

        * -1 : the algorithm was not able to make progress on the last
          iteration.
        * 0 : the maximum number of iterations is exceeded.
        * 1 : the first-order optimality measure is less than `tol`.
        * 2 : the relative change of the cost function is less than `tol`.
        * 3 : the unconstrained solution is optimal.
    message : str
        Verbal description of the termination reason.
    success : bool
        True if one of the convergence criteria is satisfied (`status` > 0).

    See Also
    --------
    nnls : Linear least squares with non-negativity constraint.
    least_squares : Nonlinear least squares with bounds on the variables.

    Notes
    -----
    The algorithm first computes the unconstrained least-squares solution by
    `numpy.linalg.lstsq` or `scipy.sparse.linalg.lsmr` depending on
    `lsq_solver`. This solution is returned as optimal if it lies within the
    bounds.

    Method 'trf' runs the adaptation of the algorithm described in [STIR]_ for
    a linear least-squares problem. The iterations are essentially the same as
    in the nonlinear least-squares algorithm, but as the quadratic function
    model is always accurate, we don't need to track or modify the radius of
    a trust region. The line search (backtracking) is used as a safety net
    when a selected step does not decrease the cost function. Read more
    detailed description of the algorithm in `scipy.optimize.least_squares`.

    Method 'bvls' runs a Python implementation of the algorithm described in
    [BVLS]_. The algorithm maintains active and free sets of variables, on
    each iteration chooses a new variable to move from the active set to the
    free set and then solves the unconstrained least-squares problem on free
    variables. This algorithm is guaranteed to give an accurate solution
    eventually, but may require up to n iterations for a problem with n
    variables. Additionally, an ad-hoc initialization procedure is
    implemented, that determines which variables to set free or active
    initially. It takes some number of iterations before actual BVLS starts,
    but can significantly reduce the number of further iterations.

    References
    ----------
    .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
              and Conjugate Gradient Method for Large-Scale Bound-Constrained
              Minimization Problems," SIAM Journal on Scientific Computing,
              Vol. 21, Number 1, pp 1-23, 1999.
    .. [BVLS] P. B. Start and R. L. Parker, "Bounded-Variable Least-Squares:
              an Algorithm and Applications", Computational Statistics, 10,
              129-141, 1995.

    Examples
    --------
    In this example a problem with a large sparse matrix and bounds on the
    variables is solved.

    >>> from scipy.sparse import rand
    >>> from scipy.optimize import lsq_linear
    ...
    >>> np.random.seed(0)
    ...
    >>> m = 20000
    >>> n = 10000
    ...
    >>> A = rand(m, n, density=1e-4)
    >>> b = np.random.randn(m)
    ...
    >>> lb = np.random.randn(n)
    >>> ub = lb + 1
    ...
    >>> res = lsq_linear(A, b, bounds=(lb, ub), lsmr_tol='auto', verbose=1)
    # may vary
    The relative change of the cost function is less than `tol`.
    Number of iterations 16, initial cost 1.5039e+04, final cost 1.1112e+04,
    first-order optimality 4.66e-08.
    """
    if method not in ['trf', 'bvls']:
        raise ValueError("`method` must be 'trf' or 'bvls'")

    if lsq_solver not in [None, 'exact', 'lsmr']:
        raise ValueError("`solver` must be None, 'exact' or 'lsmr'.")

    if verbose not in [0, 1, 2]:
        raise ValueError("`verbose` must be in [0, 1, 2].")

    if issparse(A):
        A = csr_matrix(A)
    elif not isinstance(A, LinearOperator):
        A = np.atleast_2d(A)

    if method == 'bvls':
        if lsq_solver == 'lsmr':
            raise ValueError("method='bvls' can't be used with "
                             "lsq_solver='lsmr'")

        if not isinstance(A, np.ndarray):
            raise ValueError("method='bvls' can't be used with `A` being "
                             "sparse or LinearOperator.")

    if lsq_solver is None:
        if isinstance(A, np.ndarray):
            lsq_solver = 'exact'
        else:
            lsq_solver = 'lsmr'
    elif lsq_solver == 'exact' and not isinstance(A, np.ndarray):
        raise ValueError("`exact` solver can't be used when `A` is "
                         "sparse or LinearOperator.")

    if len(A.shape) != 2:  # No ndim for LinearOperator.
        raise ValueError("`A` must have at most 2 dimensions.")

    if len(bounds) != 2:
        raise ValueError("`bounds` must contain 2 elements.")

    if max_iter is not None and max_iter <= 0:
        raise ValueError("`max_iter` must be None or positive integer.")

    m, n = A.shape

    b = np.atleast_1d(b)
    if b.ndim != 1:
        raise ValueError("`b` must have at most 1 dimension.")

    if b.size != m:
        raise ValueError("Inconsistent shapes between `A` and `b`.")

    lb, ub = prepare_bounds(bounds, n)

    # BUG FIX: this check previously used `and`, so it only raised when BOTH
    # bounds were malformed; a single bad bound slipped through and produced a
    # confusing broadcasting failure (or silently wrong behavior) below.
    if lb.shape != (n,) or ub.shape != (n,):
        raise ValueError("Bounds have wrong shape.")

    if np.any(lb >= ub):
        raise ValueError("Each lower bound must be strictly less than each "
                         "upper bound.")

    if lsq_solver == 'exact':
        # rcond=-1 pins the legacy cutoff behavior and silences the numpy
        # 1.14+ FutureWarning about the changing default.
        x_lsq = np.linalg.lstsq(A, b, rcond=-1)[0]
    elif lsq_solver == 'lsmr':
        x_lsq = lsmr(A, b, atol=tol, btol=tol)[0]

    # If the unconstrained solution already satisfies the bounds, it is the
    # global optimum of this convex problem: return it without iterating.
    if in_bounds(x_lsq, lb, ub):
        r = A.dot(x_lsq) - b
        cost = 0.5 * np.dot(r, r)
        termination_status = 3
        termination_message = TERMINATION_MESSAGES[termination_status]
        g = compute_grad(A, r)
        g_norm = norm(g, ord=np.inf)

        if verbose > 0:
            print(termination_message)
            print("Final cost {0:.4e}, first-order optimality {1:.2e}"
                  .format(cost, g_norm))

        return OptimizeResult(
            x=x_lsq, fun=r, cost=cost, optimality=g_norm,
            active_mask=np.zeros(n), nit=0, status=termination_status,
            message=termination_message, success=True)

    if method == 'trf':
        res = trf_linear(A, b, x_lsq, lb, ub, tol, lsq_solver, lsmr_tol,
                         max_iter, verbose)
    elif method == 'bvls':
        res = bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose)

    res.message = TERMINATION_MESSAGES[res.status]
    res.success = res.status > 0

    if verbose > 0:
        print(res.message)
        print("Number of iterations {0}, initial cost {1:.4e}, "
              "final cost {2:.4e}, first-order optimality {3:.2e}."
              .format(res.nit, res.initial_cost, res.cost, res.optimality))

    del res.initial_cost

    return res
| gpl-3.0 |
google/neuroglancer | python/examples/extend_segments_tool.py | 3 | 9608 | #!/usr/bin/env python2
"""Tool for extending via equivalences a set of segments."""
from __future__ import absolute_import, print_function
import argparse
import copy
import os
import webbrowser
import neuroglancer
from neuroglancer.json_utils import decode_json, encode_json
neuroglancer.set_static_content_source(url='http://localhost:8080')
def get_segmentation_layer(layers):
    """Return the first layer whose backing layer is a SegmentationLayer.

    Returns None when no segmentation layer is present (matching the
    original's implicit None on fall-through).
    """
    return next(
        (candidate for candidate in layers
         if isinstance(candidate.layer, neuroglancer.SegmentationLayer)),
        None)
class Annotator(object):
def __init__(self, filename):
self.filename = filename
self.point_annotation_layer_name = 'false-merges'
self.states = []
self.state_index = None
viewer = self.viewer = neuroglancer.Viewer()
self.other_state_segment_ids = dict()
viewer.actions.add('anno-next-state', lambda s: self.next_state())
viewer.actions.add('anno-prev-state', lambda s: self.prev_state())
viewer.actions.add('anno-save', lambda s: self.save())
viewer.actions.add('anno-show-all', lambda s: self.set_combined_state())
viewer.actions.add('anno-add-segments-from-state',
lambda s: self.add_segments_from_state(s.viewer_state))
with viewer.config_state.txn() as s:
s.input_event_bindings.viewer['pageup'] = 'anno-prev-state'
s.input_event_bindings.viewer['pagedown'] = 'anno-next-state'
s.input_event_bindings.viewer['control+keys'] = 'anno-save'
s.input_event_bindings.viewer['control+keya'] = 'anno-show-all'
viewer.shared_state.add_changed_callback(self.on_state_changed)
self.cur_message = None
if not self.load():
self.set_state_index(None)
def on_state_changed(self):
self.update_message()
def update_message(self):
if self.state_index is None:
message = '[No state selected]'
else:
message = '[%d/%d] ' % (self.state_index, len(self.states))
segments = self.get_state_segment_ids(self.viewer.state)
warnings = []
for segment_id in segments:
other_state = self.other_state_segment_ids.get(segment_id)
if other_state is not None:
warnings.append('Segment %d also in state %d' % (segment_id, other_state))
if warnings:
message += 'WARNING: ' + ', '.join(warnings)
if message != self.cur_message:
with self.viewer.config_state.txn() as s:
if message is not None:
s.status_messages['status'] = message
else:
s.status_messages.pop('status')
self.cur_message = message
def load(self):
if not os.path.exists(self.filename):
return False
self.state_index = None
with open(self.filename, 'r') as f:
loaded_state = decode_json(f.read())
self.states = [neuroglancer.ViewerState(x) for x in loaded_state['states']]
self.set_state_index(loaded_state['state_index'])
return True
def set_state_index_relative(self, amount):
if self.state_index is None:
new_state = 0
else:
new_state = (self.state_index + amount + len(self.states)) % len(self.states)
self.set_state_index(new_state)
def next_state(self):
self.set_state_index_relative(1)
def prev_state(self):
self.set_state_index_relative(-1)
def set_state_index(self, index):
self._grab_viewer_state()
self.state_index = index
if index is None:
self.viewer.set_state(neuroglancer.ViewerState())
else:
self.viewer.set_state(self.states[index])
other_ids = self.other_state_segment_ids
other_ids.clear()
other_ids[0] = -1
for i, state in enumerate(self.states):
if i == self.state_index:
continue
for x in self.get_state_segment_ids(state):
other_ids[x] = i
self.update_message()
def get_duplicate_segment_ids(self):
self._grab_viewer_state()
other_ids = dict()
other_ids[0] = [-1]
for i, state in enumerate(self.states):
for x in self.get_state_segment_ids(state):
other_ids.setdefault(x, []).append(i)
for segment_id in other_ids:
state_numbers = other_ids[segment_id]
if len(state_numbers) > 1:
print('%d in %r' % (segment_id, state_numbers))
def _grab_viewer_state(self):
if self.state_index is not None:
self.states[self.state_index] = copy.deepcopy(self.viewer.state)
def save(self):
self._grab_viewer_state()
tmp_filename = self.filename + '.tmp'
with open(tmp_filename, 'wb') as f:
f.write(
encode_json(
dict(states=[s.to_json() for s in self.states], state_index=self.state_index)))
os.rename(tmp_filename, self.filename)
print('Saved state to: %s' % (self.filename, ))
def get_state_segment_ids(self, state):
return get_segmentation_layer(state.layers).segments
def get_existing_segment_ids(self):
ids = set()
for state in self.states:
ids.update(self.get_state_segment_ids(state))
return ids
def add_segments_from_state(self, base_state):
if isinstance(base_state, basestring):
base_state = neuroglancer.parse_url(base_state)
elif isinstance(base_state, dict):
base_state = neuroglancer.ViewerState(base_state)
segment_ids = self.get_state_segment_ids(base_state)
existing_segment_ids = self.get_existing_segment_ids()
for segment_id in segment_ids:
if segment_id in existing_segment_ids:
print('Skipping redundant segment id %d' % segment_id)
continue
self.states.append(self.make_initial_state(segment_id, base_state))
if self.state_index is None:
self.next_state()
def make_initial_state(self, segment_id, base_state):
state = copy.deepcopy(base_state)
segments = self.get_state_segment_ids(state)
segments.clear()
segments.add(segment_id)
state.layers[self.point_annotation_layer_name] = neuroglancer.PointAnnotationLayer()
return state
def remove_zero_segments(self):
for state in self.states:
segment_ids = self.get_state_segment_ids(state)
if 0 in segment_ids:
segment_ids.remove(0)
def set_combined_state(self):
state = self.make_combined_state()
if state is None:
print('No states')
else:
self.set_state_index(None)
self.viewer.set_state(state)
def make_combined_state(self):
if len(self.states) == 0:
return None
state = copy.deepcopy(self.states[0])
layer = get_segmentation_layer(state.layers)
layer.segments.clear()
points = state.layers[self.point_annotation_layer_name].points
for other_state in self.states:
other_segments = self.get_state_segment_ids(other_state)
if other_segments:
u_result = layer.equivalences.union(*other_segments)
layer.segments.add(u_result)
points.extend(other_state.layers[self.point_annotation_layer_name].points)
return state
def show(self):
webbrowser.open_new(self.viewer.get_viewer_url())
def get_viewer_url(self):
return self.viewer.get_viewer_url()
def get_sets(self):
sets = []
for other_state in self.states:
other_segments = self.get_state_segment_ids(other_state)
if other_segments:
sets.append(sorted(other_segments))
return sets
    def print_combined_state_url(self):
        """Print a neuroglancer URL encoding the combined state."""
        print(neuroglancer.to_url(self.make_combined_state()))
if __name__ == '__main__':
    ap = argparse.ArgumentParser()
    ap.add_argument('filename', type=str)
    ap.add_argument(
        '-a',
        '--add-segments-from-url',
        type=str,
        nargs='*',
        default=[],
        help='Add a new state for each selected segment specified by a Neuroglancer URL.')
    ap.add_argument(
        '-n', '--no-webbrowser', action='store_true', help='Don\'t open the webbrowser.')
    ap.add_argument('--print-sets', action='store_true', help='Print the sets of supervoxels.')
    ap.add_argument(
        '--print-combined-state',
        action='store_true',
        help='Prints a neuroglancer link for the combined state.')
    ap.add_argument(
        '--print-summary',
        action='store_true',
        # BUG FIX: this help text was a copy-paste of --print-combined-state's.
        help='Prints an HTML summary: a combined-state link plus the supervoxel sets.')
    args = ap.parse_args()
    anno = Annotator(args.filename)
    for url in args.add_segments_from_url:
        anno.add_segments_from_state(url)
    if args.print_sets:
        print(repr(anno.get_sets()))
    if args.print_combined_state:
        anno.print_combined_state_url()
    if args.print_summary:
        print('<html>')
        print('<h1>%s</h1>' % args.filename)
        print(
            '<a href="%s">Neuroglancer</a><br/>' % neuroglancer.to_url(anno.make_combined_state()))
        print(repr(anno.get_sets()))
        print('</html>')
    else:
        # Interactive mode: show the viewer URL and optionally open a browser.
        print(anno.get_viewer_url())
        if not args.no_webbrowser:
            anno.show()
| apache-2.0 |
ibackus/ICgen | calc_rho.py | 3 | 12109 | # -*- coding: utf-8 -*-
"""
Calculates rho(z) to maintain hydrostatic equilibrium in a thin disc.
Assumes uniform temperature in the disc, and an infinite disc where
rho can be treated (at least locally) as only a function of z.
Created on Mon Jan 20 12:30:06 2014
@author: ibackus
"""
# ICgen packages
import isaac
# External packages
import numpy as np
import scipy
import scipy.integrate as nInt
import scipy.optimize as opt
from scipy.interpolate import interp1d
from scipy.optimize.nonlin import NoConvergence
import pynbody
from pynbody.array import SimArray
from warnings import warn
import sys
def rho_z(sigma, T, r, settings):
    """
    rho,z = rho_z(...)

    Calculates rho(z) to maintain hydrostatic equilibrium in a thin disc.
    Assumes uniform temperature in the disc, and an infinite disc where
    rho can be treated (locally) as only a function of z.
    Only calculates for z>=0, since the density is assumed to be symmetric
    about z=0.

    The initial guess for rho (a gaussian) only really seems to work for
    Mstar >> Mdisc.  Otherwise the solution can diverge violently.

    * NUMERICAL CALCULATION OF RHO(Z) *
    1) Make an initial guess for I, the integral of rho from z to inf
       (an error function)
    2) Tune the length scale of the guess to minimize the residual of the
       differential equation governing I
    3) Root-find I(z) with the boundary condition I(0) = sigma/2
    4) Set rho = -dI/dz
    5) Root-find rho(z) for the diff. eq. governing rho
    6) Rescale rho so that Integral(rho) = I(0)
    7) Repeat (5)-(6) until the rescale factor is closer to unity than rho_tol

    Steps 5-7 are done because the solution for I does not satisfy the
    diff. eq. for rho very well, while rescaling lets rho satisfy the
    surface density profile.

    * Arguments *
    sigma    - surface density at r (SimArray)
    T        - temperature at r (SimArray)
    r        - radius at which rho is calculated; should have units
    settings - ICobj settings (ie, ICobj.settings)

    * Output *
    Returns a 1D SimArray of rho(z) and a 1D SimArray of z.
    NOTE(review): the final return hard-codes 'Msol au**-3' / 'au' units
    rather than the zmax units used on the early-return paths -- confirm
    this matches the caller's expectation.
    """
    # Parse settings
    rho_tol = settings.rho_calc.rho_tol
    nz = settings.rho_calc.nz
    zmax = settings.rho_calc.zmax
    m = settings.physical.m
    M = settings.physical.M

    # Physical constants (pynbody resolves 'k' and 'G' symbolically)
    kB = SimArray(1.0, 'k')
    G = SimArray(1.0, 'G')

    # Set up default units
    mass_unit = M.units
    length_unit = zmax.units
    r = (r.in_units(length_unit)).copy()

    # Initial conditions/physical parameters
    rho_int = 0.5 * sigma.in_units(mass_unit / length_unit**2)  # Integral of rho, 0 to inf
    a = (G * M * m / (kB * T)).in_units(length_unit)
    b = (2 * np.pi * G * m / (kB * T)).in_units(length_unit / mass_unit)
    z0guess = np.sqrt(2 * r * r * r / a).in_units(length_unit)  # Est. scale height of disk
    z0_dummy = (2 / (b * sigma)).in_units(length_unit)  # sech^2 scale height (echoed only)
    z = np.linspace(0.0, zmax, nz)
    dz = z[[1]] - z[[0]]

    # Echo parameters used.  NOTE: print() with a single argument behaves
    # identically under Python 2 and 3 (the originals were Py2 statements).
    print('***********************************************')
    print('* Calculating rho(z)')
    print('***********************************************')
    print('sigma = {0} {1}'.format(sigma, sigma.units))
    print('zmax = {0} {1}'.format(zmax, zmax.units))
    print('r = {0} {1}'.format(r, r.units))
    print('molecular mass = {0} {1}'.format(m, m.units))
    print('Star mass = {0} {1}'.format(M, M.units))
    print('')
    print('rho_tol = {0}'.format(rho_tol))
    print('nz = {0}'.format(nz))
    print('***********************************************')
    print('a = {0} {1}'.format(a, a.units))
    print('b = {0} {1}'.format(b, b.units))
    print('z0guess = {0} {1}'.format(z0guess, z0guess.units))
    print('***********************************************')
    print('z0 (from sech^2) = {0} {1}'.format(z0_dummy, z0_dummy.units))

    # --------------------------------------------------------
    # STRIP THE UNITS FROM EVERYTHING!!!
    # Many scipy/numpy functions used below cannot handle pynbody units.
    # Units are re-introduced before returning.
    # --------------------------------------------------------
    rho_int, a, b, z0guess, z0_dummy, z, dz, r, T, sigma \
        = isaac.strip_units([rho_int, a, b, z0guess, z0_dummy, z, dz, r, T, sigma])

    # --------------------------------------------------------
    # Check sigma and T: degenerate inputs yield rho = 0 everywhere
    # --------------------------------------------------------
    if sigma < 1e-100:
        warn('Sigma too small. setting rho = 0')
        rho0 = np.zeros(len(z))
        rho0 = isaac.set_units(rho0, mass_unit / length_unit**3)
        z = isaac.set_units(z, length_unit)
        return rho0, z
    if T > 1e100:
        warn('Temperature too large. Setting rho = 0')
        rho0 = np.zeros(len(z))
        rho0 = isaac.set_units(rho0, mass_unit / length_unit**3)
        z = isaac.set_units(z, length_unit)
        return rho0, z

    # -------------------------------------------------------------------
    # FUNCTION DEFINITIONS
    # -------------------------------------------------------------------
    def dI_dz(I_in):
        """Finite difference approx. of dI/dz, assuming I is odd around I(0)."""
        I = I_in.copy()
        dI = np.zeros(len(I))
        # Fourth order center differencing (one-sided forms near z = 0)
        dI[0] = (-I[2] + 8 * I[1] - 7 * I[0]) / (6 * dz)
        dI[1] = (-I[3] + 8 * I[2] - 6 * I[0] - I[1]) / (12 * dz)
        dI[2:-2] = (-I[4:] + 8 * I[3:-1] - 8 * I[1:-3] + I[0:-4]) / (12 * dz)
        # Second order backward differencing for right edge
        dI[-2:] = (3 * I[-2:] - 4 * I[-3:-1] + I[-4:-2]) / (2 * dz)
        return dI

    def d2I_dz2(I_in):
        """Finite difference for d2I/dz2, assuming it is 0 at the origin."""
        I = I_in.copy()
        d2I = np.zeros(len(I))
        # Boundary condition
        d2I[0] = 0
        # Centered 4th order finite difference
        d2I[1] = (-I[3] + 16 * I[2] - 30 * I[1] + 16 * I[0] - (2 * I[0] - I[1])) / (12 * dz**2)
        d2I[2:-2] = (-I[4:] + 16 * I[3:-1] - 30 * I[2:-2] + 16 * I[1:-3] - I[0:-4]) / (12 * (dz**2))
        # Second order backward difference for right edge
        d2I[-2:] = (-2 * I[-2:] + 5 * I[-3:-1] - 4 * I[-4:-2] + I[-5:-3]) / dz**2
        return d2I

    def Ires(I_in):
        """Residual of the differential equation governing I, the integral
        of rho from z to "infinity"."""
        # DEFINE INITIAL CONDITION:
        I = I_in.copy()
        I[0] = rho_int
        weight = 1.0
        res = d2I_dz2(I) + dI_dz(I) * (a * z / ((z**2 + r**2)**(1.5)) + 2 * b * (I[0] - I))
        return weight * res

    def drho_dz(rho_in):
        """Fourth order, centered finite difference for d(rho)/dz, assuming
        rho is even.  The right-hand boundary uses backward differencing."""
        rho = rho_in.copy()
        drho = np.zeros(len(rho))
        drho[0] = 0.0  # boundary condition, rho[0] = max(rho)
        drho[1] = (-rho[3] + 8 * rho[2] - 8 * rho[0] + rho[1]) / (12 * dz)
        drho[2:-2] = (-rho[4:] + 8 * rho[3:-1] - 8 * rho[1:-3] + rho[0:-4]) / (12 * dz)
        drho[-2:] = (3 * rho[-2:] - 4 * rho[-3:-1] + rho[-4:-2]) / (2 * dz)
        return drho

    def residual(rho_in):
        """Residual of the hydrostatic-equilibrium equation for rho."""
        rho = rho_in.copy()
        # Estimate integral of rho
        I = np.zeros(len(rho))
        I[1:] = nInt.cumtrapz(rho, z)
        res = drho_dz(rho) + a * rho * z / ((z**2 + r**2)**(1.5)) + 2 * b * rho * I
        return res

    def erf_res(scale_size):
        """Total |residual| of the erf ansatz at the given length scale."""
        testfct = rho_int * (1 - scipy.special.erf(z / scale_size))
        return abs(Ires(testfct)).sum()

    # -------------------------------------------------------------------
    # FIND RHO
    # -------------------------------------------------------------------
    maxiter = 40
    # Estimate the scale length of the error function
    z0 = opt.fminbound(erf_res, z0guess / 100.0, 5.0 * z0guess)
    print('Length scale guess: {0} {1}'.format(z0guess, length_unit))
    print('Final length scale: {0} {1}'.format(z0, length_unit))

    # Begin by finding I, the integral of rho (from z to inf).
    # Assuming rho is gaussian, I is an error function.
    guess = rho_int * (1 - scipy.special.erf(z / z0))
    # Find the root of the differential equation for I
    f_tol = rho_int * 6e-6
    try:
        Isol = opt.newton_krylov(Ires, guess, maxiter=maxiter, f_tol=f_tol)
    except NoConvergence as err:
        # Assume it didn't converge only because f_tol was too strict; the
        # best iterate is carried on the exception.
        Isol = err.args[0]
    # rho is the negative derivative
    rho0 = -dI_dz(Isol)

    # Now apply the diff eq on rho, rescaling each pass to preserve sigma.
    converged = False
    for n in range(maxiter):
        print('Iteration {0}'.format(n + 1))
        f_tol = rho0.max() * 6e-6
        try:
            rho0 = opt.newton_krylov(residual, rho0, maxiter=maxiter, f_tol=f_tol)
        except NoConvergence as err:
            # BUG FIX: was a bare `except:` that swallowed every exception.
            # Only a solver non-convergence carries a usable iterate.
            rho0 = err.args[0]
        rho_scale = rho_int / nInt.cumtrapz(rho0, z)[-1]
        print('Scaling rho by {0}'.format(rho_scale))
        rho0 = rho0 * rho_scale
        if abs(1 - rho_scale) < rho_tol - 1:
            converged = True
            break
    if not converged:
        # BUG FIX: the original test `n >= maxiter` could never be true
        # (range(maxiter) tops out at maxiter-1), so this warning never fired.
        print('Warning: solution to rho did not converge for r = {0}'.format(r))

    # Re-introduce units
    rho0 = isaac.set_units(rho0, mass_unit / length_unit**3)
    z = isaac.set_units(z, length_unit)
    return SimArray(rho0, 'Msol au**-3'), SimArray(z, 'au')
def cdfinv_z(z, rho):
    """
    Calculates the inverse of the cumulative distribution function for
    probability as a function of z for a given r.

    *** Arguments ***
    * z *   z positions to calculate over.  1D array
    * rho * Density as a function of z.  Treated as an un-normalized
            probability.  1D array

    If z doesn't carry units, units of 'au' are assumed.

    *** Returns ***
    The inverse normalized CDF as a 1D linear spline interpolation.
    """
    # Units: fall back to AU when z carries none.
    zunit = z.units if pynbody.units.has_units(z) else pynbody.units.au

    # Un-normalized CDF from the density (cdf[0] = 0 by construction).
    npts = len(z)
    cdf = np.zeros(npts)
    cdf[1:] = nInt.cumtrapz(rho, z)
    if cdf.max() <= 0.0:
        # The density is zero (or negative) for all z: map every particle
        # to z = 0.
        def finv(m_in):
            return m_in * 0.0
        return finv
    cdf /= cdf.max()

    # Assume the CDF is approximately monotonic and sort to force it to be,
    # then drop flat runs (zero-probability regions) so the spline is
    # single-valued.
    order = cdf.argsort()
    cdf = cdf[order]
    z = z[order]
    keep = np.ones(npts, dtype='bool')
    keep[1:] = np.diff(cdf) != 0
    cdf = cdf[keep]
    z = z[keep]

    finv_spline = interp1d(cdf, z, kind='linear')

    def finv(m):
        return SimArray(finv_spline(m), zunit)

    return finv
| mit |
appleseedhq/cortex | python/IECoreMaya/MeshOpHolderUtil.py | 5 | 12545 | ##########################################################################
#
# Copyright (c) 2008-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import maya.OpenMaya as OpenMaya
import maya.cmds as cmds
import IECoreMaya
import IECore
def __getFloat3PlugValue( plug ):
	"""Read a float3 plug and return its value as an MFloatVector."""
	# The plug's data lives in an MObject wrapping MFnNumericData.
	dataObj = plug.asMObject()
	numDataFn = OpenMaya.MFnNumericData( dataObj )
	# The API returns the components through float pointers; the MScriptUtil
	# owners must stay alive while their pointers are in use.
	utils = [ OpenMaya.MScriptUtil(), OpenMaya.MScriptUtil(), OpenMaya.MScriptUtil() ]
	ptrs = []
	for u in utils :
		u.createFromDouble( 0.0 )
		ptrs.append( u.asFloatPtr() )
	numDataFn.getData3Float( ptrs[0], ptrs[1], ptrs[2] )
	return OpenMaya.MFloatVector(
		OpenMaya.MScriptUtil( ptrs[0] ).asFloat(),
		OpenMaya.MScriptUtil( ptrs[1] ).asFloat(),
		OpenMaya.MScriptUtil( ptrs[2] ).asFloat()
	)
def __hasTweaks( meshDagPath ):
	"""Return True if the mesh carries any non-zero "pnts" tweak values."""
	fnDN = OpenMaya.MFnDependencyNode( meshDagPath.node() )
	# Tweaks exist only if the multi "pnts" attribute contains plugs
	# which contain non-zero tweak values.
	tweakPlug = fnDN.findPlug( "pnts" )
	if tweakPlug.isNull() :
		return False
	if not tweakPlug.isArray() :
		raise RuntimeError( "tweakPlug is not an array plug" )
	for i in range( tweakPlug.numElements() ) :
		element = tweakPlug.elementByPhysicalIndex( i )
		if element.isNull() :
			continue
		offset = __getFloat3PlugValue( element )
		if offset.x != 0 or offset.y != 0 or offset.z != 0 :
			return True
	return False
def __hasHistory( meshDagPath ):
	"""Return True if the mesh's inMesh plug has an upstream connection."""
	fnDN = OpenMaya.MFnDependencyNode( meshDagPath.node() )
	return fnDN.findPlug("inMesh").isConnected()
def __processUpstreamNode(data, meshDagPath, dgModifier):
	"""Prepare the node upstream of the mesh so a modifier can be inserted.

	Fills data.upstreamNodeShape / upstreamNodeSrcPlug / upstreamNodeSrcAttr
	and leaves the mesh's inMesh plug disconnected, ready to receive the
	modifier's output.  Two cases:
	- mesh has history: detach the existing upstream connection;
	- no history: duplicate the mesh as an intermediate "Orig" shape that
	  feeds the modifier.
	"""
	if __hasHistory( meshDagPath ):
		# Just swap the connections around
		tempPlugArray = OpenMaya.MPlugArray()
		# Find the single source currently feeding inMesh.
		data.meshNodeDestPlug.connectedTo(tempPlugArray, True, False)
		assert( tempPlugArray.length() == 1 )
		data.upstreamNodeSrcPlug = OpenMaya.MPlug( tempPlugArray[0] )
		data.upstreamNodeShape = data.upstreamNodeSrcPlug.node()
		data.upstreamNodeSrcAttr = data.upstreamNodeSrcPlug.attribute()
		dgModifier.disconnect(data.upstreamNodeSrcPlug, data.meshNodeDestPlug)
		dgModifier.doIt()
	else:
		# Duplicate mesh, mark as "intermediate", and reconnect in the DAG
		dagNodeFn = OpenMaya.MFnDagNode( data.meshNodeShape )
		meshNodeShapeName = dagNodeFn.name()
		data.upstreamNodeTransform = dagNodeFn.duplicate(False, False)
		dagNodeFn.setObject(data.upstreamNodeTransform)
		fDagModifier = OpenMaya.MDagModifier()
		if dagNodeFn.childCount() < 1:
			raise RuntimeError( "Duplicated mesh has no shape" )
		# Reparent the duplicated shape under the original transform and
		# name it after Maya's construction-history convention ("...Orig").
		data.upstreamNodeShape = dagNodeFn.child(0)
		fDagModifier.reparentNode(data.upstreamNodeShape, data.meshNodeTransform)
		fDagModifier.renameNode( data.upstreamNodeShape, meshNodeShapeName+"Orig" )
		fDagModifier.doIt()
		# Hide the duplicate from normal display/selection.
		dagNodeFn.setObject(data.upstreamNodeShape)
		dagNodeFn.setIntermediateObject(True)
		data.upstreamNodeSrcAttr = dagNodeFn.attribute("outMesh")
		data.upstreamNodeSrcPlug = dagNodeFn.findPlug("outMesh")
		# The duplicate's transform is no longer needed.
		fDagModifier.deleteNode(data.upstreamNodeTransform)
		fDagModifier.doIt()
def __processTweaks(data, dgModifier, modifierNode):
	"""Move the mesh's per-vertex "pnts" tweaks onto a new polyTweak node.

	The polyTweak node is spliced between the upstream source and the
	modifier node so that existing vertex tweaks survive the insertion of
	the modifier.  Any connections into/out of individual tweak components
	are re-routed to the polyTweak node's corresponding plugs.
	"""
	tweakIndexArray = OpenMaya.MIntArray()
	fnDN = OpenMaya.MFnDependencyNode()
	tweakDataArray = OpenMaya.MObjectArray()
	tweakSrcConnectionCountArray = OpenMaya.MIntArray()
	tweakSrcConnectionPlugArray = OpenMaya.MPlugArray()
	tweakDstConnectionCountArray = OpenMaya.MIntArray()
	tweakDstConnectionPlugArray = OpenMaya.MPlugArray()
	tempPlugArray = OpenMaya.MPlugArray()

	# Create the polyTweak node and cache the attributes we connect to.
	tweakNode = dgModifier.createNode("polyTweak")
	fnDN.setObject(tweakNode)
	tweakNodeSrcAttr = fnDN.attribute("output")
	tweakNodeDestAttr = fnDN.attribute("inputPolymesh")
	tweakNodeTweakAttr = fnDN.attribute("tweak")

	fnDN.setObject(data.meshNodeShape)
	meshTweakPlug = fnDN.findPlug("pnts")
	if not meshTweakPlug.isArray() :
		raise RuntimeError( "meshTweakPlug is not an array plug" )

	# Pass 1: record each non-null tweak element's logical index, value and
	# any per-component connections, disconnecting them from the mesh.
	numElements = meshTweakPlug.numElements()
	for i in range(numElements):
		tweak = meshTweakPlug.elementByPhysicalIndex(i)
		if not tweak.isNull():
			tweakIndexArray.append( tweak.logicalIndex() )
			tweakData = tweak.asMObject()
			tweakDataArray.append(tweakData)
			if not tweak.isCompound():
				raise RuntimeError( "Element tweak plug is not a compound" )
			numChildren = tweak.numChildren()
			for j in range(numChildren):
				tweakChild = tweak.child(j)
				if tweakChild.isConnected():
					# Outgoing connections (this plug as source).
					tempPlugArray.clear()
					if tweakChild.connectedTo(tempPlugArray, False, True):
						numSrcConnections = tempPlugArray.length()
						tweakSrcConnectionCountArray.append(numSrcConnections)
						for k in range(numSrcConnections):
							tweakSrcConnectionPlugArray.append(tempPlugArray[k])
							dgModifier.disconnect(tweakChild, tempPlugArray[k])
					else:
						tweakSrcConnectionCountArray.append(0)
					# Incoming connection (this plug as destination); at
					# most one source can feed a plug.
					tempPlugArray.clear()
					if tweakChild.connectedTo(tempPlugArray, True, False):
						assert( tempPlugArray.length() == 1 )
						tweakDstConnectionCountArray.append(1)
						tweakDstConnectionPlugArray.append(tempPlugArray[0])
						dgModifier.disconnect(tempPlugArray[0], tweakChild)
					else:
						tweakDstConnectionCountArray.append(0)
				else:
					tweakSrcConnectionCountArray.append(0)
					tweakDstConnectionCountArray.append(0)

	# Pass 2: replay the recorded values and connections onto the polyTweak
	# node's "tweak" plug, consuming the flat connection arrays in order.
	polyTweakPlug = OpenMaya.MPlug(tweakNode, tweakNodeTweakAttr)
	numTweaks = tweakIndexArray.length()
	srcOffset = 0
	dstOffset = 0
	for i in range(numTweaks):
		tweak = polyTweakPlug.elementByLogicalIndex(tweakIndexArray[i])
		tweak.setMObject(tweakDataArray[i])
		if not tweak.isCompound():
			raise RuntimeError( "Element plug 'tweak' is not a compound" )
		numChildren = tweak.numChildren()
		for j in range(numChildren):
			tweakChild = tweak.child(j)
			if 0 < tweakSrcConnectionCountArray[i*numChildren + j]:
				k = 0
				while (k < tweakSrcConnectionCountArray[i*numChildren + j]):
					dgModifier.connect(tweakChild, tweakSrcConnectionPlugArray[srcOffset])
					srcOffset += 1
					k += 1
			if 0 < tweakDstConnectionCountArray[i*numChildren + j]:
				dgModifier.connect(tweakDstConnectionPlugArray[dstOffset], tweakChild)
				dstOffset += 1

	# Wire the polyTweak node in between the upstream source and the
	# modifier: upstream -> polyTweak -> modifier.
	tweakDestPlug = OpenMaya.MPlug( tweakNode, tweakNodeDestAttr )
	dgModifier.connect( data.upstreamNodeSrcPlug, tweakDestPlug )
	tweakSrcPlug = OpenMaya.MPlug( tweakNode, tweakNodeSrcAttr)
	modifierDestPlug = OpenMaya.MPlug( modifierNode, data.modifierNodeDestAttr )
	dgModifier.connect( tweakSrcPlug, modifierDestPlug )
def __connectNodes( modifierNode, meshDagPath ):
	"""Insert modifierNode into the mesh's construction history.

	After this call the graph is: upstream source (possibly via a new
	polyTweak node) -> modifierNode.parm_input, and modifierNode.result ->
	mesh.inMesh.
	"""
	# Scratch record shared between the helper passes below.
	class MeshOpHolderData:
		def __init__(self):
			self.meshNodeTransform = OpenMaya.MObject()
			self.meshNodeShape = OpenMaya.MObject()
			self.meshNodeDestPlug = OpenMaya.MPlug()
			self.meshNodeDestAttr = OpenMaya.MObject()
			self.upstreamNodeTransform = OpenMaya.MObject()
			self.upstreamNodeShape = OpenMaya.MObject()
			self.upstreamNodeSrcPlug = OpenMaya.MPlug()
			self.upstreamNodeSrcAttr = OpenMaya.MObject()
			self.modifierNodeSrcAttr = OpenMaya.MObject()
			self.modifierNodeDestAttr = OpenMaya.MObject()

	data = MeshOpHolderData()
	fnDN = OpenMaya.MFnDependencyNode( modifierNode )
	# Op holder convention: "result" is the output, "parm_input" the input.
	data.modifierNodeSrcAttr = fnDN.attribute("result")
	data.modifierNodeDestAttr = fnDN.attribute("parm_input")
	data.meshNodeShape = meshDagPath.node()
	dagNodeFn = OpenMaya.MFnDagNode( data.meshNodeShape )
	if dagNodeFn.parentCount() == 0:
		raise RuntimeError( "Mesh shape has no parent transform" )
	data.meshNodeTransform = dagNodeFn.parent(0)
	data.meshNodeDestPlug = dagNodeFn.findPlug("inMesh")
	data.meshNodeDestAttr = data.meshNodeDestPlug.attribute()
	dgModifier = OpenMaya.MDGModifier()
	# Detach (or synthesize) the upstream source of the mesh.
	__processUpstreamNode(data, meshDagPath, dgModifier)
	if __hasTweaks( meshDagPath ):
		# Existing vertex tweaks must be preserved on a polyTweak node.
		__processTweaks(data, dgModifier, modifierNode)
	else:
		modifierDestPlug = OpenMaya.MPlug(modifierNode, data.modifierNodeDestAttr)
		dgModifier.connect(data.upstreamNodeSrcPlug, modifierDestPlug)
	# Finally feed the modifier's result into the mesh.
	modifierSrcPlug = OpenMaya.MPlug(modifierNode, data.modifierNodeSrcAttr)
	meshDestAttr = OpenMaya.MPlug(data.meshNodeShape, data.meshNodeDestAttr)
	dgModifier.connect(modifierSrcPlug, meshDestAttr)
	dgModifier.doIt()
def __setParameters( op, kw ):
	"""Apply each name/value pair in kw to the op's parameters, with validation."""
	for name, value in kw.items():
		op.parameters().setValidatedParameterValue( name, value )
def __createMeshOpNode( className, classVersion, **kw ):
	"""Create an ieOpHolderNode holding the requested op; return its MObject."""
	shortName = className.split( '/' )[-1]
	nodeName = cmds.createNode( "ieOpHolderNode", name = shortName + "#" )
	holder = IECoreMaya.FnParameterisedHolder( nodeName )
	op = holder.setParameterised( className, classVersion, "IECORE_OP_PATHS" )
	__setParameters( op, kw )
	# Convert the node name back into an MObject handle.
	sel = OpenMaya.MSelectionList()
	sel.add( nodeName )
	node = OpenMaya.MObject()
	sel.getDependNode( 0, node )
	return node
def __applyMeshOp( meshNode, className, classVersion, kw ):
	"""Load the named op and apply it directly to meshNode (in-place path).

	kw is a dict of parameter name -> value pairs.
	"""
	op = IECore.ClassLoader.defaultOpLoader().load( className, classVersion )
	# BUG FIX: __setParameters takes the keyword dict positionally; the old
	# call unpacked it ( __setParameters( op, **kw ) ), which always raised
	# a TypeError.
	__setParameters( op, kw )
	# \todo Apply op and convert result back into original object
def create( meshDagPath, className, classVersion, **kw):
	"""Insert a mesh-op holder node upstream of the given mesh.

	meshDagPath may be an MDagPath or the name of a mesh/transform; extra
	keyword arguments become op parameter values.  Returns the name of the
	created ieOpHolderNode, or None if the op was applied in-place.
	"""
	# maya.mel is not imported at module level; import it here so the
	# constructionHistory query below cannot raise a NameError.
	import maya.mel
	if isinstance( meshDagPath, str ):  # was `type(...) is str`; isinstance also accepts subclasses
		sel = OpenMaya.MSelectionList()
		sel.add( meshDagPath )
		meshDagPath = OpenMaya.MDagPath()
		sel.getDagPath( 0, meshDagPath)
		meshDagPath.extendToShape()
	constructionHistoryEnabled = maya.mel.eval("constructionHistory -q -tgl")
	if not __hasHistory( meshDagPath ) and constructionHistoryEnabled == 0:
		# \todo we can't actually do this right now because we're unable to convert the resultant MeshPrimitive
		# back into the original meshNode MObject given to us
		raise RuntimeError( "Currently unable to apply MeshOp in-place " )
		# Unreachable until the todo above is resolved.  BUG FIX kept for
		# then: __applyMeshOp takes the keyword dict positionally, so pass
		# kw rather than unpacking it with **.
		meshNode = meshDagPath.node()
		__applyMeshOp( meshNode, className, classVersion, kw )
		return None
	else:
		modifierNode = __createMeshOpNode( className, classVersion, **kw )
		__connectNodes( modifierNode, meshDagPath )
		fnDN = OpenMaya.MFnDependencyNode( modifierNode )
		return str( fnDN.name() )
def createUI( className, classVersion, **kw ):
	"""Apply the op to every currently selected mesh; return the holder names."""
	# \todo This below selection determination code fails with an unclear error if
	# a mesh component is currently selected
	transforms = cmds.ls( selection = True, type = "transform" ) or []
	shapesFromTransforms = cmds.listRelatives( transforms, type = "mesh" ) or []
	meshes = ( cmds.ls( selection = True, type = "mesh" ) or [] ) + shapesFromTransforms
	if not meshes:
		raise RuntimeError( "No mesh selected" )
	modifierNodes = []
	for mesh in meshes:
		sel = OpenMaya.MSelectionList()
		sel.add( mesh )
		dagPath = OpenMaya.MDagPath()
		sel.getDagPath( 0, dagPath )
		dagPath.extendToShape()
		node = create( dagPath, className, classVersion, **kw )
		if node :
			modifierNodes.append( node )
	return modifierNodes
| bsd-3-clause |
wdurhamh/statsmodels | statsmodels/tsa/vector_ar/svar_model.py | 21 | 23901 | """
Vector Autoregression (VAR) processes
References
----------
Lutkepohl (2005) New Introduction to Multiple Time Series Analysis
"""
from __future__ import print_function, division
from statsmodels.compat.python import range
import numpy as np
import numpy.linalg as npl
from numpy.linalg import slogdet
from statsmodels.tools.numdiff import (approx_hess, approx_fprime)
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tsa.vector_ar.irf import IRAnalysis
from statsmodels.tsa.vector_ar.var_model import VARProcess, \
VARResults
import statsmodels.tsa.vector_ar.util as util
import statsmodels.tsa.base.tsa_model as tsbase
from statsmodels.compat.numpy import np_matrix_rank
mat = np.array
def svar_ckerr(svar_type, A, B):
    """Raise ValueError when the SVAR type requires an A or B array that
    was not supplied."""
    if A is None and svar_type in ('A', 'AB'):
        raise ValueError('SVAR of type A or AB but A array not given.')
    if B is None and svar_type in ('B', 'AB'):
        raise ValueError('SVAR of type B or AB but B array not given.')
class SVAR(tsbase.TimeSeriesModel):
"""
Fit VAR and then estimate structural components of A and B, defined:
.. math:: Ay_t = A_1 y_{t-1} + \ldots + A_p y_{t-p} + B\var(\epsilon_t)
Parameters
----------
endog : array-like
1-d endogenous response variable. The independent variable.
dates : array-like
must match number of rows of endog
svar_type : str
"A" - estimate structural parameters of A matrix, B assumed = I
"B" - estimate structural parameters of B matrix, A assumed = I
"AB" - estimate structural parameters indicated in both A and B matrix
A : array-like
neqs x neqs with unknown parameters marked with 'E' for estimate
B : array-like
neqs x neqs with unknown parameters marked with 'E' for estimate
References
----------
Hamilton (1994) Time Series Analysis
"""
    def __init__(self, endog, svar_type, dates=None,
                 freq=None, A=None, B=None, missing='none'):
        """Set up the SVAR model: validate the type, build the boolean masks
        marking which elements of A and B are to be estimated, and store
        numeric A/B with NaN in the estimated positions."""
        super(SVAR, self).__init__(endog, None, dates, freq, missing=missing)
        #(self.endog, self.names,
        # self.dates) = data_util.interpret_data(endog, names, dates)

        self.y = self.endog #keep alias for now
        # Number of equations = number of endogenous series (columns).
        self.neqs = self.endog.shape[1]

        types = ['A', 'B', 'AB']
        if svar_type not in types:
            raise ValueError('SVAR type not recognized, must be in '
                             + str(types))
        self.svar_type = svar_type

        # Raise early if the chosen type requires an array that is missing.
        svar_ckerr(svar_type, A, B)

        #initialize A, B as I if not given
        #Initialize SVAR masks
        # Elements marked 'E'/'e' are free parameters to estimate; the mask
        # is True exactly at those positions.
        if A is None:
            A = np.identity(self.neqs)
            self.A_mask = A_mask = np.zeros(A.shape, dtype=bool)
        else:
            A_mask = np.logical_or(A == 'E', A == 'e')
            self.A_mask = A_mask
        if B is None:
            B = np.identity(self.neqs)
            self.B_mask = B_mask = np.zeros(B.shape, dtype=bool)
        else:
            B_mask = np.logical_or(B == 'E', B == 'e')
            self.B_mask = B_mask

        # convert A and B to numeric: fixed entries keep their given value,
        # estimated entries become NaN until the solver fills them in.
        #TODO: change this when masked support is better or with formula
        #integration
        Anum = np.zeros(A.shape, dtype=float)
        Anum[~A_mask] = A[~A_mask]
        Anum[A_mask] = np.nan
        self.A = Anum

        Bnum = np.zeros(B.shape, dtype=float)
        Bnum[~B_mask] = B[~B_mask]
        Bnum[B_mask] = np.nan
        self.B = Bnum
    def fit(self, A_guess=None, B_guess=None, maxlags=None, method='ols',
            ic=None, trend='c', verbose=False, s_method='mle',
            solver="bfgs", override=False, maxiter=500, maxfun=500):
        """
        Fit the SVAR model and solve for structural parameters

        Parameters
        ----------
        A_guess : array-like, optional
            A vector of starting values for all parameters to be estimated
            in A.
        B_guess : array-like, optional
            A vector of starting values for all parameters to be estimated
            in B.
        maxlags : int
            Maximum number of lags to check for order selection, defaults to
            12 * (nobs/100.)**(1./4), see select_order function
        method : {'ols'}
            Estimation method to use
        ic : {'aic', 'fpe', 'hqic', 'bic', None}
            Information criterion to use for VAR order selection.
            aic : Akaike
            fpe : Final prediction error
            hqic : Hannan-Quinn
            bic : Bayesian a.k.a. Schwarz
        verbose : bool, default False
            Print order selection output to the screen
        trend : str {"c", "ct", "ctt", "nc"}
            "c" - add constant
            "ct" - constant and trend
            "ctt" - constant, linear and quadratic trend
            "nc" - co constant, no trend
            Note that these are prepended to the columns of the dataset.
        s_method : {'mle'}
            Estimation method for structural parameters
        solver : {'nm', 'newton', 'bfgs', 'cg', 'ncg', 'powell'}
            Solution method
            See statsmodels.base for details
        override : bool, default False
            If True, returns estimates of A and B without checking
            order or rank condition
        maxiter : int, default 500
            Number of iterations to perform in solution method
        maxfun : int
            Number of function evaluations to perform

        Notes
        -----
        Lutkepohl pp. 146-153
        Hamilton pp. 324-336

        Returns
        -------
        est : SVARResults
        """
        # Choose the lag order: information criterion if requested,
        # otherwise the supplied maxlags (default 1).
        lags = maxlags
        if ic is not None:
            selections = self.select_order(maxlags=maxlags, verbose=verbose)
            if ic not in selections:
                raise Exception("%s not recognized, must be among %s"
                                % (ic, sorted(selections)))
            lags = selections[ic]
            if verbose:
                print('Using %d based on %s criterion' % (lags, ic))
        else:
            if lags is None:
                lags = 1

        # Effective sample size after losing `lags` presample observations.
        self.nobs = len(self.endog) - lags

        # initialize starting parameters for the structural ML step
        start_params = self._get_init_params(A_guess, B_guess)

        return self._estimate_svar(start_params, lags, trend=trend,
                                   solver=solver, override=override,
                                   maxiter=maxiter, maxfun=maxfun)
def _get_init_params(self, A_guess, B_guess):
"""
Returns either the given starting or .1 if none are given.
"""
var_type = self.svar_type.lower()
n_masked_a = self.A_mask.sum()
if var_type in ['ab', 'a']:
if A_guess is None:
A_guess = np.array([.1]*n_masked_a)
else:
if len(A_guess) != n_masked_a:
msg = 'len(A_guess) = %s, there are %s parameters in A'
raise ValueError(msg % (len(A_guess), n_masked_a))
else:
A_guess = []
n_masked_b = self.B_mask.sum()
if var_type in ['ab', 'b']:
if B_guess is None:
B_guess = np.array([.1]*n_masked_b)
else:
if len(B_guess) != n_masked_b:
msg = 'len(B_guess) = %s, there are %s parameters in B'
raise ValueError(msg % (len(B_guess), n_masked_b))
else:
B_guess = []
return np.r_[A_guess, B_guess]
    def _estimate_svar(self, start_params, lags, maxiter, maxfun,
                       trend='c', solver="nm", override=False):
        """Estimate the reduced-form VAR by OLS, then solve for the
        structural A/B matrices by maximum likelihood.

        Parameters
        ----------
        start_params : ndarray
            Stacked starting values for the masked elements of A then B.
        lags : int
            Lag order of the VAR.
        maxiter, maxfun : int
            Limits passed through to the structural ML solver.
        trend : string or None
            As per `fit`.
        solver : str
            Optimizer name passed through to `_solve_AB`.
        override : bool
            Skip the order/rank identification checks when True.

        Returns
        -------
        SVARResults
        """
        k_trend = util.get_trendorder(trend)
        y = self.endog
        # Lagged regressor matrix (with deterministic terms prepended).
        z = util.get_var_endog(y, lags, trend=trend, has_constant='raise')
        y_sample = y[lags:]

        # Lutkepohl p75, about 5x faster than stated formula
        var_params = np.linalg.lstsq(z, y_sample)[0]
        resid = y_sample - np.dot(z, var_params)

        # Unbiased estimate of covariance matrix $\Sigma_u$ of the white noise
        # process $u$
        # equivalent definition
        # .. math:: \frac{1}{T - Kp - 1} Y^\prime (I_T - Z (Z^\prime Z)^{-1}
        # Z^\prime) Y
        # Ref: Lutkepohl p.75
        # df_resid right now is T - Kp - 1, which is a suggested correction

        avobs = len(y_sample)

        df_resid = avobs - (self.neqs * lags + k_trend)

        sse = np.dot(resid.T, resid)
        #TODO: should give users the option to use a dof correction or not
        omega = sse / df_resid
        # loglike/_solve_AB read sigma_u from the instance, so set it before
        # the structural step.
        self.sigma_u = omega

        A, B = self._solve_AB(start_params, override=override,
                              solver=solver,
                              maxiter=maxiter,
                              maxfun=maxfun)
        A_mask = self.A_mask
        B_mask = self.B_mask

        return SVARResults(y, z, var_params, omega, lags,
                           names=self.endog_names, trend=trend,
                           dates=self.data.dates, model=self,
                           A=A, B=B, A_mask=A_mask, B_mask=B_mask)
    def loglike(self, params):
        """
        Loglikelihood for SVAR model

        Parameters
        ----------
        params : ndarray
            Free elements of A stacked before the free elements of B.

        Notes
        -----
        This method assumes that the autoregressive parameters are
        first estimated, then likelihood with structural parameters
        is estimated
        """
        #TODO: this doesn't look robust if A or B is None
        A = self.A
        B = self.B
        A_mask = self.A_mask
        B_mask = self.B_mask
        A_len = len(A[A_mask])
        B_len = len(B[B_mask])

        # Write the trial parameters into the masked (estimated) slots.
        if A is not None:
            A[A_mask] = params[:A_len]
        if B is not None:
            B[B_mask] = params[A_len:A_len+B_len]

        nobs = self.nobs
        neqs = self.neqs
        sigma_u = self.sigma_u

        # W = B^{-1} A maps reduced-form residuals to structural shocks.
        W = np.dot(npl.inv(B),A)
        trc_in = np.dot(np.dot(W.T,W),sigma_u)
        # log|B|^2 computed via slogdet for numerical stability.
        sign, b_logdet = slogdet(B**2) #numpy 1.4 compat
        b_slogdet = sign * b_logdet

        # Concentrated Gaussian log-likelihood (see Hamilton 1994, ch. 11).
        likl = -nobs/2. * (neqs * np.log(2 * np.pi) - \
                np.log(npl.det(A)**2) + b_slogdet + \
                np.trace(trc_in))

        return likl
def score(self, AB_mask):
"""
Return the gradient of the loglike at AB_mask.
Parameters
----------
AB_mask : unknown values of A and B matrix concatenated
Notes
-----
Return numerical gradient
"""
loglike = self.loglike
return approx_fprime(AB_mask, loglike, epsilon=1e-8)
def hessian(self, AB_mask):
"""
Returns numerical hessian.
"""
loglike = self.loglike
return approx_hess(AB_mask, loglike)
def _solve_AB(self, start_params, maxiter, maxfun, override=False,
              solver='bfgs'):
    """
    Solves for MLE estimate of structural parameters

    Parameters
    ----------
    start_params : ndarray
        Starting values: free elements of A first, then free elements
        of B, stacked into one vector.
    maxiter : int, optional
        The maximum number of iterations. Default is 500.
    maxfun : int, optional
        The maximum number of function evaluations.
    override : bool, default False
        If True, returns estimates of A and B without checking
        order or rank condition
    solver : str or None, optional
        Solver to be used. The default is 'nm' (Nelder-Mead). Other
        choices are 'bfgs', 'newton' (Newton-Raphson), 'cg'
        conjugate, 'ncg' (non-conjugate gradient), and 'powell'.

    Returns
    -------
    A_solve, B_solve : ML solutions for A, B matrices

    Notes
    -----
    ``self.A`` and ``self.B`` are mutated in place: first seeded with
    `start_params`, then overwritten with the optimizer's solution.
    """
    # TODO: this could stand a refactor
    A_mask = self.A_mask
    B_mask = self.B_mask
    A = self.A
    B = self.B

    # Seed the free elements of A and B from the stacked start vector;
    # A's free elements occupy the first A_len entries.
    A_len = len(A[A_mask])
    A[A_mask] = start_params[:A_len]
    B[B_mask] = start_params[A_len:]

    # Identification: verify the order and rank conditions on the
    # Jacobian J unless the caller explicitly opts out.
    if override == False:
        J = self._compute_J(A, B)
        self.check_order(J)
        self.check_rank(J)
    else:  # TODO: change to a warning?
        print("Order/rank conditions have not been checked")

    # Maximize the structural likelihood via the superclass fit; its
    # result's .params is the stacked [A free, B free] solution vector.
    retvals = super(SVAR, self).fit(start_params=start_params,
                                    method=solver, maxiter=maxiter,
                                    maxfun=maxfun, ftol=1e-20, disp=0).params
    A[A_mask] = retvals[:A_len]
    B[B_mask] = retvals[A_len:]
    return A, B
def _compute_J(self, A_solve, B_solve):
#first compute appropriate duplication matrix
# taken from Magnus and Neudecker (1980),
#"The Elimination Matrix: Some Lemmas and Applications
# the creation of the D_n matrix follows MN (1980) directly,
#while the rest follows Hamilton (1994)
neqs = self.neqs
sigma_u = self.sigma_u
A_mask = self.A_mask
B_mask = self.B_mask
#first generate duplication matrix, see MN (1980) for notation
D_nT = np.zeros([int((1.0 / 2) * (neqs) * (neqs + 1)), neqs**2])
for j in range(neqs):
i=j
while j <= i < neqs:
u=np.zeros([int((1.0/2)*neqs*(neqs+1)), 1])
u[int(j * neqs + (i + 1) - (1.0 / 2) * (j + 1) * j - 1)] = 1
Tij=np.zeros([neqs,neqs])
Tij[i,j]=1
Tij[j,i]=1
D_nT=D_nT+np.dot(u,(Tij.ravel('F')[:,None]).T)
i=i+1
D_n=D_nT.T
D_pl=npl.pinv(D_n)
#generate S_B
S_B = np.zeros((neqs**2, len(A_solve[A_mask])))
S_D = np.zeros((neqs**2, len(B_solve[B_mask])))
j = 0
j_d = 0
if len(A_solve[A_mask]) is not 0:
A_vec = np.ravel(A_mask, order='F')
for k in range(neqs**2):
if A_vec[k] == True:
S_B[k,j] = -1
j += 1
if len(B_solve[B_mask]) is not 0:
B_vec = np.ravel(B_mask, order='F')
for k in range(neqs**2):
if B_vec[k] == True:
S_D[k,j_d] = 1
j_d +=1
#now compute J
invA = npl.inv(A_solve)
J_p1i = np.dot(np.dot(D_pl, np.kron(sigma_u, invA)), S_B)
J_p1 = -2.0 * J_p1i
J_p2 = np.dot(np.dot(D_pl, np.kron(invA, invA)), S_D)
J = np.append(J_p1, J_p2, axis=1)
return J
def check_order(self, J):
    """Raise ValueError when J has fewer rows than columns (order condition)."""
    n_rows = np.size(J, axis=0)
    n_cols = np.size(J, axis=1)
    if n_rows < n_cols:
        raise ValueError("Order condition not met: "
                         "solution may not be unique")
def check_rank(self, J):
    """Raise ValueError when J is rank deficient (rank condition)."""
    n_cols = np.size(J, axis=1)
    if np_matrix_rank(J) < n_cols:
        raise ValueError("Rank condition not met: "
                         "solution may not be unique.")
class SVARProcess(VARProcess):
    """
    Class represents a known SVAR(p) process

    Parameters
    ----------
    coefs : ndarray (p x k x k)
    intercept : ndarray (length k)
    sigma_u : ndarray (k x k)
    A_solve : neqs x neqs ndarray
        Solved structural A matrix
    B_solve : neqs x neqs ndarray
        Solved structural B matrix
    names : sequence (length k), optional
    """

    def __init__(self, coefs, intercept, sigma_u, A_solve, B_solve,
                 names=None):
        # lag order and number of equations come from the coef array shape
        self.k_ar = len(coefs)
        self.neqs = coefs.shape[1]
        self.coefs = coefs
        self.intercept = intercept
        self.sigma_u = sigma_u
        self.names = names
        # structural matrices from the MLE step
        self.A_solve = A_solve
        self.B_solve = B_solve

    def orth_ma_rep(self, maxn=10, P=None):
        """
        Unavailable for SVAR; use :meth:`svar_ma_rep` instead.
        """
        raise NotImplementedError

    def svar_ma_rep(self, maxn=10, P=None):
        """
        Compute Structural MA coefficient matrices using MLE
        of A, B
        """
        if P is None:
            # default structural impact matrix: P = A^{-1} B
            P = np.dot(npl.inv(self.A_solve), self.B_solve)
        return mat([np.dot(phi, P) for phi in self.ma_rep(maxn=maxn)])
class SVARResults(SVARProcess, VARResults):
    """
    Estimate VAR(p) process with fixed number of lags

    Parameters
    ----------
    endog : array
    endog_lagged : array
    params : array
    sigma_u : array
    lag_order : int
    A, B : ndarray (neqs x neqs), optional
        Structural matrices; free positions flagged by A_mask / B_mask.
    A_mask, B_mask : bool ndarray (neqs x neqs), optional
    model : VAR model instance
    trend : str {'nc', 'c', 'ct'}
    names : array-like
        List of names of the endogenous variables in order of appearance
        in `endog`.
    dates

    Attributes
    ----------
    coefs : ndarray (p x K x K)
        Estimated A_i matrices, A_i = coefs[i-1]
    k_ar : int
        Order of VAR process
    params : ndarray ((Kp + 1) x K)
        A_i matrices and intercept in stacked form [int A_1 ... A_p]
    sigma_u : ndarray (K x K)
        Estimate of white noise process variance Var[u_t]
    neqs : int
        Number of variables (equations)
    nobs, n_totobs : int
    """

    _model_type = 'SVAR'

    def __init__(self, endog, endog_lagged, params, sigma_u, lag_order,
                 A=None, B=None, A_mask=None, B_mask=None, model=None,
                 trend='c', names=None, dates=None):
        self.model = model
        self.y = self.endog = endog  # keep alias for now
        self.ys_lagged = self.endog_lagged = endog_lagged  # keep alias for now
        self.dates = dates

        self.n_totobs, self.neqs = self.y.shape
        self.nobs = self.n_totobs - lag_order
        k_trend = util.get_trendorder(trend)
        if k_trend > 0:  # make this the polynomial trend order
            trendorder = k_trend - 1
        else:
            trendorder = None
        self.k_trend = k_trend
        self.trendorder = trendorder

        self.exog_names = util.make_lag_names(names, lag_order, k_trend)
        self.params = params
        self.sigma_u = sigma_u

        # Reshape the stacked coefficient rows into (lags, neqs, neqs);
        # each lag matrix then needs to be transposed.
        reshaped = self.params[self.k_trend:]
        reshaped = reshaped.reshape((lag_order, self.neqs, self.neqs))
        intercept = self.params[0]
        coefs = reshaped.swapaxes(1, 2).copy()

        # SVAR components
        # TODO: if you define these here, you don't also have to define
        # them in SVARProcess, but left for now -ss
        self.A = A
        self.B = B
        self.A_mask = A_mask
        self.B_mask = B_mask

        super(SVARResults, self).__init__(coefs, intercept, sigma_u, A,
                                          B, names=names)

    def irf(self, periods=10, var_order=None):
        """
        Analyze structural impulse responses to shocks in system

        Parameters
        ----------
        periods : int

        Returns
        -------
        irf : IRAnalysis
        """
        A = self.A
        B = self.B
        # structural impact matrix P = A^{-1} B
        P = np.dot(npl.inv(A), B)
        return IRAnalysis(self, P=P, periods=periods, svar=True)

    def sirf_errband_mc(self, orth=False, repl=1000, T=10,
                        signif=0.05, seed=None, burn=100, cum=False):
        """
        Compute Monte Carlo integrated error bands assuming normally
        distributed for impulse response functions

        Parameters
        ----------
        orth : bool, default False
            Compute orthogonalized impulse response error bands
        repl : int
            number of Monte Carlo replications to perform
        T : int, default 10
            number of impulse response periods
        signif : float (0 < signif < 1)
            Significance level for error bars, defaults to 95% CI
        seed : int
            np.random.seed for replications
        burn : int
            number of initial observations to discard for simulation
        cum : bool, default False
            produce cumulative irf error bands

        Returns
        -------
        Tuple of lower and upper arrays of ma_rep monte carlo standard
        errors

        Notes
        -----
        Lutkepohl (2005) Appendix D
        """
        neqs = self.neqs
        k_ar = self.k_ar
        coefs = self.coefs
        sigma_u = self.sigma_u
        intercept = self.intercept
        nobs = self.nobs

        ma_coll = np.zeros((repl, T + 1, neqs, neqs))
        A = self.A
        B = self.B
        A_mask = self.A_mask
        B_mask = self.B_mask

        # Rebuild the character-form A/B specification the SVAR
        # constructor takes: fixed entries copied, free entries marked 'E'.
        A_pass = np.zeros(A.shape, dtype='|S1')
        B_pass = np.zeros(B.shape, dtype='|S1')
        A_pass[~A_mask] = A[~A_mask]
        B_pass[~B_mask] = B[~B_mask]
        A_pass[A_mask] = 'E'
        B_pass[B_mask] = 'E'

        if A_mask.sum() == 0:
            s_type = 'B'
        elif B_mask.sum() == 0:
            s_type = 'A'
        else:
            s_type = 'AB'

        g_list = []
        opt_A = opt_B = None
        for i in range(repl):
            # discard the first `burn` draws to correct for starting bias
            sim = util.varsim(coefs, intercept, sigma_u,
                              steps=nobs + burn)
            sim = sim[burn:]
            if i < 10:
                # First ten replications: estimate A/B from scratch and
                # remember the estimates to build a warm start later.
                sol = SVAR(sim, svar_type=s_type, A=A_pass,
                           B=B_pass).fit(maxlags=k_ar)
                g_list.append(np.append(sol.A[A_mask].tolist(),
                                        sol.B[B_mask].tolist()))
                ma_rep = sol.svar_ma_rep(maxn=T)
            else:
                if i == 10:
                    # warm start = mean of the first ten A/B estimates
                    mean_AB = np.mean(g_list, axis=0)
                    split = len(A[A_mask])
                    opt_A = mean_AB[:split]
                    # BUG FIX: the cumulative branch previously assigned
                    # to opt_A twice, leaving opt_B undefined and raising
                    # NameError on the first warm-started replication.
                    opt_B = mean_AB[split:]
                ma_rep = SVAR(sim, svar_type=s_type, A=A_pass,
                              B=B_pass).fit(maxlags=k_ar, A_guess=opt_A,
                                            B_guess=opt_B).svar_ma_rep(maxn=T)
            # the two former copy-pasted branches differed only in this
            # optional cumulative sum
            ma_coll[i] = ma_rep.cumsum(axis=0) if cum else ma_rep

        ma_sort = np.sort(ma_coll, axis=0)  # sort to get quantiles
        index = (int(round(signif / 2 * repl)) - 1,
                 int(round((1 - signif / 2) * repl)) - 1)
        lower = ma_sort[index[0], :, :, :]
        upper = ma_sort[index[1], :, :, :]
        return lower, upper
| bsd-3-clause |
beeverycreative/BEEweb | src/octoprint/plugins/cura/profile.py | 2 | 35704 | # coding=utf-8
from __future__ import absolute_import, division, print_function
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import re
from builtins import range
class SupportLocationTypes(object):
    """String constants for where support material is generated."""
    NONE = "none"
    TOUCHING_BUILDPLATE = "buildplate"
    EVERYWHERE = "everywhere"
class SupportDualTypes(object):
    """String constants for which extruder prints support in dual extrusion."""
    BOTH = "both"
    FIRST = "first"
    SECOND = "second"
class SupportTypes(object):
    """String constants for the support structure pattern."""
    GRID = "grid"
    LINES = "lines"
class PlatformAdhesionTypes(object):
    """String constants for the platform adhesion helper type."""
    NONE = "none"
    BRIM = "brim"
    RAFT = "raft"
class MachineShapeTypes(object):
    """String constants for the machine bed shape."""
    SQUARE = "square"
    CIRCULAR = "circular"
class RetractionCombingTypes(object):
    """String constants for the retraction combing mode."""
    OFF = "off"
    ALL = "all"
    NO_SKIN = "no skin"
class GcodeFlavors(object):
    """Supported g-code flavors; each value is an (index, identifier) pair."""
    REPRAP = (0, "reprap")
    ULTIGCODE = (1, "ultigcode")
    MAKERBOT = (2, "makerbot")
    BFB = (3, "bfb")
    MACH3 = (4, "mach3")
    REPRAP_VOLUME = (5, "reprap_volume")
# Default slicing profile. These values are the fallback for every key;
# ``Profile.merge_profile`` layers an imported Cura profile and optional
# overrides on top of them. Array-valued keys (print_temperature,
# filament_diameter, start_gcode, end_gcode) hold one entry per extruder
# count / slot.
defaults = dict(
    # layers, shells and infill
    layer_height=0.1,
    wall_thickness=0.8,
    solid_layer_thickness=0.6,
    print_temperature=[220, 0, 0, 0],
    print_bed_temperature=0,
    platform_adhesion=PlatformAdhesionTypes.NONE,
    filament_diameter=[2.85, 0, 0, 0],
    filament_flow=100.0,
    bottom_thickness=0.3,
    first_layer_width_factor=100.0,
    object_sink=0.0,
    fill_density=20,
    solid_top=True,
    solid_bottom=True,
    fill_overlap=15,
    perimeter_before_infill=False,

    # speeds
    print_speed=50.0,
    travel_speed=150.0,
    bottom_layer_speed=20.0,
    infill_speed=0.0,
    solidarea_speed=0.0,
    outer_shell_speed=0.0,
    inner_shell_speed=0.0,

    # dual extrusion
    overlap_dual=0.15,
    wipe_tower=False,
    wipe_tower_volume=15,
    ooze_shield=False,

    # retraction
    retraction_enable=True,
    retraction_speed=40.0,
    retraction_amount=4.5,
    retraction_dual_amount=16.5,
    retraction_min_travel=1.5,
    retraction_combing=True,
    retraction_minimal_extrusion=0.02,
    retraction_hop=0.0,

    # cooling
    cool_min_layer_time=5,
    fan_enabled=True,
    fan_full_height=0.5,
    fan_speed=100,
    fan_speed_max=100,
    cool_min_feedrate=10,
    cool_head_lift=False,

    # support
    support=SupportLocationTypes.NONE,
    support_type=SupportTypes.GRID,
    support_angle=60.0,
    support_fill_rate=15,
    support_xy_distance=0.7,
    support_z_distance=0.15,
    support_dual_extrusion=SupportDualTypes.BOTH,

    # platform adhesion
    skirt_line_count=1,
    skirt_gap=3.0,
    skirt_minimal_length=150.0,
    brim_line_count=20,
    raft_margin=5.0,
    raft_line_spacing=3.0,
    raft_base_thickness=0.3,
    raft_base_linewidth=1.0,
    raft_interface_thickness=0.27,
    raft_interface_linewidth=0.4,
    raft_airgap_all=0.0,
    raft_airgap=0.22,
    raft_surface_layers=2,
    raft_surface_thickness=0.27,
    raft_surface_linewidth=0.4,

    # repairing
    fix_horrible_union_all_type_a=True,
    fix_horrible_union_all_type_b=False,
    fix_horrible_use_open_bits=False,
    fix_horrible_extensive_stitching=False,

    # extras
    spiralize=False,
    follow_surface=False,

    # machine settings
    machine_width=205,
    machine_depth=205,
    machine_center_is_zero=False,
    has_heated_bed=False,
    gcode_flavor=GcodeFlavors.REPRAP,
    extruder_amount=1,
    steps_per_e=0,

    # g-code templates, one per extruder count (index = count - 1);
    # {placeholders} are expanded by Profile.replaceTagMatch
    start_gcode=[
        # 1 extruder
        """;Sliced at: {day} {date} {time}
;Basic settings: Layer height: {layer_height} Walls: {wall_thickness} Fill: {fill_density}
;M190 S{print_bed_temperature} ;Uncomment to add your own bed temperature line
;M109 S{print_temperature} ;Uncomment to add your own temperature line
G21 ;metric values
G90 ;absolute positioning
M82 ;set extruder to absolute mode
M107 ;start with the fan off
G28 X0 Y0 ;move X/Y to min endstops
G28 Z0 ;move Z to min endstops
G1 Z15.0 F{travel_speed} ;move the platform down 15mm
G92 E0 ;zero the extruded length
G1 F200 E3 ;extrude 3mm of feed stock
G92 E0 ;zero the extruded length again
G1 F{travel_speed}
;Put printing message on LCD screen
M117 Printing...
""",
        # 2 extruders
        """;Sliced at: {day} {date} {time}
;Basic settings: Layer height: {layer_height} Walls: {wall_thickness} Fill: {fill_density}
;M190 S{print_bed_temperature} ;Uncomment to add your own bed temperature line
;M104 S{print_temperature} ;Uncomment to add your own temperature line
;M109 T1 S{print_temperature2} ;Uncomment to add your own temperature line
;M109 T0 S{print_temperature} ;Uncomment to add your own temperature line
G21 ;metric values
G90 ;absolute positioning
M107 ;start with the fan off
G28 X0 Y0 ;move X/Y to min endstops
G28 Z0 ;move Z to min endstops
G1 Z15.0 F{travel_speed} ;move the platform down 15mm
T1 ;Switch to the 2nd extruder
G92 E0 ;zero the extruded length
G1 F200 E10 ;extrude 10mm of feed stock
G92 E0 ;zero the extruded length again
G1 F200 E-{retraction_dual_amount}
T0 ;Switch to the first extruder
G92 E0 ;zero the extruded length
G1 F200 E10 ;extrude 10mm of feed stock
G92 E0 ;zero the extruded length again
G1 F{travel_speed}
;Put printing message on LCD screen
M117 Printing...
""",
        # 3 extruders
        """;Sliced at: {day} {date} {time}
;Basic settings: Layer height: {layer_height} Walls: {wall_thickness} Fill: {fill_density}
;M190 S{print_bed_temperature} ;Uncomment to add your own bed temperature line
;M104 S{print_temperature} ;Uncomment to add your own temperature line
;M109 T1 S{print_temperature2} ;Uncomment to add your own temperature line
;M109 T0 S{print_temperature} ;Uncomment to add your own temperature line
G21 ;metric values
G90 ;absolute positioning
M107 ;start with the fan off
G28 X0 Y0 ;move X/Y to min endstops
G28 Z0 ;move Z to min endstops
G1 Z15.0 F{travel_speed} ;move the platform down 15mm
T2 ;Switch to the 2nd extruder
G92 E0 ;zero the extruded length
G1 F200 E10 ;extrude 10mm of feed stock
G92 E0 ;zero the extruded length again
G1 F200 E-{retraction_dual_amount}
T1 ;Switch to the 2nd extruder
G92 E0 ;zero the extruded length
G1 F200 E10 ;extrude 10mm of feed stock
G92 E0 ;zero the extruded length again
G1 F200 E-{retraction_dual_amount}
T0 ;Switch to the first extruder
G92 E0 ;zero the extruded length
G1 F200 E10 ;extrude 10mm of feed stock
G92 E0 ;zero the extruded length again
G1 F{travel_speed}
;Put printing message on LCD screen
M117 Printing...
""",
        # 4 extruders
        """;Sliced at: {day} {date} {time}
;Basic settings: Layer height: {layer_height} Walls: {wall_thickness} Fill: {fill_density}
;M190 S{print_bed_temperature} ;Uncomment to add your own bed temperature line
;M104 S{print_temperature} ;Uncomment to add your own temperature line
;M109 T2 S{print_temperature2} ;Uncomment to add your own temperature line
;M109 T1 S{print_temperature2} ;Uncomment to add your own temperature line
;M109 T0 S{print_temperature} ;Uncomment to add your own temperature line
G21 ;metric values
G90 ;absolute positioning
M107 ;start with the fan off
G28 X0 Y0 ;move X/Y to min endstops
G28 Z0 ;move Z to min endstops
G1 Z15.0 F{travel_speed} ;move the platform down 15mm
T3 ;Switch to the 4th extruder
G92 E0 ;zero the extruded length
G1 F200 E10 ;extrude 10mm of feed stock
G92 E0 ;zero the extruded length again
G1 F200 E-{retraction_dual_amount}
T2 ;Switch to the 3th extruder
G92 E0 ;zero the extruded length
G1 F200 E10 ;extrude 10mm of feed stock
G92 E0 ;zero the extruded length again
G1 F200 E-{retraction_dual_amount}
T1 ;Switch to the 2nd extruder
G92 E0 ;zero the extruded length
G1 F200 E10 ;extrude 10mm of feed stock
G92 E0 ;zero the extruded length again
G1 F200 E-{retraction_dual_amount}
T0 ;Switch to the first extruder
G92 E0 ;zero the extruded length
G1 F200 E10 ;extrude 10mm of feed stock
G92 E0 ;zero the extruded length again
G1 F{travel_speed}
;Put printing message on LCD screen
M117 Printing...
"""
    ],
    end_gcode=[
        # 1 extruder
        """;End GCode
M104 S0 ;extruder heater off
M140 S0 ;heated bed heater off (if you have it)
G91 ;relative positioning
G1 E-1 F300 ;retract the filament a bit before lifting the nozzle, to release some of the pressure
G1 Z+0.5 E-5 X-20 Y-20 F{travel_speed} ;move Z up a bit and retract filament even more
G28 X0 Y0 ;move X/Y to min endstops, so the head is out of the way
M84 ;steppers off
G90 ;absolute positioning
;{profile_string}
""",
        # 2 extruders
        """;End GCode
M104 T0 S0 ;extruder heater off
M104 T1 S0 ;extruder heater off
M140 S0 ;heated bed heater off (if you have it)
G91 ;relative positioning
G1 E-1 F300 ;retract the filament a bit before lifting the nozzle, to release some of the pressure
G1 Z+0.5 E-5 X-20 Y-20 F{travel_speed} ;move Z up a bit and retract filament even more
G28 X0 Y0 ;move X/Y to min endstops, so the head is out of the way
M84 ;steppers off
G90 ;absolute positioning
;{profile_string}
""",
        # 3 extruders
        """;End GCode
M104 T0 S0 ;extruder heater off
M104 T1 S0 ;extruder heater off
M104 T2 S0 ;extruder heater off
M140 S0 ;heated bed heater off (if you have it)
G91 ;relative positioning
G1 E-1 F300 ;retract the filament a bit before lifting the nozzle, to release some of the pressure
G1 Z+0.5 E-5 X-20 Y-20 F{travel_speed} ;move Z up a bit and retract filament even more
G28 X0 Y0 ;move X/Y to min endstops, so the head is out of the way
M84 ;steppers off
G90 ;absolute positioning
;{profile_string}
""",
        # 4 extruders
        """;End GCode
M104 T0 S0 ;extruder heater off
M104 T1 S0 ;extruder heater off
M104 T2 S0 ;extruder heater off
M104 T3 S0 ;extruder heater off
M140 S0 ;heated bed heater off (if you have it)
G91 ;relative positioning
G1 E-1 F300 ;retract the filament a bit before lifting the nozzle, to release some of the pressure
G1 Z+0.5 E-5 X-20 Y-20 F{travel_speed} ;move Z up a bit and retract filament even more
G28 X0 Y0 ;move X/Y to min endstops, so the head is out of the way
M84 ;steppers off
G90 ;absolute positioning
;{profile_string}
"""
    ],
    # inserted around the T(n) tool change command during multi-extrusion
    preSwitchExtruder_gcode=""";Switch between the current extruder and the next extruder, when printing with multiple extruders.
;This code is added before the T(n)
""",
    postSwitchExtruder_gcode=""";Switch between the current extruder and the next extruder, when printing with multiple extruders.
;This code is added after the T(n)
"""
)
class Profile(object):
regex_extruder_offset = re.compile("extruder_offset_([xy])(\d)")
regex_filament_diameter = re.compile("filament_diameter(\d?)")
regex_print_temperature = re.compile("print_temperature(\d?)")
regex_strip_comments = re.compile(";.*$", flags=re.MULTILINE)
@classmethod
def from_cura_ini(cls, path):
import logging
logger = logging.getLogger("octoprint.plugin.cura.profile")
import os
if not os.path.exists(path) or not os.path.isfile(path):
logger.warn("Path {path} does not exist or is not a file, cannot import".format(**locals()))
return None
import ConfigParser
config = ConfigParser.ConfigParser()
try:
config.read(path)
except:
logger.exception("Error while reading profile INI file from {path}".format(**locals()))
return None
arrayified_options = ["print_temperature", "filament_diameter", "start.gcode", "end.gcode"]
translated_options = dict(
inset0_speed="outer_shell_speed",
insetx_speed="inner_shell_speed",
layer0_width_factor="first_layer_width_factor",
simple_mode="follow_surface",
)
translated_options["start.gcode"] = "start_gcode"
translated_options["end.gcode"] = "end_gcode"
value_conversions = dict(
platform_adhesion={
"None": PlatformAdhesionTypes.NONE,
"Brim": PlatformAdhesionTypes.BRIM,
"Raft": PlatformAdhesionTypes.RAFT
},
support={
"None": SupportLocationTypes.NONE,
"Touching buildplate": SupportLocationTypes.TOUCHING_BUILDPLATE,
"Everywhere": SupportLocationTypes.EVERYWHERE
},
support_type={
"Lines": SupportTypes.LINES,
"Grid": SupportTypes.GRID
},
support_dual_extrusion={
"Both": SupportDualTypes.BOTH,
"First extruder": SupportDualTypes.FIRST,
"Second extruder": SupportDualTypes.SECOND
},
retraction_combing={
"Off": RetractionCombingTypes.OFF,
"All": RetractionCombingTypes.ALL,
"No Skin": RetractionCombingTypes.NO_SKIN
}
)
result = dict()
for section in config.sections():
if not section in ("profile", "alterations", "machine"):
continue
for option in config.options(section):
ignored = False
key = option
if section == "machine" and key != "gcode_flavor":
continue
# try to fetch the value in the correct type
try:
value = config.getboolean(section, option)
except:
# no boolean, try int
try:
value = config.getint(section, option)
except:
# no int, try float
try:
value = config.getfloat(section, option)
except:
# no float, use str
value = config.get(section, option)
index = None
for opt in arrayified_options:
# if there's a period, the index comes before it
optsplit = opt.split('.')
keysplit = key.split('.')
if key.startswith(optsplit[0]) and keysplit[1:] == optsplit[1:]:
if key == opt:
index = 0
else:
try:
# try to convert the target index, e.g. print_temperature2 => print_temperature[1]
index = int(keysplit[0][len(optsplit[0]):]) - 1
except ValueError:
# ignore entries for which that fails
ignored = True
key = opt
break
if ignored:
continue
if key in translated_options:
# if the key has to be translated to a new value, do that now
key = translated_options[key]
if key in value_conversions and value in value_conversions[key]:
value = value_conversions[key][value]
if key == "gcode_flavor":
value = parse_gcode_flavor(value)
if index is not None:
# if we have an array to fill, make sure the target array exists and has the right size
if not key in result:
result[key] = []
if len(result[key]) <= index:
for n in range(index - len(result[key]) + 1):
result[key].append(None)
result[key][index] = value
else:
# just set the value if there's no array to fill
result[key] = value
# merge it with our default settings, the imported profile settings taking precedence
return cls.merge_profile(result)
@classmethod
def merge_profile(cls, profile, overrides=None):
result = dict()
for key in defaults.keys():
r = cls.merge_profile_key(key, profile, overrides=overrides)
if r is not None:
result[key] = r
return result
@classmethod
def merge_profile_key(cls, key, profile, overrides=None):
profile_value = None
override_value = None
if not key in defaults:
return None
import copy
result = copy.deepcopy(defaults[key])
if key in profile:
profile_value = profile[key]
if overrides and key in overrides:
override_value = overrides[key]
if profile_value is None and override_value is None:
# neither override nor profile, no need to handle this key further
return None
if key in ("filament_diameter", "print_temperature", "start_gcode", "end_gcode"):
# the array fields need some special treatment. Basically something like this:
#
# override_value: [None, "b"]
# profile_value : ["a" , None, "c"]
# default_value : ["d" , "e" , "f", "g"]
#
# should merge to something like this:
#
# ["a" , "b" , "c", "g"]
#
# So override > profile > default, if neither override nor profile value are available
# the default value should just be left as is
for x in range(len(result)):
if override_value is not None and x < len(override_value) and override_value[x] is not None:
# we have an override value for this location, so we use it
result[x] = override_value[x]
elif profile_value is not None and x < len(profile_value) and profile_value[x] is not None:
# we have a profile value for this location, so we use it
result[x] = profile_value[x]
else:
# just change the result value to the override_value if available, otherwise to the profile_value if
# that is given, else just leave as is
if override_value is not None:
result = override_value
elif profile_value is not None:
result = profile_value
return result
def __init__(self, profile, printer_profile, posX, posY, overrides=None):
self._profile = self.__class__.merge_profile(profile, overrides=overrides)
self._printer_profile = printer_profile
self._posX = posX
self._posY = posY
def profile(self):
import copy
return copy.deepcopy(self._profile)
def get(self, key):
if key in ("machine_width", "machine_depth", "machine_center_is_zero"):
if key == "machine_width":
return self._printer_profile["volume"]["width"]
elif key == "machine_depth":
return self._printer_profile["volume"]["depth"]
elif key == "machine_height":
return self._printer_profile["volume"]["height"]
elif key == "machine_center_is_zero":
return self._printer_profile["volume"]["formFactor"] == "circular" or self._printer_profile["volume"]["origin"] == "center"
else:
return None
elif key == "extruder_amount":
return self._printer_profile["extruder"]["count"]
elif key.startswith("extruder_offset_"):
extruder_offsets = self._printer_profile["extruder"]["offsets"]
match = Profile.regex_extruder_offset.match(key)
if not match:
return 0.0
axis, number = match.groups()
axis = axis.lower()
number = int(number)
if not axis in ("x", "y"):
return 0.0
if number >= len(extruder_offsets):
return 0.0
if axis == "x":
return extruder_offsets[number][0]
elif axis == "y":
return extruder_offsets[number][1]
else:
return 0.0
elif key == "has_heated_bed":
return self._printer_profile["heatedBed"]
elif key.startswith("filament_diameter"):
match = Profile.regex_filament_diameter.match(key)
if not match:
return 0.0
diameters = self._get("filament_diameter")
if not match.group(1):
return diameters[0]
index = int(match.group(1)) - 1
if index >= len(diameters) or index < 0:
return 0.0
return diameters[index]
elif key.startswith("print_temperature"):
match = Profile.regex_print_temperature.match(key)
if not match:
return 0.0
temperatures = self._get("print_temperature")
if not match.group(1):
return temperatures[0]
index = int(match.group(1)) - 1
if index >= len(temperatures) or index < 0:
return 0.0
return temperatures[index]
else:
return self._get(key)
def _get(self, key):
if key in self._profile:
return self._profile[key]
elif key in defaults:
return defaults[key]
else:
return None
def get_int(self, key, default=None):
value = self.get(key)
if value is None:
return default
try:
return int(value)
except ValueError:
return default
def get_float(self, key, default=None):
value = self.get(key)
if value is None:
return default
if isinstance(value, (str, unicode, basestring)):
value = value.replace(",", ".").strip()
try:
return float(value)
except ValueError:
return default
def get_boolean(self, key, default=None):
value = self.get(key)
if value is None:
return default
if isinstance(value, bool):
return value
elif isinstance(value, (str, unicode, basestring)):
return value.lower() == "true" or value.lower() == "yes" or value.lower() == "on" or value == "1"
elif isinstance(value, (int, float)):
return value > 0
else:
return value == True
def get_microns(self, key, default=None):
value = self.get_float(key, default=None)
if value is None:
return default
return int(value * 1000)
def get_gcode_template(self, key, extruder_count=1):
if key in self._profile:
gcode = self._profile[key]
else:
gcode = defaults[key]
if key in ("start_gcode", "end_gcode"):
return gcode[extruder_count-1]
else:
return gcode
def get_profile_string(self):
import base64
import zlib
import copy
profile = copy.deepcopy(defaults)
profile.update(self._profile)
for key in ("print_temperature", "print_temperature2", "print_temperature3", "print_temperature4",
"filament_diameter", "filament_diameter2", "filament_diameter3", "filament_diameter4"):
profile[key] = self.get(key)
result = []
for k, v in profile.items():
if isinstance(v, (str, unicode)):
result.append("{k}={v}".format(k=k, v=v.encode("utf-8")))
else:
result.append("{k}={v}".format(k=k, v=v))
return base64.b64encode(zlib.compress("\b".join(result), 9))
def replaceTagMatch(self, m):
import time
pre = m.group(1)
tag = m.group(2)
if tag == 'time':
return pre + time.strftime('%H:%M:%S')
if tag == 'date':
return pre + time.strftime('%d-%m-%Y')
if tag == 'day':
return pre + ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'][int(time.strftime('%w'))]
if tag == 'profile_string':
return pre + 'CURA_OCTO_PROFILE_STRING:%s' % (self.get_profile_string())
if pre == 'F' and tag == 'max_z_speed':
f = self.get_float("travel_speed") * 60
elif pre == 'F' and tag in ['print_speed', 'retraction_speed', 'travel_speed', 'bottom_layer_speed', 'cool_min_feedrate']:
f = self.get_float(tag) * 60
elif self.get(tag):
f = self.get(tag)
else:
return '%s?%s?' % (pre, tag)
if (f % 1) == 0:
return pre + str(int(f))
return pre + str(f)
def get_gcode(self, key, extruder_count=1):
prefix = ""
postfix = ""
if self.get("gcode_flavor") == GcodeFlavors.ULTIGCODE:
if key == "end_gcode":
return "M25 ;Stop reading from this point on.\n;CURA_OCTO_PROFILE_STRING:%s\n" % (self.get_profile_string())
return ""
if key == "start_gcode":
contents = self.get_gcode_template("start_gcode", extruder_count=extruder_count)
prefix += self.get_start_gcode_prefix(contents)
else:
contents = self.get_gcode_template(key, extruder_count=extruder_count)
return unicode(prefix + re.sub("(.)\{([^\}]*)\}", self.replaceTagMatch, contents).rstrip() + '\n' + postfix).strip().encode('utf-8') + '\n'
def get_start_gcode_prefix(self, contents):
extruder_count = self.get_int("extruder_amount")
prefix = ""
gcode_parameter_key = "S"
if self.get("gcode_flavor") == GcodeFlavors.MACH3:
gcode_parameter_key = "P"
e_steps = self.get_float("steps_per_e")
if e_steps > 0:
prefix += "M92 E{e_steps}\n".format(e_steps=e_steps)
temp = self.get_float("print_temperature")
bed_temp = 0
if self.get_boolean("has_heated_bed"):
bed_temp = self.get_float("print_bed_temperature")
include_bed_temp = bed_temp > 0 and not "{print_bed_temperature}" in Profile.regex_strip_comments.sub("", contents)
if include_bed_temp:
prefix += "M140 {param}{bed_temp}\n".format(param=gcode_parameter_key, bed_temp=bed_temp)
if temp > 0 and not "{print_temperature}" in Profile.regex_strip_comments.sub("", contents):
if extruder_count > 0:
def temp_line(temp, extruder, param, template):
t = temp
if extruder > 0:
print_temp = self.get_float("print_temperature%d" % (extruder + 1))
if print_temp > 0:
t = print_temp
return template.format(extruder=extruder, temp=t, param=param)
prefix_preheat = ""
prefix_waitheat = ""
for n in range(0, extruder_count):
if n > 0:
prefix_preheat += temp_line(temp, n, gcode_parameter_key, "M104 T{extruder} {param}{temp}\n")
prefix_waitheat += temp_line(temp, n, gcode_parameter_key, "M109 T{extruder} {param}{temp}\n")
prefix += prefix_preheat + prefix_waitheat + "T0\n"
else:
prefix += "M109 {param}{temp}\n".format(param=gcode_parameter_key, temp=temp)
if include_bed_temp:
prefix += "M190 {param}{bed_temp}\n".format(param=gcode_parameter_key, bed_temp=bed_temp)
return prefix
def calculate_edge_width_and_line_count(self):
wall_thickness = self.get_float("wall_thickness")
nozzle_size = self._printer_profile["extruder"]["nozzleDiameter"]
if self.get_boolean("spiralize") or self.get_boolean("follow_surface"):
return wall_thickness, 1
if wall_thickness < 0.01:
return nozzle_size, 0
if wall_thickness < nozzle_size:
return wall_thickness, 1
edge_width = None
line_count = int(wall_thickness / (nozzle_size - 0.0001))
if line_count < 1:
edge_width = nozzle_size
line_count = 1
line_width = wall_thickness / line_count
line_width_alt = wall_thickness / (line_count + 1)
if line_width > nozzle_size * 1.5:
return line_width_alt, line_count + 1
if not edge_width:
edge_width = line_width
return edge_width, line_count
def calculate_solid_layer_count(self):
    """Return how many layers are needed to reach the configured solid shell thickness."""
    import math
    layer = self.get_float("layer_height")
    solid = self.get_float("solid_layer_thickness")
    # Guard against division by a zero layer height.
    if layer == 0.0:
        return 1
    # Epsilon avoids rounding an exact multiple up to an extra layer.
    return int(math.ceil((solid - 0.0001) / layer))
def calculate_minimal_extruder_count(self):
    """Smallest number of extruders this job actually requires (1 or 2)."""
    if self.get("extruder_amount") < 2:
        return 1
    # A second extruder is only mandatory when support material
    # is configured to be printed with it.
    if self.get("support") == SupportLocationTypes.NONE:
        return 1
    if self.get("support_dual_extrusion") == SupportDualTypes.SECOND:
        return 2
    return 1
def get_pos_x(self):
    """X coordinate (mm) to place the model at; defaults to the bed centre."""
    if self._posX is not None:
        try:
            return int(float(self._posX))
        except ValueError:
            pass  # unparsable override -- fall through to the default
    if self.get_boolean("machine_center_is_zero"):
        return 0.0
    return int(self.get_float("machine_width") / 2.0)
def get_pos_y(self):
    """Y coordinate (mm) to place the model at; defaults to the bed centre."""
    if self._posY is not None:
        try:
            return int(float(self._posY))
        except ValueError:
            pass  # unparsable override -- fall through to the default
    if self.get_boolean("machine_center_is_zero"):
        return 0.0
    return int(self.get_float("machine_depth") / 2.0)
def convert_to_engine(self, used_extruders=1):
    """Flatten this profile into the settings dict consumed by CuraEngine.

    used_extruders -- number of extruders the current job actually uses;
                      the effective count is never below the minimum
                      required by the support configuration.

    Returns a dict mapping CuraEngine setting names to values
    (lengths in microns, speeds in mm/s).
    """
    edge_width, line_count = self.calculate_edge_width_and_line_count()
    solid_layer_count = self.calculate_solid_layer_count()
    extruder_count = self.get_int("extruder_amount")
    minimal_extruder_count = self.calculate_minimal_extruder_count()
    actual_extruder_count = max(minimal_extruder_count, used_extruders)
    settings = {
        "layerThickness": self.get_microns("layer_height"),
        "initialLayerThickness": self.get_microns("bottom_thickness") if self.get_float("bottom_thickness") > 0.0 else self.get_microns("layer_height"),
        "filamentDiameter": self.get_microns("filament_diameter"),
        "filamentFlow": self.get_int("filament_flow"),
        "extrusionWidth": edge_width * 1000,
        "layer0extrusionWidth": int(edge_width * self.get_float("first_layer_width_factor") / 100 * 1000),
        "insetCount": line_count,
        "downSkinCount": solid_layer_count if self.get_boolean("solid_bottom") else 0,
        "upSkinCount": solid_layer_count if self.get_boolean("solid_top") else 0,
        "infillOverlap": self.get_int("fill_overlap"),
        "perimeterBeforeInfill": 1 if self.get_boolean("perimeter_before_infill") else 0,
        "initialSpeedupLayers": int(4),
        "initialLayerSpeed": self.get_int("bottom_layer_speed"),
        "printSpeed": self.get_int("print_speed"),
        # Zero/unset specialized speeds fall back to the base print speed.
        "infillSpeed": self.get_int("infill_speed") if self.get_int("infill_speed") > 0 else self.get_int("print_speed"),
        "inset0Speed": self.get_int("outer_shell_speed") if self.get_int("outer_shell_speed") > 0 else self.get_int("print_speed"),
        "insetXSpeed": self.get_int("inner_shell_speed") if self.get_int("inner_shell_speed") > 0 else self.get_int("print_speed"),
        # BUGFIX: the condition previously read "... > 0 > 0", a chained
        # comparison that is always False, so the configured solid-area
        # speed was silently ignored in favor of the print speed.
        "skinSpeed": self.get_int("solidarea_speed") if self.get_int("solidarea_speed") > 0 else self.get_int("print_speed"),
        "moveSpeed": self.get_int("travel_speed"),
        "fanSpeedMin": self.get_int("fan_speed") if self.get_boolean("fan_enabled") else 0,
        "fanSpeedMax": self.get_int("fan_speed_max") if self.get_boolean("fan_enabled") else 0,
        # supportAngle of -1 disables support generation entirely.
        "supportAngle": int(-1) if self.get("support") == SupportLocationTypes.NONE else self.get_int("support_angle"),
        "supportEverywhere": int(1) if self.get("support") == SupportLocationTypes.EVERYWHERE else int(0),
        "supportLineDistance": int(100 * edge_width * 1000 / self.get_float("support_fill_rate")) if self.get_float("support_fill_rate") > 0 else -1,
        "supportXYDistance": int(1000 * self.get_float("support_xy_distance")),
        "supportZDistance": int(1000 * self.get_float("support_z_distance")),
        "supportExtruder": 0 if self.get("support_dual_extrusion") == SupportDualTypes.FIRST else (1 if self.get("support_dual_extrusion") == SupportDualTypes.SECOND and minimal_extruder_count > 1 else -1),
        "retractionAmount": self.get_microns("retraction_amount") if self.get_boolean("retraction_enable") else 0,
        "retractionSpeed": self.get_int("retraction_speed"),
        "retractionMinimalDistance": self.get_microns("retraction_min_travel"),
        "retractionAmountExtruderSwitch": self.get_microns("retraction_dual_amount"),
        "retractionZHop": self.get_microns("retraction_hop"),
        "minimalExtrusionBeforeRetraction": self.get_microns("retraction_minimal_extrusion"),
        "multiVolumeOverlap": self.get_microns("overlap_dual"),
        "objectSink": max(0, self.get_microns("object_sink")),
        "minimalLayerTime": self.get_int("cool_min_layer_time"),
        "minimalFeedrate": self.get_int("cool_min_feedrate"),
        "coolHeadLift": 1 if self.get_boolean("cool_head_lift") else 0,
        "enableCombing": 1 if self.get("retraction_combing") == RetractionCombingTypes.ALL else (2 if self.get("retraction_combing") == RetractionCombingTypes.NO_SKIN else 0),

        # model positioning
        "posx": self.get_pos_x() * 1000,  # in microns
        "posy": self.get_pos_y() * 1000,  # in microns

        # gcodes
        "startCode": self.get_gcode("start_gcode", extruder_count=actual_extruder_count),
        "endCode": self.get_gcode("end_gcode", extruder_count=actual_extruder_count),
        "preSwitchExtruderCode": self.get_gcode("preSwitchExtruder_gcode", extruder_count=actual_extruder_count),
        "postSwitchExtruderCode": self.get_gcode("postSwitchExtruder_gcode", extruder_count=actual_extruder_count),

        # fixing
        "fixHorrible": 0,
    }

    # Per-extruder XY offsets; extruder 0 is the reference and has none.
    for extruder in range(1, extruder_count):
        for axis in ("x", "y"):
            settings["extruderOffset[{extruder}].{axis}".format(extruder=extruder, axis=axis.upper())] = self.get("extruder_offset_{axis}{extruder}".format(extruder=extruder, axis=axis.lower()))

    # First layer on which the fan runs at full speed.
    fanFullHeight = self.get_microns("fan_full_height")
    settings["fanFullOnLayerNr"] = (fanFullHeight - settings["initialLayerThickness"] - 1) // settings["layerThickness"] + 1
    if settings["fanFullOnLayerNr"] < 0:
        settings["fanFullOnLayerNr"] = 0

    if self.get("support_type") == SupportTypes.LINES:
        settings["supportType"] = 1

    # infill
    if self.get_float("fill_density") == 0:
        settings["sparseInfillLineDistance"] = -1
    elif self.get_float("fill_density") == 100:
        settings["sparseInfillLineDistance"] = settings["extrusionWidth"]
        # 100% infill is realized by treating every layer as solid skin.
        settings["downSkinCount"] = 10000
        settings["upSkinCount"] = 10000
    else:
        settings["sparseInfillLineDistance"] = int(100 * edge_width * 1000 / self.get_float("fill_density"))

    # brim/raft/skirt
    if self.get("platform_adhesion") == PlatformAdhesionTypes.BRIM:
        settings["skirtDistance"] = 0
        settings["skirtLineCount"] = self.get_int("brim_line_count")
    elif self.get("platform_adhesion") == PlatformAdhesionTypes.RAFT:
        settings["skirtDistance"] = 0
        settings["skirtLineCount"] = 0
        settings["raftMargin"] = self.get_microns("raft_margin")
        settings["raftLineSpacing"] = self.get_microns("raft_line_spacing")
        settings["raftBaseThickness"] = self.get_microns("raft_base_thickness")
        settings["raftBaseLinewidth"] = self.get_microns("raft_base_linewidth")
        settings["raftInterfaceThickness"] = self.get_microns("raft_interface_thickness")
        settings["raftInterfaceLinewidth"] = self.get_microns("raft_interface_linewidth")
        settings["raftInterfaceLineSpacing"] = self.get_microns("raft_interface_linewidth") * 2
        settings["raftAirGapLayer0"] = self.get_microns("raft_airgap") + self.get_microns("raft_airgap_all")
        settings["raftAirGap"] = self.get_microns("raft_airgap_all")
        settings["raftBaseSpeed"] = self.get_int("bottom_layer_speed")
        settings["raftFanSpeed"] = 0
        settings["raftSurfaceThickness"] = self.get_microns("raft_surface_thickness")
        settings["raftSurfaceLinewidth"] = self.get_microns("raft_surface_linewidth")
        settings["raftSurfaceLineSpacing"] = self.get_microns("raft_surface_linewidth")
        settings["raftSurfaceLayers"] = self.get_int("raft_surface_layers")
        settings["raftSurfaceSpeed"] = self.get_int("bottom_layer_speed")
    else:
        settings["skirtDistance"] = self.get_microns("skirt_gap")
        settings["skirtLineCount"] = self.get_int("skirt_line_count")
        settings["skirtMinLength"] = self.get_microns("skirt_minimal_length")

    # fixing: bitmask of mesh-repair strategies
    if self.get_boolean("fix_horrible_union_all_type_a"):
        settings["fixHorrible"] |= 0x01
    if self.get_boolean("fix_horrible_union_all_type_b"):
        settings["fixHorrible"] |= 0x02
    if self.get_boolean("fix_horrible_use_open_bits"):
        settings["fixHorrible"] |= 0x10
    if self.get_boolean("fix_horrible_extensive_stitching"):
        settings["fixHorrible"] |= 0x04

    # Never hand the engine a non-positive layer thickness.
    if settings["layerThickness"] <= 0:
        settings["layerThickness"] = 1000

    # gcode flavor
    settings["gcodeFlavor"] = self.get("gcode_flavor")[0]

    # extras
    if self.get_boolean("spiralize"):
        settings["spiralizeMode"] = 1
    if self.get_boolean("follow_surface"):
        settings["simpleMode"] = 1

    # dual extrusion
    if self.get_boolean("wipe_tower") and extruder_count > 1:
        import math
        # Square tower whose footprint yields the requested purge volume per layer.
        settings["wipeTowerSize"] = int(math.sqrt(self.get_float("wipe_tower_volume") * 1000 * 1000 * 1000 / settings["layerThickness"]))
    if self.get_boolean("ooze_shield"):
        settings["enableOozeShield"] = 1

    return settings
def parse_gcode_flavor(value):
    """Map a free-form gcode flavor description onto a GcodeFlavors constant.

    Matching is case-insensitive and substring based; anything not
    recognised falls back to plain RepRap.
    """
    value = value.lower()
    # NOTE(review): "volumatric" looks like deliberate tolerance for a
    # historical misspelling in existing profiles -- confirm before removing.
    if "reprap" in value and ("volume" in value or "volumatric" in value):
        return GcodeFlavors.REPRAP_VOLUME
    elif "ultigcode" in value:
        return GcodeFlavors.ULTIGCODE
    elif "makerbot" in value:
        return GcodeFlavors.MAKERBOT
    elif "bfb" in value:
        return GcodeFlavors.BFB
    elif "mach3" in value:
        return GcodeFlavors.MACH3
    else:
        return GcodeFlavors.REPRAP
| agpl-3.0 |
y12uc231/edx-platform | lms/lib/comment_client/user.py | 144 | 6343 | from .utils import merge_dict, perform_request, CommentClientRequestError
import models
import settings
class User(models.Model):
    """Client-side model for a user of the external comments service.

    Wraps the REST endpoints for following/unfollowing sources, voting on
    threads and comments, and listing a user's active or subscribed threads.
    All network access goes through ``perform_request``.
    """

    accessible_fields = [
        'username', 'follower_ids', 'upvoted_ids', 'downvoted_ids',
        'id', 'external_id', 'subscribed_user_ids', 'children', 'course_id',
        'group_id', 'subscribed_thread_ids', 'subscribed_commentable_ids',
        'subscribed_course_ids', 'threads_count', 'comments_count',
        'default_sort_key'
    ]

    updatable_fields = ['username', 'external_id', 'default_sort_key']
    initializable_fields = updatable_fields
    metric_tag_fields = ['course_id']

    base_url = "{prefix}/users".format(prefix=settings.PREFIX)
    default_retrieve_params = {'complete': True}
    type = 'user'

    @classmethod
    def from_django_user(cls, user):
        """Build a comments-service User mirroring the given Django user."""
        return cls(id=str(user.id),
                   external_id=str(user.id),
                   username=user.username)

    def follow(self, source):
        """Subscribe this user to updates from ``source``."""
        params = {'source_type': source.type, 'source_id': source.id}
        perform_request(
            'post',
            _url_for_subscription(self.id),
            params,
            metric_action='user.follow',
            metric_tags=self._metric_tags + ['target.type:{}'.format(source.type)],
        )

    def unfollow(self, source):
        """Remove this user's subscription to ``source``."""
        params = {'source_type': source.type, 'source_id': source.id}
        perform_request(
            'delete',
            _url_for_subscription(self.id),
            params,
            metric_action='user.unfollow',
            metric_tags=self._metric_tags + ['target.type:{}'.format(source.type)],
        )

    def vote(self, voteable, value):
        """Cast this user's vote of ``value`` on a thread or comment.

        Raises CommentClientRequestError for any other voteable type.
        """
        if voteable.type == 'thread':
            url = _url_for_vote_thread(voteable.id)
        elif voteable.type == 'comment':
            url = _url_for_vote_comment(voteable.id)
        else:
            raise CommentClientRequestError("Can only vote / unvote for threads or comments")
        params = {'user_id': self.id, 'value': value}
        response = perform_request(
            'put',
            url,
            params,
            metric_action='user.vote',
            metric_tags=self._metric_tags + ['target.type:{}'.format(voteable.type)],
        )
        # The service returns the updated voteable; refresh it in place.
        voteable._update_from_response(response)

    def unvote(self, voteable):
        """Remove this user's vote from a thread or comment.

        Raises CommentClientRequestError for any other voteable type.
        """
        if voteable.type == 'thread':
            url = _url_for_vote_thread(voteable.id)
        elif voteable.type == 'comment':
            url = _url_for_vote_comment(voteable.id)
        else:
            raise CommentClientRequestError("Can only vote / unvote for threads or comments")
        params = {'user_id': self.id}
        response = perform_request(
            'delete',
            url,
            params,
            metric_action='user.unvote',
            metric_tags=self._metric_tags + ['target.type:{}'.format(voteable.type)],
        )
        voteable._update_from_response(response)

    def active_threads(self, query_params=None):
        """Return (collection, page, num_pages) of threads the user is active in.

        BUGFIX: ``query_params`` previously defaulted to a shared mutable
        dict ({}), a classic Python pitfall; it now defaults to None.
        """
        if query_params is None:
            query_params = {}
        if not self.course_id:
            raise CommentClientRequestError("Must provide course_id when retrieving active threads for the user")
        url = _url_for_user_active_threads(self.id)
        params = {'course_id': self.course_id.to_deprecated_string()}
        params = merge_dict(params, query_params)
        response = perform_request(
            'get',
            url,
            params,
            metric_action='user.active_threads',
            metric_tags=self._metric_tags,
            paged_results=True,
        )
        return response.get('collection', []), response.get('page', 1), response.get('num_pages', 1)

    def subscribed_threads(self, query_params=None):
        """Return (collection, page, num_pages) of threads the user follows.

        BUGFIX: ``query_params`` previously defaulted to a shared mutable
        dict ({}); it now defaults to None.
        """
        if query_params is None:
            query_params = {}
        if not self.course_id:
            raise CommentClientRequestError("Must provide course_id when retrieving subscribed threads for the user")
        url = _url_for_user_subscribed_threads(self.id)
        params = {'course_id': self.course_id.to_deprecated_string()}
        params = merge_dict(params, query_params)
        response = perform_request(
            'get',
            url,
            params,
            metric_action='user.subscribed_threads',
            metric_tags=self._metric_tags,
            paged_results=True
        )
        return response.get('collection', []), response.get('page', 1), response.get('num_pages', 1)

    def _retrieve(self, *args, **kwargs):
        """Fetch this user's attributes from the comments service.

        On a 404 the user is first saved (re-synced) and the fetch is
        retried once; any other error propagates.
        """
        url = self.url(action='get', params=self.attributes)
        retrieve_params = self.default_retrieve_params.copy()
        retrieve_params.update(kwargs)
        if self.attributes.get('course_id'):
            retrieve_params['course_id'] = self.course_id.to_deprecated_string()
        if self.attributes.get('group_id'):
            retrieve_params['group_id'] = self.group_id
        try:
            response = perform_request(
                'get',
                url,
                retrieve_params,
                metric_action='model.retrieve',
                metric_tags=self._metric_tags,
            )
        except CommentClientRequestError as e:
            if e.status_code == 404:
                # attempt to gracefully recover from a previous failure
                # to sync this user to the comments service.
                self.save()
                response = perform_request(
                    'get',
                    url,
                    retrieve_params,
                    metric_action='model.retrieve',
                    metric_tags=self._metric_tags,
                )
            else:
                raise
        self._update_from_response(response)
def _url_for_vote_comment(comment_id):
    """Endpoint for casting or clearing a vote on a comment."""
    return "%s/comments/%s/votes" % (settings.PREFIX, comment_id)
def _url_for_vote_thread(thread_id):
    """Endpoint for casting or clearing a vote on a thread."""
    return "%s/threads/%s/votes" % (settings.PREFIX, thread_id)
def _url_for_subscription(user_id):
    """Endpoint for managing a user's subscriptions."""
    return "%s/users/%s/subscriptions" % (settings.PREFIX, user_id)
def _url_for_user_active_threads(user_id):
    """Endpoint listing the threads a user has been active in."""
    return "%s/users/%s/active_threads" % (settings.PREFIX, user_id)
def _url_for_user_subscribed_threads(user_id):
    """Endpoint listing the threads a user is subscribed to."""
    return "%s/users/%s/subscribed_threads" % (settings.PREFIX, user_id)
| agpl-3.0 |
sidnarayanan/RelativisticML | scripts/compressorPCA.py | 1 | 6195 | #!/usr/bin/python
import cPickle as pickle
import numpy as np
import ROOTInterface.Import
import ROOTInterface.Export
from sys import argv
nEvents = -1  # number of events to load per sample; -1 means "all available"
doMultiThread = False  # toggle the multithreaded ROOT tree import path
def divide(a):
    """Return the ratio a[0] / a[1] of the first two entries of *a*."""
    numerator = a[0]
    denominator = a[1]
    return numerator / denominator
def bin(a,b,m):
    """Histogram-bin index of a[0] for bin width *b*, capped at index *m*.

    NOTE: intentionally shadows the ``bin`` builtin within this script.
    """
    index = int(a[0] / b)
    return index if index < m else m
def angleTruncate(a):
    """Clamp the angle stored in a[0] into [0, 6.28] (approximately 2*pi)."""
    value = a[0]
    if value <= 0:
        return 0
    if value >= 6.28:
        return 6.28
    return value
print "starting!"
rng = np.random.RandomState()  # NOTE(review): appears unused below -- confirm before removing
compressedName = 'compressedPCAWindow'
# Discriminating variables read from the "disc" friend tree.
listOfRawVars = ["xformed_logchi","xformed_QGTag","xformed_QjetVol","xformed_groomedIso","xformed_sjqgtag0","xformed_sjqgtag1","xformed_sjqgtag2","xformed_tau32"]
listOfComputedVars = [] # third property is short name
listOfCuts = []
nVars = len(listOfComputedVars) + len(listOfRawVars)
listOfRawVarsNames = []
for v in listOfRawVars:
    listOfRawVarsNames.append(v)
for f,v,n in listOfComputedVars:
    listOfRawVarsNames.append(n)
# Command line: ptLow ptHigh etaHigh jetAlgo -- defines the kinematic window.
if len(argv)>1:
    ptLow = float(argv[1])
    ptHigh = float(argv[2])
    etaHigh = float(argv[3])
    jetAlgo = argv[4]
    listOfCuts.append((lambda eta: np.abs(eta[0]) < etaHigh, ['eta']))
    listOfCuts.append((lambda pt: pt[0] > ptLow, ['pt']))
    listOfCuts.append((lambda pt: pt[0] < ptHigh, ['pt']))
    # Soft-drop mass window around the top mass; wider for CA15 jets.
    if jetAlgo=='CA15':
        listOfCuts.append((lambda m: np.abs(m[0]-172.5) < 30., ['massSoftDrop']))
    else:
        listOfCuts.append((lambda m: np.abs(m[0]-172.5) < 20., ['massSoftDrop']))
    # Encode the selection window in the output file name.
    compressedName += "_%i_%i_%.1f"%(int(ptLow),int(ptHigh),etaHigh)
    compressedName = compressedName.replace('.','p')
    print '%f < pT < %f && |eta| < %f, %s'%(ptLow,ptHigh,etaHigh,jetAlgo)
# dataPath = '/home/sid/scratch/data/topTagging_SDTopmass150/'
dataPath = '/home/snarayan/cms/root/topTagging_%s/'%(jetAlgo)
# dataPath = '/home/sid/scratch/data/topTagging_%s/'%(jetAlgo)
# first tagging variables
sigImporter = ROOTInterface.Import.TreeImporter(dataPath+'signal.root','jets')
for v in listOfRawVars:
    sigImporter.addVar(v)
for v in listOfComputedVars:
    sigImporter.addComputedVar(v)
for c in listOfCuts:
    sigImporter.addCut(c)
bgImporter = sigImporter.clone(dataPath+'qcd.root','jets')
sigImporter.addFriend('disc') # to get xformed variables
bgImporter.addFriend('disc')
print "finished setting up TreeImporters"
# Load signal (label 1) and background (label 0) samples.
if doMultiThread:
    sigX,sigY = sigImporter.loadTreeMultithreaded(1,nEvents)
else:
    sigX,sigY = sigImporter.loadTree(1,nEvents)
nSig = sigY.shape[0]
print '\tloaded %i signal'%(nSig)
if doMultiThread:
    bgX,bgY = bgImporter.loadTreeMultithreaded(0,nEvents)
else:
    bgX,bgY = bgImporter.loadTree(0,nEvents)
nBg = bgY.shape[0]
print '\tloaded %i background'%(nBg)
dataX = np.vstack([sigX,bgX])
dataY = np.hstack([sigY,bgY])
print 'finished loading dataX and dataY: %i events'%(dataY.shape[0])
# Load the pre-computed PCA eigenvectors for this kinematic window;
# each pickle entry is an (eigenvalue, eigenvector) pair.
V = np.empty([nVars,nVars])
with open(dataPath+'/pca_%i_%i_%s.pkl'%(ptLow,ptHigh,jetAlgo),'rb') as pcaFile:
    eigs = pickle.load(pcaFile)
    for i in xrange(nVars):
        a,v = eigs[i]
        print a
        V[:,i] = v
truncV = V[:,1:] # kill leading component
dataX = np.dot(dataX,truncV)
nVars -= 1
# longSuffix = ('_ptGT%.1fANDptLT%.1fANDabsetaLT%.1f'%(ptLow,ptHigh,etaHigh)).replace('.','p')
# alphas = np.empty(nVars)
# V = np.empty([nVars,nVars])
# with open(dataPath+'/pca.txt') as pcaFile:
#     for line in pcaFile:
#         if line.find(longSuffix) >= 0:
#             print line
#             ll = line.split()
#             if ll[0]=='alpha':
#                 alphas[int(ll[1])] = float(ll[-1])
#             else:
#                 for i in xrange(nVars):
#                     # print i,ll[3+i]
#                     V[i,int(ll[1])] = float(ll[3+i])
# truncV = V[:,1:] # kill leading component
# dataX = np.dot(dataX,truncV)
# nVars -= 1
# Standardize each input column to zero mean and unit variance.
mu = dataX.mean(0)
sigma = dataX.std(0)
for i in xrange(sigma.shape[0]):
    # for constant rows, do not scale
    if not sigma[i]:
        sigma[i] = 1
        mu[i] = 0
dataX = (dataX - mu)/sigma
print "sample mu:",mu
print "sample sigma:",sigma
# now kinematic variables - mass, pt, eta, weight(?)
# sigImporter.resetVars()
# bgImporter.resetVars()
# sigImporter.resetCounter()
# bgImporter.resetCounter()
# def massBin(a):
#     return bin(a,20,250)
# sigImporter.addVar('massSoftDrop')
# sigImporter.addVar('pt')
# sigImporter.addVar('eta')
# bgImporter.addVar('massSoftDrop')
# bgImporter.addVar('pt')
# bgImporter.addVar('eta')
# for c in listOfCuts:
#     sigImporter.addCut(c)
#     bgImporter.addCut(c)
# if doMultiThread:
#     sigKinematics = sigImporter.loadTreeMultithreaded(0,nEvents)[0]
#     bgKinematics = bgImporter.loadTreeMultithreaded(0,nEvents)[0]
#     kinematics = np.vstack([sigKinematics,bgKinematics])
# else:
#     sigKinematics = sigImporter.loadTree(0,nEvents)[0]
#     bgKinematics = bgImporter.loadTree(0,nEvents)[0]
#     kinematics = np.vstack([sigKinematics,bgKinematics])
# # massBinned = np.array([massBin([m]) for m in kinematics[:,0]])
# # bgImporter.resetVars()
# # bgImporter.resetCounter()
# # bgImporter.addFriend('weights')
# # bgImporter.addVar('weight')
# # for c in listOfCuts:
# #     bgImporter.addCut(c)
# # bgWeights = bgImporter.loadTree(0,nEvents)[0][:,0]
# # sigWeights = sigY
# # weights = np.hstack([sigWeights,bgWeights])
# # print sigWeights.shape,bgWeights.shape,weights.shape
# # massBinned = np.array([massBin([m]) for m in kinematics[:,0]])
# print 'finished loading %i kinematics'%(kinematics.shape[0])
# print kinematics[:10]
# sigImporter = ROOTInterface.Import.TreeImporter(dataPath+'signal_weights_CA15fj.root','weights')
# bgImporter = ROOTInterface.Import.TreeImporter(dataPath+'qcd_weights_CA15fj.root','weights')
# sigImporter.addVar('weight')
# bgImporter.addVar('weight')
# weight = np.vstack([sigImporter.loadTree(0,nEvents)[0]*nBg,
#                     bgImporter.loadTree(0,nEvents)[0]]*nSig)
# Persist the full dataset plus a "small" companion with metadata only.
with open(dataPath+compressedName+".pkl",'wb') as pklFile:
    pickle.dump({'nSig':nSig, 'nBg':nBg,
                 'dataX':dataX,
                 'dataY':dataY,
                 # 'kinematics':kinematics, # for plotting
                 # 'weights':weights,
                 # 'massBinned':massBinned,
                 'mu':mu,
                 'sigma':sigma,
                 'vars':listOfRawVarsNames},pklFile,-1)
with open(dataPath+compressedName+"_small.pkl",'wb') as pklFile:
    pickle.dump({'nSig':nSig, 'nBg':nBg,
                 'mu':mu,
                 'sigma':sigma,
                 'vars':listOfRawVarsNames},pklFile,-1)
print 'done!'
| mit |
MyRobotLab/pyrobotlab | home/hairygael/InMoov2.minimalTorso.py | 1 | 2921 | #file : InMoov2.minimalTorso.py
# this will run with versions of MRL 1.0.107
# a very minimal script for InMoov
# although this script is very short you can still
# do voice control of a right Arm
# for any command which you say - you will be required to say a confirmation
# e.g. you say -> test stomach, InMoov will ask -> "Did you say test stomach?", you will need to
# respond with a confirmation ("yes","correct","yeah","ya")
from java.lang import String
from org.myrobotlab.service import Runtime
import urllib2
import os
# To set a directory
# Modify this line according to your directory and version of MRL
os.chdir("C:/myrobotlab/myrobotlab.1.0.107/audioFile/google/en_gb/audrey")
# the name of the local file
# remove the file if it already exist in the Audiofile directory
soundfilename="starting mouth.mp3";
# Best-effort download of the startup sound; failures only print a hint.
try:
    mp3file = urllib2.urlopen('http://www.inmoov.fr/wp-content/uploads/2015/05/starting-mouth.mp3')
    output = open(soundfilename,'wb')
    output.write(mp3file.read())
    output.close()
except IOError:
    print "Check access right on the directory"
except Exception:
    print "Can't get the sound File ! Check internet Connexion"
leftPort = "COM20" #modify port according to your board
# Create the main InMoov service with speech recognition and synthesis.
i01 = Runtime.createAndStart("i01", "InMoov")
i01.startEar()
mouth = Runtime.createAndStart("mouth","Speech")
i01.startMouth()
##############
torso = i01.startTorso("COM20") #modify port according to your board
# tweaking default torso settings
torso.topStom.setMinMax(0,180)
torso.topStom.map(0,180,67,110)
torso.midStom.setMinMax(0,180)
torso.midStom.map(0,180,60,120)
#torso.lowStom.setMinMax(0,180)
#torso.lowStom.map(0,180,60,110)
#torso.topStom.setRest(90)
#torso.midStom.setRest(90)
#torso.lowStom.setRest(90)
#################
# verbal commands
ear = i01.ear
ear.addCommand("attach everything", "i01", "attach")
ear.addCommand("disconnect everything", "i01", "detach")
ear.addCommand("attach torso", "i01.torso", "attach")
ear.addCommand("disconnect torso", "i01.torso", "detach")
ear.addCommand("rest", "python", "rest")
ear.addCommand("capture gesture", ear.getName(), "captureGesture")
ear.addCommand("manual", ear.getName(), "lockOutAllGrammarExcept", "voice control")
ear.addCommand("voice control", ear.getName(), "clearLock")
ear.addCommand("test your stomach", "python", "teststomach")
# NOTE(review): "addComfirmations" is presumably the actual (misspelled)
# MyRobotLab API method name -- confirm against the MRL version in use.
ear.addComfirmations("yes","correct","ya","yeah", "yes please", "yes of course")
ear.addNegations("no","wrong","nope","nah","no thank you", "no thanks")
ear.startListening()
def teststomach():
    """Demo gesture: sweep the torso servos through their range, one axis at a time."""
    i01.setTorsoSpeed(0.75,0.55,0.75)
    # (topStom, midStom, lowStom) target plus the pause after each move.
    sequence = [
        ((90, 90, 90), 2),
        ((45, 90, 90), 4),
        ((90, 90, 90), 2),
        ((135, 90, 90), 4),
        ((90, 90, 90), 2),
        ((90, 45, 90), 3),
        ((90, 135, 90), 3),
        ((90, 90, 45), 3),
        ((90, 90, 135), 3),
    ]
    for (top, mid, low), pause in sequence:
        i01.moveTorso(top, mid, low)
        sleep(pause)
| apache-2.0 |
iismd17/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
                           n_redundant=2, n_repeated=0, n_classes=8,
                           n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
# NOTE(review): StratifiedKFold(y, n_folds) is the pre-0.18 scikit-learn
# API from sklearn.cross_validation, matching the import above.
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
              scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
pedro2d10/SickRage-FR | lib/imdb/Person.py | 143 | 11509 | """
Person module (imdb package).
This module provides the Person class, used to store information about
a given person.
Copyright 2004-2010 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from copy import deepcopy
from imdb.utils import analyze_name, build_name, normalizeName, \
flatten, _Container, cmpPeople
class Person(_Container):
"""A Person.
Every information about a person can be accessed as:
personObject['information']
to get a list of the kind of information stored in a
Person object, use the keys() method; some useful aliases
are defined (as "biography" for the "mini biography" key);
see the keys_alias dictionary.
"""
# The default sets of information retrieved.
default_info = ('main', 'filmography', 'biography')
# Aliases for some not-so-intuitive keys.
keys_alias = {'biography': 'mini biography',
'bio': 'mini biography',
'aka': 'akas',
'also known as': 'akas',
'nick name': 'nick names',
'nicks': 'nick names',
'nickname': 'nick names',
'miscellaneouscrew': 'miscellaneous crew',
'crewmembers': 'miscellaneous crew',
'misc': 'miscellaneous crew',
'guest': 'notable tv guest appearances',
'guests': 'notable tv guest appearances',
'tv guest': 'notable tv guest appearances',
'guest appearances': 'notable tv guest appearances',
'spouses': 'spouse',
'salary': 'salary history',
'salaries': 'salary history',
'otherworks': 'other works',
"maltin's biography":
"biography from leonard maltin's movie encyclopedia",
"leonard maltin's biography":
"biography from leonard maltin's movie encyclopedia",
'real name': 'birth name',
'where are they now': 'where now',
'personal quotes': 'quotes',
'mini-biography author': 'imdb mini-biography by',
'biography author': 'imdb mini-biography by',
'genre': 'genres',
'portrayed': 'portrayed in',
'keys': 'keywords',
'trademarks': 'trade mark',
'trade mark': 'trade mark',
'trade marks': 'trade mark',
'trademark': 'trade mark',
'pictorials': 'pictorial',
'magazine covers': 'magazine cover photo',
'magazine-covers': 'magazine cover photo',
'tv series episodes': 'episodes',
'tv-series episodes': 'episodes',
'articles': 'article',
'keyword': 'keywords'}
# 'nick names'???
keys_tomodify_list = ('mini biography', 'spouse', 'quotes', 'other works',
'salary history', 'trivia', 'trade mark', 'news',
'books', 'biographical movies', 'portrayed in',
'where now', 'interviews', 'article',
"biography from leonard maltin's movie encyclopedia")
cmpFunct = cmpPeople
def _init(self, **kwds):
"""Initialize a Person object.
*personID* -- the unique identifier for the person.
*name* -- the name of the Person, if not in the data dictionary.
*myName* -- the nickname you use for this person.
*myID* -- your personal id for this person.
*data* -- a dictionary used to initialize the object.
*currentRole* -- a Character instance representing the current role
or duty of a person in this movie, or a Person
object representing the actor/actress who played
a given character in a Movie. If a string is
passed, an object is automatically build.
*roleID* -- if available, the characterID/personID of the currentRole
object.
*roleIsPerson* -- when False (default) the currentRole is assumed
to be a Character object, otherwise a Person.
*notes* -- notes about the given person for a specific movie
or role (e.g.: the alias used in the movie credits).
*accessSystem* -- a string representing the data access system used.
*titlesRefs* -- a dictionary with references to movies.
*namesRefs* -- a dictionary with references to persons.
*modFunct* -- function called returning text fields.
*billingPos* -- position of this person in the credits list.
"""
name = kwds.get('name')
if name and not self.data.has_key('name'):
self.set_name(name)
self.personID = kwds.get('personID', None)
self.myName = kwds.get('myName', u'')
self.billingPos = kwds.get('billingPos', None)
def _reset(self):
"""Reset the Person object."""
self.personID = None
self.myName = u''
self.billingPos = None
def _clear(self):
"""Reset the dictionary."""
self.billingPos = None
def set_name(self, name):
"""Set the name of the person."""
# XXX: convert name to unicode, if it's a plain string?
d = analyze_name(name, canonical=1)
self.data.update(d)
def _additional_keys(self):
"""Valid keys to append to the data.keys() list."""
addkeys = []
if self.data.has_key('name'):
addkeys += ['canonical name', 'long imdb name',
'long imdb canonical name']
if self.data.has_key('headshot'):
addkeys += ['full-size headshot']
return addkeys
def _getitem(self, key):
"""Handle special keys."""
if self.data.has_key('name'):
if key == 'name':
return normalizeName(self.data['name'])
elif key == 'canonical name':
return self.data['name']
elif key == 'long imdb name':
return build_name(self.data, canonical=0)
elif key == 'long imdb canonical name':
return build_name(self.data)
if key == 'full-size headshot' and self.data.has_key('headshot'):
return self._re_fullsizeURL.sub('', self.data.get('headshot', ''))
return None
def getID(self):
"""Return the personID."""
return self.personID
def __nonzero__(self):
"""The Person is "false" if the self.data does not contain a name."""
# XXX: check the name and the personID?
if self.data.has_key('name'): return 1
return 0
def __contains__(self, item):
"""Return true if this Person has worked in the given Movie,
or if the fiven Character was played by this Person."""
from Movie import Movie
from Character import Character
if isinstance(item, Movie):
for m in flatten(self.data, yieldDictKeys=1, scalar=Movie):
if item.isSame(m):
return 1
elif isinstance(item, Character):
for m in flatten(self.data, yieldDictKeys=1, scalar=Movie):
if item.isSame(m.currentRole):
return 1
return 0
def isSameName(self, other):
"""Return true if two persons have the same name and imdbIndex
and/or personID.
"""
if not isinstance(other, self.__class__):
return 0
if self.data.has_key('name') and \
other.data.has_key('name') and \
build_name(self.data, canonical=1) == \
build_name(other.data, canonical=1):
return 1
if self.accessSystem == other.accessSystem and \
self.personID and self.personID == other.personID:
return 1
return 0
isSamePerson = isSameName # XXX: just for backward compatiblity.
def __deepcopy__(self, memo):
"""Return a deep copy of a Person instance."""
p = Person(name=u'', personID=self.personID, myName=self.myName,
myID=self.myID, data=deepcopy(self.data, memo),
currentRole=deepcopy(self.currentRole, memo),
roleIsPerson=self._roleIsPerson,
notes=self.notes, accessSystem=self.accessSystem,
titlesRefs=deepcopy(self.titlesRefs, memo),
namesRefs=deepcopy(self.namesRefs, memo),
charactersRefs=deepcopy(self.charactersRefs, memo))
p.current_info = list(self.current_info)
p.set_mod_funct(self.modFunct)
p.billingPos = self.billingPos
return p
def __repr__(self):
"""String representation of a Person object."""
# XXX: add also currentRole and notes, if present?
r = '<Person id:%s[%s] name:_%s_>' % (self.personID, self.accessSystem,
self.get('long imdb canonical name'))
if isinstance(r, unicode): r = r.encode('utf_8', 'replace')
return r
def __str__(self):
"""Simply print the short name."""
return self.get('name', u'').encode('utf_8', 'replace')
    def __unicode__(self):
        """Simply print the short name."""
        return self.get('name', u'')
    def summary(self):
        """Return a string with a pretty-printed summary for the person."""
        # An empty Person (see __nonzero__/has no 'name') yields no summary.
        if not self: return u''
        s = u'Person\n=====\nName: %s\n' % \
                        self.get('long imdb canonical name', u'')
        # Birth date, with optional parenthesised notes.
        bdate = self.get('birth date')
        if bdate:
            s += u'Birth date: %s' % bdate
            bnotes = self.get('birth notes')
            if bnotes:
                s += u' (%s)' % bnotes
            s += u'.\n'
        # Death date, with optional parenthesised notes.
        ddate = self.get('death date')
        if ddate:
            s += u'Death date: %s' % ddate
            dnotes = self.get('death notes')
            if dnotes:
                s += u' (%s)' % dnotes
            s += u'.\n'
        # Only the first mini biography is shown.
        bio = self.get('mini biography')
        if bio:
            s += u'Biography: %s\n' % bio[0]
        # Up to three most recent directing credits.
        director = self.get('director')
        if director:
            d_list = [x.get('long imdb canonical title', u'')
                        for x in director[:3]]
            s += u'Last movies directed: %s.\n' % u'; '.join(d_list)
        # Up to five most recent acting credits (either gender key).
        act = self.get('actor') or self.get('actress')
        if act:
            a_list = [x.get('long imdb canonical title', u'')
                        for x in act[:5]]
            s += u'Last movies acted: %s.\n' % u'; '.join(a_list)
        return s
| gpl-3.0 |
catchmrbharath/servo | tests/wpt/css-tests/tools/html5lib/html5lib/treewalkers/etree.py | 658 | 4613 | from __future__ import absolute_import, division, unicode_literals
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
OrderedDict = dict
import gettext
_ = gettext.gettext
import re
from six import text_type
from . import _base
from ..utils import moduleFactoryFactory
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation):
    """Build and return (as locals()) a TreeWalker bound to the given
    ElementTree implementation; consumed by moduleFactoryFactory below."""
    ElementTree = ElementTreeImplementation
    # The comment "tag" is implementation-specific; create a throwaway
    # comment node to discover the marker value used by this ElementTree.
    ElementTreeCommentType = ElementTree.Comment("asd").tag
    class TreeWalker(_base.NonRecursiveTreeWalker):
        """Given the particular ElementTree representation, this implementation,
        to avoid using recursion, returns "nodes" as tuples with the following
        content:
        1. The current element
        2. The index of the element relative to its parent
        3. A stack of ancestor elements
        4. A flag "text", "tail" or None to indicate if the current node is a
           text node; either the text or tail of the current element (1)
        """
        def getNodeDetails(self, node):
            # Classify a node and return the (_base.TYPE, ...) tuple the
            # tree-walker protocol expects.
            if isinstance(node, tuple):  # It might be the root Element
                elt, key, parents, flag = node
                if flag in ("text", "tail"):
                    return _base.TEXT, getattr(elt, flag)
                else:
                    node = elt
            # An ElementTree (not Element) was passed in: unwrap to its root.
            if not(hasattr(node, "tag")):
                node = node.getroot()
            if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"):
                return (_base.DOCUMENT,)
            elif node.tag == "<!DOCTYPE>":
                return (_base.DOCTYPE, node.text,
                        node.get("publicId"), node.get("systemId"))
            elif node.tag == ElementTreeCommentType:
                return _base.COMMENT, node.text
            else:
                assert type(node.tag) == text_type, type(node.tag)
                # This is assumed to be an ordinary element
                match = tag_regexp.match(node.tag)
                if match:
                    namespace, tag = match.groups()
                else:
                    namespace = None
                    tag = node.tag
                # Attribute names may also carry a {namespace} prefix.
                attrs = OrderedDict()
                for name, value in list(node.attrib.items()):
                    match = tag_regexp.match(name)
                    if match:
                        attrs[(match.group(1), match.group(2))] = value
                    else:
                        attrs[(None, name)] = value
                return (_base.ELEMENT, namespace, tag,
                        attrs, len(node) or node.text)
        def getFirstChild(self, node):
            # First child is the element's .text (if any), else its first
            # child element; text/tail pseudo-nodes have no children.
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                element, key, parents, flag = node, None, [], None
            if flag in ("text", "tail"):
                return None
            else:
                if element.text:
                    return element, key, parents, "text"
                elif len(element):
                    parents.append(element)
                    return element[0], 0, parents, None
                else:
                    return None
        def getNextSibling(self, node):
            # After a "text" node comes the first child element; after an
            # element comes its tail, then the next sibling in the parent.
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                return None
            if flag == "text":
                if len(element):
                    parents.append(element)
                    return element[0], 0, parents, None
                else:
                    return None
            else:
                if element.tail and flag != "tail":
                    return element, key, parents, "tail"
                elif key < len(parents[-1]) - 1:
                    return parents[-1][key + 1], key + 1, parents, None
                else:
                    return None
        def getParentNode(self, node):
            # Walk back up: a "text" node's parent is its own element; an
            # element's parent is popped off the ancestor stack.
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                return None
            if flag == "text":
                if not parents:
                    return element
                else:
                    return element, key, parents, None
            else:
                parent = parents.pop()
                if not parents:
                    return parent
                else:
                    return parent, list(parents[-1]).index(parent), parents, None
    return locals()
getETreeModule = moduleFactoryFactory(getETreeBuilder)
| mpl-2.0 |
flavour/RedHat | modules/tests/volunteer/create_volunteer_training.py | 25 | 2812 | """ Sahana Eden Automated Test - HRM004 Create Volunteer Training
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from tests.web2unittest import SeleniumUnitTest
class CreateVolunteerTraining(SeleniumUnitTest):
    # NOTE(review): the method name says hrm003 but the documented case is
    # HRM004 (see @case below and the file header) -- confirm which is right.
    def test_hrm003_create_volunteer_training(self):
        """
        @case: HRM004
        @description: Create a Volunteer Training
            * Create Course
            * Create Training Event
        @TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
        @Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
        @ToDo:
            * Add Volunteer Participants
        """
        browser = self.browser
        # Step 1: create the course the training event will reference.
        self.login(account="admin", nexturl="vol/course/create")
        self.create("hrm_course",
                    [( "code",
                       "32329408",),
                     ( "name",
                       "Emergency First Aid"),
                     ]
                    )
        # Step 2: create a training event for that course.
        self.login(account="admin", nexturl="vol/training_event/create")
        self.create("hrm_training_event",
                    [( "course_id",
                       "Emergency First Aid"),
                     ( "site_id",
                       "AP Zone (Office)"),
                     ( "start_date",
                       "2012-04-11 00:00:00"),
                     ( "end_date",
                       "2012-04-12 00:00:00"),
                     ( "hours",
                       "12"),
                     ( "comments",
                       "Testing comments"),
                     ]
                    )
nwiizo/workspace_2017 | ansible-modules-extras/network/a10/a10_server.py | 23 | 11362 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage A10 Networks slb server objects
(c) 2014, Mischa Peters <mpeters@a10networks.com>,
2016, Eric Chou <ericc@a10networks.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: a10_server
version_added: 1.8
short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices' server object.
description:
- Manage SLB (Server Load Balancer) server objects on A10 Networks devices via aXAPIv2.
author: "Eric Chou (@ericchou) 2016, Mischa Peters (@mischapeters) 2014"
notes:
- Requires A10 Networks aXAPI 2.1.
extends_documentation_fragment: a10
options:
partition:
version_added: "2.3"
description:
- set active-partition
required: false
default: null
server_name:
description:
- The SLB (Server Load Balancer) server name.
required: true
aliases: ['server']
server_ip:
description:
- The SLB server IPv4 address.
required: false
default: null
aliases: ['ip', 'address']
server_status:
description:
- The SLB virtual server status.
required: false
default: enabled
aliases: ['status']
choices: ['enabled', 'disabled']
server_ports:
description:
- A list of ports to create for the server. Each list item should be a
dictionary which specifies the C(port:) and C(protocol:), but can also optionally
specify the C(status:). See the examples below for details. This parameter is
required when C(state) is C(present).
required: false
default: null
state:
description:
- This is to specify the operation to create, update or remove SLB server.
required: false
default: present
choices: ['present', 'absent']
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
required: false
version_added: 2.3
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Create a new server
- a10_server:
    host: a10.mydomain.com
    username: myadmin
    password: mypassword
    partition: mypartition
    server: test
    server_ip: 1.1.1.100
    server_ports:
      - port_num: 8080
        protocol: tcp
      - port_num: 8443
        protocol: TCP
'''

# Bug fix: RETURN was assigned twice (a dead "#" placeholder followed by
# the real documentation); the placeholder has been removed.
RETURN = '''
content:
  description: the full info regarding the slb_server
  returned: success
  type: string
  sample: "mynewserver"
'''
# Fields that may appear in a server_ports list entry.
VALID_PORT_FIELDS = ['port_num', 'protocol', 'status']

def validate_ports(module, ports):
    """Validate and normalize the server_ports list in place.

    Each entry must define port_num (coerced to int) and protocol
    (converted to the aXAPI integer code); status is converted to the
    aXAPI enabled/disabled integer, defaulting to 1 (enabled).
    On any error module.fail_json is called, which does not return.
    """
    for item in ports:
        # Reject unknown keys outright.
        for key in item:
            if key not in VALID_PORT_FIELDS:
                module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS)))

        # validate the port number is present and an integer
        if 'port_num' in item:
            try:
                item['port_num'] = int(item['port_num'])
            except:
                module.fail_json(msg="port_num entries in the port definitions must be integers")
        else:
            module.fail_json(msg="port definitions must define the port_num field")

        # validate the port protocol is present, and convert it to
        # the internal API integer value (and validate it)
        if 'protocol' in item:
            protocol = axapi_get_port_protocol(item['protocol'])
            if not protocol:
                module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_PORT_PROTOCOLS))
            else:
                item['protocol'] = protocol
        else:
            module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_PORT_PROTOCOLS))

        # convert the status to the internal API integer value
        if 'status' in item:
            item['status'] = axapi_enabled_disabled(item['status'])
        else:
            item['status'] = 1
def main():
    """Create, update or remove an SLB server object on an A10 device.

    Authenticates against the aXAPI v2 endpoint, optionally switches the
    active partition, then reconciles the desired state ('present' or
    'absent') with the device, saving the configuration if requested.
    Exits via module.exit_json / module.fail_json.
    """
    argument_spec = a10_argument_spec()
    argument_spec.update(url_argument_spec())
    argument_spec.update(
        dict(
            state=dict(type='str', default='present', choices=['present', 'absent']),
            server_name=dict(type='str', aliases=['server'], required=True),
            server_ip=dict(type='str', aliases=['ip', 'address']),
            server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']),
            server_ports=dict(type='list', aliases=['port'], default=[]),
            # Bug fix: the default used to be [] (a list) for a 'str'
            # option; None correctly means "no partition requested".
            partition=dict(type='str', default=None),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False
    )

    host = module.params['host']
    partition = module.params['partition']
    username = module.params['username']
    password = module.params['password']
    state = module.params['state']
    write_config = module.params['write_config']
    slb_server = module.params['server_name']
    slb_server_ip = module.params['server_ip']
    slb_server_status = module.params['server_status']
    slb_server_ports = module.params['server_ports']

    # server_name is required=True, so this is only a belt-and-braces check.
    if slb_server is None:
        module.fail_json(msg='server_name is required')

    axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host
    session_url = axapi_authenticate(module, axapi_base_url, username, password)

    # validate the ports data structure (fail_json exits on error)
    validate_ports(module, slb_server_ports)

    json_post = {
        'server': {
            'name': slb_server,
        }
    }

    # add optional module parameters
    if slb_server_ip:
        json_post['server']['host'] = slb_server_ip

    if slb_server_ports:
        json_post['server']['port_list'] = slb_server_ports

    if slb_server_status:
        json_post['server']['status'] = axapi_enabled_disabled(slb_server_status)

    # Only switch the active partition when one was actually requested
    # (previously this was always called, even with an empty default).
    if partition:
        axapi_call(module, session_url + '&method=system.partition.active', json.dumps({'name': partition}))

    slb_server_data = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': slb_server}))
    slb_server_exists = not axapi_failure(slb_server_data)

    changed = False
    if state == 'present':
        if not slb_server_exists:
            if not slb_server_ip:
                module.fail_json(msg='you must specify an IP address when creating a server')

            result = axapi_call(module, session_url + '&method=slb.server.create', json.dumps(json_post))
            if axapi_failure(result):
                module.fail_json(msg="failed to create the server: %s" % result['response']['err']['msg'])
            changed = True
        else:
            def port_needs_update(src_ports, dst_ports):
                '''
                Checks to determine if the port definitions of the src_ports
                array are in or different from those in dst_ports. If there is
                a difference, this function returns true, otherwise false.
                '''
                for src_port in src_ports:
                    found = False
                    different = False
                    for dst_port in dst_ports:
                        if src_port['port_num'] == dst_port['port_num']:
                            found = True
                            for valid_field in VALID_PORT_FIELDS:
                                if src_port[valid_field] != dst_port[valid_field]:
                                    different = True
                                    break
                        if found or different:
                            break
                    if not found or different:
                        return True
                # every port from the src exists in the dst, and none of them were different
                return False

            def status_needs_update(current_status, new_status):
                '''
                Check to determine if we want to change the status of a server.
                If there is a difference between the current status of the server and
                the desired status, return true, otherwise false.
                '''
                if current_status != new_status:
                    return True
                return False

            defined_ports = slb_server_data.get('server', {}).get('port_list', [])
            current_status = slb_server_data.get('server', {}).get('status')

            # we check for a needed update several ways
            # - in case ports are missing from the ones specified by the user
            # - in case ports are missing from those on the device
            # - in case we are changing the status of a server
            if port_needs_update(defined_ports, slb_server_ports) or port_needs_update(slb_server_ports, defined_ports) or status_needs_update(current_status, axapi_enabled_disabled(slb_server_status)):
                result = axapi_call(module, session_url + '&method=slb.server.update', json.dumps(json_post))
                if axapi_failure(result):
                    module.fail_json(msg="failed to update the server: %s" % result['response']['err']['msg'])
                changed = True

        # if we changed things, get the full info regarding
        # the service group for the return data below
        if changed:
            result = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': slb_server}))
        else:
            result = slb_server_data
    elif state == 'absent':
        if slb_server_exists:
            result = axapi_call(module, session_url + '&method=slb.server.delete', json.dumps({'name': slb_server}))
            changed = True
        else:
            result = dict(msg="the server was not present")

    # if the config has changed, save the config unless otherwise requested
    if changed and write_config:
        write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
        if axapi_failure(write_result):
            module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])

    # log out of the session nicely and exit
    axapi_call(module, session_url + '&method=session.close')
    module.exit_json(changed=changed, content=result)
# ansible module imports
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import url_argument_spec
from ansible.module_utils.a10 import axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure, axapi_get_port_protocol, axapi_enabled_disabled
# Run the module entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()
| mit |
iKrishneel/py-vgdl | examples/gridphysics/boulderdash.py | 3 | 1826 | '''
VGDL example: Boulder Dash.
@author: Julian Togelius and Tom Schaul
'''
boulderdash_level = """
wwwwwwwwwwwwwwwwwwwwwwwwww
w...o.xx.o......o..xoxx..w
w...oooooo........o..o...w
w....xxx.........o.oxoo.ow
wx...............oxo...oow
wwwwwwwwww........o...wxxw
wb ...co..............wxxw
w ........Ao....o....wxxw
wooo............. ....w..w
w......x....wwwwx x.oow..w
wc .....x..ooxxo ....w..w
w ..E..........b ..w
wwwwwwwwwwwwwwwwwwwwwwwwww
"""
boulderdash_game = """
BasicGame
SpriteSet
sword > Flicker color=LIGHTGRAY limit=1 singleton=True
dirt > Immovable color=BROWN
exitdoor > Immovable color=GREEN
diamond > Resource color=YELLOW limit=10 shrinkfactor=0.25
boulder > Missile orientation=DOWN color=GRAY speed=0.2
moving >
avatar > ShootAvatar stype=sword
enemy > RandomNPC
crab > color=RED
butterfly > color=PINK
LevelMapping
. > dirt
E > exitdoor
o > boulder
x > diamond
c > crab
b > butterfly
InteractionSet
dirt avatar > killSprite
dirt sword > killSprite
diamond avatar > collectResource scoreChange=5
diamond avatar > killSprite
moving wall > stepBack
moving boulder > stepBack
avatar boulder > killIfFromAbove
avatar butterfly > killSprite
avatar crab > killSprite
boulder dirt > stepBack
boulder wall > stepBack
boulder boulder > stepBack
boulder diamond > stepBack
enemy dirt > stepBack
enemy diamond > stepBack
crab butterfly > killSprite
butterfly crab > transformTo stype=diamond scoreChange=1
exitdoor avatar > killIfOtherHasMore resource=diamond limit=9 scoreChange=100
TerminationSet
SpriteCounter stype=avatar limit=0 win=False
SpriteCounter stype=exitdoor limit=0 win=True
"""
if __name__ == "__main__":
    from vgdl.core import VGDLParser
    # Launch an interactive game from the ruleset and level defined above.
    VGDLParser.playGame(boulderdash_game, boulderdash_level)
GandaG/unitypackage-ci | .deploy/travis/main_parser.py | 2 | 4523 | #!/usr/bin/env python
from misc_parser import *
from gh_parser import *
from asset_parser import *
from docs_parser import *
from deploy_setup import *
import copy, os
# Read the project configuration and resolve the requested Unity version
# to its download URL.
parse_misc()
unity_vers = parse_unity_version()
unity_vers_url = get_available_unity_vers()[unity_vers]
# Baseline .travis.yml content used for a plain rebuild; deployment
# sections are added to a deep copy of this below.
rebuild_yml = {
    "language": ["objective-c"],
    "install": ["sh ./.deploy/travis/unity_install.sh %s %s" % (unity_vers, unity_vers_url)],
    "script": ["sh ./.deploy/travis/unity_build.sh"],
    "before_deploy": ["sh ./.deploy/travis/pre_deploy.sh"],
    "deploy": [],
    "env": {
        "global": [
            "verbose=%s" % os.environ["verbose"],
            "TRAVIS_TAG=%s" % os.environ["TRAVIS_TAG"]
        ]
    }
}
# A Github token is required for any deployment step; replace the
# six-line try/except/else probe with the idiomatic membership test.
gh_token_present = "GH_TOKEN" in os.environ
# Deployment only runs for tagged, non-pull-request builds with a token.
if (os.environ["TRAVIS_PULL_REQUEST"] == "false" and
        os.environ["TRAVIS_TAG"].strip() and
        gh_token_present):
    deploy_yml = copy.deepcopy(rebuild_yml)
    # Github Pages documentation deployment.
    ini_docs = parse_docs()
    if ini_docs:
        #deploy_yml["deploy"].append(ini_docs) # waiting for travis to fix their custom script provider
        deploy_yml["after_success"] = ini_docs
        print '------------------------------------------------------------------------------------------------------------------------'
        print "Deployment to Github Pages accepted. -----------------------------------------------------------------------------------"
        print '------------------------------------------------------------------------------------------------------------------------'
        deploy_yml["env"]["global"].extend(parse_docs_options())
    # Github Releases deployment.
    ini_gh = parse_gh()
    if ini_gh:
        deploy_yml["deploy"].append(ini_gh)
        print '------------------------------------------------------------------------------------------------------------------------'
        print "Deployment to Github Releases accepted. --------------------------------------------------------------------------------"
        print '------------------------------------------------------------------------------------------------------------------------'
        deploy_yml["env"]["global"].extend(parse_gh_options())
    # Unity Asset Store deployment.
    ini_asset = parse_asset()
    if ini_asset:
        deploy_yml["deploy"].append(ini_asset)
        print '------------------------------------------------------------------------------------------------------------------------'
        print "Deployment to Unity's Asset Store accepted. ----------------------------------------------------------------------------"
        print '------------------------------------------------------------------------------------------------------------------------'
    # Nothing was added to the baseline: there is nothing to deploy.
    if rebuild_yml == deploy_yml:
        print '------------------------------------------------------------------------------------------------------------------------'
        print "Skipping deployment. ---------------------------------------------------------------------------------------------------"
        print '------------------------------------------------------------------------------------------------------------------------'
    else:
        deploy_setup(os.environ["GH_TOKEN"], deploy_yml)
else:
    print '------------------------------------------------------------------------------------------------------------------------'
    print "Skipping deployment. ---------------------------------------------------------------------------------------------------"
    print '------------------------------------------------------------------------------------------------------------------------'
#you only get here if there is no deployment since deploy_setup calls exit on success.
if os.environ["always_run"] == "True": #move on to the build steps. This needs to be invoked like this to be able to pass the env vars created here.
    if (os.system("sh ./.deploy/travis/unity_install.sh %s %s" % (unity_vers, unity_vers_url)) == 0 and
        os.system("sh ./.deploy/travis/unity_build.sh") == 0):
        exit(0)
    else:
        exit(1)
else:
    print '------------------------------------------------------------------------------------------------------------------------'
    print "Skipping build steps. ---------------------------------------------------------------------------------------------------"
    print '------------------------------------------------------------------------------------------------------------------------'
Immortalin/python-for-android | python3-alpha/python3-src/Lib/unittest/signals.py | 162 | 1658 | import signal
import weakref
from functools import wraps
__unittest = True
class _InterruptHandler(object):
def __init__(self, default_handler):
self.called = False
self.default_handler = default_handler
def __call__(self, signum, frame):
installed_handler = signal.getsignal(signal.SIGINT)
if installed_handler is not self:
# if we aren't the installed handler, then delegate immediately
# to the default handler
self.default_handler(signum, frame)
if self.called:
self.default_handler(signum, frame)
self.called = True
for result in _results.keys():
result.stop()
# Registry of result objects to stop on SIGINT; weak keys let results be
# garbage-collected even while registered.
_results = weakref.WeakKeyDictionary()


def registerResult(result):
    """Make *result* eligible for interruption by the SIGINT handler."""
    _results[result] = 1


def removeResult(result):
    """Deregister *result*; return True if it had been registered."""
    return _results.pop(result, None) is not None
# Module-level singleton: the handler currently installed (if any).
_interrupt_handler = None
def installHandler():
    # Install the custom SIGINT handler at most once, remembering the
    # previous handler so removeHandler() can restore it later.
    global _interrupt_handler
    if _interrupt_handler is None:
        default_handler = signal.getsignal(signal.SIGINT)
        _interrupt_handler = _InterruptHandler(default_handler)
        signal.signal(signal.SIGINT, _interrupt_handler)
def removeHandler(method=None):
    # Called plainly: restore the original SIGINT handler.
    # Used as a decorator: temporarily restore it around the wrapped call.
    if method is not None:
        @wraps(method)
        def inner(*args, **kwargs):
            initial = signal.getsignal(signal.SIGINT)
            removeHandler()
            try:
                return method(*args, **kwargs)
            finally:
                # reinstall whatever handler was active before the call
                signal.signal(signal.SIGINT, initial)
        return inner

    global _interrupt_handler
    if _interrupt_handler is not None:
        signal.signal(signal.SIGINT, _interrupt_handler.default_handler)
| apache-2.0 |
peter-ch/MultiNEAT | examples/gym/lunar_lander.py | 1 | 4499 |
import gym
import time
import MultiNEAT as NEAT
import MultiNEAT.viz as viz
import random as rnd
import pickle
import numpy as np
import cv2
from tqdm import tqdm
# Seed MultiNEAT's RNG from the clock.
rng = NEAT.RNG()
rng.TimeSeed()

# Evolution hyper-parameters for MultiNEAT.
params = NEAT.Parameters()
params.PopulationSize = 150
params.DynamicCompatibility = True
params.WeightDiffCoeff = 1.0
params.CompatTreshold = 2.0
params.YoungAgeTreshold = 15
params.SpeciesMaxStagnation = 15
params.OldAgeTreshold = 35
params.MinSpecies = 2
params.MaxSpecies = 4
params.RouletteWheelSelection = False
params.Elitism = True
params.RecurrentProb = 0.15
params.OverallMutationRate = 0.2

params.MutateWeightsProb = 0.8
params.MutateNeuronTimeConstantsProb = 0.1
params.MutateNeuronBiasesProb = 0.1

params.WeightMutationMaxPower = 0.5
params.WeightReplacementMaxPower = 1.0
params.MutateWeightsSevereProb = 0.5
params.WeightMutationRate = 0.25
params.TimeConstantMutationMaxPower = 0.1
params.BiasMutationMaxPower = params.WeightMutationMaxPower

params.MaxWeight = 8

params.MutateAddNeuronProb = 0.1
params.MutateAddLinkProb = 0.2
params.MutateRemLinkProb = 0.0

params.MinActivationA = 1.0
params.MaxActivationA = 6.0

params.MinNeuronTimeConstant = 0.04
params.MaxNeuronTimeConstant = 0.24

params.MinNeuronBias = -params.MaxWeight
params.MaxNeuronBias = params.MaxWeight

# Only tanh activations are enabled.
params.ActivationFunction_SignedSigmoid_Prob = 0.0
params.ActivationFunction_UnsignedSigmoid_Prob = 0.0
params.ActivationFunction_Tanh_Prob = 1.0
params.ActivationFunction_SignedStep_Prob = 0.0
params.ActivationFunction_Linear_Prob = 0.0

params.CrossoverRate = 0.75 # mutate only 0.25
params.MultipointCrossoverRate = 0.4
params.SurvivalRate = 0.2

params.MutateNeuronTraitsProb = 0
params.MutateLinkTraitsProb = 0

# Episodes per fitness evaluation, and whether to render while training.
trials = 15
render_during_training = 0

# Seed genome: 8 observation inputs + 1 bias input, 4 discrete actions.
g = NEAT.Genome(0, 8 +1, 0, 4, False,
                NEAT.ActivationFunction.TANH, NEAT.ActivationFunction.TANH, 0, params, 0, 1)
pop = NEAT.Population(g, params, True, 1.0, rnd.randint(0, 1000))
# Hall of fame: pickled snapshots of every new best genome.
hof = []
maxf_ever = 0

env = gym.make('LunarLander-v2')
def interact_with_nn():
    # Feed the current gym observation (plus a constant 1.0 bias input)
    # through the global network and store its outputs in the global `out`.
    # Returns the input list (mainly useful for debugging).
    global out
    inp = observation.tolist()
    net.Input(inp + [1.0])
    #print(inp)
    net.ActivateLeaky(0.1)
    out = list(net.Output())
    #print(np.argmax(list(out)))
    #out[0] *= 10.0
    #if out[0] < 0.0: out[0] = -2.0
    #if out[0] > 0.0: out[0] = 2.0
    return inp
def do_trial():
    """Run one episode (up to 300 steps) and return its total reward.

    Bug fix: the previous version declared ``global avg_reward``, added
    the episode reward to it AND returned the accumulated value, while
    the caller also did ``avg_reward += do_trial()`` -- double-counting
    every earlier trial. This version returns only this episode's reward
    and leaves the accumulation entirely to the caller.
    """
    global observation, reward, t, img, action, done, info
    observation = env.reset()
    net.Flush()
    f = 0
    for t in range(300):
        if render_during_training:
            time.sleep(0.01)
            env.render()

        # interact with NN
        inp = interact_with_nn()

        if render_during_training:
            img = viz.Draw(net)
            cv2.imshow("current best", img)
            cv2.waitKey(1)

        action = np.argmax(out)
        observation, reward, done, info = env.step(action)
        if done:
            break
        # NOTE(review): the terminal step's reward is never counted
        # because of the break above -- confirm that is intended.
        f += reward

    return f
# Evolve for up to 20 generations; Ctrl-C skips ahead to the replay loop.
try:
    for generation in range(20):
        # Evaluate each genome over `trials` episodes and use the mean
        # episode reward (offset by 1e6 to keep fitness positive).
        for i_episode, genome in tqdm(enumerate(NEAT.GetGenomeList(pop))):
            net = NEAT.NeuralNetwork()
            genome.BuildPhenotype(net)

            avg_reward = 0

            for trial in range(trials):
                avg_reward += do_trial()
            avg_reward /= trials
            #print(avg_reward)

            genome.SetFitness(1000000 + avg_reward)

        maxf = max([x.GetFitness() for x in NEAT.GetGenomeList(pop)])
        print('Generation: {}, max fitness: {}'.format(generation, maxf))
        if maxf > maxf_ever:
            # Snapshot every new champion into the hall of fame.
            hof.append(pickle.dumps(pop.GetBestGenome()))
            maxf_ever = maxf
        pop.Epoch()
except KeyboardInterrupt:
    pass
# Replay the latest hall-of-fame champion in an endless rendered loop.
print('Replaying forever..')

if hof:
    while True:
        try:
            observation = env.reset()
            net = NEAT.NeuralNetwork()
            g = pickle.loads(hof[-1])
            g.BuildPhenotype(net)
            reward = 0

            for t in range(250):
                time.sleep(0.01)
                env.render()

                # interact with NN
                interact_with_nn()

                # render NN
                img = viz.Draw(net)
                cv2.imshow("current best", img)
                cv2.waitKey(1)

                action = np.argmax(out)
                observation, reward, done, info = env.step(action)

                if done:
                    break
        except Exception as ex:
            # Best-effort replay: report the error and restart the episode.
            print(ex)
            time.sleep(0.2)
| lgpl-3.0 |
angr/angr | angr/engines/pcode/arch/ArchPcode_MIPS_BE_32_micro.py | 1 | 15047 | ###
### This file was automatically generated
###
from archinfo.arch import register_arch, Endness, Register
from .common import ArchPcode
class ArchPcode_MIPS_BE_32_micro(ArchPcode):
    """Auto-generated archinfo description of Ghidra's 'MIPS:BE:32:micro'
    pcode language (MIPS32, big endian, with microMIPS).

    The register offsets mirror Ghidra's register space layout for this
    language; the file header says this file is generated, so edits
    should normally be made to the generator, not here.
    """
    name = 'MIPS:BE:32:micro'
    pcode_arch = 'MIPS:BE:32:micro'
    description = 'MIPS32 32-bit addresses, big endian, with microMIPS'
    bits = 32
    ip_offset = 0x80
    sp_offset = 0x74
    bp_offset = sp_offset
    instruction_endness = Endness.BE
    register_list = [
        # General-purpose registers (o32 ABI names), offsets 0x00-0x7c.
        Register('zero', 4, 0x0),
        Register('at', 4, 0x4),
        Register('v0', 4, 0x8),
        Register('v1', 4, 0xc),
        Register('a0', 4, 0x10),
        Register('a1', 4, 0x14),
        Register('a2', 4, 0x18),
        Register('a3', 4, 0x1c),
        Register('t0', 4, 0x20),
        Register('t1', 4, 0x24),
        Register('t2', 4, 0x28),
        Register('t3', 4, 0x2c),
        Register('t4', 4, 0x30),
        Register('t5', 4, 0x34),
        Register('t6', 4, 0x38),
        Register('t7', 4, 0x3c),
        Register('s0', 4, 0x40),
        Register('s1', 4, 0x44),
        Register('s2', 4, 0x48),
        Register('s3', 4, 0x4c),
        Register('s4', 4, 0x50),
        Register('s5', 4, 0x54),
        Register('s6', 4, 0x58),
        Register('s7', 4, 0x5c),
        Register('t8', 4, 0x60),
        Register('t9', 4, 0x64),
        Register('k0', 4, 0x68),
        Register('k1', 4, 0x6c),
        Register('gp', 4, 0x70),
        Register('sp', 4, 0x74),
        Register('s8', 4, 0x78),
        Register('ra', 4, 0x7c),
        Register('pc', 4, 0x80, alias_names=('ip',)),
        # Floating-point registers: 64-bit even/odd pairs overlapping
        # their 32-bit halves (note the odd half sits at the lower offset).
        Register('f0_1', 8, 0x1000),
        Register('f1', 4, 0x1000),
        Register('f0', 4, 0x1004),
        Register('f2_3', 8, 0x1008),
        Register('f3', 4, 0x1008),
        Register('f2', 4, 0x100c),
        Register('f4_5', 8, 0x1010),
        Register('f5', 4, 0x1010),
        Register('f4', 4, 0x1014),
        Register('f6_7', 8, 0x1018),
        Register('f7', 4, 0x1018),
        Register('f6', 4, 0x101c),
        Register('f8_9', 8, 0x1020),
        Register('f9', 4, 0x1020),
        Register('f8', 4, 0x1024),
        Register('f10_11', 8, 0x1028),
        Register('f11', 4, 0x1028),
        Register('f10', 4, 0x102c),
        Register('f12_13', 8, 0x1030),
        Register('f13', 4, 0x1030),
        Register('f12', 4, 0x1034),
        Register('f14_15', 8, 0x1038),
        Register('f15', 4, 0x1038),
        Register('f14', 4, 0x103c),
        Register('f16_17', 8, 0x1040),
        Register('f17', 4, 0x1040),
        Register('f16', 4, 0x1044),
        Register('f18_19', 8, 0x1048),
        Register('f19', 4, 0x1048),
        Register('f18', 4, 0x104c),
        Register('f20_21', 8, 0x1050),
        Register('f21', 4, 0x1050),
        Register('f20', 4, 0x1054),
        Register('f22_23', 8, 0x1058),
        Register('f23', 4, 0x1058),
        Register('f22', 4, 0x105c),
        Register('f24_25', 8, 0x1060),
        Register('f25', 4, 0x1060),
        Register('f24', 4, 0x1064),
        Register('f26_27', 8, 0x1068),
        Register('f27', 4, 0x1068),
        Register('f26', 4, 0x106c),
        Register('f28_29', 8, 0x1070),
        Register('f29', 4, 0x1070),
        Register('f28', 4, 0x1074),
        Register('f30_31', 8, 0x1078),
        Register('f31', 4, 0x1078),
        Register('f30', 4, 0x107c),
        # Floating-point control registers.
        Register('fir', 4, 0x1200),
        Register('fccr', 4, 0x1204),
        Register('fexr', 4, 0x1208),
        Register('fenr', 4, 0x120c),
        Register('fcsr', 4, 0x1210),
        # Coprocessor 0 registers, select 0.
        Register('index', 4, 0x2000),
        Register('random', 4, 0x2004),
        Register('entrylo0', 4, 0x2008),
        Register('entrylo1', 4, 0x200c),
        Register('context', 4, 0x2010),
        Register('pagemask', 4, 0x2014),
        Register('wired', 4, 0x2018),
        Register('hwrena', 4, 0x201c),
        Register('badvaddr', 4, 0x2020),
        Register('count', 4, 0x2024),
        Register('entryhi', 4, 0x2028),
        Register('compare', 4, 0x202c),
        Register('status', 4, 0x2030),
        Register('cause', 4, 0x2034),
        Register('epc', 4, 0x2038),
        Register('prid', 4, 0x203c),
        Register('config', 4, 0x2040),
        Register('lladdr', 4, 0x2044),
        Register('watchlo', 4, 0x2048),
        Register('watchhi', 4, 0x204c),
        Register('xcontext', 4, 0x2050),
        Register('cop0_reg21', 4, 0x2054),
        Register('cop0_reg22', 4, 0x2058),
        Register('debug', 4, 0x205c),
        Register('depc', 4, 0x2060),
        Register('perfcnt', 4, 0x2064),
        Register('errctl', 4, 0x2068),
        Register('cacheerr', 4, 0x206c),
        Register('taglo', 4, 0x2070),
        Register('taghi', 4, 0x2074),
        Register('errorepc', 4, 0x2078),
        Register('desave', 4, 0x207c),
        # Coprocessor 0 registers, select 1.
        Register('mvpcontrol', 4, 0x2100),
        Register('vpecontrol', 4, 0x2104),
        Register('tcstatus', 4, 0x2108),
        Register('cop0_reg3.1', 4, 0x210c),
        Register('contextconfig', 4, 0x2110),
        Register('pagegrain', 4, 0x2114),
        Register('srsconf0', 4, 0x2118),
        Register('cop0_reg7.1', 4, 0x211c),
        Register('cop0_reg8.1', 4, 0x2120),
        Register('cop0_reg9.1', 4, 0x2124),
        Register('cop0_reg10.1', 4, 0x2128),
        Register('cop0_reg11.1', 4, 0x212c),
        Register('intctl', 4, 0x2130),
        Register('cop0_reg13.1', 4, 0x2134),
        Register('cop0_reg14.1', 4, 0x2138),
        Register('ebase', 4, 0x213c),
        Register('config1', 4, 0x2140),
        Register('cop0_reg17.1', 4, 0x2144),
        Register('watchlo.1', 4, 0x2148),
        Register('watchhi.1', 4, 0x214c),
        Register('cop0_reg20.1', 4, 0x2150),
        Register('cop0_reg21.1', 4, 0x2154),
        Register('cop0_reg22.1', 4, 0x2158),
        Register('tracecontrol', 4, 0x215c),
        Register('cop0_reg24.1', 4, 0x2160),
        Register('perfcnt.1', 4, 0x2164),
        Register('cop0_reg26.1', 4, 0x2168),
        Register('cacheerr.1', 4, 0x216c),
        Register('datalo.1', 4, 0x2170),
        Register('datahi.1', 4, 0x2174),
        Register('cop0_reg30.1', 4, 0x2178),
        Register('cop0_reg31.1', 4, 0x217c),
        # Coprocessor 0 registers, select 2.
        Register('mvpconf0', 4, 0x2200),
        Register('vpeconf0', 4, 0x2204),
        Register('tcbind', 4, 0x2208),
        Register('cop0_reg3.2', 4, 0x220c),
        Register('cop0_reg4.2', 4, 0x2210),
        Register('cop0_reg5.2', 4, 0x2214),
        Register('srsconf1', 4, 0x2218),
        Register('cop0_reg7.2', 4, 0x221c),
        Register('cop0_reg8.2', 4, 0x2220),
        Register('cop0_reg9.2', 4, 0x2224),
        Register('cop0_reg10.2', 4, 0x2228),
        Register('cop0_reg11.2', 4, 0x222c),
        Register('srsctl', 4, 0x2230),
        Register('cop0_reg13.2', 4, 0x2234),
        Register('cop0_reg14.2', 4, 0x2238),
        Register('cop0_reg15.2', 4, 0x223c),
        Register('config2', 4, 0x2240),
        Register('cop0_reg17.2', 4, 0x2244),
        Register('watchlo.2', 4, 0x2248),
        Register('watchhi.2', 4, 0x224c),
        Register('cop0_reg20.2', 4, 0x2250),
        Register('cop0_reg21.2', 4, 0x2254),
        Register('cop0_reg22.2', 4, 0x2258),
        Register('tracecontrol2', 4, 0x225c),
        Register('cop0_reg24.2', 4, 0x2260),
        Register('perfcnt.2', 4, 0x2264),
        Register('cop0_reg26.2', 4, 0x2268),
        Register('cacheerr.2', 4, 0x226c),
        Register('taglo.2', 4, 0x2270),
        Register('taghi.2', 4, 0x2274),
        Register('cop0_reg30.2', 4, 0x2278),
        Register('cop0_reg31.2', 4, 0x227c),
        # Coprocessor 0 registers, select 3.
        Register('mvpconf1', 4, 0x2300),
        Register('vpeconf1', 4, 0x2304),
        Register('tcrestart', 4, 0x2308),
        Register('cop0_reg3.3', 4, 0x230c),
        Register('cop0_reg4.3', 4, 0x2310),
        Register('cop0_reg5.3', 4, 0x2314),
        Register('srsconf2', 4, 0x2318),
        Register('cop0_reg7.3', 4, 0x231c),
        Register('cop0_reg8.3', 4, 0x2320),
        Register('cop0_reg9.3', 4, 0x2324),
        Register('cop0_reg10.3', 4, 0x2328),
        Register('cop0_reg11.3', 4, 0x232c),
        Register('srsmap', 4, 0x2330),
        Register('cop0_reg13.3', 4, 0x2334),
        Register('cop0_reg14.3', 4, 0x2338),
        Register('cop0_reg15.3', 4, 0x233c),
        Register('config3', 4, 0x2340),
        Register('cop0_reg17.3', 4, 0x2344),
        Register('watchlo.3', 4, 0x2348),
        Register('watchhi.3', 4, 0x234c),
        Register('cop0_reg20.3', 4, 0x2350),
        Register('cop0_reg21.3', 4, 0x2354),
        Register('cop0_reg22.3', 4, 0x2358),
        Register('usertracedata', 4, 0x235c),
        Register('cop0_reg24.3', 4, 0x2360),
        Register('perfcnt.3', 4, 0x2364),
        Register('cop0_reg26.3', 4, 0x2368),
        Register('cacheerr.3', 4, 0x236c),
        Register('datalo.3', 4, 0x2370),
        Register('datahi.3', 4, 0x2374),
        Register('cop0_reg30.3', 4, 0x2378),
        Register('cop0_reg31.3', 4, 0x237c),
        # Coprocessor 0 registers, select 4.
        Register('cop0_reg0.4', 4, 0x2400),
        Register('yqmask', 4, 0x2404),
        Register('tchalt', 4, 0x2408),
        Register('cop0_reg3.4', 4, 0x240c),
        Register('cop0_reg4.4', 4, 0x2410),
        Register('cop0_reg5.4', 4, 0x2414),
        Register('srsconf3', 4, 0x2418),
        Register('cop0_reg7.4', 4, 0x241c),
        Register('cop0_reg8.4', 4, 0x2420),
        Register('cop0_reg9.4', 4, 0x2424),
        Register('cop0_reg10.4', 4, 0x2428),
        Register('cop0_reg11.4', 4, 0x242c),
        Register('cop0_reg12.4', 4, 0x2430),
        Register('cop0_reg13.4', 4, 0x2434),
        Register('cop0_reg14.4', 4, 0x2438),
        Register('cop0_reg15.4', 4, 0x243c),
        Register('cop0_reg16.4', 4, 0x2440),
        Register('cop0_reg17.4', 4, 0x2444),
        Register('watchlo.4', 4, 0x2448),
        Register('watchhi.4', 4, 0x244c),
        Register('cop0_reg20.4', 4, 0x2450),
        Register('cop0_reg21.4', 4, 0x2454),
        Register('cop0_reg22.4', 4, 0x2458),
        Register('tracebpc', 4, 0x245c),
        Register('cop0_reg24.4', 4, 0x2460),
        Register('perfcnt.4', 4, 0x2464),
        Register('cop0_reg26.4', 4, 0x2468),
        Register('cacheerr.4', 4, 0x246c),
        Register('taglo.4', 4, 0x2470),
        Register('taghi.4', 4, 0x2474),
        Register('cop0_reg30.4', 4, 0x2478),
        Register('cop0_reg31.4', 4, 0x247c),
        # Coprocessor 0 registers, select 5.
        Register('cop0_reg0.5', 4, 0x2500),
        Register('vpeschedule', 4, 0x2504),
        Register('tccontext', 4, 0x2508),
        Register('cop0_reg3.5', 4, 0x250c),
        Register('cop0_reg4.5', 4, 0x2510),
        Register('cop0_reg5.5', 4, 0x2514),
        Register('srsconf4', 4, 0x2518),
        Register('cop0_reg7.5', 4, 0x251c),
        Register('cop0_reg8.5', 4, 0x2520),
        Register('cop0_reg9.5', 4, 0x2524),
        Register('cop0_reg10.5', 4, 0x2528),
        Register('cop0_reg11.5', 4, 0x252c),
        Register('cop0_reg12.5', 4, 0x2530),
        Register('cop0_reg13.5', 4, 0x2534),
        Register('cop0_reg14.5', 4, 0x2538),
        Register('cop0_reg15.5', 4, 0x253c),
        Register('cop0_reg16.5', 4, 0x2540),
        Register('cop0_reg17.5', 4, 0x2544),
        Register('watchlo.5', 4, 0x2548),
        Register('watchhi.5', 4, 0x254c),
        Register('cop0_reg20.5', 4, 0x2550),
        Register('cop0_reg21.5', 4, 0x2554),
        Register('cop0_reg22.5', 4, 0x2558),
        Register('cop0_reg23.5', 4, 0x255c),
        Register('cop0_reg24.5', 4, 0x2560),
        Register('perfcnt.5', 4, 0x2564),
        Register('cop0_reg26.5', 4, 0x2568),
        Register('cacheerr.5', 4, 0x256c),
        Register('datalo.5', 4, 0x2570),
        Register('datahi.5', 4, 0x2574),
        Register('cop0_reg30.5', 4, 0x2578),
        Register('cop0_reg31.5', 4, 0x257c),
        # Coprocessor 0 registers, select 6.
        Register('cop0_reg0.6', 4, 0x2600),
        Register('vpeschefback', 4, 0x2604),
        Register('tcschedule', 4, 0x2608),
        Register('cop0_reg3.6', 4, 0x260c),
        Register('cop0_reg4.6', 4, 0x2610),
        Register('cop0_reg5.6', 4, 0x2614),
        Register('cop0_reg6.6', 4, 0x2618),
        Register('cop0_reg7.6', 4, 0x261c),
        Register('cop0_reg8.6', 4, 0x2620),
        Register('cop0_reg9.6', 4, 0x2624),
        Register('cop0_reg10.6', 4, 0x2628),
        Register('cop0_reg11.6', 4, 0x262c),
        Register('cop0_reg12.6', 4, 0x2630),
        Register('cop0_reg13.6', 4, 0x2634),
        Register('cop0_reg14.6', 4, 0x2638),
        Register('cop0_reg15.6', 4, 0x263c),
        Register('cop0_reg16.6', 4, 0x2640),
        Register('cop0_reg17.6', 4, 0x2644),
        Register('watchlo.6', 4, 0x2648),
        Register('watchhi.6', 4, 0x264c),
        Register('cop0_reg20.6', 4, 0x2650),
        Register('cop0_reg21.6', 4, 0x2654),
        Register('cop0_reg22.6', 4, 0x2658),
        Register('cop0_reg23.6', 4, 0x265c),
        Register('cop0_reg24.6', 4, 0x2660),
        Register('perfcnt.6', 4, 0x2664),
        Register('cop0_reg26.6', 4, 0x2668),
        Register('cacheerr.6', 4, 0x266c),
        Register('taglo.6', 4, 0x2670),
        Register('taghi.6', 4, 0x2674),
        Register('cop0_reg30.6', 4, 0x2678),
        Register('cop0_reg31.6', 4, 0x267c),
        # Coprocessor 0 registers, select 7.
        Register('cop0_reg0.7', 4, 0x2700),
        Register('vpeopt', 4, 0x2704),
        Register('tcschefback', 4, 0x2708),
        Register('cop0_reg3.7', 4, 0x270c),
        Register('cop0_reg4.7', 4, 0x2710),
        Register('cop0_reg5.7', 4, 0x2714),
        Register('cop0_reg6.7', 4, 0x2718),
        Register('cop0_reg7.7', 4, 0x271c),
        Register('cop0_reg8.7', 4, 0x2720),
        Register('cop0_reg9.7', 4, 0x2724),
        Register('cop0_reg10.7', 4, 0x2728),
        Register('cop0_reg11.7', 4, 0x272c),
        Register('cop0_reg12.7', 4, 0x2730),
        Register('cop0_reg13.7', 4, 0x2734),
        Register('cop0_reg14.7', 4, 0x2738),
        Register('cop0_reg15.7', 4, 0x273c),
        Register('cop0_reg16.7', 4, 0x2740),
        Register('cop0_reg17.7', 4, 0x2744),
        Register('watchlo.7', 4, 0x2748),
        Register('watchhi.7', 4, 0x274c),
        Register('cop0_reg20.7', 4, 0x2750),
        Register('cop0_reg21.7', 4, 0x2754),
        Register('cop0_reg22.7', 4, 0x2758),
        Register('cop0_reg23.7', 4, 0x275c),
        Register('cop0_reg24.7', 4, 0x2760),
        Register('perfcnt.7', 4, 0x2764),
        Register('cop0_reg26.7', 4, 0x2768),
        Register('cacheerr.7', 4, 0x276c),
        Register('datalo.7', 4, 0x2770),
        Register('datahi.7', 4, 0x2774),
        Register('cop0_reg30.7', 4, 0x2778),
        Register('cop0_reg31.7', 4, 0x277c),
        # Multiply/divide accumulators (hi/lo plus the extra DSP pairs).
        Register('hi', 4, 0x3000),
        Register('lo', 4, 0x3004),
        Register('hi1', 4, 0x3008),
        Register('lo1', 4, 0x300c),
        Register('hi2', 4, 0x3010),
        Register('lo2', 4, 0x3014),
        Register('hi3', 4, 0x3018),
        Register('lo3', 4, 0x301c),
        Register('tsp', 4, 0x3020),
        # Pcode pseudo-registers used by the translator.
        Register('isamodeswitch', 1, 0x3f00),
        Register('contextreg', 4, 0x4000)
    ]
# Make this arch discoverable through archinfo's arch lookup machinery.
register_arch(['mips:be:32:micro'], 32, Endness.BE, ArchPcode_MIPS_BE_32_micro)
| bsd-2-clause |
jnvandermeer/PythonFeedback | idlex-1.11.2/idlexlib/extensions/DocViewer.py | 7 | 12238 | # IDLEX EXTENSION
"""
Documentation Viewer Extension
Version: 0.2
Author: Roger D. Serwy
roger.serwy@gmail.com
Date: 2009-05-29
Date: 2011-12-26 - modified to work with IdleX and Python 3
It provides "Documentation Viewer" under "Help"
Add these lines to config-extensions.def
Parts of this code is based on a patch submitted to the
Python Software Foundation under to Apache 2 License, per
a contributor agreement.
See http://bugs.python.org/issue964437 for the patch
"""
# Default config-extensions.def entries for this extension; consumed by the
# IdleX configuration machinery when the user's config lacks these sections.
config_extension_def = """
[DocViewer]
enable=1
enable_editor=1
enable_shell=1
calltip=1
[DocViewer_cfgBindings]
docviewer-window=
"""
# TODO:
# - sanitize command input box
from idlelib.configHandler import idleConf
import idlelib.IOBinding as IOBinding
from idlelib.EditorWindow import EditorWindow
from idlelib.OutputWindow import OutputWindow
from idlelib.Delegator import Delegator
from idlelib.HyperParser import HyperParser
import idlelib.WindowList as WindowList
import idlelib.SearchDialog as SearchDialog
import time
import sys
if sys.version < '3':
from Tkinter import *
else:
from tkinter import *
def get_cfg(cfg, type="bool", default=True):
    """Look up DocViewer option *cfg* in the IDLE extension configuration."""
    section = "extensions"
    extension = "DocViewer"
    return idleConf.GetOption(section, extension, cfg,
                              type=type, default=default)
def set_cfg(cfg, b):
    """Persist DocViewer option *cfg* as the string form of *b*."""
    value = '%s' % b
    return idleConf.SetOption("extensions", "DocViewer", cfg, value)
class DocViewer:
    """IDLE extension that adds a 'Documentation Viewer' entry to Help.

    Instantiated by IDLE once per editor/shell window.  Binds a
    calltip-style event on ')'-open keystrokes so the viewer can track
    the callable being typed, and exposes <<docviewer-window>> to open
    the shared viewer window.
    """

    menudefs = [
        ('help', [None,
                  ('Documentation Viewer', '<<docviewer-window>>'),
                  ]),]

    def __init__(self, editwin):
        self.editwin = editwin
        self.top = editwin.top
        text = self.editwin.text
        # Fire the calltip handler whenever '(' is released.
        text.bind("<<docviewer-calltip>>", self.calltip_event)
        text.event_add("<<docviewer-calltip>>", "<KeyRelease-parenleft>")
        text.bind("<<docviewer-window>>", self.do_calltip)
        # Module-level singleton handler shared by all windows.
        self.docWindow = docWindow

    def do_calltip(self, event=None):
        # Open (or raise) the viewer, then populate it from the cursor.
        docWindow.show(self.editwin)
        self.calltip_event()

    def calltip_event(self, event=None):
        """Push the expression before the open paren at the insertion
        cursor into the viewer's entry box and fetch its doc string."""
        window = self.docWindow.window
        if window is None:
            return
        if not window.update_calltip.get():
            # don't process calltip event
            return
        # get calltip
        # code borrows from CallTips.py::open_calltip
        evalfuncs = False
        hp = HyperParser(self.editwin, "insert")
        sur_paren = hp.get_surrounding_brackets('(')
        if not sur_paren:
            return
        hp.set_index(sur_paren[0])
        name = hp.get_expression()
        # Skip empty expressions and ones containing a call (not evaluated).
        if not name or (not evalfuncs and name.find('(') != -1):
            return
        w = window
        w.entry.delete("0", "end")
        w.entry.insert("insert", name)
        w.get_doc()

    def show_docviewer(self, event=None):
        # create a window with two text boxes and a label
        self.docWindow.show(self.editwin)
class DocDelegator(Delegator):
    """Percolator filter that makes the documentation Text widget
    effectively read-only: characters typed into the text body are
    redirected to the lookup entry box and deletions are ignored.
    (The viewer itself writes via the underlying delegate's insert,
    bound as text._insert, which bypasses this filter.)
    """

    def insert(self, index, chars, tags=None):
        # Redirect keystrokes into the entry widget so the user can keep
        # typing a lookup even when the text widget has focus.
        try:
            self.entry.insert('insert', chars)
            self.entry.focus()
        except Exception as err:
            # Best effort: a broken redirect should never kill the UI.
            print(' Internal DocDelegator Error:', err)

    def delete(self, index1, index2=None):
        # Swallow deletions: the documentation text cannot be edited.
        pass
class DocWindowHandler(object):
    """Manage the singleton DocumentationWindow instance."""

    def __init__(self):
        self.window = None
        # Get notified on window-registry changes so the viewer can close
        # itself when it would otherwise be the last window left open.
        WindowList.registry.register_callback(self.check_close)

    def show(self, editwin, near=None):
        """Create the viewer if needed and position it near *editwin*."""
        if self.window is None:
            # The viewer needs a shell interpreter to run help() in.
            shell = editwin.flist.open_shell()
            interp = shell.interp
            win = DocumentationWindow(flist=editwin.flist,
                                      interp=interp,
                                      editwin=shell)
            self.window = win
            # Drop our reference when the toplevel is destroyed.
            win.top.bind('<Destroy>', self.destroy, '+')
        self.nearwindow(editwin.top)

    def nearwindow(self, near):
        # Place the viewer slightly offset from the given toplevel.
        w = self.window.top
        w.withdraw()
        geom = (near.winfo_rootx() + 10, near.winfo_rooty() + 10)
        w.geometry('=+%d+%d' % geom)
        w.deiconify()
        w.lift()

    def check_close(self, event=None):
        """ callback function to make sure the DocumentationWindow is
        not the last instance. If so, then close it.
        """
        if self.window is None:
            return
        d = WindowList.registry.dict
        t = str(self.window.top)
        if len(d) == 1:
            if t in d:
                self.window.close()
            else:
                #Strange situation. DocViewer is open, but not it the dict.
                #This should not happen.
                pass

    def destroy(self, event=None):
        # <Destroy> callback: forget the window so show() can recreate it.
        self.window = None
# Module-level singleton shared by every DocViewer extension instance.
docWindow = DocWindowHandler()
class DocumentationWindow(EditorWindow):
    """ Create an editor window for the purpose of displaying documentation """

    def __init__(self, flist=None, interp=None, editwin=None):
        EditorWindow.__init__(self, flist)
        self.interp = interp      # interpreter of the attached shell
        self.editwin = editwin    # the PyShell window used to run help()
        # TODO: figure out better way to eliminate menu bar
        m = Menu(self.root)
        self.top.config(menu=m)
        root = self.top
        self.doc_frame = doc_frame = Frame(root)
        # make first line: "Help on: [entry] [x] Update from Calltip"
        f_top = Frame(doc_frame)
        label = Label(f_top, text='Help on:')
        self.entry = entry = Entry(f_top)
        entry.bind('<Return>', self.get_doc)
        self.update_calltip = IntVar(root)
        check = Checkbutton(f_top, text='Update from Calltip',
                            variable=self.update_calltip)
        check.var = self.update_calltip
        # Restore the persisted calltip preference.
        if get_cfg('calltip'):
            check.select()
        f_top.pack(side='top', fill=X, padx=5)
        label.pack(side='left')
        entry.pack(side='left', fill=X, expand=True, padx=5, ipadx=5)
        check.pack(side='right')
        # make command buttons
        f_cmd = Frame(doc_frame)
        f_cmd.pack(side='top', fill=X, padx=3)
        self.button_showdoc = Button(f_cmd, text='Show Doc String',
                                     default='active',
                                     command=self.get_doc)
        self.button_showhelp = Button(f_cmd, text='Show help()',
                                      command=self.get_help)
        button_search = Button(f_cmd, text='Search Text',
                               command=self.search)
        button_close = Button(f_cmd, text='Close',
                              command=self.close)
        button_close.pack(side='right')
        self.button_showdoc.pack(side='left')
        self.button_showhelp.pack(side='left')
        button_search.pack(side='left')
        doc_frame.pack(side=TOP, before=self.text_frame, fill=X)
        # change focused widget to entry box
        self.entry.focus_set()
        self.top.focused_widget = self.entry
        # remove unneeded stuff
        self.per.removefilter(self.undo)
        self._rmcolorizer()
        #self.status_bar.pack_forget()
        # add a delegator to prevent editing of text widget
        self.doc_del = DocDelegator()
        self.doc_del.entry = self.entry
        self.per.insertfilter(self.doc_del)
        # _insert/_delete bypass the read-only filter for our own writes.
        self.text._insert = self.doc_del.delegate.insert
        self.text._delete = self.doc_del.delegate.delete
        self.text.configure(wrap='none')
        # Forward the user's configured "find" shortcut from the entry box.
        keySetName = idleConf.CurrentKeys()
        find_bindings = idleConf.GetKeyBinding(keySetName, '<<find>>')
        for key_event in find_bindings:
            #self.entry.event_add('<<find>>', key_event)
            self.entry.bind(key_event, lambda e: self.text.event_generate('<<find>>'))

    def get_standard_extension_names(self):
        # Only load SearchBar if needed
        ret = []
        #a = idleConf.GetExtensions(editor_only=True)
        #if 'SearchBar' in a:
        #    ret.append('SearchBar')
        return ret

    def search(self, event=None):
        """Open the find dialog over the documentation text."""
        self.text.focus_set()
        self.text.update_idletasks()
        self.text.event_generate('<<find>>')
        self.text.update_idletasks()
        return "break"

    def get_help(self, event=None):
        """Run help(<entry contents>) in the shell and show the output.

        NOTE(review): the entry text is interpolated directly into the
        code string, so odd input can break (or inject into) the command.
        """
        #self.button_showhelp.configure(default='active')
        #self.button_showdoc.configure(default='disabled')
        b = self.entry.get().strip()
        if not b:
            return
        cmd = """if 1:
            try:
                help(%s)
            except:
                print("'%s' not found")""" % (b,b)
        self.process_request(cmd)

    def get_doc(self, event=None):
        """Show <entry contents>.__doc__ from the shell's namespace."""
        #self.button_showhelp.configure(default='disabled')
        #self.button_showdoc.configure(default='active')
        b = self.entry.get().strip()
        if not b:
            return
        cmd = """if 1:
            try:
                if hasattr(%s, '__doc__'):
                    print(%s.__doc__)
                else:
                    print("%s doesn't have a doc string")
            except:
                print("'%s' not found in the shell's namespace.")""" % ((b,)*4)
        # NOTE(review): cmd2 below is built but never used (dead code).
        cmd2 = """if 1:
            print "====Displaying %s.__doc__"
            print
            try:
                if hasattr(%s, '__doc__'):
                    print(%s.__doc__)
                else:
                    print("%s doesn't have a doc string")
            except:
                print("'%s' not found in the shell's namespace.")
            print()
            print()
            print("====Displaying help(%s)")
            print()
            try:
                help(%s)
            except:
                print("'%s' not found in the shell's namespace.") """ % ((b,)*8)
        self.process_request(cmd)

    def process_request(self, cmd=None):
        """Run *cmd* in the shell, capturing its output into this window.

        Temporarily monkey-patches the shell's text.insert and write so
        stdout/stderr land here, then restores them when execution ends.
        """
        if cmd is None:
            return
        # Syntax-check first so a malformed request never reaches the shell.
        try:
            test = compile(cmd, '', 'exec')
        except Exception as err:
            t = 'Unable to process your request.\nIs your given object in the namespace?'
            self.text._delete('1.0', 'end')
            self.text._insert('1.0', t)
            return
        interp = self.interp
        editwin = self.editwin
        self.count = 0
        if editwin.executing:
            self.text._insert(1.0, "The shell currently is executing a command.\n" \
                              "Please try again when the shell is done executing.\n")
            return
        # Remember the shell's I/O mark so it can be restored afterwards.
        editwin.text.mark_set("iomark2", "iomark")
        self.text._delete("1.0", "end")
        # redirect output from PyShell to DocViewer
        # NOTE(review): insert_bridge is installed as an instance attribute,
        # so the first positional argument the shell passes becomes `self`
        # here — confirm the shell-side call signature matches.
        def insert_bridge(self, index, chars, tags=None):
            #self.count += 1
            #if self.count < 50:
            self.text.insert(index, chars, tags)
        __insert = editwin.text.insert
        editwin.text.insert = insert_bridge
        def mywrite(s, tags=()):
            if tags in ('stdout', 'stderr'):
                # send to me
                self.text._insert('insert', s,tags)
        __write = editwin.write
        editwin.write = mywrite
        interp.runcommand(cmd)
        # go into a loop, until help has arrived :)
        while editwin.executing:
            editwin.text.update_idletasks()
            time.sleep(0.05)
        # restore output to PyShell
        editwin.text.insert = __insert
        editwin.write = __write
        editwin.text.mark_set("iomark", "iomark2")

    def close(self):
        """Persist preferences and destroy the viewer window."""
        set_cfg('calltip', self.update_calltip.get())
        # remove all references
        # NOTE(review): the `if 0:` block below is disabled cleanup code.
        if 0:
            self.doc_frame.destroy()
            self.editwin = None
            self.interp = None
            self.text._delete = None
            self.text._insert = None
            self.per.removefilter(self.doc_del)
            self.undo = None
            self.doc_del = None
            #EditorWindow.close(self)
        self._close()
        self.top.destroy()
        #print 'refcount: ', sys.getrefcount(DocViewer.WINDOW)
        # NOTE(review): DocViewer.WINDOW is set here but never read in this
        # file — verify it is still needed.
        DocViewer.WINDOW = None

    def short_title(self):
        # Title shown in IDLE's window list.
        return "IDLE Documentation Viewer"
| gpl-2.0 |
ajgallegog/gem5_arm | src/dev/Pci.py | 30 | 8427 | # Copyright (c) 2013 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from m5.SimObject import SimObject
from m5.params import *
from m5.proxy import *
from Device import BasicPioDevice, DmaDevice, PioDevice
class PciConfigAll(BasicPioDevice):
    """SimObject exposing a flat PIO window over one PCI bus's config space."""
    type = 'PciConfigAll'
    cxx_header = "dev/pciconfigall.hh"
    platform = Param.Platform(Parent.any, "Platform this device is part of.")
    bus = Param.UInt8(0x00, "PCI bus to act as config space for")
    size = Param.MemorySize32('16MB', "Size of config space")
    pio_latency = '30ns'
    pio_addr = 0 # will be overridden by platform-based calculation
class PciDevice(DmaDevice):
    """Abstract base SimObject for PCI devices.

    The parameters below mirror the standard PCI type-0 configuration
    header fields plus optional PCIe capability structures; concrete
    devices override the ones they need.
    """
    type = 'PciDevice'
    cxx_class = 'PciDevice'
    cxx_header = "dev/pcidev.hh"
    abstract = True
    platform = Param.Platform(Parent.any, "Platform this device is part of.")
    config = SlavePort("PCI configuration space port")
    pci_bus = Param.Int("PCI bus")
    pci_dev = Param.Int("PCI device number")
    pci_func = Param.Int("PCI function code")
    pio_latency = Param.Latency('30ns', "Programmed IO latency")
    config_latency = Param.Latency('20ns', "Config read or write latency")

    # Standard configuration-header fields.
    VendorID = Param.UInt16("Vendor ID")
    DeviceID = Param.UInt16("Device ID")
    Command = Param.UInt16(0, "Command")
    Status = Param.UInt16(0, "Status")
    Revision = Param.UInt8(0, "Device")
    ProgIF = Param.UInt8(0, "Programming Interface")
    SubClassCode = Param.UInt8(0, "Sub-Class Code")
    ClassCode = Param.UInt8(0, "Class Code")
    CacheLineSize = Param.UInt8(0, "System Cacheline Size")
    LatencyTimer = Param.UInt8(0, "PCI Latency Timer")
    HeaderType = Param.UInt8(0, "PCI Header Type")
    BIST = Param.UInt8(0, "Built In Self Test")

    # Base address registers and their decode sizes.
    BAR0 = Param.UInt32(0x00, "Base Address Register 0")
    BAR1 = Param.UInt32(0x00, "Base Address Register 1")
    BAR2 = Param.UInt32(0x00, "Base Address Register 2")
    BAR3 = Param.UInt32(0x00, "Base Address Register 3")
    BAR4 = Param.UInt32(0x00, "Base Address Register 4")
    BAR5 = Param.UInt32(0x00, "Base Address Register 5")
    BAR0Size = Param.MemorySize32('0B', "Base Address Register 0 Size")
    BAR1Size = Param.MemorySize32('0B', "Base Address Register 1 Size")
    BAR2Size = Param.MemorySize32('0B', "Base Address Register 2 Size")
    BAR3Size = Param.MemorySize32('0B', "Base Address Register 3 Size")
    BAR4Size = Param.MemorySize32('0B', "Base Address Register 4 Size")
    BAR5Size = Param.MemorySize32('0B', "Base Address Register 5 Size")
    BAR0LegacyIO = Param.Bool(False, "Whether BAR0 is hardwired legacy IO")
    BAR1LegacyIO = Param.Bool(False, "Whether BAR1 is hardwired legacy IO")
    BAR2LegacyIO = Param.Bool(False, "Whether BAR2 is hardwired legacy IO")
    BAR3LegacyIO = Param.Bool(False, "Whether BAR3 is hardwired legacy IO")
    BAR4LegacyIO = Param.Bool(False, "Whether BAR4 is hardwired legacy IO")
    BAR5LegacyIO = Param.Bool(False, "Whether BAR5 is hardwired legacy IO")
    LegacyIOBase = Param.Addr(0x0, "Base Address for Legacy IO")

    CardbusCIS = Param.UInt32(0x00, "Cardbus Card Information Structure")
    SubsystemID = Param.UInt16(0x00, "Subsystem ID")
    SubsystemVendorID = Param.UInt16(0x00, "Subsystem Vendor ID")
    ExpansionROM = Param.UInt32(0x00, "Expansion ROM Base Address")
    CapabilityPtr = Param.UInt8(0x00, "Capability List Pointer offset")
    InterruptLine = Param.UInt8(0x00, "Interrupt Line")
    InterruptPin = Param.UInt8(0x00, "Interrupt Pin")
    MaximumLatency = Param.UInt8(0x00, "Maximum Latency")
    MinimumGrant = Param.UInt8(0x00, "Minimum Grant")

    # Capabilities List structures for PCIe devices
    # PMCAP - PCI Power Management Capability
    PMCAPBaseOffset = \
        Param.UInt8(0x00, "Base offset of PMCAP in PCI Config space")
    PMCAPNextCapability = \
        Param.UInt8(0x00, "Pointer to next capability block")
    PMCAPCapId = \
        Param.UInt8(0x00, "Specifies this is the Power Management capability")
    PMCAPCapabilities = \
        Param.UInt16(0x0000, "PCI Power Management Capabilities Register")
    PMCAPCtrlStatus = \
        Param.UInt16(0x0000, "PCI Power Management Control and Status")

    # MSICAP - Message Signaled Interrupt Capability
    MSICAPBaseOffset = \
        Param.UInt8(0x00, "Base offset of MSICAP in PCI Config space")
    MSICAPNextCapability = \
        Param.UInt8(0x00, "Pointer to next capability block")
    MSICAPCapId = Param.UInt8(0x00, "Specifies this is the MSI Capability")
    MSICAPMsgCtrl = Param.UInt16(0x0000, "MSI Message Control")
    MSICAPMsgAddr = Param.UInt32(0x00000000, "MSI Message Address")
    MSICAPMsgUpperAddr = Param.UInt32(0x00000000, "MSI Message Upper Address")
    MSICAPMsgData = Param.UInt16(0x0000, "MSI Message Data")
    MSICAPMaskBits = Param.UInt32(0x00000000, "MSI Interrupt Mask Bits")
    MSICAPPendingBits = Param.UInt32(0x00000000, "MSI Pending Bits")

    # MSIXCAP - MSI-X Capability
    MSIXCAPBaseOffset = \
        Param.UInt8(0x00, "Base offset of MSIXCAP in PCI Config space")
    MSIXCAPNextCapability = \
        Param.UInt8(0x00, "Pointer to next capability block")
    MSIXCAPCapId = Param.UInt8(0x00, "Specifices this the MSI-X Capability")
    MSIXMsgCtrl = Param.UInt16(0x0000, "MSI-X Message Control")
    MSIXTableOffset = \
        Param.UInt32(0x00000000, "MSI-X Table Offset and Table BIR")
    MSIXPbaOffset = Param.UInt32(0x00000000, "MSI-X PBA Offset and PBA BIR")

    # PXCAP - PCI Express Capability
    PXCAPBaseOffset = \
        Param.UInt8(0x00, "Base offset of PXCAP in PCI Config space")
    PXCAPNextCapability = Param.UInt8(0x00, "Pointer to next capability block")
    PXCAPCapId = Param.UInt8(0x00, "Specifies this is the PCIe Capability")
    PXCAPCapabilities = Param.UInt16(0x0000, "PCIe Capabilities")
    PXCAPDevCapabilities = Param.UInt32(0x00000000, "PCIe Device Capabilities")
    PXCAPDevCtrl = Param.UInt16(0x0000, "PCIe Device Control")
    PXCAPDevStatus = Param.UInt16(0x0000, "PCIe Device Status")
    PXCAPLinkCap = Param.UInt32(0x00000000, "PCIe Link Capabilities")
    PXCAPLinkCtrl = Param.UInt16(0x0000, "PCIe Link Control")
    PXCAPLinkStatus = Param.UInt16(0x0000, "PCIe Link Status")
    PXCAPDevCap2 = Param.UInt32(0x00000000, "PCIe Device Capabilities 2")
    PXCAPDevCtrl2 = Param.UInt32(0x00000000, "PCIe Device Control 2")
| bsd-3-clause |
AtaraxiaEta/linux | tools/perf/scripts/python/sched-migration.py | 1910 | 11965 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
# Map of pid -> command name seen so far; pid 0 is the idle task.
threads = { 0 : "idle"}
def thread_name(pid):
    """Return a human-readable 'command:pid' label for *pid*."""
    comm = threads[pid]
    return "%s:%d" % (comm, pid)
class RunqueueEventUnknown:
    """Placeholder event used when a runqueue change has no known cause."""

    @staticmethod
    def color():
        # No dedicated color: let the GUI fall back to its default.
        return None

    def __repr__(self):
        return "unknown"
class RunqueueEventSleep:
    """Runqueue event: a task left the runqueue to go to sleep."""

    def __init__(self, sleeper):
        # pid of the task that went to sleep.
        self.sleeper = sleeper

    @staticmethod
    def color():
        # Blue marker for sleep events.
        return (0, 0, 0xff)

    def __repr__(self):
        return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
    """Runqueue event: a sleeping task was woken up."""

    def __init__(self, wakee):
        # pid of the task that woke up.
        self.wakee = wakee

    @staticmethod
    def color():
        # Yellow marker for wakeup events.
        return (0xff, 0xff, 0)

    def __repr__(self):
        return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
    """Runqueue event: a newly forked task appeared on the runqueue."""

    def __init__(self, child):
        # pid of the freshly forked child task.
        self.child = child

    @staticmethod
    def color():
        # Green marker for fork events.
        return (0, 0xff, 0)

    def __repr__(self):
        return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
    """Runqueue event: a task was migrated onto this CPU."""

    def __init__(self, new):
        # pid of the task that arrived on this runqueue.
        self.new = new

    @staticmethod
    def color():
        # Cyan-ish marker for inbound migrations.
        return (0, 0xf0, 0xff)

    def __repr__(self):
        return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
    """Runqueue event: a task was migrated away from this CPU."""

    def __init__(self, old):
        # pid of the task that left this runqueue.
        self.old = old

    @staticmethod
    def color():
        # Magenta marker for outbound migrations.
        return (0xff, 0, 0xff)

    def __repr__(self):
        return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
    """Immutable snapshot of the set of runnable tasks on one CPU.

    Each state-changing operation returns a *new* snapshot (or ``self``
    when nothing changed) tagged with the event that caused the change.
    Pid 0 (the idle task) is always present in the default task set.
    """
    def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
        # The defaults are evaluated once, but neither is ever mutated
        # (tasks is immediately copied into a tuple), so sharing is safe.
        self.tasks = tuple(tasks)
        self.event = event

    def sched_switch(self, prev, prev_state, next):
        """Return the snapshot after a context switch prev -> next."""
        event = RunqueueEventUnknown()

        # Both tasks runnable and already tracked: nothing changed.
        if taskState(prev_state) == "R" and next in self.tasks \
            and prev in self.tasks:
            return self

        if taskState(prev_state) != "R":
            event = RunqueueEventSleep(prev)

        next_tasks = list(self.tasks[:])
        if prev in self.tasks:
            if taskState(prev_state) != "R":
                next_tasks.remove(prev)
        elif taskState(prev_state) == "R":
            next_tasks.append(prev)

        if next not in next_tasks:
            next_tasks.append(next)

        return RunqueueSnapshot(next_tasks, event)

    def migrate_out(self, old):
        """Return the snapshot after *old* has been migrated away."""
        if old not in self.tasks:
            return self
        next_tasks = [task for task in self.tasks if task != old]

        return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))

    def __migrate_in(self, new, event):
        # Already present: just record the new event on this snapshot.
        if new in self.tasks:
            self.event = event
            return self
        next_tasks = self.tasks[:] + tuple([new])

        return RunqueueSnapshot(next_tasks, event)

    def migrate_in(self, new):
        """Return the snapshot after *new* has been migrated here."""
        return self.__migrate_in(new, RunqueueMigrateIn(new))

    def wake_up(self, new):
        """Return the snapshot after *new* has been woken up here."""
        return self.__migrate_in(new, RunqueueEventWakeup(new))

    def wake_up_new(self, new):
        """Return the snapshot after freshly forked task *new* appeared."""
        return self.__migrate_in(new, RunqueueEventFork(new))

    def load(self):
        """ Provide the number of tasks on the runqueue.
            Don't count idle"""
        return len(self.tasks) - 1

    def __repr__(self):
        # Bug fix: the original called self.origin_tostring(), a method
        # that is defined nowhere (and this class has no base class), so
        # repr() always raised AttributeError.  Show the task set
        # followed by the originating event instead.
        ret = self.tasks.__repr__()
        ret += " (%s)" % self.event.__repr__()

        return ret
class TimeSlice:
    """One [start, end] interval of the trace, holding a per-cpu runqueue
    snapshot (rqs) and the aggregate load over all cpus (total_load).

    Slices form a linked list through ``prev``; a new slice inherits its
    predecessor's runqueues and load until an event modifies them.
    """
    def __init__(self, start, prev):
        self.start = start
        self.prev = prev
        self.end = start
        # cpus that triggered the event
        self.event_cpus = []
        if prev is not None:
            # Inherit state from the previous slice (copy, so edits here
            # do not mutate history).
            self.total_load = prev.total_load
            self.rqs = prev.rqs.copy()
        else:
            # First slice ever: empty runqueues, no load.
            self.rqs = defaultdict(RunqueueSnapshot)
            self.total_load = 0

    def __update_total_load(self, old_rq, new_rq):
        # Fold the per-cpu load delta into the slice-wide total.
        diff = new_rq.load() - old_rq.load()
        self.total_load += diff

    def sched_switch(self, ts_list, prev, prev_state, next, cpu):
        """Apply a context switch on *cpu*; append self to ts_list if it changed anything."""
        old_rq = self.prev.rqs[cpu]
        new_rq = old_rq.sched_switch(prev, prev_state, next)
        if old_rq is new_rq:
            # Snapshot unchanged: this slice records no event.
            return

        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def migrate(self, ts_list, new, old_cpu, new_cpu):
        """Move task *new* from old_cpu's runqueue to new_cpu's."""
        if old_cpu == new_cpu:
            return
        old_rq = self.prev.rqs[old_cpu]
        out_rq = old_rq.migrate_out(new)
        self.rqs[old_cpu] = out_rq
        self.__update_total_load(old_rq, out_rq)

        new_rq = self.prev.rqs[new_cpu]
        in_rq = new_rq.migrate_in(new)
        self.rqs[new_cpu] = in_rq
        self.__update_total_load(new_rq, in_rq)

        ts_list.append(self)

        # Only flag the source cpu if the task was actually removed there.
        if old_rq is not out_rq:
            self.event_cpus.append(old_cpu)
        self.event_cpus.append(new_cpu)

    def wake_up(self, ts_list, pid, cpu, fork):
        """Record a wakeup (or fork wakeup, if *fork* is truthy) of *pid* on *cpu*."""
        old_rq = self.prev.rqs[cpu]
        if fork:
            new_rq = old_rq.wake_up_new(pid)
        else:
            new_rq = old_rq.wake_up(pid)

        if new_rq is old_rq:
            return

        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def next(self, t):
        """Close this slice at time *t* and return its successor."""
        self.end = t
        return TimeSlice(t, self)
class TimeSliceList(UserList):
    """Ordered list of TimeSlice objects plus GUI callbacks.

    NOTE(review): this script is Python-2 only: ``find_time_slice`` relies on
    integer ``/`` division and ``fill_zone`` uses ``xrange``.
    """
    def __init__(self, arg = []):
        # NOTE(review): mutable default argument; callers always pass no
        # argument or a fresh list, so it is harmless here.
        self.data = arg

    def get_time_slice(self, ts):
        """Return a slice ending at *ts*: the very first one, or the successor of the last."""
        if len(self.data) == 0:
            slice = TimeSlice(ts, TimeSlice(-1, None))
        else:
            slice = self.data[-1].next(ts)
        return slice

    def find_time_slice(self, ts):
        """Binary-search the slice containing timestamp *ts*; return its index or -1."""
        start = 0
        end = len(self.data)
        found = -1
        searching = True
        while searching:
            # Last iteration once the window has collapsed.
            if start == end or start == end - 1:
                searching = False

            i = (end + start) / 2
            if self.data[i].start <= ts and self.data[i].end >= ts:
                found = i
                end = i
                continue

            if self.data[i].end < ts:
                start = i

            elif self.data[i].start > ts:
                end = i

        return found

    def set_root_win(self, win):
        # GUI root window used by the callbacks below.
        self.root_win = win

    def mouse_down(self, cpu, t):
        """GUI callback: show a textual summary of cpu's runqueue at time t."""
        idx = self.find_time_slice(t)
        if idx == -1:
            return

        ts = self[idx]
        rq = ts.rqs[cpu]
        raw = "CPU: %d\n" % cpu
        raw += "Last event : %s\n" % rq.event.__repr__()
        raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
        raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
        raw += "Load = %d\n" % rq.load()
        for t in rq.tasks:
            raw += "%s \n" % thread_name(t)

        self.root_win.update_summary(raw)

    def update_rectangle_cpu(self, slice, cpu):
        """Paint one cpu's band for *slice*: redder means higher share of the load."""
        rq = slice.rqs[cpu]

        if slice.total_load != 0:
            load_rate = rq.load() / float(slice.total_load)
        else:
            load_rate = 0

        red_power = int(0xff - (0xff * load_rate))
        color = (0xff, red_power, red_power)

        top_color = None

        if cpu in slice.event_cpus:
            top_color = rq.event.color()

        self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)

    def fill_zone(self, start, end):
        """Repaint every slice intersecting [start, end]."""
        i = self.find_time_slice(start)
        if i == -1:
            return

        for i in xrange(i, len(self.data)):
            timeslice = self.data[i]
            if timeslice.start > end:
                return

            for cpu in timeslice.rqs:
                self.update_rectangle_cpu(timeslice, cpu)

    def interval(self):
        """Return (first start, last end) of the recorded trace, or (0, 0)."""
        if len(self.data) == 0:
            return (0, 0)

        return (self.data[0].start, self.data[-1].end)

    def nr_rectangles(self):
        """Return the highest cpu number seen in the last slice (rows to draw)."""
        last_ts = self.data[-1]
        max_cpu = 0
        for cpu in last_ts.rqs:
            if cpu > max_cpu:
                max_cpu = cpu
        return max_cpu
class SchedEventProxy:
    """Receives raw perf sched events, sanity-checks them and feeds the
    per-cpu TimeSlice model."""

    def __init__(self):
        # Last pid seen running on each cpu; -1 means "not known yet".
        self.current_tsk = defaultdict(lambda: -1)
        self.timeslices = TimeSliceList()

    def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
                     next_comm, next_pid, next_prio):
        """ Ensure the task we sched out this cpu is really the one
            we logged. Otherwise we may have missed traces """

        on_cpu_task = self.current_tsk[headers.cpu]
        if on_cpu_task != -1 and on_cpu_task != prev_pid:
            # FIX: was a Python-2-only print statement; the parenthesized
            # form below behaves identically on Python 2 and 3.
            print("Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" %
                  (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid))

        threads[prev_pid] = prev_comm
        threads[next_pid] = next_comm
        self.current_tsk[headers.cpu] = next_pid

        ts = self.timeslices.get_time_slice(headers.ts())
        ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)

    def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
        """Record a task migration between cpus."""
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)

    def wake_up(self, headers, comm, pid, success, target_cpu, fork):
        """Record a (successful) wakeup; *fork* distinguishes new tasks."""
        if success == 0:
            return
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
    """perf hook: build the global event parser before any event is replayed."""
    global parser
    parser = SchedEventProxy()
def trace_end():
    """perf hook: all events replayed; hand the collected timeslices to the GUI."""
    gui_app = wx.App(False)
    # Keep a reference to the frame for the lifetime of the main loop.
    frame = RootFrame(parser.timeslices, "Migration")
    gui_app.MainLoop()
# The perf scripting engine calls a handler for every event type in the
# trace.  The handlers below are required to exist but are irrelevant to
# the migration GUI, so they deliberately do nothing.
def sched__sched_stat_runtime(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, runtime, vruntime):
    pass

def sched__sched_stat_iowait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, delay):
    pass

def sched__sched_stat_sleep(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, delay):
    pass

def sched__sched_stat_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, delay):
    pass

def sched__sched_process_fork(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, parent_comm, parent_pid, child_comm, child_pid):
    pass

def sched__sched_process_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, prio):
    pass

def sched__sched_process_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, prio):
    pass

def sched__sched_process_free(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, prio):
    pass
# Handlers for the events the GUI actually visualizes: each one wraps the
# common fields in an EventHeaders object and forwards to the global parser
# created in trace_begin().
def sched__sched_migrate_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, prio, orig_cpu,
        dest_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm, common_callchain)
    parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)

def sched__sched_switch(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm, common_callchain,
        prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm, common_callchain)
    parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
                        next_comm, next_pid, next_prio)

def sched__sched_wakeup_new(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, prio, success,
        target_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm, common_callchain)
    # fork=1: the woken task is brand new.
    parser.wake_up(headers, comm, pid, success, target_cpu, 1)

def sched__sched_wakeup(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, prio, success,
        target_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm, common_callchain)
    # fork=0: ordinary wakeup of an existing task.
    parser.wake_up(headers, comm, pid, success, target_cpu, 0)
# Remaining mandatory handlers; unused by the migration GUI.
def sched__sched_wait_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, prio):
    pass

def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, ret):
    pass

def sched__sched_kthread_stop(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid):
    pass

def trace_unhandled(event_name, context, event_fields_dict):
    # Events with no dedicated handler are silently ignored.
    pass
| gpl-2.0 |
TatsuyaShirakawa/poincare-embedding | scripts/create_mammal_subtree.py | 2 | 1450 | from __future__ import print_function, division, unicode_literals, absolute_import
import random
from nltk.corpus import wordnet as wn
import click
def transitive_closure(synsets):
    """Collect (synset, hypernym) pairs for the given synsets.

    For every synset, every noun ancestor on each of its hypernym paths
    (excluding the path root) is paired with it.
    """
    pairs = set()
    for synset in synsets:
        for path in synset.hypernym_paths():
            for ancestor in path[1:]:
                if ancestor.pos() == 'n':
                    pairs.add((synset, ancestor))
    return pairs
@click.command()
@click.argument('result_file')
@click.option('--shuffle', is_flag=True)
@click.option('--sep', default='\t')
@click.option('--target', default='mammal.n.01')
def main(result_file, shuffle, sep, target):
    """Extract all (noun, ancestor) hypernym pairs below *target* from
    WordNet and write them, one pair per line, to RESULT_FILE."""
    target = wn.synset(target)
    print('target:', target)
    words = wn.words()
    nouns = set([])
    for word in words:
        nouns.update(wn.synsets(word, pos='n'))
    print( len(nouns), 'nouns')
    hypernyms = []
    for noun in nouns:
        paths = noun.hypernym_paths()
        for path in paths:
            try:
                pos = path.index(target)
            except ValueError:
                # BUG FIX: was `except Exception` around the whole loop;
                # only list.index() can raise here, and only ValueError.
                continue
            # Pair the noun with target and every ancestor below it
            # (the last path element is the noun itself, hence len-1).
            for i in range(pos, len(path) - 1):
                hypernyms.append((noun, path[i]))
    hypernyms = list(set(hypernyms))
    print( len(hypernyms), 'hypernyms' )
    if shuffle:
        # BUG FIX: previously `if not shuffle`, i.e. the list was shuffled
        # exactly when --shuffle was NOT given.
        random.shuffle(hypernyms)
    with open(result_file, 'w') as fout:
        for n1, n2 in hypernyms:
            print(n1.name(), n2.name(), sep=sep, file=fout)


if __name__ == '__main__':
    main()
| mit |
soarpenguin/ansible | lib/ansible/modules/network/fortios/fortios_address.py | 7 | 10230 | #!/usr/bin/python
#
# Ansible module to manage IP addresses on fortios devices
# (c) 2016, Benjamin Jolivot <bjolivot@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'
}
DOCUMENTATION = """
---
module: fortios_address
version_added: "2.4"
author: "Benjamin Jolivot (@bjolivot)"
short_description: Manage fortios firewall address objects
description:
    - This module provides management of firewall addresses on FortiOS devices.
extends_documentation_fragment: fortios
options:
state:
description:
- Specifies if address need to be added or deleted.
required: true
choices: ['present', 'absent']
name:
description:
- Name of the address to add or delete.
required: true
type:
description:
- Type of the address.
choices: ['iprange', 'fqdn', 'ipmask', 'geography']
value:
description:
- Address value, based on type.
        If type=fqdn, something like www.google.com.
If type=ipmask, you can use simple ip (192.168.0.1), ip+mask (192.168.0.1 255.255.255.0) or CIDR (192.168.0.1/32).
start_ip:
description:
- First ip in range (used only with type=iprange).
end_ip:
description:
- Last ip in range (used only with type=iprange).
country:
description:
- 2 letter country code (like FR).
interface:
description:
- interface name the address apply to.
default: any
comment:
description:
- free text to describe address.
notes:
- This module requires netaddr python library.
"""
EXAMPLES = """
- name: Register french addresses
fortios_address:
host: 192.168.0.254
username: admin
password: p4ssw0rd
state: present
name: "fromfrance"
type: geography
country: FR
comment: "French geoip address"
- name: Register some fqdn
fortios_address:
host: 192.168.0.254
username: admin
password: p4ssw0rd
state: present
name: "Ansible"
type: fqdn
value: www.ansible.com
comment: "Ansible website"
- name: Register google DNS
fortios_address:
host: 192.168.0.254
username: admin
password: p4ssw0rd
state: present
name: "google_dns"
type: ipmask
value: 8.8.8.8
"""
RETURN = """
firewall_address_config:
    description: full firewall addresses config string.
returned: always
type: string
change_string:
description: The commands executed by the module.
returned: only if config changed
type: string
"""
from ansible.module_utils.fortios import fortios_argument_spec, fortios_required_if
from ansible.module_utils.fortios import backup, AnsibleFortios
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
# check for netaddr lib
try:
from netaddr import IPNetwork
HAS_NETADDR = True
except:
HAS_NETADDR = False
# define valid country list for GEOIP address type
FG_COUNTRY_LIST = (
'ZZ', 'A1', 'A2', 'O1', 'AD', 'AE', 'AF', 'AG', 'AI', 'AL', 'AM', 'AN', 'AO',
'AP', 'AQ', 'AR', 'AS', 'AT', 'AU', 'AW', 'AX', 'AZ', 'BA', 'BB', 'BD', 'BE',
'BF', 'BG', 'BH', 'BI', 'BJ', 'BL', 'BM', 'BN', 'BO', 'BQ', 'BR', 'BS', 'BT',
'BV', 'BW', 'BY', 'BZ', 'CA', 'CC', 'CD', 'CF', 'CG', 'CH', 'CI', 'CK', 'CL',
'CM', 'CN', 'CO', 'CR', 'CU', 'CV', 'CW', 'CX', 'CY', 'CZ', 'DE', 'DJ', 'DK',
'DM', 'DO', 'DZ', 'EC', 'EE', 'EG', 'EH', 'ER', 'ES', 'ET', 'EU', 'FI', 'FJ',
'FK', 'FM', 'FO', 'FR', 'GA', 'GB', 'GD', 'GE', 'GF', 'GG', 'GH', 'GI', 'GL',
'GM', 'GN', 'GP', 'GQ', 'GR', 'GS', 'GT', 'GU', 'GW', 'GY', 'HK', 'HM', 'HN',
'HR', 'HT', 'HU', 'ID', 'IE', 'IL', 'IM', 'IN', 'IO', 'IQ', 'IR', 'IS', 'IT',
'JE', 'JM', 'JO', 'JP', 'KE', 'KG', 'KH', 'KI', 'KM', 'KN', 'KP', 'KR', 'KW',
'KY', 'KZ', 'LA', 'LB', 'LC', 'LI', 'LK', 'LR', 'LS', 'LT', 'LU', 'LV', 'LY',
'MA', 'MC', 'MD', 'ME', 'MF', 'MG', 'MH', 'MK', 'ML', 'MM', 'MN', 'MO', 'MP',
'MQ', 'MR', 'MS', 'MT', 'MU', 'MV', 'MW', 'MX', 'MY', 'MZ', 'NA', 'NC', 'NE',
'NF', 'NG', 'NI', 'NL', 'NO', 'NP', 'NR', 'NU', 'NZ', 'OM', 'PA', 'PE', 'PF',
'PG', 'PH', 'PK', 'PL', 'PM', 'PN', 'PR', 'PS', 'PT', 'PW', 'PY', 'QA', 'RE',
'RO', 'RS', 'RU', 'RW', 'SA', 'SB', 'SC', 'SD', 'SE', 'SG', 'SH', 'SI', 'SJ',
'SK', 'SL', 'SM', 'SN', 'SO', 'SR', 'SS', 'ST', 'SV', 'SX', 'SY', 'SZ', 'TC',
'TD', 'TF', 'TG', 'TH', 'TJ', 'TK', 'TL', 'TM', 'TN', 'TO', 'TR', 'TT', 'TV',
'TW', 'TZ', 'UA', 'UG', 'UM', 'US', 'UY', 'UZ', 'VA', 'VC', 'VE', 'VG', 'VI',
'VN', 'VU', 'WF', 'WS', 'YE', 'YT', 'ZA', 'ZM', 'ZW'
)
def get_formated_ipaddr(input_ip):
    """
    Format given ip address string to fortigate format (ip netmask)
    Args:
        * **input_ip** (string) : string representing ip address
        accepted format:
            - ip netmask  (ex: 192.168.0.10 255.255.255.0)
            - ip          (ex: 192.168.0.10)
            - CIDR        (ex: 192.168.0.10/24)

    Returns:
        formated ip if ip is valid (ex: "192.168.0.10 255.255.255.0")
        False if ip is not valid
    """
    try:
        if " " in input_ip:
            # "ip netmask" form: validate both halves with netaddr, then
            # return them unchanged (previously the parsed results were
            # bound to unused variables).
            str_ip, str_netmask = input_ip.split(" ")
            IPNetwork(str_ip)
            IPNetwork(str_netmask)
            return "%s %s" % (str_ip, str_netmask)
        # Plain ip or CIDR form: normalize through netaddr.
        ip = IPNetwork(input_ip)
        return "%s %s" % (str(ip.ip), str(ip.netmask))
    except Exception:
        # FIX: was a bare `except:`; netaddr raises AddrFormatError or
        # ValueError on malformed input.  (Dead trailing `return False`
        # removed as well.)
        return False
def main():
    """Entry point: validate parameters and create/delete a FortiOS firewall
    address object via the AnsibleFortios helper."""
    argument_spec = dict(
        state=dict(required=True, choices=['present', 'absent']),
        name=dict(required=True),
        type=dict(choices=['iprange', 'fqdn', 'ipmask', 'geography'], default='ipmask'),
        value=dict(),
        start_ip=dict(),
        end_ip=dict(),
        country=dict(),
        interface=dict(default='any'),
        comment=dict(),
    )

    # merge argument_spec from module_utils/fortios.py
    argument_spec.update(fortios_argument_spec)

    # Load module
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=fortios_required_if,
        supports_check_mode=True,
    )
    result = dict(changed=False)

    if not HAS_NETADDR:
        module.fail_json(msg='Could not import the python library netaddr required by this module')

    # check params
    # state=absent only identifies the object by name; every other
    # descriptive option is rejected to surface user mistakes early.
    if module.params['state'] == 'absent':
        if module.params['type'] != "ipmask":
            module.fail_json(msg='Invalid argument type=%s when state=absent' % module.params['type'])
        if module.params['value'] is not None:
            module.fail_json(msg='Invalid argument `value` when state=absent')
        if module.params['start_ip'] is not None:
            module.fail_json(msg='Invalid argument `start_ip` when state=absent')
        if module.params['end_ip'] is not None:
            module.fail_json(msg='Invalid argument `end_ip` when state=absent')
        if module.params['country'] is not None:
            module.fail_json(msg='Invalid argument `country` when state=absent')
        if module.params['interface'] != "any":
            module.fail_json(msg='Invalid argument `interface` when state=absent')
        if module.params['comment'] is not None:
            module.fail_json(msg='Invalid argument `comment` when state=absent')
    else:
        # state=present
        # validate IP
        if module.params['type'] == "ipmask":
            formated_ip = get_formated_ipaddr(module.params['value'])
            if formated_ip is not False:
                module.params['value'] = get_formated_ipaddr(module.params['value'])
            else:
                module.fail_json(msg="Bad ip address format")

        # validate country
        if module.params['type'] == "geography":
            if module.params['country'] not in FG_COUNTRY_LIST:
                module.fail_json(msg="Invalid country argument, need to be in `diagnose firewall ipgeo country-list`")

        # validate iprange
        if module.params['type'] == "iprange":
            if module.params['start_ip'] is None:
                module.fail_json(msg="Missing argument 'start_ip' when type is iprange")
            if module.params['end_ip'] is None:
                module.fail_json(msg="Missing argument 'end_ip' when type is iprange")

    # init forti object
    fortigate = AnsibleFortios(module)

    # Config path
    config_path = 'firewall address'

    # load config
    fortigate.load_config(config_path)

    # Absent State
    if module.params['state'] == 'absent':
        fortigate.candidate_config[config_path].del_block(module.params['name'])

    # Present state
    if module.params['state'] == 'present':
        # define address params
        new_addr = fortigate.get_empty_configuration_block(module.params['name'], 'edit')

        if module.params['comment'] is not None:
            new_addr.set_param('comment', '"%s"' % (module.params['comment']))

        if module.params['type'] == 'iprange':
            new_addr.set_param('type', 'iprange')
            new_addr.set_param('start-ip', module.params['start_ip'])
            new_addr.set_param('end-ip', module.params['end_ip'])

        if module.params['type'] == 'geography':
            new_addr.set_param('type', 'geography')
            new_addr.set_param('country', '"%s"' % (module.params['country']))

        if module.params['interface'] != 'any':
            new_addr.set_param('associated-interface', '"%s"' % (module.params['interface']))

        if module.params['value'] is not None:
            if module.params['type'] == 'fqdn':
                new_addr.set_param('type', 'fqdn')
                new_addr.set_param('fqdn', '"%s"' % (module.params['value']))
            if module.params['type'] == 'ipmask':
                new_addr.set_param('subnet', module.params['value'])

        # add the new address object to the device
        fortigate.add_block(module.params['name'], new_addr)

    # Apply changes (check mode is managed directly by the fortigate object)
    fortigate.apply_changes()


if __name__ == '__main__':
    main()
| gpl-3.0 |
valentin-krasontovitsch/ansible | lib/ansible/modules/packaging/language/maven_artifact.py | 30 | 22470 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Chris Schmidt <chris.schmidt () contrastsecurity.com>
#
# Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact
# as a reference and starting point.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: maven_artifact
short_description: Downloads an Artifact from a Maven Repository
version_added: "2.0"
description:
- Downloads an artifact from a maven repository given the maven coordinates provided to the module.
- Can retrieve snapshots or release versions of the artifact and will resolve the latest available
version if one is not available.
author: "Chris Schmidt (@chrisisbeef)"
requirements:
- lxml
- boto if using a S3 repository (s3://...)
options:
group_id:
description:
- The Maven groupId coordinate
required: true
artifact_id:
description:
- The maven artifactId coordinate
required: true
version:
description:
- The maven version coordinate
default: latest
classifier:
description:
- The maven classifier coordinate
extension:
description:
- The maven type/extension coordinate
default: jar
repository_url:
description:
- The URL of the Maven Repository to download from.
- Use s3://... if the repository is hosted on Amazon S3, added in version 2.2.
- Use file://... if the repository is local, added in version 2.6
default: http://repo1.maven.org/maven2
username:
description:
            - The username to authenticate as to the Maven Repository. Use AWS secret key if the repository is hosted on S3
aliases: [ "aws_secret_key" ]
password:
description:
            - The password to authenticate with to the Maven Repository. Use AWS secret access key if the repository is hosted on S3
aliases: [ "aws_secret_access_key" ]
dest:
description:
- The path where the artifact should be written to
- If file mode or ownerships are specified and destination path already exists, they affect the downloaded file
required: true
state:
description:
- The desired state of the artifact
default: present
choices: [present,absent]
timeout:
description:
- Specifies a timeout in seconds for the connection attempt
default: 10
version_added: "2.3"
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be set to C(no) when no other option exists.
type: bool
default: 'yes'
version_added: "1.9.3"
keep_name:
description:
- If C(yes), the downloaded artifact's name is preserved, i.e the version number remains part of it.
- This option only has effect when C(dest) is a directory and C(version) is set to C(latest).
type: bool
default: 'no'
version_added: "2.4"
verify_checksum:
description:
- If C(never), the md5 checksum will never be downloaded and verified.
- If C(download), the md5 checksum will be downloaded and verified only after artifact download. This is the default.
- If C(change), the md5 checksum will be downloaded and verified if the destination already exist,
to verify if they are identical. This was the behaviour before 2.6. Since it downloads the md5 before (maybe)
downloading the artifact, and since some repository software, when acting as a proxy/cache, return a 404 error
if the artifact has not been cached yet, it may fail unexpectedly.
If you still need it, you should consider using C(always) instead - if you deal with a checksum, it is better to
use it to verify integrity after download.
- C(always) combines C(download) and C(change).
required: false
default: 'download'
choices: ['never', 'download', 'change', 'always']
version_added: "2.6"
extends_documentation_fragment:
- files
'''
EXAMPLES = '''
# Download the latest version of the JUnit framework artifact from Maven Central
- maven_artifact:
group_id: junit
artifact_id: junit
dest: /tmp/junit-latest.jar
# Download JUnit 4.11 from Maven Central
- maven_artifact:
group_id: junit
artifact_id: junit
version: 4.11
dest: /tmp/junit-4.11.jar
# Download an artifact from a private repository requiring authentication
- maven_artifact:
group_id: com.company
artifact_id: library-name
repository_url: 'https://repo.company.com/maven'
username: user
password: pass
dest: /tmp/library-name-latest.jar
# Download a WAR File to the Tomcat webapps directory to be deployed
- maven_artifact:
group_id: com.company
artifact_id: web-app
extension: war
repository_url: 'https://repo.company.com/maven'
dest: /var/lib/tomcat7/webapps/web-app.war
# Keep a downloaded artifact's name, i.e. retain the version
- maven_artifact:
version: latest
artifact_id: spring-core
group_id: org.springframework
dest: /tmp/
keep_name: yes
# Download the latest version of the JUnit framework artifact from Maven local
- maven_artifact:
group_id: junit
artifact_id: junit
dest: /tmp/junit-latest.jar
repository_url: "file://{{ lookup('env','HOME') }}/.m2/repository"
'''
import hashlib
import os
import posixpath
import shutil
import io
import tempfile
try:
from lxml import etree
HAS_LXML_ETREE = True
except ImportError:
HAS_LXML_ETREE = False
try:
import boto3
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_bytes, to_native, to_text
def split_pre_existing_dir(dirname):
    '''
    Return the first pre-existing directory and a list of the new directories that will be created.

    BUG FIX: a relative, nonexistent single-component path used to recurse
    forever (os.path.split yields an empty head, which never exists, so the
    recursion never terminated).  An empty head now terminates the descent.
    '''
    head, tail = os.path.split(dirname)
    b_head = to_bytes(head, errors='surrogate_or_strict')
    if head and not os.path.exists(b_head):
        (pre_existing_dir, new_directory_list) = split_pre_existing_dir(head)
    else:
        return head, [tail]
    new_directory_list.append(tail)
    return pre_existing_dir, new_directory_list
def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed):
    '''
    Walk the list of newly created directories (shallowest first) and apply
    the requested filesystem attributes to each one.
    '''
    # Consume the list front-to-back, mutating it as the recursive
    # implementation did (each processed segment is popped off).
    while new_directory_list:
        pre_existing_dir = os.path.join(pre_existing_dir, new_directory_list.pop(0))
        directory_args['path'] = pre_existing_dir
        changed = module.set_fs_attributes_if_different(directory_args, changed)
    return changed
class Artifact(object):
    """Maven coordinates: groupId, artifactId, version, optional classifier
    and packaging extension (defaults to 'jar')."""

    def __init__(self, group_id, artifact_id, version, classifier='', extension='jar'):
        if not group_id:
            raise ValueError("group_id must be set")
        if not artifact_id:
            raise ValueError("artifact_id must be set")

        self.group_id = group_id
        self.artifact_id = artifact_id
        self.version = version
        self.classifier = classifier
        # A falsy extension (None or '') falls back to the default packaging.
        self.extension = extension if extension else "jar"

    def is_snapshot(self):
        """True when the version denotes a SNAPSHOT build."""
        return self.version.endswith("SNAPSHOT") if self.version else self.version

    def path(self, with_version=True):
        """Repository-relative directory for this artifact."""
        segments = [self.group_id.replace(".", "/"), self.artifact_id]
        if with_version and self.version:
            segments.append(self.version)
        return posixpath.join(*segments)

    def _generate_filename(self):
        # classifier, when present, is inserted between artifactId and extension
        if self.classifier:
            return "%s-%s.%s" % (self.artifact_id, self.classifier, self.extension)
        return "%s.%s" % (self.artifact_id, self.extension)

    def get_filename(self, filename=None):
        """Resolve the destination file name; a directory gets the generated name appended."""
        if not filename:
            return self._generate_filename()
        if os.path.isdir(filename):
            return os.path.join(filename, self._generate_filename())
        return filename

    def __str__(self):
        if self.classifier:
            return "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version)
        if self.extension != "jar":
            return "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version)
        return "%s:%s:%s" % (self.group_id, self.artifact_id, self.version)

    @staticmethod
    def parse(input):
        """Parse 'g:a:v', 'g:a:ext:v' or 'g:a:ext:classifier:v'; None if malformed."""
        parts = input.split(":")
        if len(parts) < 3:
            return None
        group = parts[0]
        artifact = parts[1]
        version = parts[-1]
        extension = None
        classifier = None
        if len(parts) == 4:
            extension = parts[2]
        elif len(parts) == 5:
            extension = parts[2]
            classifier = parts[3]
        return Artifact(group, artifact, version, classifier, extension)
class MavenDownloader:
    """Resolve Maven coordinates against a repository (http(s), s3 or local
    file based) and download artifacts, optionally verifying md5 checksums."""

    def __init__(self, module, base="http://repo1.maven.org/maven2", local=False):
        self.module = module
        if base.endswith("/"):
            base = base.rstrip("/")
        self.base = base
        self.local = local
        self.user_agent = "Maven Artifact Downloader/1.0"
        # Cache: resolving "latest" hits the repository at most once.
        self.latest_version_found = None
        self.metadata_file_name = "maven-metadata-local.xml" if local else "maven-metadata.xml"

    def find_latest_version_available(self, artifact):
        """Return the newest version listed in the repository metadata (None if absent)."""
        if self.latest_version_found:
            return self.latest_version_found
        path = "/%s/%s" % (artifact.path(False), self.metadata_file_name)
        content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
        xml = etree.fromstring(content)
        v = xml.xpath("/metadata/versioning/versions/version[last()]/text()")
        if v:
            self.latest_version_found = v[0]
            return v[0]

    def find_uri_for_artifact(self, artifact):
        """Compute the concrete download URI, resolving 'latest' and SNAPSHOT versions."""
        if artifact.version == "latest":
            artifact.version = self.find_latest_version_available(artifact)

        if artifact.is_snapshot():
            if self.local:
                return self._uri_for_artifact(artifact, artifact.version)
            path = "/%s/%s" % (artifact.path(), self.metadata_file_name)
            content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
            xml = etree.fromstring(content)

            # Prefer the per-classifier/extension resolved snapshot value.
            for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"):
                classifier = snapshotArtifact.xpath("classifier/text()")
                artifact_classifier = classifier[0] if classifier else ''
                extension = snapshotArtifact.xpath("extension/text()")
                artifact_extension = extension[0] if extension else ''
                if artifact_classifier == artifact.classifier and artifact_extension == artifact.extension:
                    return self._uri_for_artifact(artifact, snapshotArtifact.xpath("value/text()")[0])
            # Fall back to timestamp + buildNumber expansion of "SNAPSHOT".
            timestamp_xmlpath = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")
            if timestamp_xmlpath:
                timestamp = timestamp_xmlpath[0]
                build_number = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
                return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + build_number))

        return self._uri_for_artifact(artifact, artifact.version)

    def _uri_for_artifact(self, artifact, version=None):
        """Build the full artifact URI for an explicit (resolved) version."""
        if artifact.is_snapshot() and not version:
            raise ValueError("Expected uniqueversion for snapshot artifact " + str(artifact))
        elif not artifact.is_snapshot():
            version = artifact.version
        if artifact.classifier:
            return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension)

        return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "." + artifact.extension)

    # for small files, directly get the full content
    def _getContent(self, url, failmsg, force=True):
        """Return the body at *url* as bytes; raise (or return None if not force) on failure."""
        if self.local:
            parsed_url = urlparse(url)
            if os.path.isfile(parsed_url.path):
                with io.open(parsed_url.path, 'rb') as f:
                    return f.read()
            if force:
                raise ValueError(failmsg + " because can not find file: " + url)
            return None
        response = self._request(url, failmsg, force)
        if response:
            return response.read()
        return None

    # only for HTTP request
    def _request(self, url, failmsg, force=True):
        """Perform an HTTP(S) GET; s3:// urls are converted to presigned urls first."""
        url_to_use = url
        parsed_url = urlparse(url)

        if parsed_url.scheme == 's3':
            parsed_url = urlparse(url)
            bucket_name = parsed_url.netloc
            key_name = parsed_url.path[1:]
            client = boto3.client('s3', aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', ''))
            url_to_use = client.generate_presigned_url('get_object', Params={'Bucket': bucket_name, 'Key': key_name}, ExpiresIn=10)

        req_timeout = self.module.params.get('timeout')

        # Hack to add parameters in the way that fetch_url expects
        self.module.params['url_username'] = self.module.params.get('username', '')
        self.module.params['url_password'] = self.module.params.get('password', '')
        self.module.params['http_agent'] = self.module.params.get('user_agent', None)

        response, info = fetch_url(self.module, url_to_use, timeout=req_timeout)
        if info['status'] == 200:
            return response
        if force:
            raise ValueError(failmsg + " because of " + info['msg'] + "for URL " + url_to_use)
        return None

    def download(self, tmpdir, artifact, verify_download, filename=None):
        """Download *artifact* into place; return None on success or an error string."""
        if not artifact.version or artifact.version == "latest":
            artifact = Artifact(artifact.group_id, artifact.artifact_id, self.find_latest_version_available(artifact),
                                artifact.classifier, artifact.extension)
        url = self.find_uri_for_artifact(artifact)
        tempfd, tempname = tempfile.mkstemp(dir=tmpdir)

        try:
            # copy to temp file
            if self.local:
                parsed_url = urlparse(url)
                if os.path.isfile(parsed_url.path):
                    shutil.copy2(parsed_url.path, tempname)
                else:
                    return "Can not find local file: " + parsed_url.path
            else:
                response = self._request(url, "Failed to download artifact " + str(artifact))
                with os.fdopen(tempfd, 'wb') as f:
                    shutil.copyfileobj(response, f)

            if verify_download:
                invalid_md5 = self.is_invalid_md5(tempname, url)
                if invalid_md5:
                    # if verify_change was set, the previous file would be deleted
                    os.remove(tempname)
                    return invalid_md5
        except Exception:
            os.remove(tempname)
            # FIX: bare re-raise preserves the original traceback
            # (previously `raise e`).
            raise

        # all good, now copy temp file to target
        shutil.move(tempname, artifact.get_filename(filename))
        return None

    def is_invalid_md5(self, file, remote_url):
        """Compare the local file's md5 with the repository's; return an error string or None."""
        if os.path.exists(file):
            local_md5 = self._local_md5(file)
            if self.local:
                parsed_url = urlparse(remote_url)
                remote_md5 = self._local_md5(parsed_url.path)
            else:
                try:
                    remote_md5 = to_text(self._getContent(remote_url + '.md5', "Failed to retrieve MD5", False), errors='strict')
                except UnicodeError as e:
                    return "Cannot retrieve a valid md5 from %s: %s" % (remote_url, to_native(e))
                if not remote_md5:
                    return "Cannot find md5 from " + remote_url
                try:
                    # Check if remote md5 only contains md5 or md5 + filename
                    _remote_md5 = remote_md5.split(None)[0]
                    remote_md5 = _remote_md5
                # remote_md5 is empty so we continue and keep original md5 string
                # This should not happen since we check for remote_md5 before
                except IndexError:
                    pass
            if local_md5 == remote_md5:
                return None
            else:
                # FIX: error message previously read "...computed <md5>but the
                # repository states..." (missing space).
                return "Checksum does not match: we computed " + local_md5 + " but the repository states " + remote_md5

        return "Path does not exist: " + file

    def _local_md5(self, file):
        """Compute the md5 hex digest of *file*, streaming in 8 KiB chunks."""
        md5 = hashlib.md5()
        with io.open(file, 'rb') as f:
            for chunk in iter(lambda: f.read(8192), b''):
                md5.update(chunk)
        return md5.hexdigest()
def main():
    """Ansible module entry point: ensure a Maven artifact exists at ``dest``.

    Resolves the requested coordinates against the configured repository,
    downloads the artifact when it is absent (or, with
    ``verify_checksum=change/always``, when its checksum differs), and then
    applies the common Ansible file attributes to the destination.
    """
    module = AnsibleModule(
        argument_spec=dict(
            group_id=dict(required=True),
            artifact_id=dict(required=True),
            version=dict(default="latest"),
            classifier=dict(default=''),
            extension=dict(default='jar'),
            repository_url=dict(default=None),
            # NOTE(review): alias 'aws_secret_key' on username (and
            # 'aws_secret_access_key' on password) mirrors the s3:// support
            # above; confirm the intended AWS credential mapping.
            username=dict(default=None, aliases=['aws_secret_key']),
            password=dict(default=None, no_log=True, aliases=['aws_secret_access_key']),
            state=dict(default="present", choices=["present", "absent"]), # TODO - Implement a "latest" state
            timeout=dict(default=10, type='int'),
            dest=dict(type="path", required=True),
            validate_certs=dict(required=False, default=True, type='bool'),
            keep_name=dict(required=False, default=False, type='bool'),
            verify_checksum=dict(required=False, default='download', choices=['never', 'download', 'change', 'always'])
        ),
        add_file_common_args=True
    )
    if not HAS_LXML_ETREE:
        module.fail_json(msg='module requires the lxml python library installed on the managed machine')
    repository_url = module.params["repository_url"]
    if not repository_url:
        # NOTE(review): Maven Central has required HTTPS since Jan 2020;
        # consider https://repo1.maven.org/maven2 — confirm before changing.
        repository_url = "http://repo1.maven.org/maven2"
    try:
        parsed_url = urlparse(repository_url)
    except AttributeError as e:
        module.fail_json(msg='url parsing went wrong %s' % e)
    # file:// repositories are read directly from disk instead of via HTTP.
    local = parsed_url.scheme == "file"
    if parsed_url.scheme == 's3' and not HAS_BOTO:
        module.fail_json(msg='boto3 required for this module, when using s3:// repository URLs')
    group_id = module.params["group_id"]
    artifact_id = module.params["artifact_id"]
    version = module.params["version"]
    classifier = module.params["classifier"]
    extension = module.params["extension"]
    state = module.params["state"]
    dest = module.params["dest"]
    b_dest = to_bytes(dest, errors='surrogate_or_strict')
    keep_name = module.params["keep_name"]
    verify_checksum = module.params["verify_checksum"]
    verify_download = verify_checksum in ['download', 'always']
    verify_change = verify_checksum in ['change', 'always']
    downloader = MavenDownloader(module, repository_url, local)
    try:
        artifact = Artifact(group_id, artifact_id, version, classifier, extension)
    except ValueError as e:
        module.fail_json(msg=e.args[0])
    changed = False
    prev_state = "absent"
    # A trailing path separator means dest is a directory: create it (and
    # any missing parents) with the requested permissions.
    if dest.endswith(os.sep):
        b_dest = to_bytes(dest, errors='surrogate_or_strict')
        if not os.path.exists(b_dest):
            (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dest)
            os.makedirs(b_dest)
            directory_args = module.load_file_common_arguments(module.params)
            directory_mode = module.params["directory_mode"]
            if directory_mode is not None:
                directory_args['mode'] = directory_mode
            else:
                directory_args['mode'] = None
            changed = adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed)
    # When dest is a directory, derive the target file name from the
    # artifact coordinates (resolving 'latest' when keep_name is set).
    if os.path.isdir(b_dest):
        version_part = version
        if keep_name and version == 'latest':
            version_part = downloader.find_latest_version_available(artifact)
        if classifier:
            dest = posixpath.join(dest, "%s-%s-%s.%s" % (artifact_id, version_part, classifier, extension))
        else:
            dest = posixpath.join(dest, "%s-%s.%s" % (artifact_id, version_part, extension))
        b_dest = to_bytes(dest, errors='surrogate_or_strict')
    # Consider the artifact already present when the file exists and either
    # checksum-on-change is disabled or the checksum still matches.
    if os.path.lexists(b_dest) and ((not verify_change) or not downloader.is_invalid_md5(dest, downloader.find_uri_for_artifact(artifact))):
        prev_state = "present"
    if prev_state == "absent":
        try:
            # NOTE(review): b_dest (bytes) is passed where download() takes a
            # filename; verify Artifact.get_filename handles bytes paths.
            download_error = downloader.download(module.tmpdir, artifact, verify_download, b_dest)
            if download_error is None:
                changed = True
            else:
                module.fail_json(msg="Cannot retrieve the artifact to destination: " + download_error)
        except ValueError as e:
            module.fail_json(msg=e.args[0])
    module.params['dest'] = dest
    file_args = module.load_file_common_arguments(module.params)
    changed = module.set_fs_attributes_if_different(file_args, changed)
    if changed:
        module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier,
                         extension=extension, repository_url=repository_url, changed=changed)
    else:
        module.exit_json(state=state, dest=dest, changed=changed)
# Run the module entry point when executed directly (as Ansible does).
if __name__ == '__main__':
    main()
| gpl-3.0 |
nikhilprathapani/python-for-android | python-modules/twisted/twisted/web/test/test_http.py | 52 | 52636 | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test HTTP support.
"""
from urlparse import urlparse, urlunsplit, clear_cache
import random, urllib, cgi
from twisted.python.compat import set
from twisted.python.failure import Failure
from twisted.trial import unittest
from twisted.trial.unittest import TestCase
from twisted.web import http, http_headers
from twisted.web.http import PotentialDataLoss, _DataLoss
from twisted.web.http import _IdentityTransferDecoder
from twisted.protocols import loopback
from twisted.internet.task import Clock
from twisted.internet.error import ConnectionLost
from twisted.test.proto_helpers import StringTransport
from twisted.test.test_internet import DummyProducer
from twisted.web.test.test_web import DummyChannel
class DateTimeTest(unittest.TestCase):
    """Tests for the HTTP date/time formatting and parsing helpers."""

    def testRoundtrip(self):
        # Formatting a timestamp and parsing the result back must be
        # lossless for arbitrary epoch seconds.
        for _ in range(10000):
            original = random.randint(0, 2000000000)
            formatted = http.datetimeToString(original)
            parsed = http.stringToDatetime(formatted)
            self.assertEquals(original, parsed)
class DummyHTTPHandler(http.Request):
    """Request handler that echoes the claimed Content-Length and the body
    back in a fixed, easily parsed response format used by these tests."""

    def process(self):
        self.content.seek(0, 0)
        body = self.content.read()
        claimed_length = self.getHeader('content-length')
        payload = "'''\n" + str(claimed_length) + "\n" + body + "'''\n"
        self.setResponseCode(200)
        # Reflect the parsed request line back as response headers.
        self.setHeader("Request", self.uri)
        self.setHeader("Command", self.method)
        self.setHeader("Version", self.clientproto)
        self.setHeader("Content-Length", len(payload))
        self.write(payload)
        self.finish()
class LoopbackHTTPClient(http.HTTPClient):
    # Minimal client for the loopback tests: as soon as the connection is
    # made it issues a single GET carrying a 10-byte body.
    def connectionMade(self):
        self.sendCommand("GET", "/foo/bar")
        self.sendHeader("Content-Length", 10)
        self.endHeaders()
        self.transport.write("0123456789")
class ResponseTestMixin(object):
    """
    A mixin that provides a simple means of comparing an actual response string
    to an expected response string by performing the minimal parsing.
    """
    def assertResponseEquals(self, responses, expected):
        """
        Assert that the C{responses} matches the C{expected} responses.
        @type responses: C{str}
        @param responses: The bytes sent in response to one or more requests.
        @type expected: C{list} of C{tuple} of C{str}
        @param expected: The expected values for the responses.  Each tuple
            element of the list represents one response.  Each string element
            of the tuple is a full header line without delimiter, except for
            the last element which gives the full response body.
        """
        for response in expected:
            expectedHeaders, expectedContent = response[:-1], response[-1]
            # Split this response's header block off the front of the
            # remaining byte stream.
            headers, rest = responses.split('\r\n\r\n', 1)
            headers = headers.splitlines()
            # Header order is not significant, so compare as sets.
            self.assertEqual(set(headers), set(expectedHeaders))
            content = rest[:len(expectedContent)]
            # Whatever follows this body belongs to the next response.
            responses = rest[len(expectedContent):]
            self.assertEqual(content, expectedContent)
class HTTP1_0TestCase(unittest.TestCase, ResponseTestMixin):
    # Two pipelined requests; the first is HTTP/1.0 which closes the
    # connection after one response, so only one response is expected.
    requests = (
        "GET / HTTP/1.0\r\n"
        "\r\n"
        "GET / HTTP/1.1\r\n"
        "Accept: text/html\r\n"
        "\r\n")
    expected_response = [
        ("HTTP/1.0 200 OK",
         "Request: /",
         "Command: GET",
         "Version: HTTP/1.0",
         "Content-Length: 13",
         "'''\nNone\n'''\n")]
    def test_buffer(self):
        """
        Send requests over a channel and check responses match what is expected.
        """
        b = StringTransport()
        a = http.HTTPChannel()
        a.requestFactory = DummyHTTPHandler
        a.makeConnection(b)
        # one byte at a time, to stress it.
        for byte in self.requests:
            a.dataReceived(byte)
        a.connectionLost(IOError("all one"))
        value = b.value()
        self.assertResponseEquals(value, self.expected_response)
    def test_requestBodyTimeout(self):
        """
        L{HTTPChannel} resets its timeout whenever data from a request body is
        delivered to it.
        """
        clock = Clock()
        transport = StringTransport()
        protocol = http.HTTPChannel()
        protocol.timeOut = 100
        protocol.callLater = clock.callLater
        protocol.makeConnection(transport)
        protocol.dataReceived('POST / HTTP/1.0\r\nContent-Length: 2\r\n\r\n')
        # Each body byte arrives just before the timeout would fire; the
        # connection must stay up because the timer is reset on data.
        clock.advance(99)
        self.assertFalse(transport.disconnecting)
        protocol.dataReceived('x')
        clock.advance(99)
        self.assertFalse(transport.disconnecting)
        protocol.dataReceived('x')
        self.assertEqual(len(protocol.requests), 1)
class HTTP1_1TestCase(HTTP1_0TestCase):
    # Same channel test as HTTP1_0TestCase, but with HTTP/1.1 pipelining:
    # four requests on one connection, including two POST bodies and a HEAD.
    requests = (
        "GET / HTTP/1.1\r\n"
        "Accept: text/html\r\n"
        "\r\n"
        "POST / HTTP/1.1\r\n"
        "Content-Length: 10\r\n"
        "\r\n"
        "0123456789POST / HTTP/1.1\r\n"
        "Content-Length: 10\r\n"
        "\r\n"
        "0123456789HEAD / HTTP/1.1\r\n"
        "\r\n")
    expected_response = [
        ("HTTP/1.1 200 OK",
         "Request: /",
         "Command: GET",
         "Version: HTTP/1.1",
         "Content-Length: 13",
         "'''\nNone\n'''\n"),
        ("HTTP/1.1 200 OK",
         "Request: /",
         "Command: POST",
         "Version: HTTP/1.1",
         "Content-Length: 21",
         "'''\n10\n0123456789'''\n"),
        ("HTTP/1.1 200 OK",
         "Request: /",
         "Command: POST",
         "Version: HTTP/1.1",
         "Content-Length: 21",
         "'''\n10\n0123456789'''\n"),
        ("HTTP/1.1 200 OK",
         "Request: /",
         "Command: HEAD",
         "Version: HTTP/1.1",
         "Content-Length: 13",
         # A HEAD response carries headers only, no body.
         "")]
class HTTP1_1_close_TestCase(HTTP1_0TestCase):
    # "Connection: close" on an HTTP/1.1 request ends the connection after
    # the first response, so the second request gets no response.
    requests = (
        "GET / HTTP/1.1\r\n"
        "Accept: text/html\r\n"
        "Connection: close\r\n"
        "\r\n"
        "GET / HTTP/1.0\r\n"
        "\r\n")
    expected_response = [
        ("HTTP/1.1 200 OK",
         "Connection: close",
         "Request: /",
         "Command: GET",
         "Version: HTTP/1.1",
         "Content-Length: 13",
         "'''\nNone\n'''\n")]
class HTTP0_9TestCase(HTTP1_0TestCase):
    # An HTTP/0.9-style request line (no version); the server rejects it.
    requests = (
        "GET /\r\n")
    expected_response = "HTTP/1.1 400 Bad Request\r\n\r\n"
    def assertResponseEquals(self, response, expectedResponse):
        # No header framing to parse here, so compare the raw bytes.
        self.assertEquals(response, expectedResponse)
class HTTPLoopbackTestCase(unittest.TestCase):
    # Headers that DummyHTTPHandler is expected to echo back to the client.
    expectedHeaders = {'request' : '/foo/bar',
                       'command' : 'GET',
                       'version' : 'HTTP/1.0',
                       'content-length' : '21'}
    # Flags/counters recording which client parser callbacks have fired.
    numHeaders = 0
    gotStatus = 0
    gotResponse = 0
    gotEndHeaders = 0
    def _handleStatus(self, version, status, message):
        self.gotStatus = 1
        self.assertEquals(version, "HTTP/1.0")
        self.assertEquals(status, "200")
    def _handleResponse(self, data):
        self.gotResponse = 1
        self.assertEquals(data, "'''\n10\n0123456789'''\n")
    def _handleHeader(self, key, value):
        self.numHeaders = self.numHeaders + 1
        self.assertEquals(self.expectedHeaders[key.lower()], value)
    def _handleEndHeaders(self):
        self.gotEndHeaders = 1
        self.assertEquals(self.numHeaders, 4)
    def testLoopback(self):
        server = http.HTTPChannel()
        server.requestFactory = DummyHTTPHandler
        client = LoopbackHTTPClient()
        # Route the client's parser callbacks into this test case so the
        # flags above record everything that was seen.
        client.handleResponse = self._handleResponse
        client.handleHeader = self._handleHeader
        client.handleEndHeaders = self._handleEndHeaders
        client.handleStatus = self._handleStatus
        d = loopback.loopbackAsync(server, client)
        d.addCallback(self._cbTestLoopback)
        return d
    def _cbTestLoopback(self, ignored):
        # Verify every callback fired, then reset the class-level state so
        # one run cannot leak into another.
        if not (self.gotStatus and self.gotResponse and self.gotEndHeaders):
            raise RuntimeError(
                "didn't got all callbacks %s"
                % [self.gotStatus, self.gotResponse, self.gotEndHeaders])
        del self.gotEndHeaders
        del self.gotResponse
        del self.gotStatus
        del self.numHeaders
def _prequest(**headers):
    """
    Build an L{http.Request} carrying the given request headers, for use by
    the connection-persistence tests.
    """
    req = http.Request(DummyChannel(), None)
    for name, values in headers.iteritems():
        req.requestHeaders.setRawHeaders(name, values)
    return req
class PersistenceTestCase(unittest.TestCase):
    """
    Tests for persistent HTTP connections.
    """
    # Each entry is (request, protocol version, expected persistence result,
    # response headers expected to have been set afterwards).
    ptests = [#(PRequest(connection="Keep-Alive"), "HTTP/1.0", 1, {'connection' : 'Keep-Alive'}),
              (_prequest(), "HTTP/1.0", 0, {'connection': None}),
              (_prequest(connection=["close"]), "HTTP/1.1", 0, {'connection' : ['close']}),
              (_prequest(), "HTTP/1.1", 1, {'connection': None}),
              (_prequest(), "HTTP/0.9", 0, {'connection': None}),
              ]
    def testAlgorithm(self):
        c = http.HTTPChannel()
        for req, version, correctResult, resultHeaders in self.ptests:
            result = c.checkPersistence(req, version)
            self.assertEquals(result, correctResult)
            for header in resultHeaders.keys():
                self.assertEquals(req.responseHeaders.getRawHeaders(header, None), resultHeaders[header])
class IdentityTransferEncodingTests(TestCase):
    """
    Tests for L{_IdentityTransferDecoder}.
    """
    def setUp(self):
        """
        Create an L{_IdentityTransferDecoder} with callbacks hooked up so that
        calls to them can be inspected.
        """
        # self.data collects body bytes; self.finish collects the extra
        # bytes passed to the finish callback.
        self.data = []
        self.finish = []
        self.contentLength = 10
        self.decoder = _IdentityTransferDecoder(
            self.contentLength, self.data.append, self.finish.append)
    def test_exactAmountReceived(self):
        """
        If L{_IdentityTransferDecoder.dataReceived} is called with a string
        with length equal to the content length passed to
        L{_IdentityTransferDecoder}'s initializer, the data callback is invoked
        with that string and the finish callback is invoked with a zero-length
        string.
        """
        self.decoder.dataReceived('x' * self.contentLength)
        self.assertEqual(self.data, ['x' * self.contentLength])
        self.assertEqual(self.finish, [''])
    def test_shortStrings(self):
        """
        If L{_IdentityTransferDecoder.dataReceived} is called multiple times
        with strings which, when concatenated, are as long as the content
        length provided, the data callback is invoked with each string and the
        finish callback is invoked only after the second call.
        """
        self.decoder.dataReceived('x')
        self.assertEqual(self.data, ['x'])
        self.assertEqual(self.finish, [])
        self.decoder.dataReceived('y' * (self.contentLength - 1))
        self.assertEqual(self.data, ['x', 'y' * (self.contentLength - 1)])
        self.assertEqual(self.finish, [''])
    def test_longString(self):
        """
        If L{_IdentityTransferDecoder.dataReceived} is called with a string
        with length greater than the provided content length, only the prefix
        of that string up to the content length is passed to the data callback
        and the remainder is passed to the finish callback.
        """
        self.decoder.dataReceived('x' * self.contentLength + 'y')
        self.assertEqual(self.data, ['x' * self.contentLength])
        self.assertEqual(self.finish, ['y'])
    def test_rejectDataAfterFinished(self):
        """
        If data is passed to L{_IdentityTransferDecoder.dataReceived} after the
        finish callback has been invoked, L{RuntimeError} is raised.
        """
        failures = []
        def finish(bytes):
            # Re-entrantly feed more data from inside the finish callback;
            # the decoder must reject it.
            try:
                decoder.dataReceived('foo')
            except:
                failures.append(Failure())
        decoder = _IdentityTransferDecoder(5, self.data.append, finish)
        decoder.dataReceived('x' * 4)
        self.assertEqual(failures, [])
        # The fifth byte completes the body and triggers finish() above.
        decoder.dataReceived('y')
        failures[0].trap(RuntimeError)
        self.assertEqual(
            str(failures[0].value),
            "_IdentityTransferDecoder cannot decode data after finishing")
    def test_unknownContentLength(self):
        """
        If L{_IdentityTransferDecoder} is constructed with C{None} for the
        content length, it passes all data delivered to it through to the data
        callback.
        """
        data = []
        finish = []
        decoder = _IdentityTransferDecoder(None, data.append, finish.append)
        decoder.dataReceived('x')
        self.assertEqual(data, ['x'])
        decoder.dataReceived('y')
        self.assertEqual(data, ['x', 'y'])
        self.assertEqual(finish, [])
    def _verifyCallbacksUnreferenced(self, decoder):
        """
        Check the decoder's data and finish callbacks and make sure they are
        None in order to help avoid references cycles.
        """
        self.assertIdentical(decoder.dataCallback, None)
        self.assertIdentical(decoder.finishCallback, None)
    def test_earlyConnectionLose(self):
        """
        L{_IdentityTransferDecoder.noMoreData} raises L{_DataLoss} if it is
        called and the content length is known but not enough bytes have been
        delivered.
        """
        self.decoder.dataReceived('x' * (self.contentLength - 1))
        self.assertRaises(_DataLoss, self.decoder.noMoreData)
        self._verifyCallbacksUnreferenced(self.decoder)
    def test_unknownContentLengthConnectionLose(self):
        """
        L{_IdentityTransferDecoder.noMoreData} calls the finish callback and
        raises L{PotentialDataLoss} if it is called and the content length is
        unknown.
        """
        body = []
        finished = []
        decoder = _IdentityTransferDecoder(None, body.append, finished.append)
        self.assertRaises(PotentialDataLoss, decoder.noMoreData)
        self.assertEqual(body, [])
        self.assertEqual(finished, [''])
        self._verifyCallbacksUnreferenced(decoder)
    def test_finishedConnectionLose(self):
        """
        L{_IdentityTransferDecoder.noMoreData} does not raise any exception if
        it is called when the content length is known and that many bytes have
        been delivered.
        """
        self.decoder.dataReceived('x' * self.contentLength)
        self.decoder.noMoreData()
        self._verifyCallbacksUnreferenced(self.decoder)
class ChunkedTransferEncodingTests(unittest.TestCase):
    """
    Tests for L{_ChunkedTransferDecoder}, which turns a byte stream encoded
    using HTTP I{chunked} C{Transfer-Encoding} back into the original byte
    stream.
    """
    def test_decoding(self):
        """
        L{_ChunkedTransferDecoder.dataReceived} decodes chunked-encoded data
        and passes the result to the specified callback.
        """
        L = []
        p = http._ChunkedTransferDecoder(L.append, None)
        p.dataReceived('3\r\nabc\r\n5\r\n12345\r\n')
        # 'a' is a hexadecimal chunk size (10 bytes).
        p.dataReceived('a\r\n0123456789\r\n')
        self.assertEqual(L, ['abc', '12345', '0123456789'])
    def test_short(self):
        """
        L{_ChunkedTransferDecoder.dataReceived} decodes chunks broken up and
        delivered in multiple calls.
        """
        L = []
        finished = []
        p = http._ChunkedTransferDecoder(L.append, finished.append)
        for s in '3\r\nabc\r\n5\r\n12345\r\n0\r\n\r\n':
            p.dataReceived(s)
        self.assertEqual(L, ['a', 'b', 'c', '1', '2', '3', '4', '5'])
        self.assertEqual(finished, [''])
    def test_newlines(self):
        """
        L{_ChunkedTransferDecoder.dataReceived} doesn't treat CR LF pairs
        embedded in chunk bodies specially.
        """
        L = []
        p = http._ChunkedTransferDecoder(L.append, None)
        p.dataReceived('2\r\n\r\n\r\n')
        self.assertEqual(L, ['\r\n'])
    def test_extensions(self):
        """
        L{_ChunkedTransferDecoder.dataReceived} disregards chunk-extension
        fields.
        """
        L = []
        p = http._ChunkedTransferDecoder(L.append, None)
        p.dataReceived('3; x-foo=bar\r\nabc\r\n')
        self.assertEqual(L, ['abc'])
    def test_finish(self):
        """
        L{_ChunkedTransferDecoder.dataReceived} interprets a zero-length
        chunk as the end of the chunked data stream and calls the completion
        callback.
        """
        finished = []
        p = http._ChunkedTransferDecoder(None, finished.append)
        p.dataReceived('0\r\n\r\n')
        self.assertEqual(finished, [''])
    def test_extra(self):
        """
        L{_ChunkedTransferDecoder.dataReceived} passes any bytes which come
        after the terminating zero-length chunk to the completion callback.
        """
        finished = []
        p = http._ChunkedTransferDecoder(None, finished.append)
        p.dataReceived('0\r\n\r\nhello')
        self.assertEqual(finished, ['hello'])
    def test_afterFinished(self):
        """
        L{_ChunkedTransferDecoder.dataReceived} raises L{RuntimeError} if it
        is called after it has seen the last chunk.
        """
        p = http._ChunkedTransferDecoder(None, lambda bytes: None)
        p.dataReceived('0\r\n\r\n')
        self.assertRaises(RuntimeError, p.dataReceived, 'hello')
    def test_earlyConnectionLose(self):
        """
        L{_ChunkedTransferDecoder.noMoreData} raises L{_DataLoss} if it is
        called and the end of the last trailer has not yet been received.
        """
        parser = http._ChunkedTransferDecoder(None, lambda bytes: None)
        # One byte short of the full terminator ('0\r\n\r\n').
        parser.dataReceived('0\r\n\r')
        exc = self.assertRaises(_DataLoss, parser.noMoreData)
        self.assertEqual(
            str(exc),
            "Chunked decoder in 'trailer' state, still expecting more data "
            "to get to finished state.")
    def test_finishedConnectionLose(self):
        """
        L{_ChunkedTransferDecoder.noMoreData} does not raise any exception if
        it is called after the terminal zero length chunk is received.
        """
        parser = http._ChunkedTransferDecoder(None, lambda bytes: None)
        parser.dataReceived('0\r\n\r\n')
        parser.noMoreData()
    def test_reentrantFinishedNoMoreData(self):
        """
        L{_ChunkedTransferDecoder.noMoreData} can be called from the finished
        callback without raising an exception.
        """
        errors = []
        successes = []
        def finished(extra):
            try:
                parser.noMoreData()
            except:
                errors.append(Failure())
            else:
                successes.append(True)
        parser = http._ChunkedTransferDecoder(None, finished)
        parser.dataReceived('0\r\n\r\n')
        self.assertEqual(errors, [])
        self.assertEqual(successes, [True])
class ChunkingTestCase(unittest.TestCase):
    # Sample payloads, including empty strings and embedded CR/LF bytes.
    strings = ["abcv", "", "fdfsd423", "Ffasfas\r\n",
               "523523\n\rfsdf", "4234"]
    def testChunks(self):
        for s in self.strings:
            # toChunk/fromChunk must round-trip with no residual bytes.
            self.assertEquals((s, ''), http.fromChunk(''.join(http.toChunk(s))))
        self.assertRaises(ValueError, http.fromChunk, '-5\r\nmalformed!\r\n')
    def testConcatenatedChunks(self):
        chunked = ''.join([''.join(http.toChunk(t)) for t in self.strings])
        result = []
        buffer = ""
        # Feed one byte at a time; fromChunk raises ValueError until a
        # complete chunk has accumulated in the buffer.
        for c in chunked:
            buffer = buffer + c
            try:
                data, buffer = http.fromChunk(buffer)
                result.append(data)
            except ValueError:
                pass
        self.assertEquals(result, self.strings)
class ParsingTestCase(unittest.TestCase):
    """
    Tests for protocol parsing in L{HTTPChannel}.
    """
    def runRequest(self, httpRequest, requestClass, success=1):
        """
        Feed C{httpRequest} (with LF normalized to CRLF) one byte at a time
        into a fresh L{HTTPChannel} using C{requestClass} as the request
        factory, then assert on the C{didRequest} flag the request class is
        expected to set.  Returns the channel for further inspection.
        """
        httpRequest = httpRequest.replace("\n", "\r\n")
        b = StringTransport()
        a = http.HTTPChannel()
        a.requestFactory = requestClass
        a.makeConnection(b)
        # one byte at a time, to stress it.
        for byte in httpRequest:
            if a.transport.disconnecting:
                break
            a.dataReceived(byte)
        a.connectionLost(IOError("all done"))
        if success:
            self.assertEquals(self.didRequest, 1)
            del self.didRequest
        else:
            self.assert_(not hasattr(self, "didRequest"))
        return a
    def test_basicAuth(self):
        """
        L{HTTPChannel} provides username and password information supplied in
        an I{Authorization} header to the L{Request} which makes it available
        via its C{getUser} and C{getPassword} methods.
        """
        testcase = self
        class Request(http.Request):
            l = []
            def process(self):
                testcase.assertEquals(self.getUser(), self.l[0])
                testcase.assertEquals(self.getPassword(), self.l[1])
        for u, p in [("foo", "bar"), ("hello", "there:z")]:
            Request.l[:] = [u, p]
            s = "%s:%s" % (u, p)
            f = "GET / HTTP/1.0\nAuthorization: Basic %s\n\n" % (s.encode("base64").strip(), )
            self.runRequest(f, Request, 0)
    def test_headers(self):
        """
        Headers received by L{HTTPChannel} in a request are made available to
        the L{Request}.
        """
        processed = []
        class MyRequest(http.Request):
            def process(self):
                processed.append(self)
                self.finish()
        requestLines = [
            "GET / HTTP/1.0",
            "Foo: bar",
            "baz: Quux",
            "baz: quux",
            "",
            ""]
        self.runRequest('\n'.join(requestLines), MyRequest, 0)
        [request] = processed
        self.assertEquals(
            request.requestHeaders.getRawHeaders('foo'), ['bar'])
        self.assertEquals(
            request.requestHeaders.getRawHeaders('bAz'), ['Quux', 'quux'])
    def test_tooManyHeaders(self):
        """
        L{HTTPChannel} enforces a limit of C{HTTPChannel.maxHeaders} on the
        number of headers received per request.
        """
        processed = []
        class MyRequest(http.Request):
            def process(self):
                processed.append(self)
        requestLines = ["GET / HTTP/1.0"]
        for i in range(http.HTTPChannel.maxHeaders + 2):
            requestLines.append("%s: foo" % (i,))
        requestLines.extend(["", ""])
        channel = self.runRequest("\n".join(requestLines), MyRequest, 0)
        # The request must never reach process(); the channel answers 400.
        self.assertEqual(processed, [])
        self.assertEqual(
            channel.transport.value(),
            "HTTP/1.1 400 Bad Request\r\n\r\n")
    def test_headerLimitPerRequest(self):
        """
        L{HTTPChannel} enforces the limit of C{HTTPChannel.maxHeaders} per
        request so that headers received in an earlier request do not count
        towards the limit when processing a later request.
        """
        processed = []
        class MyRequest(http.Request):
            def process(self):
                processed.append(self)
                self.finish()
        # With a limit of one header per request, two single-header requests
        # must both succeed.
        self.patch(http.HTTPChannel, 'maxHeaders', 1)
        requestLines = [
            "GET / HTTP/1.1",
            "Foo: bar",
            "",
            "",
            "GET / HTTP/1.1",
            "Bar: baz",
            "",
            ""]
        channel = self.runRequest("\n".join(requestLines), MyRequest, 0)
        [first, second] = processed
        self.assertEqual(first.getHeader('foo'), 'bar')
        self.assertEqual(second.getHeader('bar'), 'baz')
        self.assertEqual(
            channel.transport.value(),
            'HTTP/1.1 200 OK\r\n'
            'Transfer-Encoding: chunked\r\n'
            '\r\n'
            '0\r\n'
            '\r\n'
            'HTTP/1.1 200 OK\r\n'
            'Transfer-Encoding: chunked\r\n'
            '\r\n'
            '0\r\n'
            '\r\n')
    def testCookies(self):
        """
        Test cookies parsing and reading.
        """
        httpRequest = '''\
GET / HTTP/1.0
Cookie: rabbit="eat carrot"; ninja=secret; spam="hey 1=1!"
'''
        testcase = self
        class MyRequest(http.Request):
            def process(self):
                testcase.assertEquals(self.getCookie('rabbit'), '"eat carrot"')
                testcase.assertEquals(self.getCookie('ninja'), 'secret')
                testcase.assertEquals(self.getCookie('spam'), '"hey 1=1!"')
                testcase.didRequest = 1
                self.finish()
        self.runRequest(httpRequest, MyRequest)
    def testGET(self):
        # Query-string arguments must be decoded ('+' and %-escapes) and
        # collected per key into lists.
        httpRequest = '''\
GET /?key=value&multiple=two+words&multiple=more%20words&empty= HTTP/1.0
'''
        testcase = self
        class MyRequest(http.Request):
            def process(self):
                testcase.assertEquals(self.method, "GET")
                testcase.assertEquals(self.args["key"], ["value"])
                testcase.assertEquals(self.args["empty"], [""])
                testcase.assertEquals(self.args["multiple"], ["two words", "more words"])
                testcase.didRequest = 1
                self.finish()
        self.runRequest(httpRequest, MyRequest)
    def test_extraQuestionMark(self):
        """
        While only a single '?' is allowed in an URL, several other servers
        allow several and pass all after the first through as part of the
        query arguments.  Test that we emulate this behavior.
        """
        httpRequest = 'GET /foo?bar=?&baz=quux HTTP/1.0\n\n'
        testcase = self
        class MyRequest(http.Request):
            def process(self):
                testcase.assertEqual(self.method, 'GET')
                testcase.assertEqual(self.path, '/foo')
                testcase.assertEqual(self.args['bar'], ['?'])
                testcase.assertEqual(self.args['baz'], ['quux'])
                testcase.didRequest = 1
                self.finish()
        self.runRequest(httpRequest, MyRequest)
    def test_formPOSTRequest(self):
        """
        The request body of a I{POST} request with a I{Content-Type} header
        of I{application/x-www-form-urlencoded} is parsed according to that
        content type and made available in the C{args} attribute of the
        request object.  The original bytes of the request may still be read
        from the C{content} attribute.
        """
        query = 'key=value&multiple=two+words&multiple=more%20words&empty='
        httpRequest = '''\
POST / HTTP/1.0
Content-Length: %d
Content-Type: application/x-www-form-urlencoded
%s''' % (len(query), query)
        testcase = self
        class MyRequest(http.Request):
            def process(self):
                testcase.assertEquals(self.method, "POST")
                testcase.assertEquals(self.args["key"], ["value"])
                testcase.assertEquals(self.args["empty"], [""])
                testcase.assertEquals(self.args["multiple"], ["two words", "more words"])
                # Reading from the content file-like must produce the entire
                # request body.
                testcase.assertEquals(self.content.read(), query)
                testcase.didRequest = 1
                self.finish()
        self.runRequest(httpRequest, MyRequest)
    def testMissingContentDisposition(self):
        # A multipart part without Content-Disposition must make the request
        # fail rather than be processed.
        req = '''\
POST / HTTP/1.0
Content-Type: multipart/form-data; boundary=AaB03x
Content-Length: 103
--AaB03x
Content-Type: text/plain
Content-Transfer-Encoding: quoted-printable
abasdfg
--AaB03x--
'''
        self.runRequest(req, http.Request, success=False)
    def test_chunkedEncoding(self):
        """
        If a request uses the I{chunked} transfer encoding, the request body is
        decoded accordingly before it is made available on the request.
        """
        httpRequest = '''\
GET / HTTP/1.0
Content-Type: text/plain
Transfer-Encoding: chunked
6
Hello,
14
spam,eggs spam spam
0
'''
        testcase = self
        class MyRequest(http.Request):
            def process(self):
                # The tempfile API used to create content returns an
                # instance of a different type depending on what platform
                # we're running on.  The point here is to verify that the
                # request body is in a file that's on the filesystem.
                # Having a fileno method that returns an int is a somewhat
                # close approximation of this. -exarkun
                testcase.assertIsInstance(self.content.fileno(), int)
                testcase.assertEqual(self.method, 'GET')
                testcase.assertEqual(self.path, '/')
                content = self.content.read()
                testcase.assertEqual(content, 'Hello, spam,eggs spam spam')
                testcase.assertIdentical(self.channel._transferDecoder, None)
                testcase.didRequest = 1
                self.finish()
        self.runRequest(httpRequest, MyRequest)
class QueryArgumentsTestCase(unittest.TestCase):
    def testUnquote(self):
        """
        The C implementation of unquote must behave exactly like
        L{urllib.unquote}, including its handling of malformed escapes.
        """
        try:
            from twisted.protocols import _c_urlarg
        except ImportError:
            raise unittest.SkipTest("_c_urlarg module is not available")
        # work exactly like urllib.unquote, including stupid things
        # % followed by a non-hexdigit in the middle and in the end
        self.failUnlessEqual(urllib.unquote("%notreally%n"),
            _c_urlarg.unquote("%notreally%n"))
        # % followed by hexdigit, followed by non-hexdigit
        self.failUnlessEqual(urllib.unquote("%1quite%1"),
            _c_urlarg.unquote("%1quite%1"))
        # unquoted text, followed by some quoted chars, ends in a trailing %
        self.failUnlessEqual(urllib.unquote("blah%21%40%23blah%"),
            _c_urlarg.unquote("blah%21%40%23blah%"))
        # Empty string
        self.failUnlessEqual(urllib.unquote(""), _c_urlarg.unquote(""))
    def testParseqs(self):
        # http.parse_qs must mirror cgi.parse_qs for these inputs,
        # including strict parsing and blank-value handling.
        self.failUnlessEqual(cgi.parse_qs("a=b&d=c;+=f"),
            http.parse_qs("a=b&d=c;+=f"))
        self.failUnlessRaises(ValueError, http.parse_qs, "blah",
            strict_parsing = 1)
        self.failUnlessEqual(cgi.parse_qs("a=&b=c", keep_blank_values = 1),
            http.parse_qs("a=&b=c", keep_blank_values = 1))
        self.failUnlessEqual(cgi.parse_qs("a=&b=c"),
            http.parse_qs("a=&b=c"))
    def test_urlparse(self):
        """
        For a given URL, L{http.urlparse} should behave the same as
        L{urlparse}, except it should always return C{str}, never C{unicode}.
        """
        def urls():
            # Generate a small matrix of scheme/host/port/path combinations.
            for scheme in ('http', 'https'):
                for host in ('example.com',):
                    for port in (None, 100):
                        for path in ('', 'path'):
                            if port is not None:
                                host = host + ':' + str(port)
                            yield urlunsplit((scheme, host, path, '', ''))
        def assertSameParsing(url, decode):
            """
            Verify that C{url} is parsed into the same objects by both
            L{http.urlparse} and L{urlparse}.
            """
            urlToStandardImplementation = url
            if decode:
                urlToStandardImplementation = url.decode('ascii')
            standardResult = urlparse(urlToStandardImplementation)
            scheme, netloc, path, params, query, fragment = http.urlparse(url)
            self.assertEqual(
                (scheme, netloc, path, params, query, fragment),
                standardResult)
            self.assertTrue(isinstance(scheme, str))
            self.assertTrue(isinstance(netloc, str))
            self.assertTrue(isinstance(path, str))
            self.assertTrue(isinstance(params, str))
            self.assertTrue(isinstance(query, str))
            self.assertTrue(isinstance(fragment, str))
        # Exercise both orders around the stdlib urlparse cache, since the
        # cache can hand back unicode results for str input.
        # With caching, unicode then str
        clear_cache()
        for url in urls():
            assertSameParsing(url, True)
            assertSameParsing(url, False)
        # With caching, str then unicode
        clear_cache()
        for url in urls():
            assertSameParsing(url, False)
            assertSameParsing(url, True)
        # Without caching
        for url in urls():
            clear_cache()
            assertSameParsing(url, True)
            clear_cache()
            assertSameParsing(url, False)
    def test_urlparseRejectsUnicode(self):
        """
        L{http.urlparse} should reject unicode input early.
        """
        self.assertRaises(TypeError, http.urlparse, u'http://example.org/path')
    def testEscchar(self):
        # The optional second argument selects an alternate escape
        # character ('+' here instead of '%').
        try:
            from twisted.protocols import _c_urlarg
        except ImportError:
            raise unittest.SkipTest("_c_urlarg module is not available")
        self.failUnlessEqual("!@#+b",
            _c_urlarg.unquote("+21+40+23+b", "+"))
class ClientDriver(http.HTTPClient):
    """HTTP client that records the parsed status line for later inspection."""

    def handleStatus(self, version, status, message):
        # Stash each status-line component so tests can assert on them.
        self.version, self.status, self.message = version, status, message
class ClientStatusParsing(unittest.TestCase):
    # Status-line parsing: version, code, and (possibly empty) message.
    def testBaseline(self):
        c = ClientDriver()
        c.lineReceived('HTTP/1.0 201 foo')
        self.failUnlessEqual(c.version, 'HTTP/1.0')
        self.failUnlessEqual(c.status, '201')
        self.failUnlessEqual(c.message, 'foo')
    def testNoMessage(self):
        # A status line with no reason phrase yields an empty message.
        c = ClientDriver()
        c.lineReceived('HTTP/1.0 201')
        self.failUnlessEqual(c.version, 'HTTP/1.0')
        self.failUnlessEqual(c.status, '201')
        self.failUnlessEqual(c.message, '')
    def testNoMessage_trailingSpace(self):
        # Trailing whitespace after the code also yields an empty message.
        c = ClientDriver()
        c.lineReceived('HTTP/1.0 201 ')
        self.failUnlessEqual(c.version, 'HTTP/1.0')
        self.failUnlessEqual(c.status, '201')
        self.failUnlessEqual(c.message, '')
class RequestTests(unittest.TestCase, ResponseTestMixin):
    """
    Tests for L{http.Request}
    """
    def _compatHeadersTest(self, oldName, newName):
        """
        Verify that each of two different attributes which are associated with
        the same state properly reflect changes made through the other.
        This is used to test that the C{headers}/C{responseHeaders} and
        C{received_headers}/C{requestHeaders} pairs interact properly.
        """
        req = http.Request(DummyChannel(), None)
        # Mutating the new-style Headers attribute must be visible through the
        # old-style dict-like attribute, and vice versa.
        getattr(req, newName).setRawHeaders("test", ["lemur"])
        self.assertEqual(getattr(req, oldName)["test"], "lemur")
        setattr(req, oldName, {"foo": "bar"})
        self.assertEqual(
            list(getattr(req, newName).getAllRawHeaders()),
            [("Foo", ["bar"])])
        setattr(req, newName, http_headers.Headers())
        self.assertEqual(getattr(req, oldName), {})
    def test_received_headers(self):
        """
        L{Request.received_headers} is a backwards compatible API which
        accesses and allows mutation of the state at L{Request.requestHeaders}.
        """
        self._compatHeadersTest('received_headers', 'requestHeaders')
    def test_headers(self):
        """
        L{Request.headers} is a backwards compatible API which accesses and
        allows mutation of the state at L{Request.responseHeaders}.
        """
        self._compatHeadersTest('headers', 'responseHeaders')
    def test_getHeader(self):
        """
        L{http.Request.getHeader} returns the value of the named request
        header.
        """
        req = http.Request(DummyChannel(), None)
        req.requestHeaders.setRawHeaders("test", ["lemur"])
        self.assertEquals(req.getHeader("test"), "lemur")
    def test_getHeaderReceivedMultiples(self):
        """
        When there are multiple values for a single request header,
        L{http.Request.getHeader} returns the last value.
        """
        req = http.Request(DummyChannel(), None)
        req.requestHeaders.setRawHeaders("test", ["lemur", "panda"])
        self.assertEquals(req.getHeader("test"), "panda")
    def test_getHeaderNotFound(self):
        """
        L{http.Request.getHeader} returns C{None} when asked for the value of a
        request header which is not present.
        """
        req = http.Request(DummyChannel(), None)
        self.assertEquals(req.getHeader("test"), None)
    def test_getAllHeaders(self):
        """
        L{http.Request.getAllheaders} returns a C{dict} mapping all request
        header names to their corresponding values.
        """
        req = http.Request(DummyChannel(), None)
        req.requestHeaders.setRawHeaders("test", ["lemur"])
        self.assertEquals(req.getAllHeaders(), {"test": "lemur"})
    def test_getAllHeadersNoHeaders(self):
        """
        L{http.Request.getAllHeaders} returns an empty C{dict} if there are no
        request headers.
        """
        req = http.Request(DummyChannel(), None)
        self.assertEquals(req.getAllHeaders(), {})
    def test_getAllHeadersMultipleHeaders(self):
        """
        When there are multiple values for a single request header,
        L{http.Request.getAllHeaders} returns only the last value.
        """
        req = http.Request(DummyChannel(), None)
        req.requestHeaders.setRawHeaders("test", ["lemur", "panda"])
        self.assertEquals(req.getAllHeaders(), {"test": "panda"})
    def test_setResponseCode(self):
        """
        L{http.Request.setResponseCode} takes a status code and causes it to be
        used as the response status.
        """
        channel = DummyChannel()
        req = http.Request(channel, None)
        req.setResponseCode(201)
        # An empty write is enough to flush the status line to the transport.
        req.write('')
        self.assertEqual(
            channel.transport.written.getvalue().splitlines()[0],
            '%s 201 Created' % (req.clientproto,))
    def test_setResponseCodeAndMessage(self):
        """
        L{http.Request.setResponseCode} takes a status code and a message and
        causes them to be used as the response status.
        """
        channel = DummyChannel()
        req = http.Request(channel, None)
        req.setResponseCode(202, "happily accepted")
        req.write('')
        self.assertEqual(
            channel.transport.written.getvalue().splitlines()[0],
            '%s 202 happily accepted' % (req.clientproto,))
    def test_setResponseCodeAcceptsIntegers(self):
        """
        L{http.Request.setResponseCode} accepts C{int} or C{long} for the code
        parameter and raises L{TypeError} if passed anything else.
        """
        req = http.Request(DummyChannel(), None)
        req.setResponseCode(1)
        # Python 2 has two integer types; long must be accepted as well.
        req.setResponseCode(1L)
        self.assertRaises(TypeError, req.setResponseCode, "1")
    def test_setHost(self):
        """
        L{http.Request.setHost} sets the value of the host request header.
        """
        req = http.Request(DummyChannel(), None)
        req.setHost("example.com", 443)
        self.assertEqual(
            req.requestHeaders.getRawHeaders("host"), ["example.com"])
    def test_setHeader(self):
        """
        L{http.Request.setHeader} sets the value of the given response header.
        """
        req = http.Request(DummyChannel(), None)
        req.setHeader("test", "lemur")
        self.assertEquals(req.responseHeaders.getRawHeaders("test"), ["lemur"])
    def test_firstWrite(self):
        """
        For an HTTP 1.0 request, L{http.Request.write} sends an HTTP 1.0
        Response-Line and whatever response headers are set.
        """
        req = http.Request(DummyChannel(), None)
        trans = StringTransport()
        req.transport = trans
        req.setResponseCode(200)
        req.clientproto = "HTTP/1.0"
        req.responseHeaders.setRawHeaders("test", ["lemur"])
        req.write('Hello')
        self.assertResponseEquals(
            trans.value(),
            [("HTTP/1.0 200 OK",
              "Test: lemur",
              "Hello")])
    def test_firstWriteHTTP11Chunked(self):
        """
        For an HTTP 1.1 request, L{http.Request.write} sends an HTTP 1.1
        Response-Line, whatever response headers are set, and uses chunked
        encoding for the response body.
        """
        req = http.Request(DummyChannel(), None)
        trans = StringTransport()
        req.transport = trans
        req.setResponseCode(200)
        req.clientproto = "HTTP/1.1"
        req.responseHeaders.setRawHeaders("test", ["lemur"])
        # Two writes, so the expected body below contains two length-prefixed
        # chunks.
        req.write('Hello')
        req.write('World!')
        self.assertResponseEquals(
            trans.value(),
            [("HTTP/1.1 200 OK",
              "Test: lemur",
              "Transfer-Encoding: chunked",
              "5\r\nHello\r\n6\r\nWorld!\r\n")])
    def test_firstWriteLastModified(self):
        """
        For an HTTP 1.0 request for a resource with a known last modified time,
        L{http.Request.write} sends an HTTP Response-Line, whatever response
        headers are set, and a last-modified header with that time.
        """
        req = http.Request(DummyChannel(), None)
        trans = StringTransport()
        req.transport = trans
        req.setResponseCode(200)
        req.clientproto = "HTTP/1.0"
        # 0 is the POSIX epoch, hence the expected 1970 Last-Modified value.
        req.lastModified = 0
        req.responseHeaders.setRawHeaders("test", ["lemur"])
        req.write('Hello')
        self.assertResponseEquals(
            trans.value(),
            [("HTTP/1.0 200 OK",
              "Test: lemur",
              "Last-Modified: Thu, 01 Jan 1970 00:00:00 GMT",
              "Hello")])
    def test_parseCookies(self):
        """
        L{http.Request.parseCookies} extracts cookies from C{requestHeaders}
        and adds them to C{received_cookies}.
        """
        req = http.Request(DummyChannel(), None)
        req.requestHeaders.setRawHeaders(
            "cookie", ['test="lemur"; test2="panda"'])
        req.parseCookies()
        # Note: the surrounding double quotes are preserved in the values.
        self.assertEquals(req.received_cookies, {"test": '"lemur"',
                                                 "test2": '"panda"'})
    def test_parseCookiesMultipleHeaders(self):
        """
        L{http.Request.parseCookies} can extract cookies from multiple Cookie
        headers.
        """
        req = http.Request(DummyChannel(), None)
        req.requestHeaders.setRawHeaders(
            "cookie", ['test="lemur"', 'test2="panda"'])
        req.parseCookies()
        self.assertEquals(req.received_cookies, {"test": '"lemur"',
                                                 "test2": '"panda"'})
    def test_connectionLost(self):
        """
        L{http.Request.connectionLost} closes L{Request.content} and drops the
        reference to the L{HTTPChannel} to assist with garbage collection.
        """
        req = http.Request(DummyChannel(), None)
        # Cause Request.content to be created at all.
        req.gotLength(10)
        # Grab a reference to content in case the Request drops it later on.
        content = req.content
        # Put some bytes into it
        req.handleContentChunk("hello")
        # Then something goes wrong and content should get closed.
        req.connectionLost(Failure(ConnectionLost("Finished")))
        self.assertTrue(content.closed)
        self.assertIdentical(req.channel, None)
    def test_registerProducerTwiceFails(self):
        """
        Calling L{Request.registerProducer} when a producer is already
        registered raises ValueError.
        """
        req = http.Request(DummyChannel(), None)
        req.registerProducer(DummyProducer(), True)
        self.assertRaises(
            ValueError, req.registerProducer, DummyProducer(), True)
    def test_registerProducerWhenQueuedPausesPushProducer(self):
        """
        Calling L{Request.registerProducer} with an IPushProducer when the
        request is queued pauses the producer.
        """
        # NOTE(review): the second Request() argument appears to be the
        # "queued" flag -- the *WhenQueued tests pass True and the
        # *WhenNotQueued tests pass False -- confirm against http.Request.
        req = http.Request(DummyChannel(), True)
        producer = DummyProducer()
        req.registerProducer(producer, True)
        self.assertEquals(['pause'], producer.events)
    def test_registerProducerWhenQueuedDoesntPausePullProducer(self):
        """
        Calling L{Request.registerProducer} with an IPullProducer when the
        request is queued does not pause the producer, because it doesn't make
        sense to pause a pull producer.
        """
        req = http.Request(DummyChannel(), True)
        producer = DummyProducer()
        req.registerProducer(producer, False)
        self.assertEquals([], producer.events)
    def test_registerProducerWhenQueuedDoesntRegisterPushProducer(self):
        """
        Calling L{Request.registerProducer} with an IPushProducer when the
        request is queued does not register the producer on the request's
        transport.
        """
        self.assertIdentical(
            None, getattr(http.StringTransport, 'registerProducer', None),
            "StringTransport cannot implement registerProducer for this test "
            "to be valid.")
        req = http.Request(DummyChannel(), True)
        producer = DummyProducer()
        req.registerProducer(producer, True)
        # This is a roundabout assertion: http.StringTransport doesn't
        # implement registerProducer, so Request.registerProducer can't have
        # tried to call registerProducer on the transport.
        self.assertIsInstance(req.transport, http.StringTransport)
    def test_registerProducerWhenQueuedDoesntRegisterPullProducer(self):
        """
        Calling L{Request.registerProducer} with an IPullProducer when the
        request is queued does not register the producer on the request's
        transport.
        """
        self.assertIdentical(
            None, getattr(http.StringTransport, 'registerProducer', None),
            "StringTransport cannot implement registerProducer for this test "
            "to be valid.")
        req = http.Request(DummyChannel(), True)
        producer = DummyProducer()
        req.registerProducer(producer, False)
        # This is a roundabout assertion: http.StringTransport doesn't
        # implement registerProducer, so Request.registerProducer can't have
        # tried to call registerProducer on the transport.
        self.assertIsInstance(req.transport, http.StringTransport)
    def test_registerProducerWhenNotQueuedRegistersPushProducer(self):
        """
        Calling L{Request.registerProducer} with an IPushProducer when the
        request is not queued registers the producer as a push producer on the
        request's transport.
        """
        req = http.Request(DummyChannel(), False)
        producer = DummyProducer()
        req.registerProducer(producer, True)
        self.assertEquals([(producer, True)], req.transport.producers)
    def test_registerProducerWhenNotQueuedRegistersPullProducer(self):
        """
        Calling L{Request.registerProducer} with an IPullProducer when the
        request is not queued registers the producer as a pull producer on the
        request's transport.
        """
        req = http.Request(DummyChannel(), False)
        producer = DummyProducer()
        req.registerProducer(producer, False)
        self.assertEquals([(producer, False)], req.transport.producers)
    def test_connectionLostNotification(self):
        """
        L{Request.connectionLost} triggers all finish notification Deferreds
        and cleans up per-request state.
        """
        d = DummyChannel()
        request = http.Request(d, True)
        finished = request.notifyFinish()
        request.connectionLost(Failure(ConnectionLost("Connection done")))
        self.assertIdentical(request.channel, None)
        # The notifyFinish Deferred must errback with the connection failure.
        return self.assertFailure(finished, ConnectionLost)
    def test_finishNotification(self):
        """
        L{Request.finish} triggers all finish notification Deferreds.
        """
        request = http.Request(DummyChannel(), False)
        finished = request.notifyFinish()
        # Force the request to have a non-None content attribute. This is
        # probably a bug in Request.
        request.gotLength(1)
        request.finish()
        return finished
    def test_finishAfterConnectionLost(self):
        """
        Calling L{Request.finish} after L{Request.connectionLost} has been
        called results in a L{RuntimeError} being raised.
        """
        channel = DummyChannel()
        transport = channel.transport
        req = http.Request(channel, False)
        req.connectionLost(Failure(ConnectionLost("The end.")))
        self.assertRaises(RuntimeError, req.finish)
class MultilineHeadersTestCase(unittest.TestCase):
    """
    Tests to exercise handling of multiline headers by L{HTTPClient}. RFCs
    1945 (HTTP 1.0) and 2616 (HTTP 1.1) state that HTTP message header fields
    can span multiple lines if each extra line is preceded by at least one
    space or horizontal tab.
    """
    def setUp(self):
        """
        Reset the flags that record whether the header-processing hooks were
        invoked.
        """
        self.handleHeaderCalled = False
        self.handleEndHeadersCalled = False

    # Sample complete HTTP header key/value pairs, including the joined forms
    # of the multiline headers, keyed by header name.
    expectedHeaders = {
        'Content-Length': '10',
        'X-Multiline': 'line-0\tline-1',
        'X-Multiline2': 'line-2 line-3',
    }

    def ourHandleHeader(self, key, val):
        """
        Stand-in for L{HTTPClient.handleHeader}: record the call and verify
        the value against L{expectedHeaders}.
        """
        self.handleHeaderCalled = True
        self.assertEquals(val, self.expectedHeaders[key])

    def ourHandleEndHeaders(self):
        """
        Stand-in for L{HTTPClient.handleEndHeaders}: record that it ran.
        """
        self.handleEndHeadersCalled = True

    def _makeClient(self):
        """
        Create a L{ClientDriver} with its header-processing hooks replaced by
        the recording stand-ins above.
        """
        client = ClientDriver()
        client.handleHeader = self.ourHandleHeader
        client.handleEndHeaders = self.ourHandleEndHeaders
        return client

    def test_extractHeader(self):
        """
        A header isn't processed by L{HTTPClient.extractHeader} until it is
        confirmed in L{HTTPClient.lineReceived} that the header has been
        received completely.
        """
        client = self._makeClient()
        client.lineReceived('HTTP/1.0 201')
        client.lineReceived('Content-Length: 10')
        # The header line alone is not enough: a continuation line could
        # still follow, so nothing has been processed yet.
        self.assertIdentical(client.length, None)
        self.assertFalse(self.handleHeaderCalled)
        self.assertFalse(self.handleEndHeadersCalled)
        # Signal end of headers.
        client.lineReceived('')
        self.assertTrue(self.handleHeaderCalled)
        self.assertTrue(self.handleEndHeadersCalled)
        self.assertEquals(client.length, 10)

    def test_noHeaders(self):
        """
        An HTTP request with no headers will not cause any calls to
        L{handleHeader} but will cause L{handleEndHeaders} to be called on
        L{HTTPClient} subclasses.
        """
        client = self._makeClient()
        client.lineReceived('HTTP/1.0 201')
        # Signal end of headers.
        client.lineReceived('')
        self.assertFalse(self.handleHeaderCalled)
        self.assertTrue(self.handleEndHeadersCalled)
        self.assertEquals(client.version, 'HTTP/1.0')
        self.assertEquals(client.status, '201')

    def test_multilineHeaders(self):
        """
        L{HTTPClient} parses multiline headers by buffering header lines until
        an empty line or a line that does not start with whitespace hits
        lineReceived, confirming that the header has been received completely.
        """
        client = self._makeClient()
        client.lineReceived('HTTP/1.0 201')
        client.lineReceived('X-Multiline: line-0')
        self.assertFalse(self.handleHeaderCalled)
        # Continuation line introduced by a tab.
        client.lineReceived('\tline-1')
        client.lineReceived('X-Multiline2: line-2')
        # The previous header must be complete, so now it can be processed.
        self.assertTrue(self.handleHeaderCalled)
        # Continuation line introduced by a space.
        client.lineReceived(' line-3')
        client.lineReceived('Content-Length: 10')
        # Signal end of headers.
        client.lineReceived('')
        self.assertTrue(self.handleEndHeadersCalled)
        self.assertEquals(client.version, 'HTTP/1.0')
        self.assertEquals(client.status, '201')
        self.assertEquals(client.length, 10)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.