repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
nugget/home-assistant | homeassistant/components/modbus/climate.py | 2 | 5083 | """Support for Generic Modbus Thermostats."""
import logging
import struct
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateDevice
from homeassistant.components.climate.const import SUPPORT_TARGET_TEMPERATURE
from homeassistant.components.modbus import (
CONF_HUB, DEFAULT_HUB, DOMAIN as MODBUS_DOMAIN)
from homeassistant.const import ATTR_TEMPERATURE, CONF_NAME, CONF_SLAVE
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_TARGET_TEMP = 'target_temp_register'
CONF_CURRENT_TEMP = 'current_temp_register'
CONF_DATA_TYPE = 'data_type'
CONF_COUNT = 'data_count'
CONF_PRECISION = 'precision'
DATA_TYPE_INT = 'int'
DATA_TYPE_UINT = 'uint'
DATA_TYPE_FLOAT = 'float'
DEPENDENCIES = ['modbus']
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_CURRENT_TEMP): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_SLAVE): cv.positive_int,
vol.Required(CONF_TARGET_TEMP): cv.positive_int,
vol.Optional(CONF_COUNT, default=2): cv.positive_int,
vol.Optional(CONF_DATA_TYPE, default=DATA_TYPE_FLOAT):
vol.In([DATA_TYPE_INT, DATA_TYPE_UINT, DATA_TYPE_FLOAT]),
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
vol.Optional(CONF_PRECISION, default=1): cv.positive_int,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Modbus Thermostat Platform.

    Reads the validated platform configuration, looks up the configured
    Modbus hub and registers a single ModbusThermostat entity.
    """
    hub = hass.data[MODBUS_DOMAIN][config.get(CONF_HUB)]
    thermostat = ModbusThermostat(
        hub,
        config.get(CONF_NAME),
        config.get(CONF_SLAVE),
        config.get(CONF_TARGET_TEMP),
        config.get(CONF_CURRENT_TEMP),
        config.get(CONF_DATA_TYPE),
        config.get(CONF_COUNT),
        config.get(CONF_PRECISION))
    # True -> request an immediate state update after adding.
    add_entities([thermostat], True)
class ModbusThermostat(ClimateDevice):
    """Representation of a Modbus Thermostat.

    Target and current temperatures are read from (and the target written
    to) Modbus holding registers on a configured slave device.
    """

    def __init__(self, hub, name, modbus_slave, target_temp_register,
                 current_temp_register, data_type, count, precision):
        """Initialize the unit.

        Args:
            hub: Modbus hub object used for register I/O.
            name: Friendly name of the entity.
            modbus_slave: Modbus slave (unit) id of the device.
            target_temp_register: Holding register holding the setpoint.
            current_temp_register: Holding register holding the reading.
            data_type: One of DATA_TYPE_INT / DATA_TYPE_UINT / DATA_TYPE_FLOAT.
            count: Number of 16-bit registers the value spans (1, 2 or 4).
            precision: Decimal places used when formatting readings.
        """
        self._hub = hub
        self._name = name
        self._slave = modbus_slave
        self._target_temperature_register = target_temp_register
        self._current_temperature_register = current_temp_register
        self._target_temperature = None
        self._current_temperature = None
        self._data_type = data_type
        self._count = int(count)
        self._precision = precision
        # Map (data type, register count) to a struct format code.  Each
        # register is 16 bits, so e.g. two 'float' registers decode as a
        # big-endian 32-bit float ('>f').  (The dead '>f' default that was
        # immediately overwritten has been removed.)
        data_types = {
            DATA_TYPE_INT: {1: 'h', 2: 'i', 4: 'q'},
            DATA_TYPE_UINT: {1: 'H', 2: 'I', 4: 'Q'},
            DATA_TYPE_FLOAT: {1: 'e', 2: 'f', 4: 'd'},
        }
        self._structure = '>{}'.format(
            data_types[self._data_type][self._count])

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return SUPPORT_FLAGS

    def update(self):
        """Update Target & Current Temperature."""
        self._target_temperature = self.read_register(
            self._target_temperature_register)
        self._current_temperature = self.read_register(
            self._current_temperature_register)

    @property
    def name(self):
        """Return the name of the climate device."""
        return self._name

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temperature

    @property
    def target_temperature(self):
        """Return the target temperature."""
        return self._target_temperature

    def set_temperature(self, **kwargs):
        """Set new target temperature."""
        target_temperature = kwargs.get(ATTR_TEMPERATURE)
        if target_temperature is None:
            return
        byte_string = struct.pack(self._structure, target_temperature)
        # NOTE(review): only the first 16 bits are re-read as a signed
        # short, which is lossy for multi-register/float data types --
        # confirm against the target device's register layout.
        register_value = struct.unpack('>h', byte_string[0:2])[0]
        try:
            self.write_register(
                self._target_temperature_register, register_value)
        except AttributeError as ex:
            _LOGGER.error(ex)

    def read_register(self, register):
        """Read a holding register via the Modbus hub slave.

        Returns the value formatted with the configured precision as a
        string, or None when the hub is not available yet.
        """
        try:
            result = self._hub.read_holding_registers(
                self._slave, register, self._count)
        except AttributeError as ex:
            # BUG FIX: previously execution fell through here and crashed
            # with a NameError on the undefined 'result'.  Report the error
            # and retry on the next scheduled update instead.
            _LOGGER.error(ex)
            return None
        # Registers arrive as 16-bit words; join them big-endian and
        # decode with the configured struct format.
        byte_string = b''.join(
            [x.to_bytes(2, byteorder='big') for x in result.registers])
        val = struct.unpack(self._structure, byte_string)[0]
        return format(val, '.{}f'.format(self._precision))

    def write_register(self, register, value):
        """Write register using the Modbus hub slave."""
        self._hub.write_registers(self._slave, register, [value, 0])
| apache-2.0 |
balazssimon/ml-playground | udemy/lazyprogrammer/deep-reinforcement-learning-python/mountaincar/q_learning.py | 1 | 6102 | # This takes 4min 30s to run in Python 2.7
# But only 1min 30s to run in Python 3.5!
#
# Note: gym changed from version 0.7.3 to 0.8.0
# MountainCar episode length is capped at 200 in later versions.
# This means your agent can't learn as much in the earlier episodes
# since they are no longer as long.
import gym
import os
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from gym import wrappers
from datetime import datetime
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler
from sklearn.kernel_approximation import RBFSampler
from sklearn.linear_model import SGDRegressor
# SGDRegressor defaults:
# loss='squared_loss', penalty='l2', alpha=0.0001,
# l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
# verbose=0, epsilon=0.1, random_state=None, learning_rate='invscaling',
# eta0=0.01, power_t=0.25, warm_start=False, average=False
# Inspired by https://github.com/dennybritz/reinforcement-learning
class FeatureTransformer:
  """Turns raw environment observations into RBF kernel features."""

  def __init__(self, env, n_components=500):
    # Sample the observation space to fit the scaler and featurizer.
    samples = np.array(
        [env.observation_space.sample() for _ in range(10000)])
    scaler = StandardScaler()
    scaler.fit(samples)

    # Several RBF kernels with different bandwidths so the feature set
    # covers both coarse and fine structure of the state space.
    featurizer = FeatureUnion([
        ("rbf1", RBFSampler(gamma=5.0, n_components=n_components)),
        ("rbf2", RBFSampler(gamma=2.0, n_components=n_components)),
        ("rbf3", RBFSampler(gamma=1.0, n_components=n_components)),
        ("rbf4", RBFSampler(gamma=0.5, n_components=n_components)),
    ])
    feature_examples = featurizer.fit_transform(scaler.transform(samples))

    self.dimensions = feature_examples.shape[1]
    self.scaler = scaler
    self.featurizer = featurizer

  def transform(self, observations):
    """Scale the observations, then map them through the RBF kernels."""
    scaled = self.scaler.transform(observations)
    return self.featurizer.transform(scaled)
# Holds one SGDRegressor for each action
class Model:
  """Q-value approximator: one SGDRegressor per discrete action."""

  def __init__(self, env, feature_transformer, learning_rate):
    self.env = env
    self.models = []
    self.feature_transformer = feature_transformer
    for _ in range(env.action_space.n):
      regressor = SGDRegressor(learning_rate=learning_rate)
      # Seed each regressor with a single dummy target so predict()
      # works before the first real update.
      regressor.partial_fit(
          feature_transformer.transform([env.reset()]), [0])
      self.models.append(regressor)

  def predict(self, s):
    """Return Q(s, a) for every action as a (1, n_actions) array."""
    features = self.feature_transformer.transform([s])
    predictions = np.stack([m.predict(features) for m in self.models]).T
    assert len(predictions.shape) == 2
    return predictions

  def update(self, s, a, G):
    """Nudge the regressor for action a toward the return G."""
    features = self.feature_transformer.transform([s])
    assert len(features.shape) == 2
    self.models[a].partial_fit(features, [G])

  def sample_action(self, s, eps):
    """Epsilon-greedy action selection.

    Technically epsilon-greedy is not required here: SGDRegressor
    predicts 0 for all states until updated, which acts as the
    "Optimistic Initial Values" method since all Mountain Car
    rewards are -1.
    """
    if np.random.random() < eps:
      return self.env.action_space.sample()
    return np.argmax(self.predict(s))
# returns a list of states_and_rewards, and the total reward
def play_one(model, env, eps, gamma):
  """Play one episode, updating the model online; return total reward."""
  observation = env.reset()
  done = False
  total_reward = 0
  steps = 0
  while not done and steps < 10000:
    action = model.sample_action(observation, eps)
    prev_observation = observation
    observation, reward, done, info = env.step(action)

    # Q-learning target: r + gamma * max_a' Q(s', a').
    # (Renamed from 'next', which shadowed the builtin.)
    next_q = model.predict(observation)
    G = reward + gamma * np.max(next_q[0])
    model.update(prev_observation, action, G)

    total_reward += reward
    steps += 1

  return total_reward
def plot_cost_to_go(env, estimator, num_tiles=20):
  """Render the negative value function -V(s) as a 3D surface.

  Args:
    env: Gym environment (observation space gives the axis bounds).
    estimator: Object with a predict(state) method returning Q-values.
    num_tiles: Grid resolution per axis.
  """
  x = np.linspace(env.observation_space.low[0], env.observation_space.high[0], num=num_tiles)
  y = np.linspace(env.observation_space.low[1], env.observation_space.high[1], num=num_tiles)
  X, Y = np.meshgrid(x, y)
  # both X and Y will be of shape (num_tiles, num_tiles)
  # Cost-to-go is the negated best Q-value at each (position, velocity).
  Z = np.apply_along_axis(lambda _: -np.max(estimator.predict(_)), 2, np.dstack([X, Y]))
  # Z will also be of shape (num_tiles, num_tiles)
  fig = plt.figure(figsize=(10, 5))
  ax = fig.add_subplot(111, projection='3d')
  surf = ax.plot_surface(X, Y, Z,
    rstride=1, cstride=1, cmap=matplotlib.cm.coolwarm, vmin=-1.0, vmax=1.0)
  ax.set_xlabel('Position')
  ax.set_ylabel('Velocity')
  ax.set_zlabel('Cost-To-Go == -V(s)')
  ax.set_title("Cost-To-Go Function")
  fig.colorbar(surf)
  plt.show()
def plot_running_avg(totalrewards):
  """Plot a trailing 100-episode mean of the per-episode rewards."""
  count = len(totalrewards)
  window_means = np.empty(count)
  for idx in range(count):
    # Mean over the last (up to) 100 episodes ending at idx.
    window_means[idx] = totalrewards[max(0, idx - 100):(idx + 1)].mean()
  plt.plot(window_means)
  plt.title("Running Average")
  plt.show()
def main(show_plots=True):
  """Train a Q-learning agent on MountainCar-v0 and report results.

  Args:
    show_plots: When True, display reward curves and the cost-to-go
        surface after training.
  """
  env = gym.make('MountainCar-v0')
  ft = FeatureTransformer(env)
  model = Model(env, ft, "constant")
  gamma = 0.99

  # Pass 'monitor' on the command line to record videos/stats to disk.
  if 'monitor' in sys.argv:
    filename = os.path.basename(__file__).split('.')[0]
    monitor_dir = './' + filename + '_' + str(datetime.now())
    env = wrappers.Monitor(env, monitor_dir)

  N = 300
  totalrewards = np.empty(N)
  for n in range(N):
    # eps = 1.0/(0.1*n+1)
    # Exponentially decaying exploration rate.
    eps = 0.1*(0.97**n)
    if n == 199:
      print("eps:", eps)
    # eps = 1.0/np.sqrt(n+1)
    totalreward = play_one(model, env, eps, gamma)
    totalrewards[n] = totalreward
    if (n + 1) % 100 == 0:
      print("episode:", n, "total reward:", totalreward)
  print("avg reward for last 100 episodes:", totalrewards[-100:].mean())
  # Every step yields reward -1, so -sum(rewards) equals total steps.
  print("total steps:", -totalrewards.sum())

  if show_plots:
    plt.plot(totalrewards)
    plt.title("Rewards")
    plt.show()

    plot_running_avg(totalrewards)

    # plot the optimal state-value function
    plot_cost_to_go(env, model)
if __name__ == '__main__':
# for i in range(10):
# main(show_plots=False)
main() | apache-2.0 |
dlcwshi/p2pool-feathercoin | nattraverso/utils.py | 288 | 1563 | """
Various utility functions used in the nattraverso package.
@author: Raphael Slinckx
@copyright: Copyright 2005
@license: LGPL
@contact: U{raphael@slinckx.net<mailto:raphael@slinckx.net>}
@version: 0.1.0
"""
__revision__ = "$id"
def is_rfc1918_ip(ip):
    """
    Checks if the given ip address is a rfc1918 one.

    @param ip: The ip address to test
    @type ip: a string "x.x.x.x"
    @return: True if it's a LAN address, False otherwise
    """
    # Accept either a dotted-quad string or an already-packed number.
    if isinstance(ip, basestring):
        ip = _ip_to_number(ip)
    return any(ip & mask == net for net, mask in _nets)
def is_bogus_ip(ip):
    """
    Checks if the given ip address is bogus, i.e. 0.0.0.0 or 127.0.0.1.

    @param ip: The ip address to test
    @type ip: a string "x.x.x.x"
    @return: True if it's bogus, False otherwise
    """
    # startswith accepts a tuple of prefixes: 0.* and loopback 127.*.
    return ip.startswith(('0.', '127.'))
def _ip_to_number(ipstr):
"""
Translate a string ip address to a packed number.
@param ipstr: the ip address to transform
@type ipstr: a string "x.x.x.x"
@return: an int32 number representing the ip address
"""
net = [ int(digit) for digit in ipstr.split('.') ] + [ 0, 0, 0 ]
net = net[:4]
return ((((((0L+net[0])<<8) + net[1])<<8) + net[2])<<8) +net[3]
# List of rfc1918 net/mask
# NOTE(review): 127/8 is the loopback range, not an RFC 1918 private
# network -- presumably included on purpose so loopback addresses also
# count as "LAN"; confirm.
_rfc1918_networks = [('127', 8), ('192.168', 16), ('10', 8), ('172.16', 12)]
# Machine readable form of the above.  Each entry is
# (network-as-long, netmask-as-long); the mask expression keeps the top
# `mask` bits of the 32-bit address space.
_nets = [(_ip_to_number(net), (2L**32 -1)^(2L**(32-mask)-1))
    for net, mask in _rfc1918_networks]
| gpl-3.0 |
dimas-lex/osbb | osb/osb/billing/testing/AccountsServiceTest.py | 1 | 2043 | # -*- coding: utf-8 -*-
from django.test import TestCase
from osb.billing.models import Accounts, Services
# from osb.billing.Services.AccountsService import AccountsService
from osb.billing.Services.AccountsService import AccountsService
from osb.billing.Services.ServiceService import *
class AccountsServiceTest(TestCase):
    """CRUD tests for AccountsService.

    Test methods are numbered so they run in create/read/update/delete
    order under the default alphabetical test-method sorting.
    """

    def setUp(self):
        # Fresh fixture data before every test; uid "3" gets porch=3 so
        # test_02_get_by_porch has exactly one match.
        self.accountServ = AccountsService()
        self.accountServ.create(uid="1", name="lion", address="pr")
        self.accountServ.create(uid="2", name="cat", address="pr2")
        self.accountServ.create(uid="3", name="cat", address="pr2", porch=3)

    def test_01_get_all(self):
        """ Test 'get_all' method """
        print self.test_01_get_all.__doc__
        self.assertEqual(len(self.accountServ.get_all()), 3)

    def test_02_get_by_porch(self):
        """ Test 'get_by_porch' method """
        print self.test_02_get_by_porch.__doc__
        self.assertEqual(len(self.accountServ.get_by_porch(porch=3)), 1)

    def test_03_create(self):
        """ Test 'create' method """
        print self.test_03_create.__doc__
        # create() should hand back the persisted Accounts instance.
        self.assertTrue(
            isinstance(
                self.accountServ.create(uid="4", name="dog", address="pr"),
                Accounts
            )
        )

    def test_04_update(self):
        """ Test 'update' method """
        print self.test_04_update.__doc__
        self.assertTrue( self.accountServ.update(name="dog", uid="3", address="prr") )

    def test_05_delete(self):
        """ Test 'delete' method """
        print self.test_05_delete.__doc__
        self.assertTrue( self.accountServ.delete(uid="3") )

    # def test_06_print(self):
    #     """ Just #print out results """
    #     print self.test_06_print.__doc__
    #     accounts = self.accountServ.get_all()
    #     for acc in accounts:
    #         print ( " ".join(("uid", acc.uid, "name", acc.name, "address", acc.address, "porch", str(acc.porch), "deleted", str(acc.deleted) )) )
    #     self.assertTrue(True)
# self.assertTrue(True) | gpl-2.0 |
citrix-openstack-build/python-keystoneclient | keystoneclient/openstack/common/py3kcompat/urlutils.py | 11 | 1352 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Python2/Python3 compatibility layer for OpenStack
"""
import six

# Re-export the URL helpers under one version-independent set of names so
# callers never have to branch on the Python major version themselves.
if six.PY3:
    # python3
    import urllib.parse

    urlencode = urllib.parse.urlencode
    urljoin = urllib.parse.urljoin
    quote = urllib.parse.quote
    parse_qsl = urllib.parse.parse_qsl
    urlparse = urllib.parse.urlparse
    urlsplit = urllib.parse.urlsplit
    urlunsplit = urllib.parse.urlunsplit
else:
    # python2
    import urllib
    import urlparse

    urlencode = urllib.urlencode
    quote = urllib.quote
    parse = urlparse
    parse_qsl = parse.parse_qsl
    urljoin = parse.urljoin
    urlparse = parse.urlparse
    urlsplit = parse.urlsplit
    urlunsplit = parse.urlunsplit
| apache-2.0 |
jmollan/support-tools | googlecode-issues-exporter/issues.py | 18 | 32921 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading Google Code issues to an issue service.
"""
import collections
import datetime
import json
import re
import sys
import HTMLParser
# Regular expression used by Google Code for auto-linking issue references,
# e.g. "issue #8" or "bug5".
GC_ISSUE_REF_RE = re.compile(r"""
(?P<prefix>\b(issue|bug)\s*)
(?P<project_name>\s+[-a-z0-9]+[:\#])?
(?P<number_sign>\#?)
(?P<issue_id>\d+)\b""", re.IGNORECASE | re.MULTILINE | re.VERBOSE)
# Regular expression to match issue references generated by this tool and
# match GitHub's system. (e.g. "- **Blocking**: #1, #2, #3")
EX_ISSUE_REF_RE = re.compile(
r"- \*\*(?P<tag>([^\*]+))\*\*: #(?P<issues>([^\n]+))")
def RemapIssueIds(comment, id_mapping):
  """Rewrite a comment's text based on an ID mapping.

  Args:
    comment: A string with the comment text. e.g. 'Closes issue #42'.
    id_mapping: A dictionary mapping Google Code to GitHub issue IDs.
        e.g. { '42': '142' }

  Returns:
    The rewritten comment text.
  """
  def replaceGoogleCodeIssueReferences(match):
    # Ignore references to other projects.
    if match.group('project_name'):
      return match.group()
    # Ignore issues not found in the ID mapping.
    google_code_id = match.group('issue_id')
    if not id_mapping or google_code_id not in id_mapping:
      return match.group()
    github_id = id_mapping[google_code_id]
    # Keep the surrounding text ("issue #", etc.); swap only the number.
    return match.group().replace(google_code_id, github_id)

  def replaceExportedIssueReferences(match):
    # Parse the issues list and regenerate.
    gc_issue_ids = match.group('issues').split(", #")
    gh_issue_ids = []
    for gc_issue_id in gc_issue_ids:
      if id_mapping and gc_issue_id in id_mapping:
        gh_issue_ids.append(id_mapping[gc_issue_id])
      else:
        # Unmapped IDs pass through unchanged.
        gh_issue_ids.append(gc_issue_id)
    return "- **%s**: #%s" % (
        match.group('tag'), ", #".join(gh_issue_ids))

  # First rewrite natural-language references ("issue #42"), then the
  # structured "- **Blocking**: #1, #2" lines this exporter generates.
  comment = GC_ISSUE_REF_RE.sub(replaceGoogleCodeIssueReferences, comment)
  comment = EX_ISSUE_REF_RE.sub(replaceExportedIssueReferences, comment)
  return comment
def _ParseIssueReferences(issue_ref_list):
  """Parses a list of issue references into a tuple of IDs added/removed.

  For example: [ "alpha:7", "beta:8", "-gamma:9" ] => ([ "7", "8" ], [ "9" ])

  NOTE: We don't support cross-project issue references. Rather we
  just assume the issue reference is within the same project.
  """
  added = []
  removed = []
  for ref in issue_ref_list:
    # "project:id" keeps the part after the colon; with no colon, drop
    # the first character (the sign for removals).
    pieces = ref.split(":")
    ref_id = pieces[1] if len(pieces) >= 2 else ref[1:]
    if ref[0] != "-":
      added.append(ref_id)
    else:
      removed.append(ref_id)
  return added, removed
class IdentityDict(dict):
  """A dict that maps every missing key to itself (identity lookup)."""

  def __missing__(self, key):
    # dict.__getitem__ calls this hook when the key is absent.
    return key
def TryFormatDate(date):
  """Attempt to clean up a timestamp date.

  Parses ISO-8601-style "%Y-%m-%dT%H:%M:%S.%fZ" strings into the default
  datetime string form; anything unparseable is returned unchanged
  (minus a stray trailing colon, if present).
  """
  try:
    if date.endswith(":"):
      date = date[:-1]
    parsed = datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ")
    return str(parsed)
  except ValueError:
    # Not a timestamp in the expected format; hand back the input.
    return date
def WrapText(text, max):
  """Inserts a newline if any line of a file is > max chars.

  Note that the newline is inserted at the first whitespace
  character, so there may be lines longer than max.
  """
  chars = list(text)
  last_break = 0
  for pos in range(len(chars)):
    current = chars[pos]
    if current == '\n' or current == '\r':
      last_break = pos
    if pos - last_break > max and current == ' ':
      # Swap this space for a newline in place (length is unchanged).
      chars[pos] = '\n'
      last_break = pos
  return ''.join(chars)
# Exception hierarchy for the exporter; all errors derive from Error so
# callers can catch the whole family with one except clause.
class Error(Exception):
  """Base error class."""


class InvalidUserError(Error):
  """Error for an invalid user."""


class ProjectNotFoundError(Error):
  """Error for a non-existent project."""


class ServiceError(Error):
  """Error when communicating with the issue or user service."""
class UserService(object):
  """Abstract user operations.

  Handles user operations on an user API.  Concrete services (e.g. a
  GitHub-backed implementation) must override IsUser.
  """

  def IsUser(self, username):
    """Checks if the user exists.

    Args:
      username: The username to check.

    Returns:
      True if the username exists.
    """
    raise NotImplementedError()
class GoogleCodeIssue(object):
  """Google Code issue.

  Handles parsing and viewing a Google Code issue.  Wraps the raw issue
  dictionary from a Google Takeout export and exposes typed accessors.
  """

  def __init__(self, issue, project_name, user_map):
    """Initialize the GoogleCodeIssue.

    Args:
      issue: The Google Code Issue as a dictionary.
      project_name: The name of the project the issue belongs to.
      user_map: A map from Google Code usernames to issue service names.
    """
    self._issue = issue
    self._project_name = project_name
    self._user_map = user_map

  def GetProjectName(self):
    """Returns the project name."""
    return self._project_name

  def GetUserMap(self):
    """Returns the user map."""
    return self._user_map

  def GetOwner(self):
    """Get the owner username of a Google Code issue.

    This will ALWAYS be the person requesting the issue export.
    """
    return self._user_map["user_requesting_export"]

  def GetContentUpdatedOn(self):
    """Get the date the content was last updated from a Google Code issue.

    Returns:
      The time stamp when the issue content was last updated
    """
    return self._issue["updated"]

  def GetCreatedOn(self):
    """Get the creation date from a Google Code issue.

    Returns:
      The time stamp when the issue content was created
    """
    return self._issue["published"]

  def GetId(self):
    """Get the id from a Google Code issue.

    Returns:
      The issue id
    """
    return self._issue["id"]

  def GetLabels(self):
    """Get the labels from a Google Code issue.

    Returns:
      A list of the labels of this issue.
    """
    return self._issue.get("labels", [])

  def GetKind(self):
    """Get the kind from a Google Code issue.

    Returns:
      The issue kind, if none is found defaults to 'Defect'
    """
    # The kind is encoded as a "Type-<kind>" label.
    types = [t for t in self.GetLabels() if "Type-" in t]
    if types:
      return types[0][len("Type-"):]
    return "Defect"

  def GetPriority(self):
    """Get the priority from a Google Code issue.

    Returns:
      The issue priority, if none is found defaults to 'Medium'
    """
    # The priority is encoded as a "Priority-<level>" label.
    priorities = [p for p in self.GetLabels() if "Priority-" in p]
    if priorities:
      return priorities[0][len("Priority-"):]
    return "Medium"

  def GetAuthor(self):
    """Get the author's username of a Google Code issue.

    Returns:
      The Google Code username that the issue is authored by or the
      repository owner if no mapping or email address exists.
    """
    if "author" not in self._issue:
      return None
    author = self._issue["author"]["name"]
    return self._user_map[author]

  def GetStatus(self):
    """Get the status from a Google Code issue.

    Returns:
      The issue status
    """
    status = self._issue["status"].lower()
    # "accepted" has no equivalent downstream; treat it as open.
    if status == "accepted":
      status = "open"
    return status

  def GetTitle(self):
    """Get the title from a Google Code issue.

    Returns:
      The issue title
    """
    title = self._issue["title"]
    # It is not possible to create a Google Code issue without a title, but you
    # can edit an issue to remove its title afterwards.
    if title.isspace():
      title = "<empty title>"
    return title

  def GetUpdatedOn(self):
    """Get the date the issue was last updated.

    Returns:
      The time stamp when the issue was last updated
    """
    # Deliberately reuses the creation date; the raw "updated" field is
    # exposed separately via GetContentUpdatedOn.
    return self.GetCreatedOn()

  def GetComments(self):
    """Get the list of comments for the issue (if any).

    Returns:
      The list of comments attached to the issue
    """
    # The 0th comment is the issue's description. Also, filter out
    # any deleted comments.
    comments = self._issue["comments"]["items"][1:]
    return [c for c in comments if not "deletedBy" in c]

  def IsOpen(self):
    """Check if an issue is marked as open.

    Returns:
      True if the issue was open.
    """
    return "state" in self._issue and self._issue["state"] == "open"

  def GetDescription(self):
    """Returns the Description of the issue."""
    # Just return the description of the underlying comment. However,
    # we fudge a few things since metadata is stored differently for
    # "the issue" (i.e. comment #0) and other comments.
    comment_0_data = self._issue["comments"]["items"][0]
    googlecode_comment = GoogleCodeComment(self, comment_0_data)
    issue_description = googlecode_comment.GetDescription()

    # Be careful not to run afoul of issue reference rewriting...
    issue_header = "Originally reported on Google Code with ID %s\n" % (
        self.GetId())
    return issue_header + issue_description
class GoogleCodeComment(object):
  """Google Code Comment.

  Handles parsing and viewing a Google Code Comment, and rendering it as
  Markdown for the destination issue tracker.
  """

  def __init__(self, googlecode_issue, comment, id_mapping=None):
    """Initialize the GoogleCodeComment.

    Args:
      googlecode_issue: A GoogleCodeIssue instance.
      comment: The Google Code Comment as dictionary.
      id_mapping: Mapping from Google Code issue IDs to their new locations.
    """
    self._comment = comment
    self._googlecode_issue = googlecode_issue
    self._id_mapping = id_mapping

  def GetContent(self):
    """Get the content from a Google Code comment.

    Returns:
      The issue comment
    """
    return self._comment["content"]

  def GetCreatedOn(self):
    """Get the creation date from a Google Code comment.

    Returns:
      The time stamp when the issue comment content was created
    """
    return self._comment["published"]

  def GetId(self):
    """Get the id from a Google Code comment.

    Returns:
      The issue comment id
    """
    return self._comment["id"]

  def GetLabels(self):
    """Get the labels modified with the comment."""
    if "updates" in self._comment:
      if "labels" in self._comment["updates"]:
        return self._comment["updates"]["labels"]
    return []

  def GetIssue(self):
    """Get the GoogleCodeIssue this comment belongs to.

    Returns:
      The issue id
    """
    return self._googlecode_issue

  def GetUpdatedOn(self):
    """Get the date the issue comment content was last updated.

    Returns:
      The time stamp when the issue comment content was last updated
    """
    return self.GetCreatedOn()

  def GetAuthor(self):
    """Get the author's username of a Google Code issue comment.

    Returns:
      The Google Code username that the issue comment is authored by or the
      repository owner if no mapping or email address exists.
    """
    if "author" not in self._comment:
      return None
    author = self._comment["author"]["name"]
    return self.GetIssue().GetUserMap()[author]

  def GetDescription(self):
    """Returns the Description of the comment, rendered as Markdown."""
    author = self.GetAuthor()
    comment_date = self.GetCreatedOn()
    comment_text = self.GetContent()
    comment_updates = {}
    if "updates" in self._comment:
      comment_updates = self._comment["updates"]

    body = ""
    if comment_text:
      # Google Takeout includes escaped HTML such as &gt; and &aacute;.
      html_parser = HTMLParser.HTMLParser()
      comment_text = html_parser.unescape(comment_text)
      # Remove <b> tags, which Codesite automatically includes if issue body
      # is based on a prompt.
      comment_text = comment_text.replace("<b>", "")
      comment_text = comment_text.replace("</b>", "")
      # 82 instead of 80 in case it was already wrapped...
      comment_text = WrapText(comment_text, 82)
      body += "```\n" + comment_text + "\n```\n\n"

    footer = "Reported by `%s` on %s\n" % (
        author, TryFormatDate(comment_date))
    if "status" in comment_updates:
      footer += "- **Status changed**: `%s`\n" % (comment_updates["status"])
    footer += self._GetLabelInfo()
    footer += self._GetLinksToOtherIssues()
    if "mergedInto" in comment_updates and comment_updates["mergedInto"]:
      footer += "- **Merged into**: #%s\n" % (
          comment_updates["mergedInto"])

    # Add references to attachments as appropriate. (Do this last since it
    # inserts a horizontal rule.)
    footer += self._GetAttachmentInfo()

    raw_comment_body = body + footer
    return RemapIssueIds(raw_comment_body, self._id_mapping)

  def _GetLabelInfo(self):
    """Returns Markdown text for a comment's labels as appropriate."""
    if not self.GetLabels():
      return ""

    labels_added = []
    labels_removed = []
    for label in self.GetLabels():
      # A leading "-" marks a label removal.
      if label.startswith("-"):
        labels_removed.append(label[1:])
      else:
        labels_added.append(label)

    label_info = ""
    if labels_added:
      label_info += "- **Labels added**: %s\n" % (", ".join(labels_added))
    if labels_removed:
      label_info += "- **Labels removed**: %s\n" % (", ".join(labels_removed))
    return label_info

  def _GetLinksToOtherIssues(self):
    """Returns Markdown text for a comment's links to other issues."""
    if "updates" not in self._comment:
      return ""
    updates = self._comment["updates"]

    ref_info = ""
    if "blocking" in updates:
      added, removed = _ParseIssueReferences(updates["blocking"])
      if added:
        ref_info += "- **Blocking**: #" + ", #".join(added) + "\n"
      if removed:
        ref_info += "- **No longer blocking**: #" + ", #".join(removed) + "\n"
    if "blockedOn" in updates:
      added, removed = _ParseIssueReferences(updates["blockedOn"])
      if added:
        ref_info += "- **Blocked on**: #" + ", #".join(added) + "\n"
      if removed:
        ref_info += ("- **No longer blocked on**: #" +
                     ", #".join(removed) + "\n")
    return ref_info

  def _GetAttachmentInfo(self):
    """Returns Markdown text for a comment's attachments as appropriate."""
    attachment_lines = []

    attachments = self._comment.get("attachments", [])
    for attachment in attachments:
      if "isDeleted" in attachment:
        # Deleted attachments won't be found on the issue mirror.
        continue

      link = ("https://storage.googleapis.com/google-code-attachments/"
              "%s/issue-%d/comment-%d/%s" % (
                  self.GetIssue().GetProjectName(), self.GetIssue().GetId(),
                  self.GetId(), attachment["fileName"]))

      def has_extension(extension):
        return attachment["fileName"].lower().endswith(extension)

      # BUG FIX: the loop previously tested has_extension(".png") on every
      # iteration, so only .png files were ever recognized as images.
      is_image_attachment = False
      for extension in [".png", ".jpg", ".jpeg", ".bmp", ".tif", ".gif"]:
        is_image_attachment |= has_extension(extension)

      if is_image_attachment:
        # BUG FIX: the format string had a single %s but three arguments,
        # which raised TypeError at runtime. Inline the image with
        # Markdown syntax after the file name.
        line = " * *Attachment: %s<br>![%s](%s)*" % (
            attachment["fileName"], attachment["fileName"], link)
      else:
        line = " * *Attachment: [%s](%s)*" % (attachment["fileName"], link)
      attachment_lines.append(line)

    if attachment_lines:
      return "\n<hr>\n" + "\n".join(attachment_lines)
    return ""
class IssueService(object):
  """Abstract issue operations.

  Handles creating and updating issues and comments on an user API.
  Concrete backends (e.g. GitHub) implement each operation.
  """

  def GetIssues(self, state="open"):
    """Gets all of the issue for the repository with the given state.

    Args:
      state: The state of the repository can be either 'open' or 'closed'.

    Returns:
      The list of all of the issues with the given state.

    Raises:
      IOError: An error occurred accessing previously created issues.
    """
    raise NotImplementedError()

  def GetComments(self, issue_number):
    """Gets all the comments for the issue with the given ID."""
    raise NotImplementedError()

  def CreateIssue(self, googlecode_issue):
    """Creates an issue.

    Args:
      googlecode_issue: An instance of GoogleCodeIssue

    Returns:
      The issue number of the new issue.

    Raises:
      ServiceError: An error occurred creating the issue.
    """
    raise NotImplementedError()

  def EditIssue(self, googlecode_issue, issue_number):
    """Edits an existing issue."""
    raise NotImplementedError()

  def CloseIssue(self, issue_number):
    """Closes an issue.

    Args:
      issue_number: The issue number.
    """
    raise NotImplementedError()

  def CreateComment(self, issue_number, googlecode_comment):
    """Creates a comment on an issue.

    Args:
      issue_number: The issue number.
      googlecode_comment: An instance of GoogleCodeComment
    """
    raise NotImplementedError()

  def EditComment(self, googlecode_issue, googlecode_comment, comment_number):
    """Edits an existing comment."""
    raise NotImplementedError()
def LoadIssueData(issue_file_path, project_name):
  """Loads issue data from a file.

  Args:
    issue_file_path: path to the file to load
    project_name: name of the project to load

  Returns:
    Issue data as a list of dictionaries.

  Raises:
    ProjectNotFoundError: the project_name was not found in the file.
  """
  with open(issue_file_path) as user_file:
    user_data = json.load(user_file)

  # Scan the Takeout export for the requested project.
  for project in user_data["projects"]:
    if project["name"] == project_name:
      return project["issues"]["items"]

  raise ProjectNotFoundError("Project %s not found" % project_name)
def LoadUserData(user_file_path, user_service):
  """Loads user data from a file. If not present, the user name will
  just return whatever is passed to it.

  Args:
    user_file_path: path to the file to load
    user_service: an instance of UserService

  Returns:
    An IdentityDict mapping Google Code usernames to service usernames;
    unmapped names fall through unchanged.

  Raises:
    InvalidUserError: a mapped username does not exist on the service.
  """
  identity_dict = IdentityDict()
  if not user_file_path:
    return identity_dict

  with open(user_file_path) as user_data:
    user_json = user_data.read()

  user_map = json.loads(user_json)["users"]

  # Validate every mapped username against the destination service
  # before trusting the mapping.
  for username in user_map.values():
    if not user_service.IsUser(username):
      raise InvalidUserError("%s is not a User" % username)

  # BUG FIX: previously updated and returned an undefined name 'result',
  # which raised NameError whenever a user file was supplied.
  identity_dict.update(user_map)
  return identity_dict
class IssueExporter(object):
    """Issue Migration.

    Handles the uploading issues from Google Code to an issue service.
    """

    def __init__(self, issue_service, user_service, issue_json_data,
                 project_name, user_map):
        """Initialize the IssueExporter.

        Args:
          issue_service: An instance of IssueService.
          user_service: An instance of UserService.
          issue_json_data: A data object of issues from Google Code.
          project_name: The name of the project to export to.
          user_map: A map from user email addresses to service usernames.
        """
        self._issue_service = issue_service
        self._user_service = user_service
        self._issue_json_data = issue_json_data
        self._project_name = project_name
        self._user_map = user_map
        # Specialized index of issues to quickly check what has been migrated to
        # GitHub and if so, determine it's new issue ID. See Init(...).
        self._issue_index = {}
        # Progress counters driving the console status line.
        self._prefix = ""  # Output only.
        self._issue_total = 0
        self._issue_number = 0
        self._comment_number = 0
        self._comment_total = 0
        self._skipped_issues = 0
        # Mapping from Google Code issue ID to destination service issue ID.
        self._id_mapping = {}
def Init(self, require_all_issues_exported=False):
"""Initialize the needed variables.
Arg:
require_all_issues_exported: Bool. Require that all issues have
been exported. Used to ensure that rewritting comments won't fail.
"""
print "Building issue index."
self._issue_index = {}
index = self._issue_index
for issue in self._issue_json_data:
gc_issue = GoogleCodeIssue(issue, self._project_name, self._user_map)
if gc_issue.GetTitle() not in index:
index[gc_issue.GetTitle()] = []
index[gc_issue.GetTitle()].append({
"googlecode_id": gc_issue.GetId(),
"exported": False,
"exported_id": -1,
"comment_count": -1,
})
print "Determining which issues have already been exported."
open_issues = self._issue_service.GetIssues("open")
closed_issues = self._issue_service.GetIssues("closed")
all_exported_issues = open_issues + closed_issues
# Sort issues by GitHub ID, since Google Code issues will be exported in
# order we can use the exported issue's chronology to resolve ambiguities
# for issues with the same title. Yes, GitHub number == ID.
all_exported_issues = sorted(all_exported_issues,
key=lambda issue: issue["number"])
for exported_issue in all_exported_issues:
exported_issue_id = exported_issue["number"]
exported_issue_title = exported_issue["title"]
if exported_issue_title not in index:
print "Warning: GitHub issue #%s '%s' not in Google Takeout dump." % (
exported_issue_id, exported_issue_title)
continue
# Mark of the issue as exported.
for idx in range(0, len(index[exported_issue_title])):
if not index[exported_issue_title][idx]["exported"]:
index[exported_issue_title][idx]["exported"] = True
index[exported_issue_title][idx]["exported_id"] = exported_issue_id
index[exported_issue_title][idx]["comment_count"] = (
exported_issue["comments"])
break
if idx >= len(index[exported_issue_title]):
print "Warning: Couldn't find the %sth issue titled '%s'." % (
idx, exported_issue_title)
# Build the ID map based on previously created issue. Only used if
# rewriting comments.
if not require_all_issues_exported:
return
print "Confirming all issues have been exported."
for title in index:
for issue in index[title]:
self._id_mapping[str(issue["googlecode_id"])] = str(issue["exported_id"])
if not issue["exported"]:
raise Exception(
"Issue #%s '%s' not found. Can't rewrite comments." % (
gc_issue.GetId(), gc_issue.GetTitle()))
print "len(id_map) = %s, with %s total issues" % (
len(self._id_mapping), len(self._issue_json_data))
if len(self._id_mapping) < len(self._issue_json_data):
raise Exception("Not all issues have been exported.")
def _GetExportedIssue(self, googlecode_issue):
    """Return metadata about the exported Google Code issue."""
    issue_title = googlecode_issue.GetTitle()
    issue_id = googlecode_issue.GetId()
    if issue_title not in self._issue_index:
        raise Exception("Google Code issue '%s' not expected to be exported." % (
            issue_title))
    # Titles may collide, so match on the Google Code ID within the bucket.
    for candidate in self._issue_index[issue_title]:
        if candidate["googlecode_id"] == issue_id:
            return candidate
    raise Exception("Unable to find Google Code issue #%s." % (issue_id))
def _HasIssueBeenExported(self, googlecode_issue):
    """Returns whether or not a Google Code issue has been exported."""
    return self._GetExportedIssue(googlecode_issue)["exported"]
def _UpdateProgressBar(self):
    """Update issue count 'feed'.

    This displays the current status of the script to the user.
    """
    # '\r' rewinds to the start of the line so successive calls overwrite
    # the previous status instead of scrolling.
    feed_string = ("\r%sIssue: %d/%d -> Comment: %d/%d        " %
                   (self._prefix, self._issue_number, self._issue_total,
                    self._comment_number, self._comment_total))
    sys.stdout.write(feed_string)
    # Flush explicitly: the line carries no newline, so stdout would
    # otherwise buffer it.
    sys.stdout.flush()
def _CreateIssue(self, googlecode_issue):
    """Converts an issue from Google Code to an issue service.

    This will take the Google Code issue and create a corresponding issue on
    the issue service. If the issue on Google Code was closed it will also
    be closed on the issue service.

    Args:
      googlecode_issue: An instance of GoogleCodeIssue

    Returns:
      The issue number assigned by the service.
    """
    # NOTE(review): despite the docstring, closing actually happens in
    # Start(), not here - this method only delegates creation.
    return self._issue_service.CreateIssue(googlecode_issue)
def _CreateComments(self, comments, issue_number, googlecode_issue):
    """Post every Google Code comment onto the destination issue.

    Args:
      comments: A list of comments (each comment is just a string).
      issue_number: The issue number on the destination service.
      googlecode_issue: The GoogleCodeIssue the comments belong to.
    """
    self._comment_total = len(comments)
    self._comment_number = 0
    for raw_comment in comments:
        comment_obj = GoogleCodeComment(googlecode_issue, raw_comment)
        self._comment_number += 1
        self._UpdateProgressBar()
        self._issue_service.CreateComment(issue_number, comment_obj)
def _RewriteComments(self, googlecode_issue, exported_issue_number):
    """Rewrite all comments in the issue to update issue ID references.

    Args:
      googlecode_issue: The Google Code issue to rewrite.
      exported_issue_number: The issue ID on the **destination** system.
    """
    id_mapping = self._id_mapping
    comments = googlecode_issue.GetComments()
    self._prefix = "Rewriting "
    self._comment_total = len(comments)
    self._comment_number = 0
    # Rewrite the issue body itself first.
    self._issue_service.EditIssue(googlecode_issue, exported_issue_number)
    # Get existing comments from the destination, necessary because we don't
    # know the IDs used on the output side. (GitHub uses timestamps :P)
    existing_comments = self._issue_service.GetComments(exported_issue_number)
    for comment_idx in range(0, len(comments)):
        if comment_idx >= len(existing_comments):
            print "\nError: More comments on Google Code than on dest service?"
            print "Google Code #%s vs. dest service #%s (%s comments vs. %s)" % (
                googlecode_issue.GetId(), exported_issue_number,
                len(comments), len(existing_comments))
            break
        comment = comments[comment_idx]
        comment_number = existing_comments[comment_idx]["id"]
        # Rebuild the comment with the ID mapping so cross-issue references
        # point at the destination service's issue numbers.
        gc_comment = GoogleCodeComment(googlecode_issue, comment, id_mapping)
        self._comment_number += 1
        self._UpdateProgressBar()
        self._issue_service.EditComment(
            exported_issue_number, gc_comment, comment_number)
def _FixBlockingBlockedOn(self, issue_json):
    """Fix the issue JSON object to normalize how blocking/blocked-on are used.

    There is a bug in how Google Takeout exports blocking/blocked-on status.
    Each comment may have an update with a list of added/removed
    blocked/blocking issues. However, comment #0, the "original issue state"
    does not contain this information.

    However, the issue does contain summary information. (i.e. a union of
    initial state and all comment updates.)

    This function figures out what should be in comment #0 so everything
    actually makes sense when rendered.
    """
    # Issue references we add to comment #0:
    #  - References that are removed later, but not explicitly added.
    #    (assumed to have been added on comment #0).
    #  - References that are in the summary, but not explicitly added.
    #    (assumed to have been added on comment #0).
    def IssueRefToString(issue_ref):
        # Takeout reference format: "projectId:issueId".
        return issue_ref["projectId"] + ":" + str(issue_ref["issueId"])

    def GetUnionReferences(kind_name):
        """The initial issue reference IDs."""
        references = []
        if kind_name in issue_json:
            for reference in issue_json[kind_name]:
                references.append(IssueRefToString(reference))
        # NOTE(review): _ParseIssueReferences is defined elsewhere in this
        # file; from its use here it returns a pair and we keep the first
        # element - confirm against its definition.
        references, _ = _ParseIssueReferences(references)
        return references

    def DesiredReferences(union_references, kind_name):
        """Returns the desired references on comment #0 for the kind."""
        current_list = []  # List of references as we simulate the comments.
        desired_list = union_references[:]  # The desired list to output.
        issue_comments = issue_json["comments"]["items"]
        for comment in issue_comments:
            if "updates" not in comment:
                continue
            updates = comment["updates"]
            if kind_name in updates:
                added, removed = _ParseIssueReferences(updates[kind_name])
                # If the reference was added in this comment, we don't need
                # to add it to comment #0 since you'll "see" the addition.
                for added_ref in added:
                    current_list.append(added_ref)
                    if added_ref in union_references and added_ref in desired_list:
                        desired_list.remove(added_ref)
                # If the reference was removed in this comment AND it wasn't
                # previously added by a comment, then we should add it to the
                # output list. (We infer the issue was created with it.)
                for removed_ref in removed:
                    if removed_ref not in union_references and (
                            removed_ref not in current_list):
                        desired_list.append(removed_ref)
        return desired_list

    def AddToComment0(issue_references, kind_name):
        # Inject the inferred references into comment #0's "updates" block.
        if not issue_references:
            return
        comment_0_data = issue_json["comments"]["items"][0]
        if "updates" not in comment_0_data:
            comment_0_data["updates"] = {}
        comment_0_updates = comment_0_data["updates"]
        if kind_name not in comment_0_updates:
            comment_0_updates[kind_name] = []
        # The project part of the reference is unknown here; "???" is a
        # placeholder prefix.
        comment_0_updates[kind_name].extend(
            ["???:" + iid for iid in issue_references])

    starting_blocking = GetUnionReferences("blocking")
    desired_blocking = DesiredReferences(starting_blocking, "blocking")
    AddToComment0(desired_blocking, "blocking")
    starting_blockedon = GetUnionReferences("blockedOn")
    desired_blockedon = DesiredReferences(starting_blockedon, "blockedOn")
    AddToComment0(desired_blockedon, "blockedOn")
    return issue_json
def Start(self, rewrite_comments=False):
    """Start the issue export process.

    Args:
      rewrite_comments: Bool. If set will rewrite the comments for previously
        exported issues. Used to fix export problems and remap issue IDs.
    """
    print "Starting issue export for '%s'" % (self._project_name)
    self._issue_total = len(self._issue_json_data)
    self._comment_total = 0
    self._issue_number = 0
    self._comment_number = 0
    self._skipped_issues = 0
    last_issue_skipped = False  # Only used for formatting output.
    for issue in self._issue_json_data:
        # Normalize blocking/blocked-on data before conversion.
        self._FixBlockingBlockedOn(issue)
        googlecode_issue = GoogleCodeIssue(
            issue, self._project_name, self._user_map)
        issue_title = googlecode_issue.GetTitle()
        # NOTE(review): short_issue_title is currently unused.
        short_issue_title = (
            issue_title[:16] + '...') if len(issue_title) > 18 else issue_title
        self._issue_number += 1
        # Check if the issue has already been posted.
        if self._HasIssueBeenExported(googlecode_issue):
            export_metadata = self._GetExportedIssue(googlecode_issue)
            print "%sGoogle Code issue #%s already exported with ID #%s." % (
                ("\n" if not last_issue_skipped else ""),
                export_metadata["googlecode_id"],
                export_metadata["exported_id"])
            last_issue_skipped = True
            self._skipped_issues = self._skipped_issues + 1
            # Verify all comments are present.
            issue_comments = googlecode_issue.GetComments()
            num_issue_comments = len(issue_comments)
            num_existing_comments = export_metadata["comment_count"]
            if num_issue_comments > num_existing_comments:
                # Upload only the comments missing on the destination.
                for idx in range(num_existing_comments, num_issue_comments):
                    comment_data = issue_comments[idx]
                    googlecode_comment = GoogleCodeComment(
                        googlecode_issue, comment_data)
                    self._issue_service.CreateComment(
                        export_metadata["exported_id"], googlecode_comment)
                    print " Added missing comment #%d" % (idx + 1)
            if rewrite_comments:
                self._RewriteComments(googlecode_issue, export_metadata["exported_id"])
                print ""  # Advanced past the "progress bar" line.
            continue
        # Post the issue for the first time.
        self._UpdateProgressBar()
        last_issue_skipped = False
        posted_issue_id = self._CreateIssue(googlecode_issue)
        comments = googlecode_issue.GetComments()
        self._CreateComments(comments, posted_issue_id, googlecode_issue)
        # Closed Google Code issues are closed on the destination too.
        if not googlecode_issue.IsOpen():
            self._issue_service.CloseIssue(posted_issue_id)
    print "Finished!"
| apache-2.0 |
onesafe/multivimdriver-vmware-vio | vio/vio/swagger/views/swagger_json.py | 2 | 4037 | # Copyright (c) 2017 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import json
import logging
import os
from rest_framework.response import Response
from rest_framework.views import APIView
logger = logging.getLogger(__name__)
class SwaggerJsonView(APIView):
    """Serves the aggregated swagger.json for the MultiVIM VIO driver.

    Merges the per-resource swagger fragments shipped next to this module
    into the base (flavor) document and returns the combined spec.
    """

    # Fragments whose "paths" and "definitions" are merged into the base
    # document, in the original merge order.
    _EXTRA_SWAGGER_FILES = (
        'multivim.image.swagger.json',
        'multivim.network.swagger.json',
        'multivim.subnet.swagger.json',
        'multivim.server.swagger.json',
        'multivim.volume.swagger.json',
        'multivim.vport.swagger.json',
        'multivim.tenant.swagger.json',
        'multivim.host.swagger.json',
        'multivim.limit.swagger.json',
    )

    @staticmethod
    def _load_json(file_name):
        """Load one swagger fragment located next to this module.

        Uses a context manager so the file is closed even if parsing fails
        (the original open/close pairs leaked the handle on error).
        """
        json_file = os.path.join(os.path.dirname(__file__), file_name)
        with open(json_file) as f:
            return json.load(f)

    def get(self, request):
        """Return the merged swagger document as a DRF Response."""
        # The flavor document is the base; all other fragments extend it.
        json_data = self._load_json('multivim.flavor.swagger.json')
        for file_name in self._EXTRA_SWAGGER_FILES:
            fragment = self._load_json(file_name)
            json_data["paths"].update(fragment["paths"])
            json_data["definitions"].update(fragment["definitions"])
        json_data["basePath"] = "/openoapi/multivim-vio/v1/"
        json_data["info"]["title"] = "MultiVIM driver of OpenStack VIO Service NBI"
        return Response(json_data)
| apache-2.0 |
marcoantoniooliveira/labweb | oscar/lib/python2.7/site-packages/IPython/nbconvert/transformers/base.py | 2 | 3787 | """
Module that re-groups transformer that would be applied to ipynb files
before going through the templating machinery.
It exposes a convenient class to inherit from to access configurability.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from ..utils.base import NbConvertBase
from IPython.utils.traitlets import Bool
#-----------------------------------------------------------------------------
# Classes and Functions
#-----------------------------------------------------------------------------
class Transformer(NbConvertBase):
    """ A configurable transformer

    Inherit from this class if you wish to have configurability for your
    transformer.

    Any configurable traitlets this class exposed will be configurable in
    profiles using c.SubClassName.atribute=value

    you can overwrite :meth:`transform_cell` to apply a transformation
    independently on each cell or :meth:`call` if you prefer your own
    logic. See corresponding docstring for informations.

    Disabled by default and can be enabled via the config by
    'c.YourTransformerName.enabled = True'
    """

    # Transformers are opt-in: __call__ is a no-op unless enabled via config.
    enabled = Bool(False, config=True)

    def __init__(self, **kw):
        """
        Public constructor

        Parameters
        ----------
        config : Config
            Configuration file structure
        **kw : misc
            Additional arguments
        """
        super(Transformer, self).__init__(**kw)

    def __call__(self, nb, resources):
        # Pass the notebook through unchanged when disabled.
        if self.enabled:
            return self.call(nb, resources)
        else:
            return nb, resources

    def call(self, nb, resources):
        """
        Transformation to apply on each notebook.

        You should return modified nb, resources.
        If you wish to apply your transform on each cell, you might want to
        overwrite transform_cell method instead.

        Parameters
        ----------
        nb : NotebookNode
            Notebook being converted
        resources : dictionary
            Additional resources used in the conversion process. Allows
            transformers to pass variables into the Jinja engine.
        """
        self.log.debug("Applying transform: %s", self.__class__.__name__)
        try:
            for worksheet in nb.worksheets:
                for index, cell in enumerate(worksheet.cells):
                    worksheet.cells[index], resources = self.transform_cell(
                        cell, resources, index)
            return nb, resources
        except NotImplementedError:
            raise NotImplementedError('should be implemented by subclass')

    def transform_cell(self, cell, resources, index):
        """
        Overwrite if you want to apply a transformation on each cell. You
        should return modified cell and resource dictionary.

        Parameters
        ----------
        cell : NotebookNode cell
            Notebook cell being processed
        resources : dictionary
            Additional resources used in the conversion process. Allows
            transformers to pass variables into the Jinja engine.
        index : int
            Index of the cell being processed
        """
        # Bug fix: the 'return cell, resources' that used to follow this
        # raise was unreachable dead code and has been removed.
        raise NotImplementedError('should be implemented by subclass')
| bsd-3-clause |
FAForever/faftools | parseudp/FAPacket.py | 1 | 10060 | #!/usr/bin/env python
import sys
import struct
import binascii
import string
import argparse
import zlib
from curses import ascii
# expects tshark on stdin as in:
# tshark -r game.pcap -R 'ip.addr==192.168.0.101' -T fields -d udp.port==6112,echo -e ip.src -e udp.srcport -e ip.dst -e udp.dstport -e frame.time_relative -e echo.data | python FAPacket.py -e -p
# any non-FA packets will crash the parser
# running:
# FAPacket.py [ -p ] [ -e ]
# -p print command stream packets
# -e print encapsulation packets
def hexdump_hash(data):
    """Return *data* as a plain lowercase hex string (no separators)."""
    return ''.join('{0:02x}'.format(ord(byte)) for byte in data)
def hexdump(data, indent):
    """Format *data* as a classic hex+ASCII dump.

    Each row shows up to 16 bytes as two-digit hex values padded to a fixed
    width, followed by their printable ASCII rendering ('.' for the rest).
    Rows after the first are indented by *indent* spaces.
    """
    dump = ''
    for row_start in range(0, len(data), 16):
        row = data[row_start:row_start + 16]
        if row_start:
            dump += ' ' * indent
        for ch in row:
            dump += '{0:02x} '.format(ord(ch))
        # Pad short rows so the ASCII column lines up.
        dump += '   ' * (16 - len(row))
        for ch in row:
            dump += ch if ascii.isprint(ch) else '.'
        dump += '\n'
    return dump
class FAPacket:
    """One FA command-stream packet: a type byte, a 16-bit little-endian
    length, then the payload."""

    # Packet types decode() can render. Shared class-level constant instead
    # of rebuilding a per-instance dict for every packet (attribute name
    # kept for backward compatibility; membership is unchanged).
    decodable = frozenset([0x00, 0x01, 0x03, 0x32, 0x33, 0x34])

    @staticmethod
    def _byte(value):
        # Generalization: accept both py2 str (items are 1-char strings)
        # and py3 bytes (items are already ints).
        return value if isinstance(value, int) else ord(value)

    def __init__(self, data):
        self.type = self._byte(data[0])
        self.len = self._byte(data[1]) | self._byte(data[2]) << 8
        self.data = data[3:]

    def is_advance(self):
        return self.type == 0

    def is_ack(self):
        return self.type == 0x32

    def is_set_cmdsrc(self):
        return self.type == 1

    def cmdsrc(self):
        return self._byte(self.data[0])

    def ack_cmdsource(self):
        return self._byte(self.data[0])

    def pp_data(self, indent):
        return hexdump(self.data, indent)

    def can_decode(self):
        return self.type in self.decodable

    def simtick(self):
        # ACK packets carry a cmdsource byte before the tick.
        if self.type == 0x32:
            return struct.unpack("<bL", self.data)[1]
        if self.type == 0x33 or self.type == 0x34 or self.type == 0:
            return struct.unpack("<L", self.data)[0]

    def decode(self):
        """Render a known packet as a short human-readable string."""
        if self.type == 0:
            return "ADV {0}".format(struct.unpack("<L", self.data)[0])
        elif self.type == 0x32:
            return "ACK {0} {1}".format(self.ack_cmdsource(), self.simtick())
        elif self.type == 0x33:
            return "SIM {0}".format(struct.unpack("<L", self.data)[0])
        elif self.type == 0x34:
            return "FIN {0}".format(struct.unpack("<L", self.data)[0])
        elif self.type == 1:
            return "CMDSOURCE {0}".format(self._byte(self.data[0]))
        elif self.type == 3:
            (h, s) = struct.unpack("<16sL", self.data)
            return "HASH {0} {1}".format(s, hexdump_hash(h))
        else:
            return "(error)"
class FAEncap(object):
    """One FA UDP encapsulation frame plus its addressing metadata.

    Frame layouts seen on the wire:
      0x08 / 0x00 -> one marker byte followed by raw payload
      0xff        -> one marker byte, no payload
      otherwise   -> <b type><L mask><H seq><H ack><H seq2><H ack2><H len>
                     header; type 4 carries a command-packet stream.
    """

    @staticmethod
    def _byte(value):
        # Generalization: works for py2 str items (1-char strings) and
        # py3 bytes items (ints).
        return value if isinstance(value, int) else ord(value)

    def __init__(self, src, srcport, dst, dstport, time, packet):
        self.offset = 0  # Consumption cursor for next_packet().
        self.src = src
        self.dst = dst
        self.srcport = srcport
        self.dstport = dstport
        self.time = time
        marker = self._byte(packet[0])
        if marker == 8:
            self.type = 8
            self.data = packet[1:]
            self.len = len(packet) - 1
        elif marker == 0:
            self.type = 0
            self.data = packet[1:]
            self.len = len(packet) - 1
        elif marker == 255:
            self.type = 255
            self.data = ''
            self.len = len(packet) - 1
        else:
            (self.type, self.mask, self.seq, self.ack, self.seq2, self.ack2,
             self.len) = struct.unpack("<bLHHHHH", packet[0:15])
            self.data = packet[15:]

    def src_full(self):
        # Bug fix: previously read the module-level globals 'src'/'srcport'
        # (which only happened to exist inside the __main__ loop) instead of
        # the instance attributes.
        return self.src + ":" + self.srcport

    def dst_full(self):
        # Bug fix: same as src_full - use instance attributes, not globals.
        return self.dst + ":" + self.dstport

    def connection(self):
        return self.src_full() + "->" + self.dst_full()

    def pp_data(self, indent):
        # Type 8 payloads are printable text; everything else is hex-dumped.
        if self.type == 8:
            return self.data
        return hexdump(self.data, indent)

    def packets(self):
        """Return all command packets contained in this frame."""
        decoded = []
        while True:
            packet = self.next_packet()
            if packet is None:
                return decoded
            decoded.append(packet)

    def next_packet(self):
        """Pop the next FAPacket from the stream, or None when exhausted."""
        if self.type != 4:
            return None
        if self.offset + 3 > len(self.data):
            return None
        len_ = (self._byte(self.data[self.offset + 1]) |
                self._byte(self.data[self.offset + 2]) << 8)
        # A packet that would run past the buffer is left for the next frame.
        if self.offset + len_ > len(self.data):
            return None
        offset = self.offset
        self.offset += len_
        if offset == self.offset:
            # Zero-length packet: would loop forever, so flag it loudly.
            sys.stdout.write("waarg {0} {1} {2}".format(
                offset, self.offset, binascii.hexlify(self.data)))
        return FAPacket(self.data[offset:self.offset])

    def prepend_remaining(self, r):
        # Stitch the unconsumed tail of the previous frame onto this one.
        self.data = str(r) + str(self.data)

    def remaining(self):
        if self.type == 4:
            return self.data[self.offset:]
        return ''
class FAPeerState(object):
    """Tracks per-command-source simulation progress across peers."""

    def __init__(self):
        self.addr_to_cmdsrc = {}   # "host:port" -> command source index.
        self.cmdsrc_to_addr = []   # Command source index -> "host:port".
        self.simtick = []          # Accumulated ADV ticks per source.
        self.ack_simtick = []      # Per source: {acked source: last tick}.

    def process_egress(self, addr, packet):
        """Account for a packet sent by *addr*."""
        if packet.is_set_cmdsrc():
            self.cmdsource = packet.cmdsrc()
        if packet.is_advance():
            self.simtick[self.addr_to_cmdsrc[addr]] += packet.simtick()
        elif packet.is_ack():
            s1 = self.addr_to_cmdsrc[addr]
            s2 = packet.ack_cmdsource()
            self.ack_simtick[s1][s2] = packet.simtick()

    def process_ingress(self, addr, packet):
        """Account for a packet received from *addr*."""
        if packet.is_set_cmdsrc():
            s = packet.cmdsrc()
            self.addr_to_cmdsrc[addr] = s
            while len(self.cmdsrc_to_addr) <= s:
                self.cmdsrc_to_addr.append('')
                self.simtick.append(0)
                # Bug fix: this used to append the int 0, which made the 2-D
                # assignment ack_simtick[s1][s2] in process_egress raise
                # TypeError. A dict keyed by the acked cmdsource works.
                self.ack_simtick.append({})
            self.cmdsrc_to_addr[s] = addr
# --- Command-line interface -------------------------------------------------
# -e  print encapsulation packets; -p  print command stream packets;
# -t  print tick summary lines.
argp = argparse.ArgumentParser(prog = "PROG")
argp.add_argument("-e", action="store_true")
argp.add_argument("-t", action="store_true")
argp.add_argument("-p", action="store_true")
args = argp.parse_args()

# Per-connection state, keyed by "src:port->dst:port" strings.
remain = { }            # Unconsumed trailing bytes of each command stream.
inflate = { }           # zlib decompressor for connections using deflate.
inflate_remain = { }    # Unconsumed compressed tail per connection.
cmdpackets_seen = { }   # Set once the first command packet was processed.
future = { }            # Out-of-order frames buffered by sequence number.

# Global tick bookkeeping across all connections.
c32 = [ 0, 0, 0 ]       # Last ACKed simtick per command source.
c33 = 0                 # Last SIM tick seen.
c34 = 0                 # Last FIN tick seen.
tick = 0                # Running sum of ADV increments.
seq_seen = { }          # Per-connection set of already-processed seq numbers.

# Expects tshark "fields" output on stdin (see usage comment at the top of
# the file); any non-FA packet will crash the parser.
for line in sys.stdin:
    (src, srcport, dst, dstport, time, data) = line.split();
    #print "*{0}*{1}*{2}*{3}*{4}*{5}".format(src, srcport, dst, dstport, time, data);
    e = FAEncap(src, srcport, dst, dstport, time, binascii.unhexlify(data.translate(None, ':')))
    if not e.connection() in seq_seen:
        seq_seen[e.connection()] = {}
    if not e.connection() in remain:
        remain[e.connection()] = ''
    if not e.connection() in future:
        future[e.connection()] = { }
    s = '{0} {1} type={2} len={3: 4d}'.format(e.time, e.connection(), e.type, e.len)
    if e.type != 4:
        # Non-command frames: just dump them.
        print(s)
        if e.len:
            print(' ' * 7, hexdump(e.data, 8))
    elif e.type == 4:
        # Drop frames whose sequence number was already handled.
        if e.seq2 in seq_seen[e.connection()]:
            continue
        # Buffer out-of-order frames until the gap before them is filled.
        if len(seq_seen[e.connection()]) and not e.seq2 - 1 in seq_seen[e.connection()]:
            print("!! packet received out of sequence !! {0} cseq={1}".format(e.connection(), e.seq2))
            future[e.connection()][e.seq2] = e
            continue
        future[e.connection()][e.seq2] = e
        seq_ix = e.seq2
        # Replay this frame plus any buffered successors, in order.
        while seq_ix in future[e.connection()]:
            e = future[e.connection()][seq_ix]
            seq_seen[e.connection()][e.seq2] = 1
            seq_ix += 1
            s = '{0} {1} type={2} len={3: 4d}'.format(e.time, e.connection(), e.type, e.len)
            s += ' cseq={0} cack={1} mask={2} eseq={3} eack={4}'.format(e.seq2, e.ack2, e.mask, e.seq, e.ack)
            if args.e:
                print(s)
                if not e.connection() in inflate:
                    print(' ' * 7, e.pp_data(8))
            if args.p:
                if not e.connection() in cmdpackets_seen:
                    # This exact preamble marks a deflated command stream.
                    if e.data == "\x02\x00\x00\x00\xff\xff":
                        print(" !!deflate detected!! on " + e.connection())
                        inflate[e.connection()] = zlib.decompressobj()
                if e.connection() in inflate:
                    if not e.connection() in cmdpackets_seen:
                        # Re-attach a standard zlib header for the first chunk.
                        data = "\x78\x9c"
                        data += e.data
                        cmdpackets_seen[e.connection()] = 1
                        inflate_remain[e.connection()] = ''
                    else:
                        data = inflate_remain[e.connection()] + e.data
                    inflated = inflate[e.connection()].decompress(data)
                    print(' ' * 7, hexdump(inflated, 8))
                    e.data = inflated
                    inflate_remain[e.connection()] = inflate[e.connection()].unconsumed_tail
                e.prepend_remaining(remain[e.connection()])
                #print e.pp_data(16);
                for p in e.packets():
                    if p.type == 0x32:
                        c32[p.ack_cmdsource()] = p.simtick()
                    elif p.type == 0x33:
                        c33 = p.simtick()
                    elif p.type == 0x34:
                        c34 = p.simtick()
                    elif p.type == 0:
                        tick += p.simtick()
                    if p.can_decode():
                        print(' ', p.decode())
                    else:
                        s=' {0:02x} {1: 4d} '.format(p.type, p.len - 3)
                        print(s, p.pp_data(len(s) + 1))
                # NOTE(review): the second assignment makes the first
                # redundant.
                foo = ""
                foo = ''
                # '<'/'>' marks whether SIM lags or leads FIN.
                if c33 < c34:
                    foo += '<'
                elif c33 > c34:
                    foo += '>'
                else:
                    foo += ' '
                if args.t:
                    print("TICK", ''.join([ str(c32[i]) + ' ' for i in range(0, len(c32)) ]), c33, c34, tick, foo)
                remain[e.connection()] = e.remaining()
| gpl-3.0 |
mpercich/Calendarize | ios/dateparser/lib/python2.7/site-packages/ruamel/yaml/loader.py | 1 | 2063 | # coding: utf-8
from __future__ import absolute_import
from ruamel.yaml.reader import Reader
from ruamel.yaml.scanner import Scanner, RoundTripScanner
from ruamel.yaml.parser import Parser, RoundTripParser
from ruamel.yaml.composer import Composer
from ruamel.yaml.constructor import BaseConstructor, SafeConstructor, Constructor, \
RoundTripConstructor
from ruamel.yaml.resolver import VersionedResolver
__all__ = ['BaseLoader', 'SafeLoader', 'Loader', 'RoundTripLoader']
class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, VersionedResolver):
    """Loader combining the base constructor with the versioned resolver.

    preserve_quotes is accepted for signature compatibility with
    RoundTripLoader but only honoured by round-trip construction.
    """

    def __init__(self, stream, version=None, preserve_quotes=None):
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        BaseConstructor.__init__(self)
        # Consistency fix: pass the requested YAML version through to the
        # resolver, as RoundTripLoader already does; 'version' was
        # previously accepted but silently ignored.
        VersionedResolver.__init__(self, version)
class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, VersionedResolver):
    """Loader restricted to safe construction (no arbitrary Python objects).

    preserve_quotes is accepted for signature compatibility with
    RoundTripLoader but only honoured by round-trip construction.
    """

    def __init__(self, stream, version=None, preserve_quotes=None):
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        SafeConstructor.__init__(self)
        # Consistency fix: pass the requested YAML version through to the
        # resolver, as RoundTripLoader already does; 'version' was
        # previously accepted but silently ignored.
        VersionedResolver.__init__(self, version)
class Loader(Reader, Scanner, Parser, Composer, Constructor, VersionedResolver):
    """Full-power loader able to construct arbitrary Python objects.

    preserve_quotes is accepted for signature compatibility with
    RoundTripLoader but only honoured by round-trip construction.
    """

    def __init__(self, stream, version=None, preserve_quotes=None):
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        Constructor.__init__(self)
        # Consistency fix: pass the requested YAML version through to the
        # resolver, as RoundTripLoader already does; 'version' was
        # previously accepted but silently ignored.
        VersionedResolver.__init__(self, version)
class RoundTripLoader(Reader, RoundTripScanner, RoundTripParser, Composer,
                      RoundTripConstructor, VersionedResolver):
    # Loader that preserves comments, quoting style and key order so a
    # document can be dumped back out with a minimal diff.
    def __init__(self, stream, version=None, preserve_quotes=None):
        Reader.__init__(self, stream)
        RoundTripScanner.__init__(self)
        RoundTripParser.__init__(self)
        Composer.__init__(self)
        # preserve_quotes is only honoured by the round-trip constructor.
        RoundTripConstructor.__init__(self, preserve_quotes=preserve_quotes)
        # The requested YAML version is forwarded to the resolver.
        VersionedResolver.__init__(self, version)
| mit |
darktears/chromium-crosswalk | build/android/devil/android/tools/screenshot.py | 10 | 1852 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Takes a screenshot from an Android device."""
import argparse
import logging
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, os.pardir)))
from devil.android import device_utils
from devil.android.tools import script_common
def main():
    """Capture a screenshot from one or more attached Android devices."""
    # Parse options.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-d', '--device', dest='devices', action='append',
                        help='Serial number of Android device to use.')
    parser.add_argument('--blacklist-file', help='Device blacklist JSON file.')
    parser.add_argument('-f', '--file', metavar='FILE',
                        help='Save result to file instead of generating a '
                        'timestamped file name.')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Verbose logging.')
    parser.add_argument('host_file', nargs='?',
                        help='File to which the screenshot will be saved.')
    args = parser.parse_args()
    # The positional argument wins over -f when both are supplied.
    host_file = args.host_file or args.file
    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)
    devices = script_common.GetDevices(args.devices, args.blacklist_file)

    def screenshot(device):
        # One capture per device; the serial is appended to the file name so
        # parallel captures do not collide.
        f = None
        if host_file:
            root, ext = os.path.splitext(host_file)
            f = '%s_%s%s' % (root, str(device), ext)
        f = device.TakeScreenshot(f)
        print 'Screenshot for device %s written to %s' % (
            str(device), os.path.abspath(f))

    # Capture from all selected devices in parallel.
    device_utils.DeviceUtils.parallel(devices).pMap(screenshot)
    return 0


if __name__ == '__main__':
    sys.exit(main())
| bsd-3-clause |
isotoma/django-cms | cms/tests/navextender.py | 2 | 3596 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from cms.models import Page
from cms.test_utils.fixtures.navextenders import NavextendersFixture
from cms.test_utils.testcases import SettingsOverrideTestCase
from cms.test_utils.util.menu_extender import TestMenu
from django.conf import settings
from django.template import Template
from menus.menu_pool import menu_pool
class NavExtenderTestCase(NavextendersFixture, SettingsOverrideTestCase):
    """Tests for CMS menu navigation extenders.

    Page tree from fixture:

        page1
            page2
                page3
        page4
            page5
    """

    def setUp(self):
        if not menu_pool.discovered:
            menu_pool.discover_menus()
        # Keep the original registry so tearDown can restore it; menu_pool
        # is module-global shared state.
        self.old_menu = menu_pool.menus
        # NOTE: if we're going to directly manipulate this menu pool, we should
        # at least be marking it as not _expanded.
        menu_pool.menus = {
            'CMSMenu': self.old_menu['CMSMenu'],
            'TestMenu': TestMenu()
        }
        menu_pool._expanded = False

    def tearDown(self):
        # Restore the globally shared menu registry for other test cases.
        menu_pool.menus = self.old_menu

    def _get_page(self, num):
        """Fetch the fixture page titled 'page<num>'."""
        return Page.objects.get(title_set__title='page%s' % num)

    def _update_page(self, num, **stuff):
        """Apply a queryset update to the fixture page titled 'page<num>'."""
        Page.objects.filter(title_set__title='page%s' % num).update(**stuff)

    def test_menu_registration(self):
        self.assertEqual(len(menu_pool.menus), 2)
        self.assertEqual(len(menu_pool.modifiers) >= 4, True)

    def test_extenders_on_root(self):
        self._update_page(1, navigation_extenders="TestMenu")
        menu_pool.clear(settings.SITE_ID)
        context = self.get_context()
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 2)
        self.assertEqual(len(nodes[0].children), 4)
        self.assertEqual(len(nodes[0].children[3].children), 1)
        # Hiding page1 from navigation promotes its children plus the
        # extender nodes to the top level.
        self._update_page(1, in_navigation=False)
        menu_pool.clear(settings.SITE_ID)
        tpl = Template("{% load menu_tags %}{% show_menu %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 5)

    def test_extenders_on_root_child(self):
        self._update_page(4, navigation_extenders="TestMenu")
        menu_pool.clear(settings.SITE_ID)
        context = self.get_context()
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 2)
        self.assertEqual(len(nodes[1].children), 4)

    def test_extenders_on_child(self):
        """
        TestMenu has 4 flat nodes
        """
        self._update_page(1, in_navigation=False)
        self._update_page(2, navigation_extenders="TestMenu")
        # Fix: the cache clear was accidentally duplicated here; clearing
        # once is sufficient.
        menu_pool.clear(settings.SITE_ID)
        context = self.get_context()
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 2)
        self.assertEqual(len(nodes[0].children), 4)
        self.assertEqual(nodes[0].children[1].get_absolute_url(), "/")

    def test_incorrect_nav_extender_in_db(self):
        # An unknown extender name must not break menu rendering.
        self._update_page(2, navigation_extenders="SomethingWrong")
        menu_pool.clear(settings.SITE_ID)
        context = self.get_context()
        tpl = Template("{% load menu_tags %}{% show_menu %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 2)
| bsd-3-clause |
metabrainz/listenbrainz-server | messybrainz/webserver/views/api.py | 3 | 1712 | import ujson
from flask import Blueprint, request, Response
from messybrainz.webserver.decorators import crossdomain, ip_filter
from werkzeug.exceptions import BadRequest, NotFound
import messybrainz
import messybrainz.db.exceptions
import ujson
# Blueprint holding all public MessyBrainz API endpoints.
api_bp = Blueprint('api', __name__)
def ujsonify(*args, **kwargs):
    """Build a JSON Response the way flask.jsonify does, but serialise with
    ujson instead of the stdlib json module (so no indent/separator options
    are supported).
    """
    payload = dict(*args, **kwargs)
    body = (ujson.dumps(payload), '\n')
    return Response(body, mimetype='application/json')
@api_bp.route("/submit", methods=["POST"])
@crossdomain()
@ip_filter
def submit():
    """Accept a JSON list of listens, submit it to MessyBrainz and return
    the resulting mapping as a JSON response.

    Raises:
        BadRequest: if the body is not valid UTF-8 JSON, is not a list, or
            is rejected by MessyBrainz as bad data.
    """
    raw_data = request.get_data()
    try:
        data = ujson.loads(raw_data.decode("utf-8"))
    except ValueError as e:
        raise BadRequest("Cannot parse JSON document: %s" % e)
    if not isinstance(data, list):
        raise BadRequest("submitted data must be a list")
    try:
        result = messybrainz.submit_listens_and_sing_me_a_sweet_song(data)
        return ujsonify(result)
    except messybrainz.exceptions.BadDataException as e:
        raise BadRequest(e)
@api_bp.route("/<uuid:messybrainz_id>")
@crossdomain()
def get(messybrainz_id):
    """Return the recording stored for *messybrainz_id* as JSON.

    Raises:
        NotFound: if no recording exists for the given MessyBrainz ID.
    """
    try:
        data = messybrainz.load_recording(messybrainz_id)
    except messybrainz.exceptions.NoDataFoundException:
        raise NotFound
    return Response(ujson.dumps(data), mimetype='application/json')
@api_bp.route("/<uuid:messybrainz_id>/aka")
@crossdomain()
def get_aka(messybrainz_id):
    """Returns all other MessyBrainz recordings that are known to be equivalent
    (as specified in the clusters table).
    """
    # Endpoint is reserved in the URL space but not implemented yet.
    raise NotImplementedError
| gpl-2.0 |
benghaem/py-senmolib | senmolib/pipeline/processor.py | 2 | 5495 | import subprocess
import zmq as zmq
import time
class Processor(object):
    """Processor launches real processes and configures data flow between
    processing and fusion components.

    Processor is useful when one would like to use a mix of senmo components
    that may be implemented across languages. If one is using components that
    are only written in Python, PureProcessor may be preferable as all
    components may be defined within the same file.

    Args:
        port_list: A length 4 list of port values, e.g. [5000,5001,5002,5003].
            The first value is the input port, the second the worker port,
            the third the fusion port, and the fourth the output port.
        process_count: An int. The number of worker processes to start.
        vent_path: A string. An absolute path to the vent component.
        worker_path: A string. An absolute path to the worker component.
        fusion_path: A string. An absolute path to the fusion component.
        identifier: A string to identify the processor.
    """
    def __init__(self, port_list, process_count, vent_path, worker_path, fusion_path, identifier):
        super(Processor, self).__init__()
        # State
        self.running = False
        # Process settings.
        # NOTE(review): this attribute shadows the process_count() method
        # defined below, which makes that method unreachable on instances.
        # Kept as-is because external callers may read the attribute.
        self.process_count = int(process_count)
        self.identifier = identifier
        self.process_path = worker_path
        self.fusion_path = fusion_path
        self.vent_path = vent_path
        self.port_list = [str(val) for val in port_list]
        # Controller listing
        self.process_controlers = []
        self.fusion_controller = None
        self.vent_controller = None
        # Monotonic counter so each worker gets a unique identifier suffix.
        self.iid_offset = 0

    def start(self):
        """Start processes.

        Starts the defined number of worker processes as well as the fusion
        and vent components.
        """
        # start workers
        self.add_worker_process(self.process_count)
        # Fusion launch
        self.fusion_controller = Process(self.identifier + "-fusion", self.fusion_path, self.port_list)
        # Vent launch
        self.vent_controller = Process(self.identifier + "-vent", self.vent_path, self.port_list)
        self.running = True

    def add_worker_process(self, num):
        """Start an additional set of worker processes.

        Args:
            num: An int. The number of additional worker processes to start.
        """
        for i in range(num):
            new_process = Process(self.identifier + "-p-" + str(i + self.iid_offset), self.process_path, self.port_list)
            self.process_controlers.append(new_process)
        self.iid_offset += num

    def stop(self):
        """Stop all worker processes as well as the fusion and vent components."""
        for pc in self.process_controlers:
            pc.stop()
        self.fusion_controller.stop()
        self.vent_controller.stop()
        # Reset controllers
        self._reset_controllers()

    def reload_processes(self, kill=False, rolling=False):
        """Reload all worker processes.

        By default this stops and then restarts every worker. Enabling
        ``rolling`` keeps the pool serving while workers are reloaded one at
        a time.

        Args:
            kill: A boolean. If True, kill processes instead of stopping them.
            rolling: A boolean. If True, reload processes sequentially.
        """
        if rolling:
            for pc in self.process_controlers:
                pc.restart(kill=kill)
        else:
            self.running = False
            for pc in self.process_controlers:
                if kill:
                    pc.kill()
                else:
                    pc.stop()
            for pc in self.process_controlers:
                pc.start()
            self.running = True

    def reload_fusion(self, kill=False, hard_reload=True):
        """Reload the fusion process.

        Setting hard_reload to False keeps the processor marked as running
        while the process is reloaded.
        """
        if hard_reload:
            self.running = False
        if kill:
            self.fusion_controller.kill()
        else:
            self.fusion_controller.stop()
        # Bug fix: the fusion process was stopped but never relaunched, so a
        # "reload" left the pipeline without a fusion stage.
        self.fusion_controller.start()
        self.running = True

    def kill(self):
        """Kill all worker processes as well as fusion and vent components."""
        for pc in self.process_controlers:
            pc.kill()
        self.fusion_controller.kill()
        self._reset_controllers()

    def restart(self):
        """Stop and then restart all worker processes and fusion/vent components."""
        self.stop()
        self.start()

    def _reset_controllers(self, vent=True, fusion=True, process=True, running=True):
        """Drop the selected controller references and clear the running flag."""
        if fusion:
            self.fusion_controller = None
        if vent:
            self.vent_controller = None
        if process:
            # Bug fix: this previously assigned to a misspelled attribute
            # (``process_controllers``), so the worker list used by every
            # other method was never actually cleared on stop()/kill().
            self.process_controlers = []
        if running:
            self.running = False

    def list_pids(self):
        """Return a list of process pids.

        Returns:
            list: All pids, with the fusion component first, the vent
            component second, and the worker processes following.
        """
        return [self.fusion_controller.get_pid()] + [self.vent_controller.get_pid()] + [pc.get_pid() for pc in self.process_controlers]

    # NOTE(review): shadowed by the instance attribute of the same name set
    # in __init__, so this method is effectively dead code on instances.
    def process_count(self):
        """Return the number of active worker processes.

        Returns:
            int: number of active worker processes
        """
        return len(self.process_controlers)
class Process(object):
    """Thin wrapper around subprocess.Popen that remembers its launch
    arguments so the underlying OS process can be stopped and relaunched."""

    def __init__(self, identifier, path, port_list, start=True):
        super(Process, self).__init__()
        self.path = path
        self.port_list = port_list
        self.identifier = identifier
        # Command line: executable, then the ports, then the identifier.
        self.complete_args = [path] + port_list + [identifier]
        if start:
            self.start()

    def start(self):
        """Launch the process, echoing the full command line."""
        print(self.complete_args)
        self.process_instance = subprocess.Popen(self.complete_args)

    def stop(self):
        """Request graceful termination (SIGTERM)."""
        self.process_instance.terminate()

    def kill(self):
        """Force termination (SIGKILL)."""
        self.process_instance.kill()

    def get_pid(self):
        """Return the OS pid of the wrapped process."""
        return self.process_instance.pid

    def restart(self, kill=False):
        """Tear the process down (kill or stop) and launch it again."""
        teardown = self.kill if kill else self.stop
        teardown()
        self.start()
uraxy/qiidly | qiidly/command_line.py | 1 | 2094 | # -*- coding: utf-8 -*-
"""dummy docstring."""
import argparse
from qiidly import __version__, __description__
from qiidly.main import Qiidly
def _arg_parser():
    """Build the command line parser for the qiidly entry point."""
    parser = argparse.ArgumentParser(prog='qiidly', description=__description__)
    parser.add_argument(
        '-V', '--version',
        action='version',
        version='%(prog)s version {}'.format(__version__))
    parser.add_argument(
        '-q', '--qiita-token',
        required=True,
        help='Qiita access token')
    parser.add_argument(
        '-f', '--feedly-token',
        required=True,
        help='Feedly developer access token')
    return parser
# http://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input
def _query_yes_no(question, default=None):
valid = {'yes': True, 'y': True,
'no': False, 'n': False}
if default is None:
prompt = ' [y/n] '
elif default == "yes":
prompt = ' [Y/n] '
elif default == 'no':
prompt = ' [y/N] '
else:
raise ValueError("Invalid default answer: '{default}'".format(default=default))
while True:
print(question + prompt, end='')
choice = input().lower()
if choice == '' and default is not None:
return valid[default]
elif choice in valid:
return valid[choice]
else:
print("Please respond with 'y' or 'n'.")
def main():
    """Sync Qiita tags and followees to Feedly, confirming interactively."""
    args = _arg_parser().parse_args()
    # One pass per sync target; each pass shows its todo list first.
    for target in ['tags', 'followees']:
        q = Qiidly(args.qiita_token, args.feedly_token, target=target)
        have_to_sync = q.have_to_sync()
        q.print_todo()
        if not have_to_sync:
            print('Already up-to-date.')
            print()
            continue
        # sync to Feedly, but only after explicit user confirmation
        print('')
        if not _query_yes_no('Sync to Feedly?', default=None):
            print('Did nothing.')
            continue
        q.sync()
        print('Done!')


if __name__ == '__main__':
    main()
| mit |
oneklc/dimod | dimod/views/bqm.py | 1 | 6577 | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
try:
import collections.abc as abc
except ImportError:
import collections as abc
class BQMView(object):
    """Common base for dict-like views over a binary quadratic model.

    Only a reference to the model's adjacency structure is stored; every
    concrete view derives its behaviour from that shared dict-of-dicts.
    """
    __slots__ = ('_adj',)

    def __init__(self, bqm):
        self._adj = bqm._adj

    # __slots__ classes have no __dict__, so spell the state out explicitly
    # to support pickling under python2.
    def __getstate__(self):
        return {'_adj': self._adj}

    def __setstate__(self, state):
        self._adj = state['_adj']
class LinearView(BQMView, abc.MutableMapping):
    """Acts as a dictionary `{v: bias, ...}` for the linear biases.

    The linear biases are stored in a dict-of-dicts format, where 'self loops'
    store the linear biases.

    So `{v: bias}` is stored `._adj = {v: {v: Bias(bias)}}`.

    If v is not in ._adj[v] then the bias is treated as 0.
    """

    def __delitem__(self, v):
        """Remove variable v; fails while v still has interactions."""
        if v not in self:
            # fix: include the missing key in the error, matching dict's
            # behaviour, instead of raising a bare KeyError
            raise KeyError(v)
        adj = self._adj
        # the self-loop (linear bias) does not count as an interaction
        if len(adj[v]) - (v in adj[v]) > 0:
            raise ValueError("there are interactions associated with {} that must be deleted first".format(v))
        del adj[v]

    def __getitem__(self, v):
        # developer note: we could try to match the type with other biases in
        # the bqm, but I think it is better to just use python int 0 as it
        # is most likely to be compatible with other numeric types.
        return self._adj[v].get(v, 0)

    def __iter__(self):
        return iter(self._adj)

    def __len__(self):
        return len(self._adj)

    def __setitem__(self, v, bias):
        adj = self._adj
        if v in adj:
            adj[v][v] = bias
        else:
            adj[v] = {v: bias}

    def __str__(self):
        return str(dict(self))

    def items(self):
        # use the optimized items view rather than the abc default
        return LinearItemsView(self)
class LinearItemsView(abc.ItemsView):
    """Faster items iteration for LinearView."""
    __slots__ = ()

    def __iter__(self):
        # _mapping is the LinearView this items-view was created from; walk
        # its underlying dict-of-dicts directly instead of doing a keyed
        # __getitem__ lookup per variable.
        for v, neighbours in self._mapping._adj.items():
            # see note in LinearView.__getitem__
            yield v, neighbours.get(v, 0)
class QuadraticView(BQMView, abc.MutableMapping):
    """Acts as a dictionary `{(u, v): bias, ...}` for the quadratic biases.

    The quadratic biases are stored in a dict-of-dicts format. So `{(u, v): bias}` is stored as
    `._adj = {u: {v: Bias(bias)}, v: {u: Bias(bias)}}`.
    """

    def __delitem__(self, interaction):
        """Remove the interaction from both directions of the adjacency."""
        u, v = interaction
        if u == v:
            # self-loops store linear biases, never interactions
            raise KeyError('{} is not an interaction'.format(interaction))
        adj = self._adj
        del adj[v][u]
        del adj[u][v]

    def __getitem__(self, interaction):
        u, v = interaction
        if u == v:
            raise KeyError('{} cannot have an interaction with itself'.format(u))
        return self._adj[u][v]

    def __iter__(self):
        # yield each interaction exactly once, regardless of direction
        seen = set()
        adj = self._adj
        for u, neigh in adj.items():
            for v in neigh:
                if u == v:
                    # not adjacent to itself
                    continue
                if v not in seen:
                    yield (u, v)
            seen.add(u)

    def __len__(self):
        # remove the self-loops; each interaction is stored twice, hence // 2
        return sum(len(neighbours) - (v in neighbours)
                   for v, neighbours in self._adj.items()) // 2

    def __setitem__(self, interaction, bias):
        u, v = interaction
        if u == v:
            raise KeyError('{} cannot have an interaction with itself'.format(u))
        adj = self._adj
        # we don't know what type we want the biases, so we require that the variables already
        # exist before we can add an interaction between them
        if u not in adj:
            raise KeyError('{} is not already a variable in the binary quadratic model'.format(u))
        if v not in adj:
            raise KeyError('{} is not already a variable in the binary quadratic model'.format(v))
        adj[u][v] = adj[v][u] = bias

    def __str__(self):
        return str(dict(self))

    def items(self):
        # use the optimized items view rather than the abc default
        return QuadraticItemsView(self)
class QuadraticItemsView(abc.ItemsView):
    """Faster items iteration"""
    __slots__ = ()

    def __iter__(self):
        # read biases straight from the adjacency dict rather than doing a
        # keyed __getitem__ per interaction
        adj = self._mapping._adj
        for u, v in self._mapping:
            yield (u, v), adj[u][v]
class NeighbourView(abc.Mapping):
    """Acts as a dictionary `{u: bias, ...}` for the neighbours of a variable `v`.

    See Also:
        :class:`AdjacencyView`
    """
    __slots__ = '_adj', '_var'

    def __init__(self, adj, v):
        # adj is the shared dict-of-dicts; _var is the variable whose
        # neighbourhood this view exposes
        self._adj = adj
        self._var = v

    def __getitem__(self, v):
        u = self._var
        if u == v:
            # the self-loop stores the linear bias, not an interaction
            raise KeyError('{} cannot have an interaction with itself'.format(u))
        return self._adj[u][v]

    # NOTE(review): defined even though abc.Mapping is a read-only ABC, so
    # that `adjacency[v][u] = bias` can update both directions in place.
    def __setitem__(self, u, bias):
        v = self._var
        if u == v:
            raise KeyError('{} cannot have an interaction with itself'.format(u))
        adj = self._adj
        if u not in adj:
            # only existing interactions may be assigned through this view
            raise KeyError('{} is not an interaction'.format((u, v)))
        adj[v][u] = adj[u][v] = bias

    def __iter__(self):
        # iterate the neighbours, skipping the self-loop (linear bias)
        v = self._var
        for u in self._adj[v]:
            if u != v:
                yield u

    def __len__(self):
        v = self._var
        neighbours = self._adj[v]
        return len(neighbours) - (v in neighbours)  # ignore self

    def __str__(self):
        return str(dict(self))
class AdjacencyView(BQMView, abc.Mapping):
    """Acts as a dict-of-dicts `{u: {v: bias}, v: {u: bias}}` for the quadratic biases.

    The quadratic biases are stored in a dict-of-dicts format. So `{u: {v: bias}, v: {u: bias}}`
    is stored as `._adj = {u: {v: Bias(bias)}, v: {u: Bias(bias)}}`.
    """

    def __getitem__(self, v):
        if v not in self._adj:
            raise KeyError('{} is not a variable'.format(v))
        # return a live view over v's neighbourhood rather than a copy
        return NeighbourView(self._adj, v)

    def __iter__(self):
        return iter(self._adj)

    def __len__(self):
        return len(self._adj)
| apache-2.0 |
gorjuce/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/SendToServer.py | 293 | 10562 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import random
import xmlrpclib
import base64, tempfile
from com.sun.star.task import XJobExecutor
import os
import sys
# When run as a standalone OpenOffice macro (not bundled as an UNO
# package), pull in the helper libraries and the login dialog. Python 2
# syntax (`<>`) is intentional — this runs inside OpenOffice's Python 2.
if __name__<>'package':
    from lib.gui import *
    from lib.error import *
    from lib.functions import *
    from lib.logreport import *
    from lib.tools import *
    from LoginTest import *
    from lib.rpc import *
# Default connection parameters; overwritten by the login flow at runtime.
database="report"
uid = 3
class SendtoServer(unohelper.Base, XJobExecutor):
    """OpenOffice dialog that uploads the current document to the OpenERP
    server as an ir.actions.report.xml report (Python 2 / UNO macro)."""

    # Maps the label shown in the report-type list box to the report_type
    # value stored on the server.
    Kind = {
        'PDF' : 'pdf',
        'OpenOffice': 'sxw',
        'HTML' : 'html'
    }

    def __init__(self, ctx):
        """Build the 'Send To Server' dialog for the current document and
        show it modally. Exits early (exit(1)) when login failed or the
        base_report_designer module is not installed on the server."""
        self.ctx = ctx
        self.module = "openerp_report"
        self.version = "0.1"
        LoginTest()
        self.logobj=Logger()
        if not loginstatus and __name__=="package":
            exit(1)
        # Credentials and RPC session come from the login dialog's globals.
        global passwd
        self.password = passwd
        global url
        self.sock=RPCSession(url)
        desktop=getDesktop()
        oDoc2 = desktop.getCurrentComponent()
        docinfo=oDoc2.getDocumentInfo()
        # The server-side designer module must be installed for uploads.
        self.ids = self.sock.execute(database, uid, self.password, 'ir.module.module', 'search', [('name','=','base_report_designer'),('state', '=', 'installed')])
        if not len(self.ids):
            ErrorDialog("Please install base_report_designer module.", "", "Module Uninstalled Error!")
            exit(1)
        report_name = ""
        name=""
        # User field 2 holds the existing report id (if the document was
        # opened from the server); field 3 holds the model name.
        if docinfo.getUserFieldValue(2)<>"" :
            try:
                fields=['name','report_name']
                self.res_other = self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'read', [int(docinfo.getUserFieldValue(2))],fields)
                name = self.res_other[0]['name']
                report_name = self.res_other[0]['report_name']
            except:
                import traceback,sys
                info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
                # NOTE(review): `self.logob` looks like a typo for
                # `self.logobj` — this except branch would itself raise an
                # AttributeError if ever reached. Confirm and fix upstream.
                self.logob.log_write('ServerParameter', LOG_ERROR, info)
        elif docinfo.getUserFieldValue(3) <> "":
            # New report: derive a unique technical name from the model name
            # plus a random 5-character suffix.
            name = ""
            result = "rnd"
            for i in range(5):
                result =result + random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890')
            report_name = docinfo.getUserFieldValue(3) + "." + result
        else:
            ErrorDialog("Please select appropriate module...","Note: use Odoo Report -> Open a new Report", "Module selection ERROR");
            exit(1)
        # Dialog layout: report name, technical name, header checkbox and
        # report-type list, plus Send/Cancel buttons.
        self.win = DBModalDialog(60, 50, 180, 100, "Send To Server")
        self.win.addFixedText("lblName",10 , 9, 40, 15, "Report Name :")
        self.win.addEdit("txtName", -5, 5, 123, 15,name)
        self.win.addFixedText("lblReportName", 2, 30, 50, 15, "Technical Name :")
        self.win.addEdit("txtReportName", -5, 25, 123, 15,report_name)
        self.win.addCheckBox("chkHeader", 51, 45, 70 ,15, "Corporate Header")
        self.win.setCheckBoxState("chkHeader", True)
        self.win.addFixedText("lblResourceType", 2 , 60, 50, 15, "Select Rpt. Type :")
        self.win.addComboListBox("lstResourceType", -5, 58, 123, 15,True,itemListenerProc=self.lstbox_selected)
        self.lstResourceType = self.win.getControl( "lstResourceType" )
        self.txtReportName=self.win.getControl( "txtReportName" )
        # The technical name is informational once derived; not editable.
        self.txtReportName.Enable=False
        for kind in self.Kind.keys():
            self.lstResourceType.addItem( kind, self.lstResourceType.getItemCount() )
        self.win.addButton( "btnSend", -5, -5, 80, 15, "Send Report to Server", actionListenerProc = self.btnOk_clicked)
        self.win.addButton( "btnCancel", -5 - 80 -5, -5, 40, 15, "Cancel", actionListenerProc = self.btnCancel_clicked)
        self.win.doModalDialog("lstResourceType", self.Kind.keys()[0])

    def lstbox_selected(self, oItemEvent):
        # Selection changes need no immediate handling; the chosen type is
        # read when the Send button is clicked.
        pass

    def btnCancel_clicked(self, oActionEvent):
        """Close the dialog without uploading anything."""
        self.win.endExecute()

    def btnOk_clicked(self, oActionEvent):
        """Validate the form, create or update the server-side report record
        and upload the document content as the report template."""
        if self.win.getEditText("txtName") <> "" and self.win.getEditText("txtReportName") <> "":
            desktop=getDesktop()
            oDoc2 = desktop.getCurrentComponent()
            docinfo=oDoc2.getDocumentInfo()
            # Switch drop-down fields to their placeholder item before saving.
            self.getInverseFieldsRecord(1)
            fp_name = tempfile.mktemp('.'+"sxw")
            if not oDoc2.hasLocation():
                # Unsaved document: persist it to a temp file first.
                oDoc2.storeAsURL("file://"+fp_name,Array(makePropertyValue("MediaType","application/vnd.sun.xml.writer"),))
            if docinfo.getUserFieldValue(2)=="":
                # No report id yet: create the report and its ir.values entry.
                name=self.win.getEditText("txtName"),
                name_id={}
                try:
                    name_id = self.sock.execute(database, uid, self.password, 'ir.actions.report.xml' , 'search',[('name','=',name)])
                    if not name_id:
                        id=self.getID()
                        docinfo.setUserFieldValue(2,id)
                        rec = {
                            'name': self.win.getEditText("txtReportName"),
                            'key': 'action',
                            'model': docinfo.getUserFieldValue(3),
                            'value': 'ir.actions.report.xml,'+str(id),
                            'key2': 'client_print_multi',
                            'object': True,
                            'user_id': uid
                        }
                        res = self.sock.execute(database, uid, self.password, 'ir.values' , 'create',rec )
                    else :
                        ErrorDialog("This name is already used for another report.\nPlease try with another name.", "", "Error!")
                        self.logobj.log_write('SendToServer',LOG_WARNING, ': report name already used DB %s' % (database))
                        self.win.endExecute()
                except Exception,e:
                    import traceback,sys
                    info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
                    self.logobj.log_write('ServerParameter', LOG_ERROR, info)
            else:
                # Existing report: just rename its ir.values entry.
                id = docinfo.getUserFieldValue(2)
                vId = self.sock.execute(database, uid, self.password, 'ir.values' , 'search', [('value','=','ir.actions.report.xml,'+str(id))])
                rec = { 'name': self.win.getEditText("txtReportName") }
                res = self.sock.execute(database, uid, self.password, 'ir.values' , 'write',vId,rec)
            oDoc2.store()
            data = read_data_from_file( get_absolute_file_path( oDoc2.getURL()[7:] ) )
            # Restore the drop-down fields to their expression view.
            self.getInverseFieldsRecord(0)
            #sock = xmlrpclib.ServerProxy(docinfo.getUserFieldValue(0) +'/xmlrpc/object')
            file_type = oDoc2.getURL()[7:].split(".")[-1]
            params = {
                'name': self.win.getEditText("txtName"),
                'model': docinfo.getUserFieldValue(3),
                'report_name': self.win.getEditText("txtReportName"),
                'header': (self.win.getCheckBoxState("chkHeader") <> 0),
                'report_type': self.Kind[self.win.getListBoxSelectedItem("lstResourceType")],
            }
            if self.win.getListBoxSelectedItem("lstResourceType")=='OpenOffice':
                # Keep the document's own extension for OpenOffice reports.
                params['report_type']=file_type
            self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'write', int(docinfo.getUserFieldValue(2)), params)
            # Call upload_report as the *last* step, as it will call register_all() and cause the report service
            # to be loaded - which requires all the data to be correct in the database
            self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'upload_report', int(docinfo.getUserFieldValue(2)),base64.encodestring(data),file_type,{})
            self.logobj.log_write('SendToServer',LOG_INFO, ':Report %s successfully send using %s'%(params['name'],database))
            self.win.endExecute()
        else:
            ErrorDialog("Either report name or technical name is empty.\nPlease specify an appropriate name.", "", "Error!")
            self.logobj.log_write('SendToServer',LOG_WARNING, ': either report name or technical name is empty.')
            self.win.endExecute()

    def getID(self):
        """Create a new ir.actions.report.xml record for the current document
        and return its database id."""
        desktop=getDesktop()
        doc = desktop.getCurrentComponent()
        docinfo=doc.getDocumentInfo()
        params = {
            'name': self.win.getEditText("txtName"),
            'model': docinfo.getUserFieldValue(3),
            'report_name': self.win.getEditText('txtReportName')
        }
        id=self.sock.execute(database, uid, self.password, 'ir.actions.report.xml' ,'create', params)
        return id

    def getInverseFieldsRecord(self, nVal):
        """Select item *nVal* of every DropDown text field in the document;
        the field display is refreshed only when switching to item 0."""
        desktop=getDesktop()
        doc = desktop.getCurrentComponent()
        count=0
        oParEnum = doc.getTextFields().createEnumeration()
        while oParEnum.hasMoreElements():
            oPar = oParEnum.nextElement()
            if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
                oPar.SelectedItem = oPar.Items[nVal]
                if nVal==0:
                    oPar.update()
# When run directly as a macro, open the dialog immediately; when bundled
# as an UNO package, register the component implementation instead.
if __name__<>"package" and __name__=="__main__":
    SendtoServer(None)
elif __name__=="package":
    g_ImplementationHelper.addImplementation( SendtoServer, "org.openoffice.openerp.report.sendtoserver", ("com.sun.star.task.Job",),)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
handroissuazo/tensorflow | venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treeadapters/sax.py | 1835 | 1661 | from __future__ import absolute_import, division, unicode_literals
from xml.sax.xmlreader import AttributesNSImpl
from ..constants import adjustForeignAttributes, unadjustForeignAttributes
# Map each foreign-attribute prefix (from the html5lib adjustment table)
# to its namespace, for emitting SAX prefix-mapping events below.
prefix_mapping = {}
for prefix, localName, namespace in adjustForeignAttributes.values():
    if prefix is not None:
        prefix_mapping[prefix] = namespace
def to_sax(walker, handler):
    """Replay the token stream produced by treewalker *walker* as SAX
    events on the given content *handler*."""
    handler.startDocument()
    for prefix, namespace in prefix_mapping.items():
        handler.startPrefixMapping(prefix, namespace)

    for token in walker:
        kind = token["type"]
        if kind == "Doctype":
            # there is no SAX event for a doctype at this level
            continue
        if kind in ("StartTag", "EmptyTag"):
            qname = (token["namespace"], token["name"])
            attrs = AttributesNSImpl(token["data"], unadjustForeignAttributes)
            handler.startElementNS(qname, token["name"], attrs)
            if kind == "EmptyTag":
                # an empty element opens and closes in one token
                handler.endElementNS(qname, token["name"])
        elif kind == "EndTag":
            handler.endElementNS((token["namespace"], token["name"]),
                                 token["name"])
        elif kind in ("Characters", "SpaceCharacters"):
            handler.characters(token["data"])
        elif kind == "Comment":
            pass
        else:
            assert False, "Unknown token type"

    for prefix, namespace in prefix_mapping.items():
        handler.endPrefixMapping(prefix)
    handler.endDocument()
| apache-2.0 |
CVML/pybrain | pybrain/supervised/knn/lsh/nearoptimal.py | 25 | 6466 |
"""This module provides functionality for locality sensitive hashing in high
dimensional euclidean spaces.
It is based on the work of Andoni and Indyk, 'Near-Optimal Hashing Algorithms
for Approximate Nearest Neighbor in High Dimensions'."""
__author__ = 'Justin Bayer, bayer.justin@googlemail.com'
import logging
from collections import defaultdict
from heapq import nlargest
from math import sqrt, log, ceil
from scipy import array, dot, random, ones
try:
# Python 2
from scipy import weave
except ImportError:
# Python 3
pass
class MultiDimHash(object):
    """Class that represents a datastructure that enables nearest neighbours
    search and methods to do so."""

    # If the dimension of a dataset is bigger than this bound, the
    # dimensionality will be reduced by a random projection into
    # 24-dimensional space.
    lowerDimensionBound = 24

    def _getRadius(self):
        return self._radius

    def _setRadius(self, value):
        # Cache the squared radius since ball-membership tests use it.
        self._radius = abs(value)
        self.radiusSquared = value ** 2

    radius = property(_getRadius, _setRadius)

    def __init__(self, dim, omega=4, prob=0.8):
        """Create a hash for arrays of dimension dim.

        The hyperspace will be split into hypercubes with a sidelength of
        omega * sqrt(sqrt(dim)), that is omega * radius.

        Every point in the dim-dimensional euclidean space will be hashed to
        its correct bucket with a probability of prob.
        """
        message = ("Creating Hash with %i dimensions, sidelength %.2f and " +
                   "cNN-probability %.2f") % (dim, omega, prob)
        logging.debug(message)
        self.dim = dim
        self.omega = omega
        self.prob = prob
        self.radius = sqrt(sqrt(min(dim, self.lowerDimensionBound)))
        logging.debug("Radius set to %.2f" % self.radius)
        self._initializeGrids()
        self._initializeProjection()
        # buckets: (hypercube index tuple, ball index) -> [(point, satellite)]
        self.balls = defaultdict(lambda: [])

    def _findAmountOfGrids(self):
        """Return how many shifted grids are needed so that a point lands in
        a ball of at least one grid with probability `prob`."""
        w = self.radius
        omega = self.omega
        d = self.dim
        prob = self.prob
        N = ((omega * w) / (w / sqrt(d))) ** d
        result = int(ceil(log((1 - prob) / N, 1 - 1 / N)))
        logging.debug("Number of grids: %i" % result)
        return result

    def _initializeGrids(self):
        """Place one random ball center per grid inside the hypercube."""
        offset = self.omega * self.radius
        radius_offset = ones(self.dim) * self.radius
        self.gridBalls = random.random((self._findAmountOfGrids(), self.dim))
        self.gridBalls *= offset
        self.gridBalls += radius_offset

    def _initializeProjection(self):
        if self.dim <= self.lowerDimensionBound:
            # We only need to reduce the dimension if it's bigger than
            # lowerDimensionBound; otherwise, chose identity
            self.projection = 1
        else:
            projection_shape = self.dim, self.lowerDimensionBound
            self.projection = random.standard_normal(projection_shape)
            self.projection /= sqrt(self.lowerDimensionBound)

    def _findHypercube(self, point):
        """Return where a point lies in what hypercube.

        The result is a pair of two arrays. The first array is an array of
        integers that indicate the multidimensional index of the hypercube it
        is in. The second array is an array of floats, specifying the
        coordinates of the point in that hypercube.
        """
        offset = self.omega * self.radius
        divmods = (divmod(p, offset) for p in point)
        hypercube_indices, relative_point = [], []
        for index, rest in divmods:
            hypercube_indices.append(index)
            relative_point.append(rest)
        return array(hypercube_indices, dtype=int), array(relative_point)

    def _findLocalBall_noinline(self, point):
        """Return the index of the ball that the point lies in, or None if
        the point is in no ball of this hypercube."""
        for i, ball in enumerate(self.gridBalls):
            distance = point - ball
            if dot(distance.T, distance) <= self.radiusSquared:
                return i

    def _findLocalBall_inline(self, point):
        """Return the index of the ball that the point lies in.

        C-accelerated variant using scipy.weave (Python 2 only); kept for
        environments where weave is available.
        """
        balls = self.gridBalls
        nBalls, dim = balls.shape #@UnusedVariable
        radiusSquared = self.radiusSquared #@UnusedVariable

        code = """
        #line 121 "nearoptimal.py"
        return_val = -1;
        for (long i = 0; i < nBalls; i++)
        {
            double distance = 0.0;
            for (long j = 0; j < dim; j++)
            {
                double diff = balls(i, j) - point(j);
                distance += diff * diff;
            }
            if (distance <= radiusSquared) {
                return_val = i;
                break;
            }
        }
        """
        variables = 'point', 'balls', 'nBalls', 'dim', 'radiusSquared',
        result = weave.inline(
            code,
            variables,
            type_converters=weave.converters.blitz,
            compiler='gcc')
        return result if result != -1 else None

    # Default to the pure-Python implementation; the weave variant stays
    # opt-in since weave only exists under Python 2.
    _findLocalBall = _findLocalBall_noinline

    def findBall(self, point):
        """Return the bucket key (hypercube index, ball index) for a point."""
        hypercube_index, relative_point = self._findHypercube(point)
        ball_index = self._findLocalBall(relative_point)
        return tuple(hypercube_index), ball_index

    def insert(self, point, satellite):
        """Put a point and its satellite information into the hash structure.
        """
        point = dot(self.projection, point)
        index = self.findBall(point)
        self.balls[index].append((point, satellite))

    def _findKnnCandidates(self, point):
        """Return a set of candidates that might be nearest neighbours of a
        query point."""
        index = self.findBall(point)
        logging.debug("Found %i candidates for cNN" % len(self.balls[index]))
        return self.balls[index]

    def knn(self, point, k):
        """Return the k approximate nearest neighbours of the item in the
        current hash.

        Mind that the probabilistic nature of the data structure might not
        return a nearest neighbor at all and not the nearest neighbour."""
        # Bug fix: project the query exactly like insert() does, so that for
        # dim > lowerDimensionBound the query is compared against the stored
        # (projected) points in the same space. For smaller dimensions the
        # projection is the identity, so behaviour is unchanged.
        point = dot(self.projection, point)
        candidates = self._findKnnCandidates(point)

        def sort_key(candidate):
            # nlargest with the negated squared distance yields the k
            # candidates closest to the query point.
            candidate_point, _ = candidate
            difference = point - candidate_point
            return -dot(difference.T, difference)

        return nlargest(k, candidates, key=sort_key)
| bsd-3-clause |
midonet/python-neutron-plugin-midonet | midonet/neutron/extensions/port.py | 3 | 6430 | # Copyright (C) 2014 Midokura SARL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import base
from neutron import manager
# Resource name and collection name for this extension.
PORT = 'midonet_port'
PORTS = '%ss' % PORT

# Monkey patches to add validations.
def _validate_non_negative_or_none(data, valid_values=None):
    # None is accepted; anything else must pass the non-negative check.
    if data is not None:
        attr._validate_non_negative_or_none(data, valid_values)

def _validate_range_or_none(data, valid_values=None):
    # None is accepted; anything else must fall within valid_values.
    if data is not None:
        attr._validate_range(data, valid_values)

# Register the wrapped validators so RESOURCE_ATTRIBUTE_MAP can use them.
attr.validators['type:non_negative_or_none'] = _validate_non_negative_or_none
attr.validators['type:range_or_none'] = _validate_range_or_none
# Attribute map for the midonet_port resource in the standard Neutron
# extension format: per-attribute POST/PUT permissions, validator type,
# visibility and default value.
RESOURCE_ATTRIBUTE_MAP = {
    PORTS: {
        'device_id': {'allow_post': False, 'allow_put': False,
                      'validate': {'type:uuid': None},
                      'is_visible': True},
        'host_id': {'allow_post': False, 'allow_put': False,
                    'validate': {'type:uuid_or_none': None},
                    'is_visible': True, 'default': None},
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True},
        'inbound_filter_id': {'allow_post': True, 'allow_put': True,
                              'validate': {'type:uuid_or_none': None},
                              'is_visible': True, 'default': None},
        'interface_name': {'allow_post': True, 'allow_put': False,
                           'validate': {'type:string_or_none': None},
                           'is_visible': True, 'default': None},
        'network_cidr': {'allow_post': True, 'allow_put': True,
                         'validate': {'type:subnet_or_none': None},
                         'is_visible': True, 'default': None},
        'outbound_filter_id': {'allow_post': True, 'allow_put': True,
                               'validate': {'type:uuid_or_none': None},
                               'is_visible': True, 'default': None},
        'peer_id': {'allow_post': True, 'allow_put': True,
                    'validate': {'type:uuid_or_none': None},
                    'is_visible': True, 'default': None},
        'port_address': {'allow_post': True, 'allow_put': True,
                         'validate': {'type:ip_address_or_none': None},
                         'is_visible': True, 'default': None},
        'port_mac': {'allow_post': True, 'allow_put': True,
                     'validate': {'type:mac_address_or_none': None},
                     'is_visible': True, 'default': None},
        'type': {'allow_post': True, 'allow_put': True,
                 'validate': {
                     'type:values': [
                         'Bridge',
                         'Router',
                         'ExteriorBridge',
                         'ExteriorRouter',
                         'InteriorBridge',
                         'InteriorRouter',
                         'Vxlan'
                     ]
                 },
                 'is_visible': True},
        'vif_id': {'allow_post': True, 'allow_put': True,
                   'validate': {'type:uuid_or_none': None},
                   'is_visible': True, 'default': None},
        # NOTE(review): 65335 looks like a typo for 65535 (and the 802.1Q
        # VLAN maximum is actually 4094) — confirm the intended upper bound.
        'vlan_id': {'allow_post': True, 'allow_put': True,
                    'validate': {'type:range_or_none': [0, 65335]},
                    'is_visible': True, 'default': None},
        'vni': {'allow_post': True, 'allow_put': True,
                'validate': {'type:non_negative_or_none': None},
                'is_visible': True, 'default': None},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'validate': {'type:uuid': None},
                      'is_visible': True}
    }
}
class Port(object):
    """Neutron API extension descriptor for the midonet_port resource."""

    @classmethod
    def get_name(cls):
        return "Midonet Port Extension"

    @classmethod
    def get_alias(cls):
        return "midonet-port"

    @classmethod
    def get_description(cls):
        return "Port abstraction for basic port-related features"

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/midonet-port/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2014-07-20T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        exts = []
        plugin = manager.NeutronManager.get_plugin()
        resource_name = PORT
        collection_name = PORTS
        params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict())
        controller = base.create_resource(
            collection_name, resource_name, plugin, params)
        ex = extensions.ResourceExtension(collection_name, controller)
        exts.append(ex)
        return exts

    def update_attributes_map(self, attributes):
        """Merge externally-extended attributes into this extension's map.

        Bug fix: dict.iteritems() does not exist on Python 3; .items()
        behaves identically here on both Python 2 and Python 3.
        """
        for resource_map, attrs in RESOURCE_ATTRIBUTE_MAP.items():
            extended_attrs = attributes.get(resource_map)
            if extended_attrs:
                attrs.update(extended_attrs)

    def get_extended_resources(self, version):
        """Return the attribute map for the v2.0 API, nothing otherwise."""
        if version == "2.0":
            return RESOURCE_ATTRIBUTE_MAP
        return {}
@six.add_metaclass(abc.ABCMeta)
class PortPluginBase(object):
    """Abstract CRUD interface a plugin must implement for midonet_port."""

    @abc.abstractmethod
    def get_midonet_port(self, context, midonet_port, fields=None):
        pass

    @abc.abstractmethod
    def get_midonet_ports(self, context, fields=None, filters=None):
        pass

    @abc.abstractmethod
    def create_midonet_port(self, context, midonet_port):
        pass

    @abc.abstractmethod
    def update_midonet_port(self, context, id, midonet_port):
        pass

    @abc.abstractmethod
    def delete_midonet_port(self, context, id):
        pass
| apache-2.0 |
shubhamgupta123/erpnext | erpnext/manufacturing/doctype/blanket_order/test_blanket_order.py | 13 | 2528 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import add_months, today
from erpnext import get_company_currency
from .blanket_order import make_sales_order, make_purchase_order
class TestBlanketOrder(unittest.TestCase):
    """Covers Sales/Purchase Order creation from a Blanket Order."""

    def test_sales_order_creation(self):
        bo = make_blanket_order(blanket_order_type="Selling")

        so = make_sales_order(bo.name)
        so.currency = get_company_currency(so.company)
        so.delivery_date = today()
        so.items[0].qty = 10
        so.submit()

        self.assertEqual(so.doctype, "Sales Order")
        self.assertEqual(len(so.get("items")), len(bo.get("items")))

        # check the rate, quantity and updation for the ordered quantity
        self.assertEqual(so.items[0].rate, bo.items[0].rate)

        bo = frappe.get_doc("Blanket Order", bo.name)
        self.assertEqual(so.items[0].qty, bo.items[0].ordered_qty)

        # a follow-up order may only cover the remaining quantity
        so1 = make_sales_order(bo.name)
        so1.currency = get_company_currency(so1.company)
        self.assertEqual(so1.items[0].qty, (bo.items[0].qty - bo.items[0].ordered_qty))

    def test_purchase_order_creation(self):
        bo = make_blanket_order(blanket_order_type="Purchasing")

        po = make_purchase_order(bo.name)
        po.currency = get_company_currency(po.company)
        po.schedule_date = today()
        po.items[0].qty = 10
        po.submit()

        self.assertEqual(po.doctype, "Purchase Order")
        self.assertEqual(len(po.get("items")), len(bo.get("items")))

        # Bug fix: the original compared po.items[0].rate with itself, which
        # is always true; the rate must be taken over from the blanket order.
        self.assertEqual(po.items[0].rate, bo.items[0].rate)

        bo = frappe.get_doc("Blanket Order", bo.name)
        self.assertEqual(po.items[0].qty, bo.items[0].ordered_qty)

        # Bug fix: the follow-up order in the purchasing test must be a
        # purchase order, not a sales order.
        po1 = make_purchase_order(bo.name)
        po1.currency = get_company_currency(po1.company)
        self.assertEqual(po1.items[0].qty, (bo.items[0].qty - bo.items[0].ordered_qty))
def make_blanket_order(**args):
    """Create and submit a Blanket Order document for the tests above."""
    args = frappe._dict(args)

    doc = frappe.new_doc("Blanket Order")
    doc.blanket_order_type = args.blanket_order_type
    doc.company = args.company or "_Test Company"

    # Selling orders need a customer, purchasing orders a supplier.
    if args.blanket_order_type == "Selling":
        doc.customer = args.customer or "_Test Customer"
    else:
        doc.supplier = args.supplier or "_Test Supplier"

    doc.from_date = today()
    doc.to_date = add_months(doc.from_date, months=12)

    doc.append("items", {
        "item_code": args.item_code or "_Test Item",
        "qty": args.quantity or 1000,
        "rate": args.rate or 100,
    })

    doc.insert()
    doc.submit()
    return doc
51reboot/actual_09_homework | 08/zhaoyong/cmdb/userdb.py | 1 | 5201 | #encoding: utf-8
import json
import gconf
import MySQLdb
from dbutils import execute_fetch_sql
from dbutils import execute_commit_sql
'''
获取用户信息
'''
def get_users():
    # Fetch every row of the `user` table and map each row to a dict keyed
    # by column name.  (Python 2 module: note the print statement below.)
    _columns = ('id','username','password','age')
    _sql = 'select * from user'
    _count,_rt_list = execute_fetch_sql(_sql)
    _rt = []
    for _line in _rt_list:
        _rt.append(dict(zip(_columns, _line)))
    # NOTE(review): debugging print left in; consider removing or logging.
    print _rt
    return _rt
'''
保存用户信息
'''
def save_users(users):
    """Persist the user list as JSON to the configured user file.

    Uses a ``with`` block so the file handle is closed even when
    serialization or the write raises (the original leaked on error).
    """
    with open(gconf.USER_FILE, 'wb') as fhandler:
        fhandler.write(json.dumps(users))
'''
进行用户登录验证
True/False: 用户名和密码验证成功/用户名或密码错误
如果有一个用户的username&password 与输入相同则登录成功
如果所有用户的username&password 与输入不相同则登录失败
'''
def validate_login(username, password):
    """Return True when a row with matching credentials exists.

    The password is hashed with MySQL's md5() on the server side; the
    query is parameterized so user input never reaches the SQL text.
    """
    query = 'select * from user where username=%s and password=md5(%s)'
    row_count, _rows = execute_fetch_sql(query, (username, password))
    return row_count != 0
'''
验证添加用户的信息
True/False, 描述信息
'''
def validate_add_user(username, password, age):
    """Validate input for a new user; return (ok, message)."""
    if any(u.get('username') == username for u in get_users()):
        return False, u'用户名已经存在'
    if len(password) < 6:
        return False, u'密码长度至少为6位'
    age_text = str(age)
    if not (age_text.isdigit() and 0 <= int(age_text) <= 100):
        return False, u'年龄不正确'
    return True, ''
'''
添加用户信息
'''
def add_user(username, password, age):
    """Insert a new user row; the password is stored as its MD5 hash.

    The original built an ``_args`` tuple and bound ``_count`` without
    using either; both dead locals are removed.
    """
    _sql = 'insert into user(username,password,age) values (%s,md5(%s),%s) '
    execute_commit_sql(_sql, (username, password, age))
'''
获取用户信息
'''
def get_user(username):
    """Return the first user dict whose username matches, else None."""
    matches = (u for u in get_users() if u.get('username') == username)
    return next(matches, None)
def get_user_id(id, fetch=True):
    """Fetch the user row with primary key ``id`` as a list of dicts.

    ``fetch`` is kept for interface compatibility; it is currently unused.
    Bug fix: query parameters must be a tuple — ``(id)`` is just ``id``,
    so the original passed a bare scalar to the DB layer; use ``(id,)``.
    """
    _columns = ('id', 'username', 'password', 'age')
    _sql = 'select * from user where id=%s'
    _args = (id,)
    _count, _rt_list = execute_fetch_sql(_sql, _args)
    return [dict(zip(_columns, _line)) for _line in _rt_list]
#get_user_id(19)
'''
验证用户更新
'''
def validate_update_user(username, password, age, *args):
    """Validate input for updating an existing user; return (ok, message)."""
    if get_user(username) is None:
        return False, u'用户信息不存在'
    if len(password) < 6:
        return False, u'密码长度至少为6位'
    age_text = str(age)
    if not (age_text.isdigit() and 0 <= int(age_text) <= 100):
        return False, u'年龄不正确'
    return True, ''
'''
验证用户更新
'''
def validate_update_user_age(uid, user_age, *args):
    """Validate an age update for user ``uid``; return (ok, message).

    Bug fix: get_user_id() returns a list (possibly empty) and never None,
    so the original ``is None`` existence check could never fail; test for
    emptiness instead.
    """
    if not get_user_id(uid):
        return False, u'用户信息不存在'
    if not str(user_age).isdigit() or int(user_age) <= 0 or int(user_age) > 100:
        return False, u'年龄输入错误'
    return True, ''
'''
更新用户信息
'''
def update_user(user_age, uid):
    """Set the age of the user with primary key ``uid``.

    The unused ``_count`` binding from the original is dropped.
    """
    _sql = 'update user set age=%s where id=%s'
    execute_commit_sql(_sql, (user_age, uid))
'''
验证用户
'''
def validate_delete_user(uid):
    """Check that user ``uid`` exists before deletion; return (ok, message).

    Bug fix: get_user_id() returns a list and never None, so the original
    ``is None`` check always passed; test for an empty result instead.
    """
    if not get_user_id(uid):
        return False, u'用户信息不存在'
    return True, ''
'''
删除用户信息
'''
def delete_user(uid):
    """Delete the user row with primary key ``uid``.

    Bug fix: query parameters must be a 1-tuple — ``(uid)`` is just ``uid``.
    """
    _sql = 'delete from user where id=%s '
    execute_commit_sql(_sql, (uid,))
'''
验证用户信息
'''
def validate_charge_user_password(uid,user_password,username,manager_password):
    """Validate a password-change request; return (ok, message).

    Checks, in order: the manager's credentials, the target user's
    existence, and the new password's minimum length.  The order matters:
    later checks issue DB queries only when earlier ones pass.
    """
    if not validate_login(username,manager_password):
        return False,u'管理员密码错误'
    if get_user(username) is None:
        return False, u'用户信息不存在'
    if len(user_password) < 6:
        return False, u'密码长度至少为6位'
    return True,''
'''
修改用户密码
'''
def charge_user_password(uid, user_password):
    """Replace the password of user ``uid`` with the MD5 of ``user_password``."""
    query = 'update user set password=md5(%s) where id=%s'
    execute_commit_sql(query, (user_password, uid))
'''
日志信息显示
'''
def accesslog(topn):
    """Return the first ``topn`` rows of the access log as a list of dicts.

    Bug fix: query parameters must be a 1-tuple — ``(topn)`` is just
    ``topn``; use ``(topn,)``.
    """
    _columns = ('count', 'url', 'ip', 'code')
    _sql = 'select * from accesslog limit %s'
    _count, _rt_list = execute_fetch_sql(_sql, (topn,))
    return [dict(zip(_columns, _line)) for _line in _rt_list]
if __name__ == '__main__':
    # Ad-hoc smoke test; the commented calls below are kept for reference.
    print accesslog(1)
    # update_user('aa','123456',88,18)
    #get_userid("aa")
    #print get_userid()
    #print validate_login('kk', '123456')
    #print validate_login('kk', '1234567')
    #print validate_login('woniu', '123456')
    #username = 'woniu1'
    #password = '123456'
    #age = '28'
    #_is_ok, _error = validate_add_user(username, password, age)
    #if _is_ok:
    #    add_user(username, password, age)
    #else:
    #    print _error
    #
    #print delete_user('woniu2')
    #print validate_update_user('woniu2', password, age)[1]
    #print validate_update_user('kk', password, 'ac')[1]
    #_is_ok, _error = validate_update_user('kk', password, 30)
    #if _is_ok:
    #    update_user('kk', 'abcdef', 31)
annoviko/pyclustering | pyclustering/core/tests/ut_package.py | 1 | 4514 | """!
@brief Unit-tests for pyclustering package that is used for exchange between ccore library and python code.
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2020
@copyright BSD-3-Clause
"""
import unittest
import numpy
from pyclustering.core.pyclustering_package import package_builder, package_extractor
from ctypes import c_ulong, c_size_t, c_double, c_uint, c_float, c_char_p
class Test(unittest.TestCase):
def templatePackUnpack(self, dataset, c_type_data=None):
package_pointer = package_builder(dataset, c_type_data).create()
unpacked_package = package_extractor(package_pointer).extract()
packing_data = dataset
if isinstance(packing_data, numpy.ndarray):
packing_data = dataset.tolist()
if isinstance(packing_data, str):
self.assertEqual(dataset, unpacked_package)
else:
self.assertTrue(self.compare_containers(packing_data, unpacked_package))
def compare_containers(self, container1, container2):
def is_container(container):
return isinstance(container, list) or isinstance(container, tuple)
if len(container1) == 0 and len(container2) == 0:
return True
if len(container1) != len(container2):
return False
for index in range(len(container1)):
if is_container(container1[index]) and is_container(container2[index]):
return self.compare_containers(container1[index], container2[index])
elif is_container(container1[index]) == is_container(container2[index]):
if container1[index] != container2[index]:
return False
else:
return False
return True
def testListInteger(self):
self.templatePackUnpack([1, 2, 3, 4, 5])
def testListIntegerSingle(self):
self.templatePackUnpack([2])
def testListIntegerNegative(self):
self.templatePackUnpack([-1, -2, -10, -20])
def testListIntegerNegativeAndPositive(self):
self.templatePackUnpack([-1, 26, -10, -20, 13])
def testListFloat(self):
self.templatePackUnpack([1.1, 1.2, 1.3, 1.4, 1.5, 1.6])
def testListFloatNegativeAndPositive(self):
self.templatePackUnpack([1.1, -1.2, -1.3, -1.4, 1.5, -1.6])
def testListLong(self):
self.templatePackUnpack([100000000, 2000000000])
def testListEmpty(self):
self.templatePackUnpack([])
def testListOfListInteger(self):
self.templatePackUnpack([ [1, 2, 3], [4, 5, 6], [7, 8, 9] ])
def testListOfListDouble(self):
self.templatePackUnpack([ [1.1, 5.4], [1.3], [1.4, -9.4] ])
def testListOfListWithGaps(self):
self.templatePackUnpack([ [], [1, 2, 3], [], [4], [], [5, 6, 7] ])
def testListSpecifyUnsignedLong(self):
self.templatePackUnpack([1, 2, 3, 4, 5], c_ulong)
def testListSpecifyUnsignedSizeT(self):
self.templatePackUnpack([1, 2, 3, 4, 5], c_size_t)
def testListSpecifyDouble(self):
self.templatePackUnpack([1.1, 1.6, -7.8], c_double)
def testListOfListSpecifySizeT(self):
self.templatePackUnpack([ [1, 2, 3], [4, 5] ], c_size_t)
def testListOfListSpecifyUnsignedIntWithGaps(self):
self.templatePackUnpack([ [1, 2, 3], [], [4, 5], [], [] ], c_uint)
def testListOfListEmpty(self):
self.templatePackUnpack([ [], [], [] ])
def testListOfListOfListInteger(self):
self.templatePackUnpack([ [ [1], [2] ], [ [3], [4] ], [ [5, 6], [7, 8] ] ])
def testTupleInterger(self):
self.templatePackUnpack([ (1, 2, 3), (4, 5), (6, 7, 8, 9) ], c_uint)
def testTupleFloat(self):
self.templatePackUnpack([ (1.0, 2.0, 3.8), (4.6, 5.0), (6.8, 7.4, 8.5, 9.6) ], c_float)
def testTupleEmpty(self):
self.templatePackUnpack([(), (), ()])
def testNumpyMatrixOneColumn(self):
self.templatePackUnpack(numpy.array([[1.0], [2.0], [3.0]]), c_double)
def testNumpyMatrixTwoColumns(self):
self.templatePackUnpack(numpy.array([[1.0, 1.0], [2.0, 2.0]]), c_double)
def testNumpyMatrixThreeColumns(self):
self.templatePackUnpack(numpy.array([[1.1, 2.2, 3.3], [2.2, 3.3, 4.4], [3.3, 4.4, 5.5]]), c_double)
def testString(self):
self.templatePackUnpack("Test message number one".encode('utf-8'))
def testEmptyString(self):
self.templatePackUnpack("".encode('utf-8'))
| gpl-3.0 |
derdewey/mongrel2 | examples/bbs/client.py | 96 | 1033 | #!/usr/bin/env python
import sys
import socket
import json
from base64 import b64decode
try:
import json
except:
import simplejson as json
import getpass
host = sys.argv[1]  # server hostname (first CLI argument)
port = int(sys.argv[2])  # server port (second CLI argument)
def read_msg():
    # Accumulate bytes from the global connection one at a time until the
    # NUL terminator, then base64-decode and JSON-parse the payload.
    # (Python 2 module: recv() returns str, so '\0' comparison is correct.)
    reply = ""
    ch = CONN.recv(1)
    while ch != '\0':
        reply += ch
        ch = CONN.recv(1)
    return json.loads(b64decode(reply))
def post_msg(data):
    # Send a NUL-terminated JSON message addressed to the "bbs" handler
    # over the global connection.
    msg = '@bbs %s\x00' % (json.dumps({'type': 'msg', 'msg': data}))
    CONN.send(msg)
print "Connecting to %s:%d" % (host, port)
CONN = socket.socket()
CONN.connect((host, port))
USER = getpass.getuser()
post_msg("connect")
# Main loop: print server messages, answer prompts from stdin, and stop on
# an 'exit' message from the server or EOF on stdin.
while True:
    try:
        reply = read_msg()
        if 'msg' in reply and reply['msg']:
            print reply['msg']
        if reply['type'] == "prompt":
            msg = raw_input(reply['pchar'])
            post_msg(msg)
        if reply['type'] == 'exit':
            sys.exit(0)
    except EOFError:
        print "\nBye."
        break
| bsd-3-clause |
annarev/tensorflow | tensorflow/python/framework/versions_test.py | 14 | 2056 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for exposed tensorflow versions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import versions
from tensorflow.python.platform import test
class VersionTest(test.TestCase):

  def testVersion(self):
    # Both spellings of the version string must be plain strings matching
    # a "major.minor.patch" (or "major.minor.head") shape.
    for version_string in (versions.__version__, versions.VERSION):
      self.assertEqual(type(version_string), str)
      # This pattern will need to grow as we include alpha, builds, etc.
      self.assertRegex(version_string, r'^\d+\.\d+\.(\d+(\-\w+)?|head)$')

  def testGraphDefVersion(self):
    bounds = (versions.GRAPH_DEF_VERSION,
              versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
              versions.GRAPH_DEF_VERSION_MIN_PRODUCER)
    for value in bounds:
      self.assertEqual(type(value), int)
    version, min_consumer, min_producer = bounds
    self.assertLessEqual(0, min_consumer)
    self.assertLessEqual(0, min_producer)
    self.assertLessEqual(min_producer, version)

  def testGitAndCompilerVersion(self):
    # All build-metadata attributes are exposed as strings.
    for attribute in (versions.__git_version__,
                      versions.__compiler_version__,
                      versions.GIT_VERSION,
                      versions.COMPILER_VERSION):
      self.assertEqual(type(attribute), str)


if __name__ == '__main__':
  test.main()
| apache-2.0 |
boudewijnrempt/kura | doc/script5.py | 1 | 3804 | import os.path, sys, string, codecs
from kuralib import kuraapp
from kuragui.guiconfig import guiConf
from kuragui import guiconfig
# Pre-Python-2.3 compatibility shims: define the boolean names as ints.
# (Python 2 only — assigning to False/True is a SyntaxError on Python 3.)
False = 0
True = 1
def splitCSVLine(line):
    """Splits a CSV-formatted line into a list.

    The line is expected to carry a trailing character (normally the
    newline), which is dropped from an unquoted final field.
    See: http://www.colorstudy.com/software/webware/

    Fixes: uses the str.find method instead of the long-deprecated
    string.find() (removed in Python 3), and no longer shadows the
    builtin ``list``.
    """
    fields = []
    position = 0
    while True:
        if position >= len(line):
            # This only happens when we have a trailing comma
            fields.append('')
            return fields
        if line[position] == '"':
            # Quoted field: scan to the closing quote, honouring "" escapes.
            field = ""
            position = position + 1
            while True:
                end = line.find('"', position)
                if end == -1:
                    # This indicates a badly-formed CSV file, but
                    # we'll accept it anyway.
                    field = line[position:]
                    position = len(line)
                    break
                if end + 1 < len(line) and line[end + 1] == '"':
                    # Doubled quote inside the field: keep one quote and
                    # continue scanning.
                    field = "%s%s" % (field, line[position:end + 1])
                    position = end + 2
                else:
                    field = "%s%s" % (field, line[position:end])
                    position = end + 2
                    break
        else:
            end = line.find(",", position)
            if end == -1:
                # Final unquoted field: line[position:-1] drops the
                # trailing newline character.
                fields.append(line[position:end])
                return fields
            field = line[position:end]
            position = end + 1
        fields.append(field)
    return fields
def init():
    """Initialise the kura application and environment from guiConf.

    Supports a file-backed datastore or a SQL backend; on a SQL connection
    failure the process exits with status 1.  (Python 2 module: note the
    ``except Exception, e`` syntax and print statement below.)
    """
    if guiConf.backend == guiconfig.FILE:
        kuraapp.initApp(guiConf.backend,
                        dbfile = os.path.join(guiConf.filepath,
                                              guiConf.datastore))
    elif guiConf.backend == guiconfig.SQL:
        if guiConf.username != "":
            try:
                kuraapp.initApp(guiConf.backend,
                                username = str(guiConf.username),
                                database = str(guiConf.database),
                                password = str(guiConf.password),
                                hostname = str(guiConf.hostname))
            except Exception, e:
                print "Error connecting to database: %s" % e
                sys.exit(1)
    kuraapp.initCurrentEnvironment(guiConf.usernr,
                                   guiConf.languagenr,
                                   guiConf.projectnr)
def main(args):
    """Import a CSV lexicon file into kura.

    Each line is expected as: form, glosse, POS tag, phonetic form.  For
    every line a lexeme is created plus a POS tag and a FILE provenance tag.
    """
    if len(args) < 2:
        print "Usage: python script5.py f1...fn"
        sys.exit(1)
    init()
    for line in codecs.open(args[1], "r", "UTF-8"):
        line = splitCSVLine(line)
        print "Inserting %s" % line[0]
        lexeme = kuraapp.app.createObject("lng_lex", fields={},
                                          form = line[0],
                                          glosse = line[1],
                                          languagenr = guiConf.languagenr,
                                          phonetic_form = line[3],
                                          usernr = guiConf.usernr)
        lexeme.insert()
        tag = kuraapp.app.createObject("lng_lex_tag", fields={},
                                       lexnr = lexeme.lexnr,
                                       tag = "POS",
                                       value = line[2],
                                       usernr = guiConf.usernr)
        tag.insert()
        # NOTE(review): unlike the calls above, this one omits fields={} —
        # presumably an oversight; confirm against createObject's signature.
        tag = kuraapp.app.createObject("lng_lex_tag",
                                       lexnr = lexeme.lexnr,
                                       tag = "FILE",
                                       value = args[1],
                                       usernr = guiConf.usernr)
        tag.insert()
    kuraapp.app.saveFile()

if __name__ == "__main__":
    main(sys.argv)
elric/virtaal-debian | virtaal/plugins/spellchecker.py | 1 | 13888 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010-2011 Zuza Software Foundation
#
# This file is part of Virtaal.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import logging
import os
import os.path
import re
from gettext import dgettext
import gobject
from virtaal.common import pan_app
from virtaal.controllers.baseplugin import BasePlugin
# psyco is an optional JIT; only try it outside of debug runs, and fall
# back to None so the rest of the module can test ``if psyco:``.
if not pan_app.DEBUG:
    try:
        import psyco
    except:
        psyco = None
else:
    psyco = None

# Matches gtkspell's English "add to dictionary" menu label so the word can
# be re-wrapped in a translated label in Plugin._fix_menu().
_dict_add_re = re.compile('Add "(.*)" to Dictionary')
class Plugin(BasePlugin):
    """A plugin to control spell checking.
    It can also download spell checkers on Windows."""

    display_name = _('Spell Checker')
    description = _('Check spelling and provide suggestions')
    version = 0.1

    _base_URL = 'http://dictionary.locamotion.org/hunspell/'
    _dict_URL = _base_URL + '%s.tar.bz2'
    _lang_list = 'languages.txt'

    # INITIALIZERS #
    def __init__(self, internal_name, main_controller):
        self.internal_name = internal_name

        # If these imports fail, the plugin is automatically disabled
        import gtkspell
        import enchant
        self.gtkspell = gtkspell
        self.enchant = enchant

        # languages that we've handled before:
        self._seen_languages = {}
        # languages supported by enchant:
        self._enchant_languages = self.enchant.list_languages()

        # HTTP clients (Windows only)
        self.clients = {}
        # downloadable languages (Windows only)
        self.languages = set()

        unit_view = main_controller.unit_controller.view
        self.unit_view = unit_view
        self._connect_id = self.unit_view.connect('textview-language-changed', self._on_unit_lang_changed)

        self._textbox_ids = []
        self._unitview_ids = []
        # For some reason the i18n of gtkspell doesn't work on Windows, so we
        # intervene. We also don't want the Languages submenu, so we remove it.
        # Textboxes may not exist yet; if not, defer hook-up to the
        # sources-created/targets-created signals.
        if unit_view.sources:
            self._connect_to_textboxes(unit_view, unit_view.sources)
            srclang = main_controller.lang_controller.source_lang.code
            for textview in unit_view.sources:
                self._on_unit_lang_changed(unit_view, textview, srclang)
        else:
            self._unitview_ids.append(unit_view.connect('sources-created', self._connect_to_textboxes))
        if unit_view.targets:
            self._connect_to_textboxes(unit_view, unit_view.targets)
            tgtlang = main_controller.lang_controller.target_lang.code
            for textview in unit_view.targets:
                self._on_unit_lang_changed(unit_view, textview, tgtlang)
        else:
            self._unitview_ids.append(unit_view.connect('targets-created', self._connect_to_textboxes))

    def destroy(self):
        """Remove signal connections and disable spell checking."""
        for id in self._unitview_ids:
            self.unit_view.disconnect(id)
        for textbox, id in self._textbox_ids:
            textbox.disconnect(id)
        if getattr(self, '_connect_id', None):
            self.unit_view.disconnect(self._connect_id)
        for text_view in self.unit_view.sources + self.unit_view.targets:
            self._disable_checking(text_view)

    def _connect_to_textboxes(self, unitview, textboxes):
        """Hook each textbox's populate-popup signal so its context menu
        can be fixed up in _fix_menu()."""
        for textbox in textboxes:
            self._textbox_ids.append((
                textbox,
                textbox.connect('populate-popup', self._on_populate_popup)
            ))

    # METHODS #
    def _build_client(self, url, clients_id, callback, error_callback=None):
        """Start an asynchronous HTTP GET for `url`, remembering the client
        under `clients_id` so the request is not repeated."""
        from virtaal.support.httpclient import HTTPClient
        client = HTTPClient()
        client.set_virtaal_useragent()
        self.clients[clients_id] = client
        if logging.root.level != logging.DEBUG:
            client.get(url, callback)
        else:
            def error_log(request, result):
                logging.debug('Could not get %s: status %d' % (url, request.status))
            client.get(url, callback, error_callback=error_log)

    def _download_checker(self, language):
        """A Windows-only way to obtain new dictionaries."""
        if 'APPDATA' not in os.environ:
            # We won't have an idea of where to save it, so let's give up now
            return
        if language in self.clients:
            # We already tried earlier, or started the process
            return
        if not self.languages:
            if self._lang_list not in self.clients:
                # We don't yet have a list of available languages
                url = self._base_URL + self._lang_list #index page listing all the dictionaries
                callback = lambda *args: self._process_index(language=language, *args)
                self._build_client(url, self._lang_list, callback)
            # self._process_index will call this again, so we can exit
            return

        language_to_download = None
        # People almost definitely want 'en_US' for 'en', so let's ensure
        # that we get that right:
        if language == 'en':
            language_to_download = 'en_US'
            self.clients[language] = None
        else:
            # Let's see if a dictionary is available for this language:
            for l in self.languages:
                if l == language or l.startswith(language+'_'):
                    self.clients[language] = None
                    logging.debug("Will use %s to spell check %s", l, language)
                    language_to_download = l
                    break
            else:
                # No dictionary available
                # Indicate that we never need to try this language:
                logging.debug("Found no suitable language for spell checking")
                self.clients[language] = None
                return

        # Now download the actual files after we have determined that it is
        # available
        callback = lambda *args: self._process_tarball(language=language, *args)
        url = self._dict_URL % language_to_download
        self._build_client(url, language, callback)

    def _tar_ok(self, tar):
        # TODO: Verify that the tarball is ok:
        # - only two files
        # - must be .aff and .dic
        # - language codes should be sane
        # - file sizes should be ok
        # - no directory structure
        return True

    def _ensure_dir(self, dir):
        # Create `dir` (and parents) if it does not exist yet.
        if not os.path.isdir(dir):
            os.makedirs(dir)

    def _process_index(self, request, result, language=None):
        """Process the list of languages."""
        if request.status == 200 and not self.languages:
            self.languages = set(result.split())
            self._download_checker(language)
        else:
            logging.debug("Couldn't get list of spell checkers")
            #TODO: disable plugin

    def _process_tarball(self, request, result, language=None):
        """Unpack a downloaded dictionary tarball into the user's enchant
        directory and refresh the known-language caches."""
        # Indicate that we already tried and shouldn't try again later:
        self.clients[language] = None

        if request.status == 200:
            logging.debug('Got a dictionary')
            from cStringIO import StringIO
            import tarfile
            file_obj = StringIO(result)
            tar = tarfile.open(fileobj=file_obj)
            if not self._tar_ok(tar):
                return
            DICTDIR = os.path.join(os.environ['APPDATA'], 'enchant', 'myspell')
            self._ensure_dir(DICTDIR)
            tar.extractall(DICTDIR)

            # Forget any negative cache entry and re-query enchant so the
            # freshly installed dictionary is picked up.
            self._seen_languages.pop(language, None)
            self._enchant_languages = self.enchant.list_languages()
            self.unit_view.update_languages()
        else:
            logging.debug("Couldn't get a dictionary. Status code: %d" % (request.status))

    def _disable_checking(self, text_view):
        """Disable checking on the given text_view."""
        if getattr(text_view, 'spell_lang', 'xxxx') is None:
            # No change necessary - already disabled
            return
        spell = None
        try:
            spell = self.gtkspell.get_from_text_view(text_view)
        except SystemError, e:
            # At least on Mandriva .get_from_text_view() sometimes returns
            # a SystemError without a description. Things seem to work fine
            # anyway, so let's ignore it and hope for the best.
            pass
        if not spell is None:
            spell.detach()
        text_view.spell_lang = None
    if psyco:
        psyco.cannotcompile(_disable_checking)

    # SIGNAL HANDLERS #
    def _on_unit_lang_changed(self, unit_view, text_view, language):
        """Attach (or schedule) a spell checker for `language` on
        `text_view`, possibly kicking off a dictionary download."""
        if not self.gtkspell:
            return

        if language == 'en':
            language = 'en_US'

        if not language in self._seen_languages and not self.enchant.dict_exists(language):
            # Sometimes enchants *wants* a country code, other times it does not.
            # For the cases where it requires one, we look for the first language
            # code that enchant supports and use that one.
            for code in self._enchant_languages:
                if code.startswith(language+'_'):
                    self._seen_languages[language] = code
                    language = code
                    break
            else:
                #logging.debug('No code in enchant.list_languages() that starts with "%s"' % (language))

                # If we are on Windows, let's try to download a spell checker:
                if os.name == 'nt':
                    self._download_checker(language)
                    # If we get it, it will only be activated asynchronously
                    # later
                #TODO: packagekit on Linux?

                # We couldn't find a dictionary for "language", so we should make sure that we don't
                # have a spell checker for a different language on the text view. See bug 717.
                self._disable_checking(text_view)
                self._seen_languages[language] = None
                return

        language = self._seen_languages.get(language, language)
        if language is None:
            self._disable_checking(text_view)
            return

        if getattr(text_view, 'spell_lang', None) == language:
            # No change necessary - already enabled
            return
        gobject.idle_add(self._activate_checker, text_view, language, priority=gobject.PRIORITY_LOW)

    def _activate_checker(self, text_view, language):
        # All the expensive stuff in here called on idle. We mush also isolate
        # this away from psyco
        try:
            spell = None
            try:
                spell = self.gtkspell.get_from_text_view(text_view)
            except SystemError, e:
                # At least on Mandriva .get_from_text_view() sometimes returns
                # a SystemError without a description. Things seem to work fine
                # anyway, so let's ignore it and hope for the best.
                pass
            if spell is None:
                spell = self.gtkspell.Spell(text_view, language)
            else:
                spell.set_language(language)
                spell.recheck_all()
            text_view.spell_lang = language
        except Exception, e:
            logging.exception("Could not initialize spell checking", e)
            self.gtkspell = None
            #TODO: unload plugin
    if psyco:
        # Some of the gtkspell stuff can't work with psyco and will dump core
        # if we don't avoid psyco compilation
        psyco.cannotcompile(_activate_checker)

    def _on_populate_popup(self, textbox, menu):
        # We can't work with the menu immediately, since gtkspell only adds its
        # entries in the event handler.
        gobject.idle_add(self._fix_menu, menu)

    def _fix_menu(self, menu):
        """Re-translate gtkspell's menu entries (broken i18n on Windows)
        and drop the Languages submenu plus any spurious separator."""
        # NOTE(review): _now_remove_separator is set but never read — looks
        # like dead state; confirm against upstream history.
        _entries_above_separator = False
        _now_remove_separator = False
        for item in menu:
            if item.get_name() == 'GtkSeparatorMenuItem':
                if not _entries_above_separator:
                    menu.remove(item)
                break

            label = item.get_property('label')

            # For some reason the i18n of gtkspell doesn't work on Windows, so
            # we intervene.
            if label == "<i>(no suggestions)</i>":
                #l10n: This refers to spell checking
                item.set_property('label', _("<i>(no suggestions)</i>"))

            if label == "Ignore All":
                #l10n: This refers to spell checking
                item.set_property('label', _("Ignore All"))

            if label == "More...":
                #l10n: This refers to spelling suggestions
                item.set_property('label', _("More..."))

            m = _dict_add_re.match(label)
            if m:
                word = m.group(1)
                #l10n: This refers to the spell checking dictionary
                item.set_property('label', _('Add "%s" to Dictionary') % word)

            # We don't want a language selector - we have our own
            # NOTE(review): 'in' performs a substring test against the
            # translated "Languages" label — confirm that is intended.
            if label in dgettext('gtkspell', 'Languages'):
                menu.remove(item)
                if not _entries_above_separator:
                    _now_remove_separator = True
                    continue

            _entries_above_separator = True
| gpl-2.0 |
endlessm/chromium-browser | third_party/catapult/dashboard/dashboard/common/request_handler.py | 1 | 4410 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Simple Request handler using Jinja2 templates."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import os
import jinja2
import webapp2
from google.appengine.api import users
from dashboard.common import utils
from dashboard.common import xsrf
# Root of the dashboard python package (two levels above this module).
_DASHBOARD_PYTHON_DIR = os.path.dirname(os.path.dirname(__file__))
# Shared Jinja2 environment used by every handler; templates are loaded
# from the package-level "templates" directory.
JINJA2_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(
        [os.path.join(_DASHBOARD_PYTHON_DIR, 'templates')]),
    # Security team suggests that autoescaping be enabled.
    autoescape=True,
    extensions=['jinja2.ext.autoescape'])
class RequestHandler(webapp2.RequestHandler):
    """Base class for requests. Does common template and error handling tasks."""

    def RenderHtml(self, template_file, template_values, status=200):
        """Renders HTML given template and values.

        Args:
          template_file: string. File name under templates directory.
          template_values: dict. Mapping of template variables to
              corresponding values.
          status: int. HTTP status code.
        """
        self.response.set_status(status)
        template = JINJA2_ENVIRONMENT.get_template(template_file)
        self.GetDynamicVariables(template_values)
        self.response.out.write(template.render(template_values))

    def RenderStaticHtml(self, filename):
        """Writes a file from the static directory straight to the response.

        Args:
          filename: string. File name under the static directory.
        """
        filename = os.path.join(_DASHBOARD_PYTHON_DIR, 'static', filename)
        # Context manager guarantees the handle is closed even if the write
        # raises; the previous code leaked the handle on error.
        with open(filename, 'r') as contents:
            self.response.out.write(contents.read())

    def GetDynamicVariables(self, template_values, request_path=None):
        """Gets the values that vary for every page.

        Args:
          template_values: dict of name/value pairs, updated in place.
          request_path: path for login urls, None if using the current path.

        Returns:
          The updated template_values dict.
        """
        xsrf_token = ''
        user = users.get_current_user()
        display_username = 'Sign in'
        title = 'Sign in to an account'
        is_admin = False
        if user:
            display_username = user.email()
            title = 'Switch user'
            xsrf_token = xsrf.GenerateToken(user)
            is_admin = users.is_current_user_admin()
        try:
            login_url = users.create_login_url(request_path or self.request.path_qs)
        except users.RedirectTooLongError:
            # On the bug filing pages, the full login URL can be too long. Drop
            # the correct redirect URL, since the user should already be logged
            # in at this point anyway.
            login_url = users.create_login_url('/')
        user_info = '<a href="%s" title="%s">%s</a>' % (
            login_url, title, display_username)
        # Force out of passive login, as it creates multilogin issues.
        login_url = login_url.replace('passive=true', 'passive=false')
        # (The original assigned 'login_url' twice; once is enough.)
        template_values['login_url'] = login_url
        template_values['display_username'] = display_username
        template_values['user_info'] = user_info
        template_values['is_admin'] = is_admin
        template_values['is_internal_user'] = utils.IsInternalUser()
        template_values['xsrf_token'] = xsrf_token
        template_values['xsrf_input'] = (
            '<input type="hidden" name="xsrf_token" value="%s">' % xsrf_token)
        return template_values

    def ReportError(self, error_message, status=500):
        """Reports the given error to the client and logs the error.

        Args:
          error_message: The message to log and send to the client.
          status: The HTTP response code to use.
        """
        logging.error('Reporting error: %r', error_message)
        self.response.set_status(status)
        self.response.out.write('%s\nrequest_id:%s\n' %
                                (error_message, utils.GetRequestId()))

    def ReportWarning(self, warning_message, status=200):
        """Reports a warning to the client and logs the warning.

        Args:
          warning_message: The warning message to log (as an error).
          status: The http response code to use.
        """
        logging.warning('Reporting warning: %r', warning_message)
        self.response.set_status(status)
        self.response.out.write('%s\nrequest_id:%s\n' %
                                (warning_message, utils.GetRequestId()))
class InvalidInputError(Exception):
    """Raised when a user-supplied query parameter value is invalid."""
| bsd-3-clause |
ikben/troposphere | troposphere/cloudtrail.py | 4 | 1110 | from . import AWSObject, Tags, AWSProperty
from .validators import boolean
class DataResource(AWSProperty):
    """CloudTrail event-selector data resource (e.g. S3 object ARNs)."""
    # Each value is an (expected type, required-flag) pair, following the
    # troposphere property convention used throughout this module.
    props = {
        'Type': (basestring, True),
        'Values': ([basestring], False),
    }
class EventSelector(AWSProperty):
    """Event selector for a CloudTrail trail (filters logged events)."""
    # (expected type, required-flag) pairs, troposphere convention.
    props = {
        'DataResources': ([DataResource], False),
        'IncludeManagementEvents': (boolean, False),
        'ReadWriteType': (basestring, False),
    }
class Trail(AWSObject):
    """AWS::CloudTrail::Trail CloudFormation resource."""
    resource_type = "AWS::CloudTrail::Trail"

    # (expected type, required-flag) pairs; only IsLogging and S3BucketName
    # are mandatory.
    props = {
        'CloudWatchLogsLogGroupArn': (basestring, False),
        'CloudWatchLogsRoleArn': (basestring, False),
        'EnableLogFileValidation': (boolean, False),
        'EventSelectors': ([EventSelector], False),
        'IncludeGlobalServiceEvents': (boolean, False),
        'IsLogging': (boolean, True),
        'IsMultiRegionTrail': (boolean, False),
        'KMSKeyId': (basestring, False),
        'S3BucketName': (basestring, True),
        'S3KeyPrefix': (basestring, False),
        'SnsTopicName': (basestring, False),
        'Tags': (Tags, False),
        'TrailName': (basestring, False),
    }
| bsd-2-clause |
simbs/edx-platform | common/djangoapps/util/views.py | 1 | 13657 | import json
import logging
import sys
from functools import wraps
from django.conf import settings
from django.core.cache import caches
from django.core.validators import ValidationError, validate_email
from django.views.decorators.csrf import requires_csrf_token
from django.views.defaults import server_error
from django.http import (Http404, HttpResponse, HttpResponseNotAllowed,
HttpResponseServerError)
import dogstats_wrapper as dog_stats_api
from edxmako.shortcuts import render_to_response
import zendesk
from microsite_configuration import microsite
import calc
import track.views
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
log = logging.getLogger(__name__)
def ensure_valid_course_key(view_func):
    """
    This decorator should only be used with views which have argument course_key_string (studio) or course_id (lms).
    If course_key_string (studio) or course_id (lms) is not valid raise 404.
    """
    @wraps(view_func)
    def inner(request, *args, **kwargs):
        key_string = kwargs.get('course_key_string') or kwargs.get('course_id')
        if key_string is not None:
            try:
                CourseKey.from_string(key_string)
            except InvalidKeyError:
                raise Http404
        return view_func(request, *args, **kwargs)
    return inner
@requires_csrf_token
def jsonable_server_error(request, template_name='500.html'):
    """
    500 error handler that serves JSON on an AJAX request, and proxies
    to the Django default `server_error` view otherwise.
    """
    if not request.is_ajax():
        return server_error(request, template_name=template_name)
    payload = json.dumps({"error": "The edX servers encountered an error"})
    return HttpResponseServerError(payload)
def handle_500(template_path, context=None, test_func=None):
    """
    Decorator factory for view-specific 500 error handling.

    Custom handling is skipped only when `test_func` is supplied and returns
    a falsy value for the request.

    Usage:
        @handle_500(
            template_path='certificates/server-error.html',
            context={'error-info': 'Internal Server Error'},
            test_func=lambda request: request.GET.get('preview', None)
        )
        def my_view(request):
            # Any unhandled exception in this view is handled by handle_500
            # ...
    """
    def decorator(func):
        """Wrap `func` so uncaught exceptions render a custom template."""
        @wraps(func)
        def inner(request, *args, **kwargs):
            """Run the view; on unhandled errors show the custom 500 page."""
            try:
                return func(request, *args, **kwargs)
            except Exception:  # pylint: disable=broad-except
                # In debug mode let django render its own debug 500 page.
                if settings.DEBUG:
                    raise
                # Skip the custom page when a test_func was given and fails.
                if test_func is not None and not test_func(request):
                    raise
                log.exception("Error in django view.")
                return render_to_response(template_path, context)
        return inner
    return decorator
def calculate(request):
    """Calculator in footer of every page.

    Evaluates the ``equation`` GET parameter and returns a JSON body of the
    form ``{"result": ...}``; malformed input yields ``"Invalid syntax"``.
    """
    equation = request.GET['equation']
    try:
        result = calc.evaluator({}, {}, equation)
    # Was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; catch only real errors.
    except Exception:  # pylint: disable=broad-except
        event = {'error': map(str, sys.exc_info()),
                 'equation': equation}
        track.views.server_track(request, 'error:calc', event, page='calc')
        return HttpResponse(json.dumps({'result': 'Invalid syntax'}))
    return HttpResponse(json.dumps({'result': str(result)}))
class _ZendeskApi(object):
    """Thin wrapper around the `zendesk` client used for feedback tickets."""
    # Prefix and lifetime (in seconds) for the group-lookup cache below.
    CACHE_PREFIX = 'ZENDESK_API_CACHE'
    CACHE_TIMEOUT = 60 * 60  # one hour

    def __init__(self):
        """
        Instantiate the Zendesk API.
        All of `ZENDESK_URL`, `ZENDESK_USER`, and `ZENDESK_API_KEY` must be set
        in `django.conf.settings`.
        """
        self._zendesk_instance = zendesk.Zendesk(
            settings.ZENDESK_URL,
            settings.ZENDESK_USER,
            settings.ZENDESK_API_KEY,
            use_api_token=True,
            api_version=2,
            # As of 2012-05-08, Zendesk is using a CA that is not
            # installed on our servers
            client_args={"disable_ssl_certificate_validation": True}
        )

    def create_ticket(self, ticket):
        """
        Create the given `ticket` in Zendesk.
        The ticket should have the format specified by the zendesk package.
        Returns the numeric id of the created ticket.
        """
        ticket_url = self._zendesk_instance.create_ticket(data=ticket)
        return zendesk.get_id_from_url(ticket_url)

    def update_ticket(self, ticket_id, update):
        """
        Update the Zendesk ticket with id `ticket_id` using the given `update`.
        The update should have the format specified by the zendesk package.
        """
        self._zendesk_instance.update_ticket(ticket_id=ticket_id, data=update)

    def get_group(self, name):
        """
        Find the Zendesk group named `name`. Groups are cached for
        CACHE_TIMEOUT seconds.
        If a matching group exists, it is returned as a dictionary
        with the format specifed by the zendesk package.
        Otherwise, returns None.
        """
        cache = caches['default']
        cache_key = '{prefix}_group_{name}'.format(prefix=self.CACHE_PREFIX, name=name)
        cached = cache.get(cache_key)
        if cached:
            return cached
        # Cache miss: scan the full group list for a name match.
        groups = self._zendesk_instance.list_groups()['groups']
        for group in groups:
            if group['name'] == name:
                cache.set(cache_key, group, self.CACHE_TIMEOUT)
                return group
        # No such group; note that the negative result is not cached.
        return None
def _record_feedback_in_zendesk(
        realname,
        email,
        subject,
        details,
        tags,
        additional_info,
        group_name=None,
        require_update=False
):
    """
    Create a new user-requested Zendesk ticket.
    Once created, the ticket will be updated with a private comment containing
    additional information from the browser and server, such as HTTP headers
    and user state. Returns a boolean value indicating whether ticket creation
    was successful, regardless of whether the private comment update succeeded.
    If `group_name` is provided, attaches the ticket to the matching Zendesk group.
    If `require_update` is provided, returns False when the update does not
    succeed. This allows using the private comment to add necessary information
    which the user will not see in followup emails from support.
    """
    zendesk_api = _ZendeskApi()
    # Render the additional_info dict as "key: value" lines, skipping Nones.
    additional_info_string = (
        u"Additional information:\n\n" +
        u"\n".join(u"%s: %s" % (key, value) for (key, value) in additional_info.items() if value is not None)
    )
    # Tag all issues with LMS to distinguish channel in Zendesk; requested by student support team
    zendesk_tags = list(tags.values()) + ["LMS"]
    # Per edX support, we would like to be able to route white label feedback items
    # via tagging
    white_label_org = microsite.get_value('course_org_filter')
    if white_label_org:
        zendesk_tags = zendesk_tags + ["whitelabel_{org}".format(org=white_label_org)]
    new_ticket = {
        "ticket": {
            "requester": {"name": realname, "email": email},
            "subject": subject,
            "comment": {"body": details},
            "tags": zendesk_tags
        }
    }
    group = None
    if group_name is not None:
        group = zendesk_api.get_group(group_name)
        if group is not None:
            new_ticket['ticket']['group_id'] = group['id']
    try:
        ticket_id = zendesk_api.create_ticket(new_ticket)
        if group is None:
            # Support uses Zendesk groups to track tickets. In case we
            # haven't been able to correctly group this ticket, log its ID
            # so it can be found later.
            # NOTE(review): this warning also fires when group_name was never
            # supplied (the common path) — confirm that is intended.
            log.warning('Unable to find group named %s for Zendesk ticket with ID %s.', group_name, ticket_id)
    except zendesk.ZendeskError:
        log.exception("Error creating Zendesk ticket")
        return False
    # Additional information is provided as a private update so the information
    # is not visible to the user.
    ticket_update = {"ticket": {"comment": {"public": False, "body": additional_info_string}}}
    try:
        zendesk_api.update_ticket(ticket_id, ticket_update)
    except zendesk.ZendeskError:
        log.exception("Error updating Zendesk ticket with ID %s.", ticket_id)
        # The update is not strictly necessary, so do not indicate
        # failure to the user unless it has been requested with
        # `require_update`.
        if require_update:
            return False
    return True
# Name of the Datadog counter incremented on every feedback submission.
DATADOG_FEEDBACK_METRIC = "lms_feedback_submissions"


def _record_feedback_in_datadog(tags):
    """Increment the feedback-submission counter, tagged with `tags` items."""
    formatted_tags = [
        u"{k}:{v}".format(k=key, v=value) for key, value in tags.items()
    ]
    dog_stats_api.increment(DATADOG_FEEDBACK_METRIC, tags=formatted_tags)
def submit_feedback(request):
    """
    Create a new user-requested ticket, currently implemented with Zendesk.
    If feedback submission is not enabled, any request will raise `Http404`.
    If any configuration parameter (`ZENDESK_URL`, `ZENDESK_USER`, or
    `ZENDESK_API_KEY`) is missing, any request will raise an `Exception`.
    The request must be a POST request specifying `subject` and `details`.
    If the user is not authenticated, the request must also specify `name` and
    `email`. If the user is authenticated, the `name` and `email` will be
    populated from the user's information. If any required parameter is
    missing, a 400 error will be returned indicating which field is missing and
    providing an error message. If Zendesk ticket creation fails, 500 error
    will be returned with no body; if ticket creation succeeds, an empty
    successful response (200) will be returned.
    """
    # Feature flag and configuration sanity checks first.
    if not settings.FEATURES.get('ENABLE_FEEDBACK_SUBMISSION', False):
        raise Http404()
    if request.method != "POST":
        return HttpResponseNotAllowed(["POST"])
    if (
            not settings.ZENDESK_URL or
            not settings.ZENDESK_USER or
            not settings.ZENDESK_API_KEY
    ):
        raise Exception("Zendesk enabled but not configured")

    def build_error_response(status_code, field, err_msg):
        # JSON body naming the offending field, for client-side display.
        return HttpResponse(json.dumps({"field": field, "error": err_msg}), status=status_code)

    additional_info = {}
    # Anonymous users must also supply their name and email address.
    required_fields = ["subject", "details"]
    if not request.user.is_authenticated():
        required_fields += ["name", "email"]
    required_field_errs = {
        "subject": "Please provide a subject.",
        "details": "Please provide details.",
        "name": "Please provide your name.",
        "email": "Please provide a valid e-mail.",
    }
    for field in required_fields:
        if field not in request.POST or not request.POST[field]:
            return build_error_response(400, field, required_field_errs[field])
    subject = request.POST["subject"]
    details = request.POST["details"]
    # Optional categorisation tags forwarded to Zendesk/Datadog.
    tags = dict(
        [(tag, request.POST[tag]) for tag in ["issue_type", "course_id"] if tag in request.POST]
    )
    if request.user.is_authenticated():
        realname = request.user.profile.name
        email = request.user.email
        additional_info["username"] = request.user.username
    else:
        realname = request.POST["name"]
        email = request.POST["email"]
        try:
            validate_email(email)
        except ValidationError:
            return build_error_response(400, "email", required_field_errs["email"])
    # Request metadata recorded as a private ticket comment.
    for header, pretty in [
            ("HTTP_REFERER", "Page"),
            ("HTTP_USER_AGENT", "Browser"),
            ("REMOTE_ADDR", "Client IP"),
            ("SERVER_NAME", "Host")
    ]:
        additional_info[pretty] = request.META.get(header)
    success = _record_feedback_in_zendesk(realname, email, subject, details, tags, additional_info)
    _record_feedback_in_datadog(tags)
    return HttpResponse(status=(200 if success else 500))
def info(request):
    """Info page (link from main header); renders the static info template."""
    return render_to_response("info.html", {})
# From http://djangosnippets.org/snippets/1042/
def parse_accept_header(accept):
    """Parse the Accept header *accept*.

    Returns a list of (media_type, media_params, q_value) triples, ordered
    by descending q value. media_params is a tuple of (key, value) pairs
    for any parameters other than "q".
    """
    result = []
    for media_range in accept.split(","):
        parts = media_range.split(";")
        media_type = parts.pop(0)
        media_params = []
        q = 1.0  # default quality per RFC 7231 section 5.3.1
        for part in parts:
            (key, value) = part.lstrip().split("=", 1)
            if key == "q":
                q = float(value)
            else:
                media_params.append((key, value))
        result.append((media_type, tuple(media_params), q))
    # Sort by descending q. The original used
    # `result.sort(lambda x, y: -cmp(x[2], y[2]))`, which only works on
    # Python 2 (`cmp` and cmp-style sort were removed in Python 3); a key
    # function is equivalent (both sorts are stable) and portable.
    result.sort(key=lambda item: -item[2])
    return result
def accepts(request, media_type):
    """Return whether this request has an Accept header that matches type"""
    parsed = parse_accept_header(request.META.get("HTTP_ACCEPT", ""))
    return any(mtype == media_type for (mtype, _params, _q) in parsed)
| agpl-3.0 |
mmrose/pygal | pygal/test/test_colors.py | 6 | 22024 | # -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2015 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
"""Color utility functions tests"""
from __future__ import division
from pygal.colors import (
parse_color, unparse_color,
rgb_to_hsl, hsl_to_rgb, darken, lighten, saturate, desaturate, rotate)
def test_parse_color():
    """Test color parse function"""
    # Covers every accepted notation: #rgb, #rgba, #rrggbb, #rrggbbaa,
    # rgb() and rgba(); each result is (r, g, b, alpha, detected-format).
    assert parse_color('#123') == (17, 34, 51, 1., '#rgb')
    assert parse_color('#cdf') == (204, 221, 255, 1., '#rgb')
    assert parse_color('#a3d7') == (170, 51, 221, 119 / 255, '#rgba')
    assert parse_color('#584b4f') == (88, 75, 79, 1., '#rrggbb')
    assert parse_color('#8cbe22') == (140, 190, 34, 1., '#rrggbb')
    assert parse_color('#16cbf055') == (22, 203, 240, 1 / 3, '#rrggbbaa')
    assert parse_color('rgb(134, 67, 216)') == (134, 67, 216, 1., 'rgb')
    assert parse_color('rgb(0, 111, 222)') == (0, 111, 222, 1., 'rgb')
    assert parse_color('rgba(237, 83, 48, .8)') == (237, 83, 48, .8, 'rgba')
    assert parse_color('rgba(0, 1, 0, 0.1223)') == (0, 1, 0, .1223, 'rgba')


def test_unparse_color():
    """Test color unparse function"""
    # Exact inverse of test_parse_color: same tuples back to strings.
    assert unparse_color(17, 34, 51, 1., '#rgb') == '#123'
    assert unparse_color(204, 221, 255, 1., '#rgb') == '#cdf'
    assert unparse_color(170, 51, 221, 119 / 255, '#rgba') == '#a3d7'
    assert unparse_color(88, 75, 79, 1., '#rrggbb') == '#584b4f'
    assert unparse_color(140, 190, 34, 1., '#rrggbb') == '#8cbe22'
    assert unparse_color(22, 203, 240, 1 / 3, '#rrggbbaa') == '#16cbf055'
    assert unparse_color(134, 67, 216, 1., 'rgb') == 'rgb(134, 67, 216)'
    assert unparse_color(0, 111, 222, 1., 'rgb') == 'rgb(0, 111, 222)'
    assert unparse_color(237, 83, 48, .8, 'rgba') == 'rgba(237, 83, 48, 0.8)'
    assert unparse_color(0, 1, 0, .1223, 'rgba') == 'rgba(0, 1, 0, 0.1223)'
def test_darken():
    """Test darken color function"""
    # Second argument is the lightness decrease in percentage points;
    # alpha channels pass through untouched.
    assert darken('#800', 20) == '#200'
    assert darken('#800e', 20) == '#200e'
    assert darken('#800', 0) == '#800'
    assert darken('#ffffff', 10) == '#e6e6e6'
    assert darken('#000000', 10) == '#000000'
    assert darken('#f3148a', 25) == '#810747'
    assert darken('#f3148aab', 25) == '#810747ab'
    assert darken('#121212', 1) == '#0f0f0f'
    assert darken('#999999', 100) == '#000000'
    assert darken('#99999999', 100) == '#00000099'
    assert darken('#1479ac', 8) == '#105f87'
    assert darken('rgb(136, 0, 0)', 20) == 'rgb(34, 0, 0)'
    assert darken('rgba(20, 121, 172, .13)', 8) == 'rgba(16, 95, 135, 0.13)'


def test_lighten():
    """Test lighten color function"""
    # Mirror of darken: lightness increase, clamped at white.
    assert lighten('#800', 20) == '#e00'
    assert lighten('#800', 0) == '#800'
    assert lighten('#ffffff', 10) == '#ffffff'
    assert lighten('#000000', 10) == '#1a1a1a'
    assert lighten('#f3148a', 25) == '#f98dc6'
    assert lighten('#121212', 1) == '#151515'
    assert lighten('#999999', 100) == '#ffffff'
    assert lighten('#1479ac', 8) == '#1893d1'


def test_saturate():
    """Test color saturation function"""
    # Pure black/white have no hue, so saturation changes are no-ops.
    assert saturate('#000', 20) == '#000'
    assert saturate('#fff', 20) == '#fff'
    assert saturate('#8a8', 100) == '#3f3'
    assert saturate('#855', 20) == '#9e3f3f'


def test_desaturate():
    """Test color desaturation function"""
    assert desaturate('#000', 20) == '#000'
    assert desaturate('#fff', 20) == '#fff'
    assert desaturate('#8a8', 100) == '#999'
    assert desaturate('#855', 20) == '#726b6b'


def test_rotate():
    """Test color rotation function"""
    # Hue rotation in degrees; full turns (±360) and 0 are identities.
    assert rotate('#000', 45) == '#000'
    assert rotate('#fff', 45) == '#fff'
    assert rotate('#811', 45) == '#886a11'
    assert rotate('#8a8', 360) == '#8a8'
    assert rotate('#8a8', 0) == '#8a8'
    assert rotate('#8a8', -360) == '#8a8'
def test_hsl_to_rgb_part_0():
    """Test hsl to rgb color function"""
    # Primary and secondary hues at full saturation, 50% lightness.
    assert hsl_to_rgb(0, 100, 50) == (255, 0, 0)
    assert hsl_to_rgb(60, 100, 50) == (255, 255, 0)
    assert hsl_to_rgb(120, 100, 50) == (0, 255, 0)
    assert hsl_to_rgb(180, 100, 50) == (0, 255, 255)
    assert hsl_to_rgb(240, 100, 50) == (0, 0, 255)
    assert hsl_to_rgb(300, 100, 50) == (255, 0, 255)


def test_rgb_to_hsl_part_0():
    """Test rgb to hsl color function"""
    assert rgb_to_hsl(255, 0, 0) == (0, 100, 50)
    assert rgb_to_hsl(255, 255, 0) == (60, 100, 50)
    assert rgb_to_hsl(0, 255, 0) == (120, 100, 50)
    assert rgb_to_hsl(0, 255, 255) == (180, 100, 50)
    assert rgb_to_hsl(0, 0, 255) == (240, 100, 50)
    assert rgb_to_hsl(255, 0, 255) == (300, 100, 50)


def test_hsl_to_rgb_part_1():
    """Test hsl to rgb color function"""
    # Negative hues alias the colour wheel: h and h + 360 are the same hue.
    assert hsl_to_rgb(-360, 100, 50) == (255, 0, 0)
    assert hsl_to_rgb(-300, 100, 50) == (255, 255, 0)
    assert hsl_to_rgb(-240, 100, 50) == (0, 255, 0)
    assert hsl_to_rgb(-180, 100, 50) == (0, 255, 255)
    assert hsl_to_rgb(-120, 100, 50) == (0, 0, 255)
    assert hsl_to_rgb(-60, 100, 50) == (255, 0, 255)


def test_rgb_to_hsl_part_1():
    """Test rgb to hsl color function"""
    # Intentionally disabled: the reverse conversion returns the canonical
    # hue, so out-of-range aliases cannot round-trip.
    # assert rgb_to_hsl(255, 0, 0) == (-360, 100, 50)
    # assert rgb_to_hsl(255, 255, 0) == (-300, 100, 50)
    # assert rgb_to_hsl(0, 255, 0) == (-240, 100, 50)
    # assert rgb_to_hsl(0, 255, 255) == (-180, 100, 50)
    # assert rgb_to_hsl(0, 0, 255) == (-120, 100, 50)
    # assert rgb_to_hsl(255, 0, 255) == (-60, 100, 50)
    pass
def test_hsl_to_rgb_part_2():
    """Test hsl to rgb color function"""
    # Hues above 360 wrap around the wheel.
    assert hsl_to_rgb(360, 100, 50) == (255, 0, 0)
    assert hsl_to_rgb(420, 100, 50) == (255, 255, 0)
    assert hsl_to_rgb(480, 100, 50) == (0, 255, 0)
    assert hsl_to_rgb(540, 100, 50) == (0, 255, 255)
    assert hsl_to_rgb(600, 100, 50) == (0, 0, 255)
    assert hsl_to_rgb(660, 100, 50) == (255, 0, 255)


def test_rgb_to_hsl_part_2():
    """Test rgb to hsl color function"""
    # Disabled for the same reason as part_1: hue is canonicalised.
    # assert rgb_to_hsl(255, 0, 0) == (360, 100, 50)
    # assert rgb_to_hsl(255, 255, 0) == (420, 100, 50)
    # assert rgb_to_hsl(0, 255, 0) == (480, 100, 50)
    # assert rgb_to_hsl(0, 255, 255) == (540, 100, 50)
    # assert rgb_to_hsl(0, 0, 255) == (600, 100, 50)
    # assert rgb_to_hsl(255, 0, 255) == (660, 100, 50)
    pass


def test_hsl_to_rgb_part_3():
    """Test hsl to rgb color function"""
    # Arbitrarily large positive/negative hues still reduce modulo 360.
    assert hsl_to_rgb(6120, 100, 50) == (255, 0, 0)
    assert hsl_to_rgb(-9660, 100, 50) == (255, 255, 0)
    assert hsl_to_rgb(99840, 100, 50) == (0, 255, 0)
    assert hsl_to_rgb(-900, 100, 50) == (0, 255, 255)
    assert hsl_to_rgb(-104880, 100, 50) == (0, 0, 255)
    assert hsl_to_rgb(2820, 100, 50) == (255, 0, 255)


def test_rgb_to_hsl_part_3():
    """Test rgb to hsl color function"""
    # Disabled: canonical hue cannot reproduce the aliases used above.
    # assert rgb_to_hsl(255, 0, 0) == (6120, 100, 50)
    # assert rgb_to_hsl(255, 255, 0) == (-9660, 100, 50)
    # assert rgb_to_hsl(0, 255, 0) == (99840, 100, 50)
    # assert rgb_to_hsl(0, 255, 255) == (-900, 100, 50)
    # assert rgb_to_hsl(0, 0, 255) == (-104880, 100, 50)
    # assert rgb_to_hsl(255, 0, 255) == (2820, 100, 50)
    pass
def test_hsl_to_rgb_part_4():
    """Test hsl to rgb color function"""
    # Hue sweep 0..120 in 12-degree steps (red through green).
    assert hsl_to_rgb(0, 100, 50) == (255, 0, 0)
    assert hsl_to_rgb(12, 100, 50) == (255, 51, 0)
    assert hsl_to_rgb(24, 100, 50) == (255, 102, 0)
    assert hsl_to_rgb(36, 100, 50) == (255, 153, 0)
    assert hsl_to_rgb(48, 100, 50) == (255, 204, 0)
    assert hsl_to_rgb(60, 100, 50) == (255, 255, 0)
    assert hsl_to_rgb(72, 100, 50) == (204, 255, 0)
    assert hsl_to_rgb(84, 100, 50) == (153, 255, 0)
    assert hsl_to_rgb(96, 100, 50) == (102, 255, 0)
    assert hsl_to_rgb(108, 100, 50) == (51, 255, 0)
    assert hsl_to_rgb(120, 100, 50) == (0, 255, 0)


def test_rgb_to_hsl_part_4():
    """Test rgb to hsl color function"""
    assert rgb_to_hsl(255, 0, 0) == (0, 100, 50)
    assert rgb_to_hsl(255, 51, 0) == (12, 100, 50)
    assert rgb_to_hsl(255, 102, 0) == (24, 100, 50)
    assert rgb_to_hsl(255, 153, 0) == (36, 100, 50)
    assert rgb_to_hsl(255, 204, 0) == (48, 100, 50)
    assert rgb_to_hsl(255, 255, 0) == (60, 100, 50)
    assert rgb_to_hsl(204, 255, 0) == (72, 100, 50)
    assert rgb_to_hsl(153, 255, 0) == (84, 100, 50)
    assert rgb_to_hsl(102, 255, 0) == (96, 100, 50)
    assert rgb_to_hsl(51, 255, 0) == (108, 100, 50)
    assert rgb_to_hsl(0, 255, 0) == (120, 100, 50)


def test_hsl_to_rgb_part_5():
    """Test hsl to rgb color function"""
    # Hue sweep 120..240 (green through blue).
    assert hsl_to_rgb(120, 100, 50) == (0, 255, 0)
    assert hsl_to_rgb(132, 100, 50) == (0, 255, 51)
    assert hsl_to_rgb(144, 100, 50) == (0, 255, 102)
    assert hsl_to_rgb(156, 100, 50) == (0, 255, 153)
    assert hsl_to_rgb(168, 100, 50) == (0, 255, 204)
    assert hsl_to_rgb(180, 100, 50) == (0, 255, 255)
    assert hsl_to_rgb(192, 100, 50) == (0, 204, 255)
    assert hsl_to_rgb(204, 100, 50) == (0, 153, 255)
    assert hsl_to_rgb(216, 100, 50) == (0, 102, 255)
    assert hsl_to_rgb(228, 100, 50) == (0, 51, 255)
    assert hsl_to_rgb(240, 100, 50) == (0, 0, 255)


def test_rgb_to_hsl_part_5():
    """Test rgb to hsl color function"""
    assert rgb_to_hsl(0, 255, 0) == (120, 100, 50)
    assert rgb_to_hsl(0, 255, 51) == (132, 100, 50)
    assert rgb_to_hsl(0, 255, 102) == (144, 100, 50)
    assert rgb_to_hsl(0, 255, 153) == (156, 100, 50)
    assert rgb_to_hsl(0, 255, 204) == (168, 100, 50)
    assert rgb_to_hsl(0, 255, 255) == (180, 100, 50)
    assert rgb_to_hsl(0, 204, 255) == (192, 100, 50)
    assert rgb_to_hsl(0, 153, 255) == (204, 100, 50)
    assert rgb_to_hsl(0, 102, 255) == (216, 100, 50)
    assert rgb_to_hsl(0, 51, 255) == (228, 100, 50)
    assert rgb_to_hsl(0, 0, 255) == (240, 100, 50)
def test_hsl_to_rgb_part_6():
    """Test hsl to rgb color function"""
    # Hue sweep 240..360 (blue back to red).
    assert hsl_to_rgb(240, 100, 50) == (0, 0, 255)
    assert hsl_to_rgb(252, 100, 50) == (51, 0, 255)
    assert hsl_to_rgb(264, 100, 50) == (102, 0, 255)
    assert hsl_to_rgb(276, 100, 50) == (153, 0, 255)
    assert hsl_to_rgb(288, 100, 50) == (204, 0, 255)
    assert hsl_to_rgb(300, 100, 50) == (255, 0, 255)
    assert hsl_to_rgb(312, 100, 50) == (255, 0, 204)
    assert hsl_to_rgb(324, 100, 50) == (255, 0, 153)
    assert hsl_to_rgb(336, 100, 50) == (255, 0, 102)
    assert hsl_to_rgb(348, 100, 50) == (255, 0, 51)
    assert hsl_to_rgb(360, 100, 50) == (255, 0, 0)


def test_rgb_to_hsl_part_6():
    """Test rgb to hsl color function"""
    assert rgb_to_hsl(0, 0, 255) == (240, 100, 50)
    assert rgb_to_hsl(51, 0, 255) == (252, 100, 50)
    assert rgb_to_hsl(102, 0, 255) == (264, 100, 50)
    assert rgb_to_hsl(153, 0, 255) == (276, 100, 50)
    assert rgb_to_hsl(204, 0, 255) == (288, 100, 50)
    assert rgb_to_hsl(255, 0, 255) == (300, 100, 50)
    assert rgb_to_hsl(255, 0, 204) == (312, 100, 50)
    assert rgb_to_hsl(255, 0, 153) == (324, 100, 50)
    assert rgb_to_hsl(255, 0, 102) == (336, 100, 50)
    assert rgb_to_hsl(255, 0, 51) == (348, 100, 50)
    # Disabled: pure red maps to the canonical hue 0, not 360.
    # assert rgb_to_hsl(255, 0, 0) == (360, 100, 50)


def test_hsl_to_rgb_part_7():
    """Test hsl to rgb color function"""
    # Saturation sweep at hue 0 (greyish red to pure red).
    assert hsl_to_rgb(0, 20, 50) == (153, 102, 102)
    assert hsl_to_rgb(0, 60, 50) == (204, 51, 51)
    assert hsl_to_rgb(0, 100, 50) == (255, 0, 0)


def test_rgb_to_hsl_part_7():
    """Test rgb to hsl color function"""
    assert rgb_to_hsl(153, 102, 102) == (0, 20, 50)
    assert rgb_to_hsl(204, 51, 51) == (0, 60, 50)
    assert rgb_to_hsl(255, 0, 0) == (0, 100, 50)
def test_hsl_to_rgb_part_8():
    """Test hsl to rgb color function"""
    # Saturation sweeps at each secondary/primary hue (parts 8-12).
    assert hsl_to_rgb(60, 20, 50) == (153, 153, 102)
    assert hsl_to_rgb(60, 60, 50) == (204, 204, 51)
    assert hsl_to_rgb(60, 100, 50) == (255, 255, 0)


def test_rgb_to_hsl_part_8():
    """Test rgb to hsl color function"""
    assert rgb_to_hsl(153, 153, 102) == (60, 20, 50)
    assert rgb_to_hsl(204, 204, 51) == (60, 60, 50)
    assert rgb_to_hsl(255, 255, 0) == (60, 100, 50)


def test_hsl_to_rgb_part_9():
    """Test hsl to rgb color function"""
    assert hsl_to_rgb(120, 20, 50) == (102, 153, 102)
    assert hsl_to_rgb(120, 60, 50) == (51, 204, 51)
    assert hsl_to_rgb(120, 100, 50) == (0, 255, 0)


def test_rgb_to_hsl_part_9():
    """Test rgb to hsl color function"""
    assert rgb_to_hsl(102, 153, 102) == (120, 20, 50)
    assert rgb_to_hsl(51, 204, 51) == (120, 60, 50)
    assert rgb_to_hsl(0, 255, 0) == (120, 100, 50)


def test_hsl_to_rgb_part_10():
    """Test hsl to rgb color function"""
    assert hsl_to_rgb(180, 20, 50) == (102, 153, 153)
    assert hsl_to_rgb(180, 60, 50) == (51, 204, 204)
    assert hsl_to_rgb(180, 100, 50) == (0, 255, 255)


def test_rgb_to_hsl_part_10():
    """Test rgb to hsl color function"""
    assert rgb_to_hsl(102, 153, 153) == (180, 20, 50)
    assert rgb_to_hsl(51, 204, 204) == (180, 60, 50)
    assert rgb_to_hsl(0, 255, 255) == (180, 100, 50)


def test_hsl_to_rgb_part_11():
    """Test hsl to rgb color function"""
    assert hsl_to_rgb(240, 20, 50) == (102, 102, 153)
    assert hsl_to_rgb(240, 60, 50) == (51, 51, 204)
    assert hsl_to_rgb(240, 100, 50) == (0, 0, 255)


def test_rgb_to_hsl_part_11():
    """Test rgb to hsl color function"""
    assert rgb_to_hsl(102, 102, 153) == (240, 20, 50)
    assert rgb_to_hsl(51, 51, 204) == (240, 60, 50)
    assert rgb_to_hsl(0, 0, 255) == (240, 100, 50)


def test_hsl_to_rgb_part_12():
    """Test hsl to rgb color function"""
    assert hsl_to_rgb(300, 20, 50) == (153, 102, 153)
    assert hsl_to_rgb(300, 60, 50) == (204, 51, 204)
    assert hsl_to_rgb(300, 100, 50) == (255, 0, 255)


def test_rgb_to_hsl_part_12():
    """Test rgb to hsl color function"""
    assert rgb_to_hsl(153, 102, 153) == (300, 20, 50)
    assert rgb_to_hsl(204, 51, 204) == (300, 60, 50)
    assert rgb_to_hsl(255, 0, 255) == (300, 100, 50)
def test_hsl_to_rgb_part_13():
"""Test hsl to rgb color function"""
assert hsl_to_rgb(0, 100, 0) == (0, 0, 0)
assert hsl_to_rgb(0, 100, 10) == (51, 0, 0)
assert hsl_to_rgb(0, 100, 20) == (102, 0, 0)
assert hsl_to_rgb(0, 100, 30) == (153, 0, 0)
assert hsl_to_rgb(0, 100, 40) == (204, 0, 0)
assert hsl_to_rgb(0, 100, 50) == (255, 0, 0)
assert hsl_to_rgb(0, 100, 60) == (255, 51, 51)
assert hsl_to_rgb(0, 100, 70) == (255, 102, 102)
assert hsl_to_rgb(0, 100, 80) == (255, 153, 153)
assert hsl_to_rgb(0, 100, 90) == (255, 204, 204)
assert hsl_to_rgb(0, 100, 100) == (255, 255, 255)
def test_rgb_to_hsl_part_13():
"""Test rgb to hsl color function"""
assert rgb_to_hsl(0, 0, 0) == (0, 0, 0)
assert rgb_to_hsl(51, 0, 0) == (0, 100, 10)
assert rgb_to_hsl(102, 0, 0) == (0, 100, 20)
assert rgb_to_hsl(153, 0, 0) == (0, 100, 30)
assert rgb_to_hsl(204, 0, 0) == (0, 100, 40)
assert rgb_to_hsl(255, 0, 0) == (0, 100, 50)
assert rgb_to_hsl(255, 51, 51) == (0, 100, 60)
assert rgb_to_hsl(255, 102, 102) == (0, 100, 70)
assert rgb_to_hsl(255, 153, 153) == (0, 100, 80)
assert rgb_to_hsl(255, 204, 204) == (0, 100, 90)
assert rgb_to_hsl(255, 255, 255) == (0, 0, 100)
def test_hsl_to_rgb_part_14():
"""Test hsl to rgb color function"""
assert hsl_to_rgb(60, 100, 0) == (0, 0, 0)
assert hsl_to_rgb(60, 100, 10) == (51, 51, 0)
assert hsl_to_rgb(60, 100, 20) == (102, 102, 0)
assert hsl_to_rgb(60, 100, 30) == (153, 153, 0)
assert hsl_to_rgb(60, 100, 40) == (204, 204, 0)
assert hsl_to_rgb(60, 100, 50) == (255, 255, 0)
assert hsl_to_rgb(60, 100, 60) == (255, 255, 51)
assert hsl_to_rgb(60, 100, 70) == (255, 255, 102)
assert hsl_to_rgb(60, 100, 80) == (255, 255, 153)
assert hsl_to_rgb(60, 100, 90) == (255, 255, 204)
assert hsl_to_rgb(60, 100, 100) == (255, 255, 255)
def test_rgb_to_hsl_part_14():
"""Test rgb to hsl color function"""
# assert rgb_to_hsl(0, 0, 0) == (60, 100, 0)
assert rgb_to_hsl(51, 51, 0) == (60, 100, 10)
assert rgb_to_hsl(102, 102, 0) == (60, 100, 20)
assert rgb_to_hsl(153, 153, 0) == (60, 100, 30)
assert rgb_to_hsl(204, 204, 0) == (60, 100, 40)
assert rgb_to_hsl(255, 255, 0) == (60, 100, 50)
assert rgb_to_hsl(255, 255, 51) == (60, 100, 60)
assert rgb_to_hsl(255, 255, 102) == (60, 100, 70)
assert rgb_to_hsl(255, 255, 153) == (60, 100, 80)
assert rgb_to_hsl(255, 255, 204) == (60, 100, 90)
# assert rgb_to_hsl(255, 255, 255) == (60, 100, 100)
def test_hsl_to_rgb_part_15():
"""Test hsl to rgb color function"""
assert hsl_to_rgb(120, 100, 0) == (0, 0, 0)
assert hsl_to_rgb(120, 100, 10) == (0, 51, 0)
assert hsl_to_rgb(120, 100, 20) == (0, 102, 0)
assert hsl_to_rgb(120, 100, 30) == (0, 153, 0)
assert hsl_to_rgb(120, 100, 40) == (0, 204, 0)
assert hsl_to_rgb(120, 100, 50) == (0, 255, 0)
assert hsl_to_rgb(120, 100, 60) == (51, 255, 51)
assert hsl_to_rgb(120, 100, 70) == (102, 255, 102)
assert hsl_to_rgb(120, 100, 80) == (153, 255, 153)
assert hsl_to_rgb(120, 100, 90) == (204, 255, 204)
assert hsl_to_rgb(120, 100, 100) == (255, 255, 255)
def test_rgb_to_hsl_part_15():
    """Test rgb to hsl color function"""
    # Achromatic endpoints (black/white) skipped: hue 120 is unrecoverable.
    expected_by_rgb = [
        ((0, 51, 0), 10),
        ((0, 102, 0), 20),
        ((0, 153, 0), 30),
        ((0, 204, 0), 40),
        ((0, 255, 0), 50),
        ((51, 255, 51), 60),
        ((102, 255, 102), 70),
        ((153, 255, 153), 80),
        ((204, 255, 204), 90),
    ]
    for (red, green, blue), lightness in expected_by_rgb:
        assert rgb_to_hsl(red, green, blue) == (120, 100, lightness)
def test_hsl_to_rgb_part_16():
    """Test hsl to rgb color function"""
    # Pure cyan hue (180) at full saturation, sweeping lightness 0..100%.
    expected_by_lightness = [
        (0, (0, 0, 0)),
        (10, (0, 51, 51)),
        (20, (0, 102, 102)),
        (30, (0, 153, 153)),
        (40, (0, 204, 204)),
        (50, (0, 255, 255)),
        (60, (51, 255, 255)),
        (70, (102, 255, 255)),
        (80, (153, 255, 255)),
        (90, (204, 255, 255)),
        (100, (255, 255, 255)),
    ]
    for lightness, rgb in expected_by_lightness:
        assert hsl_to_rgb(180, 100, lightness) == rgb
def test_rgb_to_hsl_part_16():
    """Test rgb to hsl color function"""
    # Achromatic endpoints (black/white) skipped: hue 180 is unrecoverable.
    expected_by_rgb = [
        ((0, 51, 51), 10),
        ((0, 102, 102), 20),
        ((0, 153, 153), 30),
        ((0, 204, 204), 40),
        ((0, 255, 255), 50),
        ((51, 255, 255), 60),
        ((102, 255, 255), 70),
        ((153, 255, 255), 80),
        ((204, 255, 255), 90),
    ]
    for (red, green, blue), lightness in expected_by_rgb:
        assert rgb_to_hsl(red, green, blue) == (180, 100, lightness)
def test_hsl_to_rgb_part_17():
    """Test hsl to rgb color function"""
    # Pure blue hue (240) at full saturation, sweeping lightness 0..100%.
    expected_by_lightness = [
        (0, (0, 0, 0)),
        (10, (0, 0, 51)),
        (20, (0, 0, 102)),
        (30, (0, 0, 153)),
        (40, (0, 0, 204)),
        (50, (0, 0, 255)),
        (60, (51, 51, 255)),
        (70, (102, 102, 255)),
        (80, (153, 153, 255)),
        (90, (204, 204, 255)),
        (100, (255, 255, 255)),
    ]
    for lightness, rgb in expected_by_lightness:
        assert hsl_to_rgb(240, 100, lightness) == rgb
def test_rgb_to_hsl_part_17():
    """Test rgb to hsl color function"""
    # Achromatic endpoints (black/white) skipped: hue 240 is unrecoverable.
    expected_by_rgb = [
        ((0, 0, 51), 10),
        ((0, 0, 102), 20),
        ((0, 0, 153), 30),
        ((0, 0, 204), 40),
        ((0, 0, 255), 50),
        ((51, 51, 255), 60),
        ((102, 102, 255), 70),
        ((153, 153, 255), 80),
        ((204, 204, 255), 90),
    ]
    for (red, green, blue), lightness in expected_by_rgb:
        assert rgb_to_hsl(red, green, blue) == (240, 100, lightness)
def test_hsl_to_rgb_part_18():
    """Test hsl to rgb color function"""
    # Pure magenta hue (300) at full saturation, sweeping lightness 0..100%.
    expected_by_lightness = [
        (0, (0, 0, 0)),
        (10, (51, 0, 51)),
        (20, (102, 0, 102)),
        (30, (153, 0, 153)),
        (40, (204, 0, 204)),
        (50, (255, 0, 255)),
        (60, (255, 51, 255)),
        (70, (255, 102, 255)),
        (80, (255, 153, 255)),
        (90, (255, 204, 255)),
        (100, (255, 255, 255)),
    ]
    for lightness, rgb in expected_by_lightness:
        assert hsl_to_rgb(300, 100, lightness) == rgb
def test_rgb_to_hsl_part_18():
    """Test rgb to hsl color function"""
    # Achromatic endpoints (black/white) skipped: hue 300 is unrecoverable.
    expected_by_rgb = [
        ((51, 0, 51), 10),
        ((102, 0, 102), 20),
        ((153, 0, 153), 30),
        ((204, 0, 204), 40),
        ((255, 0, 255), 50),
        ((255, 51, 255), 60),
        ((255, 102, 255), 70),
        ((255, 153, 255), 80),
        ((255, 204, 255), 90),
    ]
    for (red, green, blue), lightness in expected_by_rgb:
        assert rgb_to_hsl(red, green, blue) == (300, 100, lightness)
| lgpl-3.0 |
EnviroCentre/jython-upgrade | jython/lib/site-packages/pkg_resources/_vendor/packaging/_structures.py | 906 | 1809 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
class Infinity(object):
    """Sentinel that orders strictly greater than every other object.

    Used as an upper bound when comparing version components.
    """
    def __repr__(self):
        return "Infinity"
    def __hash__(self):
        return hash(repr(self))
    def __lt__(self, other):
        # Nothing is greater than Infinity.
        return False
    def __le__(self, other):
        return False
    def __eq__(self, other):
        # Equal only to the (sole) other instance of this class.
        return isinstance(other, self.__class__)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __gt__(self, other):
        return True
    def __ge__(self, other):
        return True
    def __neg__(self):
        return NegativeInfinity
# Replace the class with its single instance; the type is never needed again.
Infinity = Infinity()
class NegativeInfinity(object):
    """Sentinel that orders strictly less than every other object.

    Used as a lower bound when comparing version components.
    """
    def __repr__(self):
        return "-Infinity"
    def __hash__(self):
        return hash(repr(self))
    def __lt__(self, other):
        # Nothing is smaller than NegativeInfinity.
        return True
    def __le__(self, other):
        return True
    def __eq__(self, other):
        # Equal only to the (sole) other instance of this class.
        return isinstance(other, self.__class__)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __gt__(self, other):
        return False
    def __ge__(self, other):
        return False
    def __neg__(self):
        return Infinity
# Replace the class with its single instance; the type is never needed again.
NegativeInfinity = NegativeInfinity()
| mit |
solashirai/edx-platform | lms/djangoapps/course_blocks/transformers/user_partitions.py | 13 | 10294 | """
User Partitions Transformer
"""
from openedx.core.lib.block_structure.transformer import BlockStructureTransformer
from .split_test import SplitTestTransformer
from .utils import get_field_on_block
class UserPartitionTransformer(BlockStructureTransformer):
    """
    A transformer that enforces the group access rules on course blocks,
    by honoring their user_partitions and group_access fields, and
    removing all blocks in the block structure to which the user does
    not have group access.
    Staff users are *not* exempted from user partition pathways.
    """
    # Bump when the collected data format changes so cached collections
    # are invalidated.
    VERSION = 1
    @classmethod
    def name(cls):
        """
        Unique identifier for the transformer's class;
        same identifier used in setup.py.
        """
        return "user_partitions"
    @classmethod
    def collect(cls, block_structure):
        """
        Computes any information for each XBlock that's necessary to
        execute this transformer's transform method.
        Arguments:
            block_structure (BlockStructureCollectedData)
        """
        # First have the split test transformer setup its group access
        # data for each block.
        SplitTestTransformer.collect(block_structure)
        # Because user partitions are course-wide, only store data for
        # them on the root block.
        root_block = block_structure.get_xblock(block_structure.root_block_usage_key)
        user_partitions = getattr(root_block, 'user_partitions', []) or []
        block_structure.set_transformer_data(cls, 'user_partitions', user_partitions)
        # If there are no user partitions, this transformation is a
        # no-op, so there is nothing to collect.
        if not user_partitions:
            return
        # For each block, compute merged group access. Because this is a
        # topological sort, we know a block's parents are guaranteed to
        # already have merged group access computed before the block
        # itself.
        for block_key in block_structure.topological_traversal():
            xblock = block_structure.get_xblock(block_key)
            parent_keys = block_structure.get_parents(block_key)
            merged_parent_access_list = [
                block_structure.get_transformer_block_field(parent_key, cls, 'merged_group_access')
                for parent_key in parent_keys
            ]
            merged_group_access = _MergedGroupAccess(user_partitions, xblock, merged_parent_access_list)
            block_structure.set_transformer_block_field(block_key, cls, 'merged_group_access', merged_group_access)
    def transform(self, usage_info, block_structure):
        """
        Mutates block_structure based on the given usage_info.

        Removes every block whose merged group access (computed during
        ``collect``) denies the requesting user's partition groups.
        """
        SplitTestTransformer().transform(usage_info, block_structure)
        user_partitions = block_structure.get_transformer_data(self, 'user_partitions')
        # Mirror of the no-op check in collect(): nothing was collected.
        if not user_partitions:
            return
        user_groups = _get_user_partition_groups(
            usage_info.course_key, user_partitions, usage_info.user
        )
        block_structure.remove_block_if(
            lambda block_key: not block_structure.get_transformer_block_field(
                block_key, self, 'merged_group_access'
            ).check_group_access(user_groups)
        )
class _MergedGroupAccess(object):
    """
    A class object to represent the computed access value for a block,
    merged from the inherited values from its ancestors.
    Note: The implementation assumes that the block structure is
    topologically traversed so that all parents' merged accesses are
    computed before a block's.
    How group access restrictions are represented within an XBlock:
    - group_access not defined
      => No group access restrictions.
    - For each partition:
      - partition.id not in group_access
        => All groups have access for this partition
      - group_access[partition_id] is None
        => All groups have access for this partition
      - group_access[partition_id] == []
        => All groups have access for this partition
      - group_access[partition_id] == [group1..groupN]
        => groups 1..N have access for this partition
    We internally represent the restrictions in a simplified way:
    - self._access == {}
      => No group access restrictions.
    - For each partition:
      - partition.id not in _access
        => All groups have access for this partition
      - _access[partition_id] == set()
        => No groups have access for this partition
      - _access[partition_id] == set(group1..groupN)
        => groups 1..N have access for this partition
    Note that a user must have access to all partitions in group_access
    or _access in order to access a block.
    """
    def __init__(self, user_partitions, xblock, merged_parent_access_list):
        """
        Arguments:
            user_partitions (list[UserPartition])
            xblock (XBlock)
            merged_parent_access_list (list[_MergedGroupAccess])
        """
        # { partition.id: set(IDs of groups that can access partition) }
        # If partition id is absent in this dict, no group access
        # restrictions exist for that partition.
        self._access = {}
        # Get the group_access value that is directly set on the xblock.
        # Do not get the inherited value since field inheritance doesn't
        # take a union of them for DAGs.
        xblock_group_access = get_field_on_block(xblock, 'group_access', default_value={})
        for partition in user_partitions:
            # Running list of all groups that have access to this
            # block, computed as a "union" from all parent chains.
            #
            # Set the default to universal access, for the case when
            # there are no parents.
            merged_parent_group_ids = None
            if merged_parent_access_list:
                # Set the default to most restrictive as we iterate
                # through all the parent chains.
                merged_parent_group_ids = set()
                # Loop through parent_access from each parent-chain
                for merged_parent_access in merged_parent_access_list:
                    # pylint: disable=protected-access
                    if partition.id in merged_parent_access._access:
                        # Since this parent has group access
                        # restrictions, merge it with the running list
                        # of parent-introduced restrictions.
                        merged_parent_group_ids.update(merged_parent_access._access[partition.id])
                    else:
                        # Since this parent chain has no group access
                        # restrictions for this partition, allow
                        # unfettered group access for this partition
                        # and don't bother checking the rest of the
                        # parents.
                        merged_parent_group_ids = None
                        break
            # Group access for this partition as stored on the xblock
            xblock_partition_access = set(xblock_group_access.get(partition.id) or []) or None
            # Compute this block's access by intersecting the block's
            # own access with the merged access from its parent chains.
            merged_group_ids = _MergedGroupAccess._intersection(xblock_partition_access, merged_parent_group_ids)
            # Add this partition's access only if group restrictions
            # exist.
            if merged_group_ids is not None:
                self._access[partition.id] = merged_group_ids
    @staticmethod
    def _intersection(*sets):
        """
        Compute an intersection of sets, interpreting None as the
        Universe set.
        This makes __init__ a bit more elegant.
        Arguments:
            sets (list[set or None]), where None represents the Universe
            set.
        Returns:
            set or None, where None represents the Universe set.
        """
        non_universe_sets = [set_ for set_ in sets if set_ is not None]
        if non_universe_sets:
            first, rest = non_universe_sets[0], non_universe_sets[1:]
            return first.intersection(*rest)
        else:
            return None
    def check_group_access(self, user_groups):
        """
        Arguments:
            dict[int: Group]: Given a user, a mapping from user
                partition IDs to the group to which the user belongs in
                each partition.
        Returns:
            bool: Whether said user has group access.
        """
        # NOTE: ``iteritems()`` keeps this Python 2-only, consistent with
        # the rest of this codebase's era.
        for partition_id, allowed_group_ids in self._access.iteritems():
            # If the user is not assigned to a group for this partition,
            # deny access.
            if partition_id not in user_groups:
                return False
            # If the user belongs to one of the allowed groups for this
            # partition, then move and check the next partition.
            elif user_groups[partition_id].id in allowed_group_ids:
                continue
            # Else, deny access.
            else:
                return False
        # The user has access for every partition, grant access.
        return True
def _get_user_partition_groups(course_key, user_partitions, user):
    """
    Collect group ID for each partition in this course for this user.
    Arguments:
        course_key (CourseKey)
        user_partitions (list[UserPartition])
        user (User)
    Returns:
        dict[int: Group]: Mapping from user partitions to the group to
            which the user belongs in each partition. If the user isn't
            in a group for a particular partition, then that partition's
            ID will not be in the dict.
    """
    groups_by_partition = {}
    for user_partition in user_partitions:
        # Each partition's scheme decides which group (if any) the user
        # belongs to for this course.
        user_group = user_partition.scheme.get_group_for_user(
            course_key,
            user,
            user_partition,
        )
        if user_group is None:
            continue
        groups_by_partition[user_partition.id] = user_group
    return groups_by_partition
| agpl-3.0 |
DepthDeluxe/ansible | test/integration/targets/module_utils/library/test.py | 91 | 2819 | #!/usr/bin/python
# Integration-test fixture: exercises every supported style of importing
# from ansible.module_utils.  Each successful import records its payload
# in ``results``, which is reported back to the harness via exit_json.
results = {}
# Test import with no from
import ansible.module_utils.foo0
results['foo0'] = ansible.module_utils.foo0.data
# Test depthful import with no from
import ansible.module_utils.bar0.foo
results['bar0'] = ansible.module_utils.bar0.foo.data
# Test import of module_utils/foo1.py
from ansible.module_utils import foo1
results['foo1'] = foo1.data
# Test import of an identifier inside of module_utils/foo2.py
from ansible.module_utils.foo2 import data
results['foo2'] = data
# Test import of module_utils/bar1/__init__.py
from ansible.module_utils import bar1
results['bar1'] = bar1.data
# Test import of an identifier inside of module_utils/bar2/__init__.py
from ansible.module_utils.bar2 import data
results['bar2'] = data
# Test import of module_utils/baz1/one.py
from ansible.module_utils.baz1 import one
results['baz1'] = one.data
# Test import of an identifier inside of module_utils/baz2/one.py
from ansible.module_utils.baz2.one import data
results['baz2'] = data
# Test import of module_utils/spam1/ham/eggs/__init__.py
from ansible.module_utils.spam1.ham import eggs
results['spam1'] = eggs.data
# Test import of an identifier inside module_utils/spam2/ham/eggs/__init__.py
from ansible.module_utils.spam2.ham.eggs import data
results['spam2'] = data
# Test import of module_utils/spam3/ham/bacon.py
from ansible.module_utils.spam3.ham import bacon
results['spam3'] = bacon.data
# Test import of an identifier inside of module_utils/spam4/ham/bacon.py
from ansible.module_utils.spam4.ham.bacon import data
results['spam4'] = data
# Test import of module_utils.spam5.ham bacon and eggs (modules)
from ansible.module_utils.spam5.ham import bacon, eggs
results['spam5'] = (bacon.data, eggs.data)
# Test import of module_utils.spam6.ham bacon and eggs (identifiers)
from ansible.module_utils.spam6.ham import bacon, eggs
results['spam6'] = (bacon, eggs)
# Test import of module_utils.spam7.ham bacon and eggs (module and identifier)
from ansible.module_utils.spam7.ham import bacon, eggs
results['spam7'] = (bacon.data, eggs)
# Test import of module_utils/spam8/ham/bacon.py and module_utils/spam8/ham/eggs.py separately
from ansible.module_utils.spam8.ham import bacon
from ansible.module_utils.spam8.ham import eggs
results['spam8'] = (bacon.data, eggs)
# Test that import of module_utils/qux1/quux.py using as works
from ansible.module_utils.qux1 import quux as one
results['qux1'] = one.data
# Test that importing qux2/quux.py and qux2/quuz.py using as works
from ansible.module_utils.qux2 import quux as one, quuz as two
results['qux2'] = (one.data, two.data)
# Test depth
from ansible.module_utils.a.b.c.d.e.f.g.h import data
results['abcdefgh'] = data
from ansible.module_utils.basic import AnsibleModule
AnsibleModule(argument_spec=dict()).exit_json(**results)
| gpl-3.0 |
duguhaotian/superscp | src/superscp_tool.py | 1 | 2572 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import sys
import subprocess
from pathmanager import node
from pathmanager import link
from pathmanager import paths
from pathmanager import tool
def superscp(argv):
    """Interactive front-end that copies ``src`` to a directory on a
    registered target host, routing along a pre-registered path.

    argv layout: [prog, subcommand, src, target_ip, target_dir].
    Prompts the user whenever more than one local IP, target node, or
    candidate path exists.
    """
    if len(argv) != 5:
        usage()
        return
    src = argv[2]
    tip = argv[3]
    tdir = argv[4]
    srcnid = None
    ips = tool.get_ips()
    if len(ips) > 1:
        # Several local interfaces: let the user pick the source IP.
        print("---------------------------------------")
        keys = list(ips.keys())  # materialize so the selection can index it
        i = 0
        for key in keys:
            print("%d. %s" % (i, ips[key]))
            i += 1
        print("---------------------------------------")
        select = input("which ip use to scp, select the index: ")
        print("you select ip is : %s" % ips[keys[select]])
        srcnid = keys[select]
    elif len(ips) < 1:
        print("no ether for scp")
        return
    else:
        # list() keeps this working on Python 3 dict views as well.
        srcnid = list(ips.keys())[0]
    srcnid = tool.get_mac(srcnid)
    srcnode = node.get_node(srcnid)
    if srcnode is None:
        print("current host is not register")
        return
    print(srcnode.show())
    tnodes = node.find_by_ip(tip)
    tnode = None
    if len(tnodes) > 1:
        i = 0
        print("***********************************")
        for tmp in tnodes:
            print("%d. %s" % (i, tmp.show()))
            i += 1
        print("***********************************")
        select = input("which target ip use to scp, select the index: ")
        tnode = tnodes[select]
    elif len(tnodes) < 1:
        print("can not find target node by target ip : %s" % tip)
        return
    else:
        tnode = tnodes[0]
    print(tnode.show())
    idxs = paths.search_by_target(srcnode, tnode)
    path = None
    if len(idxs) > 1:
        print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
        i = 0
        for idx in idxs:
            print("%d. %s" % (i, paths.get(idx).show()))
            i += 1
        print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
        select = input("select one path to scp, which index you chose: ")
        # BUG FIX: honor the user's selection; the old code indexed with the
        # loop counter ``i`` (always the last entry), ignoring ``select``.
        path = paths.get(idxs[select])
    elif len(idxs) < 1:
        print("cannot find sourceip: %s to targetip: %s path" % (srcnode.nip, tnode.nip))
        return
    else:
        path = paths.get(idxs[0])
    rdir = os.path.split(os.path.realpath(__file__))[0]
    scpfname = rdir + "/scptool/.data.superscp"
    paths.generate_scp_data(path, scpfname)
    cmdstr = rdir + "/scptool/magic.sh " + src + " " + tdir
    rts = subprocess.check_output(cmdstr, shell=True).decode().strip()
    # BUG FIX: %-format the message; print() does not interpolate "%s".
    print("magic return: %s" % rts)
| apache-2.0 |
Workday/OpenFrame | tools/telemetry/third_party/gsutilz/gslib/addlhelp/anon.py | 28 | 1856 | # -*- coding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional help text for anonymous access."""
from __future__ import absolute_import
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
gsutil users can access publicly readable data without obtaining
credentials. For example, the gs://uspto-pair bucket contains a number
of publicly readable objects, so any user can run the following command
without first obtaining credentials:
gsutil ls gs://uspto-pair/applications/0800401*
Users can similarly download objects they find via the above gsutil ls
command.
If a user without credentials attempts to access protected data using gsutil,
they will be prompted to run "gsutil config" to obtain credentials.
See "gsutil help acls" for more details about data protection.
""")
class CommandOptions(HelpProvider):
    """Additional help text for anonymous access."""
    # Help specification. See help_provider.py for documentation.
    # This topic is informational only, hence the empty subcommand map.
    help_spec = HelpProvider.HelpSpec(
        help_name='anon',
        help_name_aliases=['anonymous', 'public'],
        help_type='additional_help',
        help_one_line_summary='Accessing Public Data Without Credentials',
        help_text=_DETAILED_HELP_TEXT,
        subcommand_help_text={},
    )
| bsd-3-clause |
appsembler/edx-platform | openedx/core/djangoapps/appsembler/sites/api.py | 1 | 10738 | import logging
import requests
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from rest_framework import generics, views, viewsets
from rest_framework import status
from rest_framework.generics import CreateAPIView
from rest_framework.parsers import JSONParser, MultiPartParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from organizations.models import Organization, UserOrganizationMapping
from branding.api import get_base_url
from openedx.core.djangoapps.site_configuration.models import SiteConfiguration
from rest_framework.views import APIView
from openedx.core.lib.api.permissions import ApiKeyHeaderPermission
from openedx.core.lib.api.authentication import (
OAuth2AuthenticationAllowInactiveUser,
)
from openedx.core.djangoapps.appsembler.sites.models import AlternativeDomain
from openedx.core.djangoapps.appsembler.sites.permissions import AMCAdminPermission
from openedx.core.djangoapps.appsembler.sites.serializers import (
SiteConfigurationSerializer,
SiteConfigurationListSerializer,
SiteSerializer,
RegistrationSerializer,
AlternativeDomainSerializer,
)
from openedx.core.djangoapps.appsembler.sites.utils import (
delete_site,
get_customer_files_storage,
to_safe_file_name,
)
log = logging.Logger(__name__)
class SiteViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only listing/retrieval of ``Site`` objects for AMC admins."""
    queryset = Site.objects.all()
    serializer_class = SiteSerializer
    authentication_classes = (OAuth2AuthenticationAllowInactiveUser,)
    permission_classes = (IsAuthenticated, AMCAdminPermission)
    def get_queryset(self):
        # The main platform site (settings.SITE_ID) is never exposed here.
        queryset = Site.objects.exclude(id=settings.SITE_ID)
        user = self.request.user
        if not user.is_superuser:
            # NOTE(review): filtering an M2M with ``organizations=<queryset>``
            # looks like it should be ``organizations__in=...`` -- confirm
            # against the Organization/Site relation before changing.
            queryset = queryset.filter(organizations=user.organizations.all())
        return queryset
class SiteConfigurationViewSet(viewsets.ModelViewSet):
    """CRUD over SiteConfiguration; destroying one tears down its site."""
    queryset = SiteConfiguration.objects.all()
    serializer_class = SiteConfigurationSerializer
    list_serializer_class = SiteConfigurationListSerializer
    create_serializer_class = SiteSerializer
    authentication_classes = (OAuth2AuthenticationAllowInactiveUser,)
    permission_classes = (IsAuthenticated, AMCAdminPermission)
    def get_serializer_class(self):
        # Dispatch on the DRF action; anything unlisted falls back to the
        # default ``serializer_class`` resolution in the parent class.
        serializers_by_action = {
            'list': self.list_serializer_class,
            'create': self.create_serializer_class,
        }
        if self.action in serializers_by_action:
            return serializers_by_action[self.action]
        return super(SiteConfigurationViewSet, self).get_serializer_class()
    def perform_destroy(self, instance):
        # Deleting a configuration removes the whole site it belongs to.
        delete_site(instance.site)
class FileUploadView(views.APIView):
    """Accept a multipart upload and store it on the customer files storage."""
    parser_classes = (MultiPartParser,)
    # TODO: oauth token isn't present after step 3 in signup, fix later
    #permission_classes = (AMCAdminPermission,)
    def post(self, request, format=None):
        uploaded = request.data['file']
        stored_path = self.handle_uploaded_file(uploaded, request.GET.get('filename'))
        return Response({'file_path': stored_path}, status=201)
    def handle_uploaded_file(self, content, filename):
        # The storage backend handles name collisions and returns the
        # publicly servable URL for the saved file.
        storage = get_customer_files_storage()
        saved_name = storage.save(filename, content)
        return storage.url(saved_name)
class HostFilesView(views.APIView):
    """
    Host remote static files internally.
    This view hosts files on a Django Storage Backend (e.g. S3 or FileSystem).
    This view is stupid and doesn't try to fix errors, thus it will fail
    if any of the files it will give up and throw an error.
    Usage:
        POST /appsembler/api/host_files
        {
            "urls": [
                "https://openclipart.org/download/292749/abstract-icon1.png",
                "https://openclipart.org/download/292749/abstract-icon2.png",
                "https://openclipart.org/download/292749/abstract-icon3.png",
            ]
        }
    Response on Success:
        Code = 200
        {
            "success": true,
            "urls": [{
                "source": "https://openclipart.org/download/292749/abstract-icon1.png",
                "dest": "https://tahoe.appsembler.com/customer_files/c334d1943576/abstract.png"
            }, {
                "source": "https://openclipart.org/download/292749/abstract-icon2.png",
                "dest": "https://tahoe.appsembler.com/customer_files/a12bc334fd/abstract.png"
            }, {
                "source": "https://openclipart.org/download/292749/abstract-icon3.png",
                "dest": "https://tahoe.appsembler.com/customer_files/c334d1334df/abstract.png"
            }]
        }
    Response on Error:
        Code = 400 or 500
        {
            "success": false,
            "value": "Error processing the provided file",
            "url": "https://openclipart.org/download/292749/abstract-icon3.png"
        }
    """
    parser_classes = (JSONParser,)
    def _logged_response(self, json, status):
        # NOTE(review): error responses are logged at INFO level (client
        # errors, presumably deliberate) and via the root ``logging``
        # module rather than the module-level ``log`` -- confirm intent.
        logging.info('Error in processing a file for "HostFilesView", "%s". http_status=%s', json, status)
        return Response(json, status=status)
    def post(self, request):
        storage = get_customer_files_storage()
        urls = request.data.get('urls')
        # Reject anything that isn't a non-empty list of URLs.
        if not (isinstance(urls, list) and urls):
            return self._logged_response({
                'success': False,
                'value': 'No files were provided.',
            }, status=status.HTTP_400_BAD_REQUEST)
        # Operational limits, tunable per-deployment via settings.
        maximum_files = settings.APPSEMBLER_FEATURES.get('FILE_HOST_MAXIMUM_FILES', 10)
        timeout_secs = settings.APPSEMBLER_FEATURES.get('FILE_HOST_TIMEOUT', 1)
        max_download_size_bytes = settings.APPSEMBLER_FEATURES.get('FILE_HOST_MAX_DOWNLOAD_SIZE', 512 * 1024)
        if len(urls) > maximum_files:
            return self._logged_response({
                'success': False,
                'value': 'Too many files were provided.',
                'maximum_files': maximum_files
            }, status=status.HTTP_400_BAD_REQUEST)
        stored_urls = []
        for source_url in urls:
            try:
                response = requests.get(source_url, timeout=timeout_secs)
            except requests.exceptions.Timeout:
                return self._logged_response({
                    'success': False,
                    'value': 'Request to the needed URL timed out.',
                    'url': source_url,
                    'timeout_seconds': timeout_secs,
                }, status=status.HTTP_400_BAD_REQUEST)
            except requests.exceptions.RequestException:
                return self._logged_response({
                    'success': False,
                    'value': 'Error processing the provided URL.',
                    'url': source_url,
                }, status=status.HTTP_400_BAD_REQUEST)
            if len(response.content) > max_download_size_bytes:
                # TODO: Use a more streamed limit, but probably the timeout would protect against 1TB downloads
                # as most servers can't really download anything over than 12MBytes in a single second
                # But if you're willing see: https://stackoverflow.com/a/23514616/161278
                return self._logged_response({
                    'success': False,
                    'value': 'The file is too large to download.',
                    'url': source_url,
                    'max_size_bytes': max_download_size_bytes,
                }, status=status.HTTP_400_BAD_REQUEST)
            cleaned_up = to_safe_file_name(source_url)
            new_file_name = storage.get_available_name(cleaned_up, max_length=100)
            with storage.open(new_file_name, 'wb') as f:
                f.write(response.content)
            dest_url = get_base_url(request.is_secure()) + storage.url(new_file_name)
            stored_urls.append({
                'source': source_url,
                'dest': dest_url,
            })
        return Response({
            'success': True,
            'urls': stored_urls,
        }, status=status.HTTP_200_OK)
class SiteCreateView(generics.CreateAPIView):
    # Signup endpoint for registering a new site; authenticated by API key
    # (server-to-server) rather than a user session.
    serializer_class = RegistrationSerializer
    permission_classes = (ApiKeyHeaderPermission,)
class UsernameAvailabilityView(APIView):
    """Probe endpoint: 200 when ``username`` is already taken, 404 when free."""
    def get(self, request, username, format=None):
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            return Response(None, status=status.HTTP_404_NOT_FOUND)
        return Response(None, status=status.HTTP_200_OK)
class FindUsernameByEmailView(APIView):
    """
    View to find username by email to be used in AMC signup workflow.
    """
    permission_classes = [ApiKeyHeaderPermission]
    def get(self, request):
        user_email = request.GET.get('email')
        organization_name = request.GET.get('organization_name')
        # Missing parameters and lookup misses are indistinguishable to the
        # caller: both answer 404 with an empty body.
        if user_email and organization_name:
            try:
                organization = Organization.objects.get(name=organization_name)
                mapping = UserOrganizationMapping.objects.get(user__email=user_email, organization=organization)
            except (Organization.DoesNotExist, UserOrganizationMapping.DoesNotExist):
                pass
            else:
                return Response({'username': mapping.user.username}, status=status.HTTP_200_OK)
        return Response({}, status=status.HTTP_404_NOT_FOUND)
class DomainAvailabilityView(APIView):
    """Probe endpoint: 200 when ``subdomain`` is taken, 404 when free."""
    def get(self, request, subdomain, format=None):
        try:
            Site.objects.get(name=subdomain)
        except Site.DoesNotExist:
            return Response(None, status=status.HTTP_404_NOT_FOUND)
        return Response(None, status=status.HTTP_200_OK)
class DomainSwitchView(APIView):
    """Swap a site's active domain with its registered alternative domain."""
    def post(self, request, format=None):
        site_id = request.data.get('site')
        if not site_id:
            return Response("Site ID needed", status=status.HTTP_400_BAD_REQUEST)
        try:
            site = Site.objects.get(id=site_id)
            # ``alternative_domain`` appears to be a nullable reverse relation
            # to AlternativeDomain -- TODO confirm against the model.
            if not site.alternative_domain:
                return Response("Site {} does not have a custom domain".format(site.domain),
                                status=status.HTTP_404_NOT_FOUND)
            site.alternative_domain.switch_with_active()
            return Response(status=status.HTTP_200_OK)
        except Site.DoesNotExist:
            return Response("The site with ID {} does not exist".format(site_id),
                            status=status.HTTP_404_NOT_FOUND)
class CustomDomainView(CreateAPIView):
    # Registers an AlternativeDomain (customer-owned custom domain) record.
    queryset = AlternativeDomain.objects.all()
    serializer_class = AlternativeDomainSerializer
| agpl-3.0 |
nebril/fuel-web | fuel_upgrade_system/fuel_upgrade/fuel_upgrade/pre_upgrade_hooks/from_5_0_1_to_any_fix_host_system_repo.py | 7 | 2392 | # -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from fuel_upgrade.engines.host_system import HostSystemUpgrader
from fuel_upgrade.pre_upgrade_hooks.base import PreUpgradeHookBase
from fuel_upgrade import utils
logger = logging.getLogger(__name__)
class FixHostSystemRepoHook(PreUpgradeHookBase):
    """During 5.0.1 upgrade we add repository
    where as repository path we set path to
    repository which is from upgrade tar ball.
    When user deletes this information he deletes
    the repo. As result we can get broken repo
    which fails during the next upgrade [1].
    [1] https://bugs.launchpad.net/fuel/+bug/1358686
    """
    #: this hook is required only for host-system engine
    enable_for_engines = [HostSystemUpgrader]
    #: path to 5.0.1 repository which is created by upgrade script
    repo_path = '/var/www/nailgun/5.0.1/centos/x86_64'
    #: path to the file for yum repo
    yum_repo_file = '/etc/yum.repos.d/5.0.1_nailgun.repo'
    #: path to repo template
    repo_template = os.path.abspath(os.path.join(
        os.path.dirname(__file__), '..', 'templates', 'nailgun.repo'))
    # NOTE: the former ``__init__`` override was removed -- it only forwarded
    # its arguments to super() and added nothing; the base class constructor
    # is used directly.
    def check_if_required(self):
        """The hack is required if we're going to upgrade from 5.0.1
        and only repo path for 5.0.1 is exists
        """
        return (self.config.from_version == '5.0.1' and
                utils.file_exists(self.repo_path) and
                utils.file_exists(self.yum_repo_file))
    def run(self):
        """Rewrite the yum repo file so it points at the local 5.0.1 repo
        path instead of the (possibly deleted) tarball location.
        """
        utils.render_template_to_file(
            self.repo_template,
            self.yum_repo_file,
            {'repo_path': self.repo_path, 'version': '5.0.1'})
| apache-2.0 |
benspaulding/django | tests/regressiontests/urlpatterns_reverse/namespace_urls.py | 42 | 2458 | from __future__ import absolute_import
from django.conf.urls import patterns, url, include
from .views import view_class_instance
class URLObject(object):
    """Container yielding a namespaced URLconf triple for include()."""

    def __init__(self, app_name, namespace):
        self.app_name = app_name
        self.namespace = namespace

    @property
    def urls(self):
        # Build the inner patterns lazily on each access and return the
        # (patterns, app_name, namespace) triple that include() expects.
        inner_patterns = patterns('',
            url(r'^inner/$', 'empty_view', name='urlobject-view'),
            url(r'^inner/(?P<arg1>\d+)/(?P<arg2>\d+)/$', 'empty_view', name='urlobject-view'),
            url(r'^inner/\+\\\$\*/$', 'empty_view', name='urlobject-special-view'),
        )
        return inner_patterns, self.app_name, self.namespace
# Namespace fixtures: two instances of one app, an instance whose
# namespace equals its app name (the "default" instance), and two
# instances of an app that has no default instance.
testobj1 = URLObject('testapp', 'test-ns1')
testobj2 = URLObject('testapp', 'test-ns2')
default_testobj = URLObject('testapp', 'testapp')

otherobj1 = URLObject('nodefault', 'other-ns1')
otherobj2 = URLObject('nodefault', 'other-ns2')

# Test URLconf exercising named/unnamed patterns, class-based views,
# regex metacharacters, and namespaced includes for the reverse() tests.
urlpatterns = patterns('regressiontests.urlpatterns_reverse.views',
    url(r'^normal/$', 'empty_view', name='normal-view'),
    url(r'^normal/(?P<arg1>\d+)/(?P<arg2>\d+)/$', 'empty_view', name='normal-view'),
    url(r'^\+\\\$\*/$', 'empty_view', name='special-view'),
    url(r'^mixed_args/(\d+)/(?P<arg2>\d+)/$', 'empty_view', name='mixed-args'),
    url(r'^no_kwargs/(\d+)/(\d+)/$', 'empty_view', name='no-kwargs'),
    url(r'^view_class/(?P<arg1>\d+)/(?P<arg2>\d+)/$', view_class_instance, name='view-class'),
    (r'^unnamed/normal/(?P<arg1>\d+)/(?P<arg2>\d+)/$', 'empty_view'),
    (r'^unnamed/view_class/(?P<arg1>\d+)/(?P<arg2>\d+)/$', view_class_instance),
    (r'^test1/', include(testobj1.urls)),
    (r'^test2/', include(testobj2.urls)),
    (r'^default/', include(default_testobj.urls)),
    (r'^other1/', include(otherobj1.urls)),
    (r'^other[246]/', include(otherobj2.urls)),
    (r'^ns-included[135]/', include('regressiontests.urlpatterns_reverse.included_namespace_urls', namespace='inc-ns1')),
    (r'^ns-included2/', include('regressiontests.urlpatterns_reverse.included_namespace_urls', namespace='inc-ns2')),
    (r'^included/', include('regressiontests.urlpatterns_reverse.included_namespace_urls')),
    (r'^inc(?P<outer>\d+)/', include('regressiontests.urlpatterns_reverse.included_urls', namespace='inc-ns5')),
    (r'^ns-outer/(?P<outer>\d+)/', include('regressiontests.urlpatterns_reverse.included_namespace_urls', namespace='inc-outer')),
    (r'^\+\\\$\*/', include('regressiontests.urlpatterns_reverse.namespace_urls', namespace='special')),
)
| bsd-3-clause |
birocorneliu/conference | lib/to_delete.py | 1 | 2829 | from datetime import datetime
import endpoints
from google.appengine.ext import ndb
from google.appengine.api import taskqueue, memcache
from lib.db import Profile, Conference
from lib.models import ConflictException, ProfileForm, BooleanMessage, ConferenceForm, TeeShirtSize
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT_ANNOUNCEMENTS"
@ndb.transactional()
def _updateConferenceObject(self, request):
    """Update an existing Conference entity from a ConferenceForm request.

    Runs inside an NDB transaction.  Only fields actually present in the
    request are copied onto the entity; date strings are converted to
    dates and conf.month is derived from startDate.

    Raises:
        endpoints.UnauthorizedException: no authenticated user.
        endpoints.NotFoundException: no conference for the given key.
        endpoints.ForbiddenException: caller is not the conference owner.
    """
    user = endpoints.get_current_user()
    if not user:
        raise endpoints.UnauthorizedException('Authorization required')
    user_id = getUserId(user)

    # update existing conference
    conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
    # check that conference exists
    if not conf:
        raise endpoints.NotFoundException(
            'No conference found with key: %s' % request.websafeConferenceKey)

    # check that user is owner
    if user_id != conf.organizerUserId:
        raise endpoints.ForbiddenException(
            'Only the owner can update the conference.')

    # Not getting all the fields, so don't create a new object; just
    # copy relevant fields from ConferenceForm to Conference object.
    # (BUGFIX: a dict comprehension over all request fields used to be
    # built here and assigned to `data`, but the result was never read
    # before the loop below reassigned `data` -- dead code removed.)
    for field in request.all_fields():
        data = getattr(request, field.name)
        # only copy fields where we get data
        if data not in (None, []):
            # special handling for dates (convert string to Date)
            if field.name in ('startDate', 'endDate'):
                data = datetime.strptime(data, "%Y-%m-%d").date()
                if field.name == 'startDate':
                    conf.month = data.month
            # write to Conference object
            setattr(conf, field.name, data)
    conf.put()
    prof = ndb.Key(Profile, user_id).get()
    return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
# NOTE(review): this function carries @staticmethod yet sits at module
# level -- it appears to have been lifted out of an API class (the file
# is named to_delete.py).  As written, calling it directly only works
# where staticmethod objects are callable; confirm intended usage.
@staticmethod
def _cacheAnnouncement():
    """Create Announcement & assign to memcache; used by
    memcache cron job & putAnnouncement().
    """
    # "Nearly sold out" means 1-5 seats remaining; fetch names only.
    confs = Conference.query(ndb.AND(
        Conference.seatsAvailable <= 5,
        Conference.seatsAvailable > 0)
    ).fetch(projection=[Conference.name])

    if confs:
        # If there are almost sold out conferences,
        # format announcement and set it in memcache
        announcement = '%s %s' % (
            'Last chance to attend! The following conferences '
            'are nearly sold out:',
            ', '.join(conf.name for conf in confs))
        memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
    else:
        # If there are no sold out conferences,
        # delete the memcache announcements entry
        announcement = ""
        memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
    # Returns the (possibly empty) announcement string to the caller.
    return announcement
| apache-2.0 |
idea4bsd/idea4bsd | python/testData/inspections/GoogleDocstringParametersInspection/test.py | 40 | 1256 | """ test docstring inspection"""
# NOTE(review): PyCharm/IDEA inspection test fixture -- NOT runnable
# Python.  The inline <weak_warning descr="..."> markers are
# expected-highlighting annotations consumed by the
# GoogleDocstringParametersInspection test harness; the docstring
# contents are the test data and must stay exactly as written.
def foo1(a, b):
    """
    Parameters:
        a: foo
        b: bar
    """
    pass

def foo(a, <weak_warning descr="Missing parameter b in docstring">b</weak_warning>, <weak_warning descr="Missing parameter n in docstring">n</weak_warning>):
    """
    Parameters:
        a: foo
    """
    pass

def foo():
    """
    Parameters:
        <weak_warning descr="Unexpected parameter a in docstring">a</weak_warning>: foo
    """
    pass

def compare(a, b, *, key=None):
    """
    Parameters:
        a:
        b:
        key:
    """
    pass

def foo(a, <weak_warning descr="Missing parameter c in docstring">c</weak_warning>):
    """
    Parameters:
        a:
        <weak_warning descr="Unexpected parameter b in docstring">b</weak_warning>:
    """
    pass

def varagrs_defined_without_stars(x, *args, y, **kwargs):
    """
    Args:
        x:
        args:
        y:
        kwargs:
    """

def varagrs_dont_exist():
    """
    Args:
        *<weak_warning descr="Unexpected parameter args in docstring">args</weak_warning>:
        **<weak_warning descr="Unexpected parameter kwargs in docstring">kwargs</weak_warning>:
    """

def varagrs_undefined(x, *args, y, **kwargs):
    """
    Args:
        x:
        y:
    """

def no_parameters_declared(x, y):
    """
    """
| apache-2.0 |
alexanderturner/ansible | lib/ansible/modules/cloud/azure/azure_rm_networkinterface.py | 9 | 25157 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module release metadata (development status / support tier) consumed
# by Ansible's documentation tooling and plugin loader.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'committer',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: azure_rm_networkinterface
version_added: "2.1"
short_description: Manage Azure network interfaces.
description:
- Create, update or delete a network interface. When creating a network interface you must provide the name of an
existing virtual network, the name of an existing subnet within the virtual network. A default security group
and public IP address will be created automatically, or you can provide the name of an existing security group
and public IP address. See the examples below for more details.
options:
resource_group:
description:
- Name of a resource group where the network interface exists or will be created.
required: true
name:
description:
- Name of the network interface.
required: true
state:
description:
- Assert the state of the network interface. Use 'present' to create or update an interface and
'absent' to delete an interface.
default: present
choices:
- absent
- present
required: false
location:
description:
- Valid azure location. Defaults to location of the resource group.
default: resource_group location
required: false
virtual_network_name:
description:
- Name of an existing virtual network with which the network interface will be associated. Required
when creating a network interface.
aliases:
- virtual_network
required: false
default: null
subnet_name:
description:
- Name of an existing subnet within the specified virtual network. Required when creating a network
interface
aliases:
- subnet
required: false
default: null
os_type:
description:
- Determines any rules to be added to a default security group. When creating a network interface, if no
security group name is provided, a default security group will be created. If the os_type is 'Windows',
a rule will be added allowing RDP access. If the os_type is 'Linux', a rule allowing SSH access will be
added.
choices:
- Windows
- Linux
default: Linux
required: false
private_ip_address:
description:
- Valid IPv4 address that falls within the specified subnet.
required: false
private_ip_allocation_method:
description:
- "Specify whether or not the assigned IP address is permanent. NOTE: when creating a network interface
specifying a value of 'Static' requires that a private_ip_address value be provided. You can update
the allocation method to 'Static' after a dynamic private ip address has been assigned."
default: Dynamic
choices:
- Dynamic
- Static
required: false
public_ip:
description:
- When creating a network interface, if no public IP address name is provided a default public IP
address will be created. Set to false, if you do not want a public IP address automatically created.
default: true
required: false
public_ip_address_name:
description:
- Name of an existing public IP address object to associate with the security group.
aliases:
- public_ip_address
- public_ip_name
required: false
default: null
public_ip_allocation_method:
description:
- If a public_ip_address_name is not provided, a default public IP address will be created. The allocation
method determines whether or not the public IP address assigned to the network interface is permanent.
choices:
- Dynamic
- Static
default: Dynamic
required: false
security_group_name:
description:
- Name of an existing security group with which to associate the network interface. If not provided, a
default security group will be created.
aliases:
- security_group
required: false
default: null
open_ports:
description:
- When a default security group is created for a Linux host a rule will be added allowing inbound TCP
connections to the default SSH port 22, and for a Windows host rules will be added allowing inbound
access to RDP ports 3389 and 5986. Override the default ports by providing a list of open ports.
type: list
required: false
default: null
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Create a network interface with minimal parameters
azure_rm_networkinterface:
name: nic001
resource_group: Testing
virtual_network_name: vnet001
subnet_name: subnet001
- name: Create a network interface with private IP address only (no Public IP)
azure_rm_networkinterface:
name: nic001
resource_group: Testing
virtual_network_name: vnet001
subnet_name: subnet001
public_ip: no
- name: Create a network interface for use in a Windows host (opens RDP port) with custom RDP port
azure_rm_networkinterface:
name: nic002
resource_group: Testing
virtual_network_name: vnet001
subnet_name: subnet001
os_type: Windows
    open_ports:
      - 3399
      - 5986
- name: Create a network interface using existing security group and public IP
azure_rm_networkinterface:
name: nic003
resource_group: Testing
virtual_network_name: vnet001
subnet_name: subnet001
security_group_name: secgroup001
public_ip_address_name: publicip001
- name: Delete network interface
azure_rm_networkinterface:
resource_group: Testing
name: nic003
state: absent
'''
RETURN = '''
state:
description: The current state of the network interface.
returned: always
type: dict
sample: {
"dns_settings": {
"applied_dns_servers": [],
"dns_servers": [],
"internal_dns_name_label": null,
"internal_fqdn": null
},
"enable_ip_forwarding": false,
"etag": 'W/"be115a43-2148-4545-a324-f33ad444c926"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/nic003",
"ip_configuration": {
"name": "default",
"private_ip_address": "10.1.0.10",
"private_ip_allocation_method": "Static",
"public_ip_address": {
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/publicIPAddresses/publicip001",
"name": "publicip001"
},
"subnet": {}
},
"location": "eastus2",
"mac_address": null,
"name": "nic003",
"network_security_group": {},
"primary": null,
"provisioning_state": "Succeeded",
"tags": null,
"type": "Microsoft.Network/networkInterfaces"
}
'''
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.network.models import NetworkInterface, NetworkInterfaceIPConfiguration, Subnet, \
PublicIPAddress, NetworkSecurityGroup
except ImportError:
# This is handled in azure_rm_common
pass
def nic_to_dict(nic):
    """Serialize an Azure NetworkInterface model into a plain dict.

    Only the first IP configuration is reported.  The optional
    sub-resources (security group, subnet, public IP) are emitted as
    empty dicts when absent, otherwise populated with id/name fields
    parsed out of the Azure resource id.
    """
    ip0 = nic.ip_configurations[0]

    ip_config = dict(
        name=ip0.name,
        private_ip_address=ip0.private_ip_address,
        private_ip_allocation_method=ip0.private_ip_allocation_method,
        subnet=dict(),
        public_ip_address=dict(),
    )
    dns = dict(
        dns_servers=nic.dns_settings.dns_servers,
        applied_dns_servers=nic.dns_settings.applied_dns_servers,
        internal_dns_name_label=nic.dns_settings.internal_dns_name_label,
        internal_fqdn=nic.dns_settings.internal_fqdn,
    )
    info = dict(
        id=nic.id,
        name=nic.name,
        type=nic.type,
        location=nic.location,
        tags=nic.tags,
        network_security_group=dict(),
        ip_configuration=ip_config,
        dns_settings=dns,
        mac_address=nic.mac_address,
        primary=nic.primary,
        enable_ip_forwarding=nic.enable_ip_forwarding,
        provisioning_state=nic.provisioning_state,
        etag=nic.etag,
    )

    if nic.network_security_group:
        parsed = azure_id_to_dict(nic.network_security_group.id)
        info['network_security_group'].update(
            id=nic.network_security_group.id,
            name=parsed['networkSecurityGroups'],
        )

    if ip0.subnet:
        parsed = azure_id_to_dict(ip0.subnet.id)
        ip_config['subnet'].update(
            id=ip0.subnet.id,
            virtual_network_name=parsed['virtualNetworks'],
            name=parsed['subnets'],
        )

    if ip0.public_ip_address:
        parsed = azure_id_to_dict(ip0.public_ip_address.id)
        ip_config['public_ip_address'].update(
            id=ip0.public_ip_address.id,
            name=parsed['publicIPAddresses'],
        )

    return info
class AzureRMNetworkInterface(AzureRMModuleBase):
    """Implementation of the azure_rm_networkinterface module.

    Creates, updates or deletes a network interface.  When no security
    group or public IP is supplied, default ones are created on the
    caller's behalf (see the module DOCUMENTATION above).
    """

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            name=dict(type='str', required=True),
            location=dict(type='str'),
            security_group_name=dict(type='str', aliases=['security_group']),
            state=dict(default='present', choices=['present', 'absent']),
            private_ip_address=dict(type='str'),
            private_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Dynamic'),
            public_ip_address_name=dict(type='str', aliases=['public_ip_address', 'public_ip_name']),
            public_ip=dict(type='bool', default=True),
            subnet_name=dict(type='str', aliases=['subnet']),
            virtual_network_name=dict(type='str', aliases=['virtual_network']),
            os_type=dict(type='str', choices=['Windows', 'Linux'], default='Linux'),
            open_ports=dict(type='list'),
            public_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Dynamic'),
        )

        # Placeholders; exec_module() copies the validated parameters
        # onto these attributes.  (A duplicate assignment of
        # security_group_name present in the original was removed.)
        self.resource_group = None
        self.name = None
        self.location = None
        self.security_group_name = None
        self.private_ip_address = None
        self.private_ip_allocation_method = None
        self.public_ip_address_name = None
        self.state = None
        self.subnet_name = None
        self.tags = None
        self.virtual_network_name = None
        self.os_type = None
        self.open_ports = None
        self.public_ip_allocation_method = None
        self.public_ip = None

        self.results = dict(
            changed=False,
            state=dict(),
        )

        super(AzureRMNetworkInterface, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                      supports_check_mode=True)

    def exec_module(self, **kwargs):
        """Diff desired vs. actual NIC state and apply any changes.

        Returns the standard result dict: 'changed' flag plus the
        serialized NIC under 'state'.
        """
        # BUGFIX: dict.keys() is a view object on Python 3 and cannot be
        # concatenated to a list; wrap in list() so this works on both
        # Python 2 and Python 3.
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        results = dict()
        changed = False
        nic = None
        subnet = None
        nsg = None
        pip = None

        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            # Set default location
            self.location = resource_group.location

        if self.state == 'present':
            # subnet_name and virtual_network_name must come together.
            if self.virtual_network_name and not self.subnet_name:
                self.fail("Parameter error: a subnet is required when passing a virtual_network_name.")

            if self.subnet_name and not self.virtual_network_name:
                self.fail("Parameter error: virtual_network_name is required when passing a subnet value.")

            if self.virtual_network_name and self.subnet_name:
                subnet = self.get_subnet(self.virtual_network_name, self.subnet_name)

            if self.public_ip_address_name:
                pip = self.get_public_ip_address(self.public_ip_address_name)

            if self.security_group_name:
                nsg = self.get_security_group(self.security_group_name)

        try:
            self.log('Fetching network interface {0}'.format(self.name))
            nic = self.network_client.network_interfaces.get(self.resource_group, self.name)

            self.log('Network interface {0} exists'.format(self.name))
            self.check_provisioning_state(nic, self.state)
            results = nic_to_dict(nic)
            self.log(results, pretty_print=True)

            if self.state == 'present':
                # Compare every requested property against current state;
                # 'results' is mutated into the desired state as we go.
                update_tags, results['tags'] = self.update_tags(results['tags'])
                if update_tags:
                    changed = True

                if self.private_ip_address:
                    if results['ip_configuration']['private_ip_address'] != self.private_ip_address:
                        self.log("CHANGED: network interface {0} private ip".format(self.name))
                        changed = True
                        results['ip_configuration']['private_ip_address'] = self.private_ip_address

                if self.public_ip_address_name:
                    if results['ip_configuration']['public_ip_address'].get('id') != pip.id:
                        self.log("CHANGED: network interface {0} public ip".format(self.name))
                        changed = True
                        results['ip_configuration']['public_ip_address']['id'] = pip.id
                        results['ip_configuration']['public_ip_address']['name'] = pip.name

                if self.security_group_name:
                    if results['network_security_group'].get('id') != nsg.id:
                        self.log("CHANGED: network interface {0} network security group".format(self.name))
                        changed = True
                        results['network_security_group']['id'] = nsg.id
                        results['network_security_group']['name'] = nsg.name

                if self.private_ip_allocation_method:
                    if results['ip_configuration']['private_ip_allocation_method'] != self.private_ip_allocation_method:
                        self.log("CHANGED: network interface {0} private ip allocation".format(self.name))
                        changed = True
                        results['ip_configuration']['private_ip_allocation_method'] = self.private_ip_allocation_method
                        if self.private_ip_allocation_method == 'Dynamic':
                            # Dynamic allocation implies Azure picks the address.
                            results['ip_configuration']['private_ip_address'] = None

                if self.subnet_name:
                    if results['ip_configuration']['subnet'].get('id') != subnet.id:
                        changed = True
                        self.log("CHANGED: network interface {0} subnet".format(self.name))
                        results['ip_configuration']['subnet']['id'] = subnet.id
                        results['ip_configuration']['subnet']['name'] = subnet.name
                        results['ip_configuration']['subnet']['virtual_network_name'] = self.virtual_network_name

            elif self.state == 'absent':
                self.log("CHANGED: network interface {0} exists but requested state is 'absent'".format(self.name))
                changed = True
        except CloudError:
            # NIC not found.
            self.log('Network interface {0} does not exist'.format(self.name))
            if self.state == 'present':
                self.log("CHANGED: network interface {0} does not exist but requested state is "
                         "'present'".format(self.name))
                changed = True

        self.results['changed'] = changed
        self.results['state'] = results

        if self.check_mode:
            return self.results

        if changed:
            if self.state == 'present':
                if not nic:
                    # create network interface
                    self.log("Creating network interface {0}.".format(self.name))

                    # check required parameters
                    if not self.subnet_name:
                        self.fail("parameter error: subnet_name required when creating a network interface.")
                    if not self.virtual_network_name:
                        self.fail("parameter error: virtual_network_name required when creating a network interface.")

                    if not self.security_group_name:
                        # create default security group
                        nsg = self.create_default_securitygroup(self.resource_group, self.location, self.name,
                                                                self.os_type, self.open_ports)

                    if not pip and self.public_ip:
                        # create a default public_ip
                        pip = self.create_default_pip(self.resource_group, self.location, self.name,
                                                      self.public_ip_allocation_method)

                    nic = NetworkInterface(
                        location=self.location,
                        tags=self.tags,
                        ip_configurations=[
                            NetworkInterfaceIPConfiguration(
                                private_ip_allocation_method=self.private_ip_allocation_method,
                            )
                        ]
                    )
                    nic.ip_configurations[0].subnet = Subnet(id=subnet.id)
                    nic.ip_configurations[0].name = 'default'
                    nic.network_security_group = NetworkSecurityGroup(id=nsg.id,
                                                                      location=nsg.location,
                                                                      resource_guid=nsg.resource_guid)
                    if self.private_ip_address:
                        nic.ip_configurations[0].private_ip_address = self.private_ip_address

                    if pip:
                        nic.ip_configurations[0].public_ip_address = PublicIPAddress(
                            id=pip.id,
                            location=pip.location,
                            resource_guid=pip.resource_guid)
                else:
                    # Update: rebuild the NIC model from the merged
                    # desired state accumulated in 'results' above.
                    self.log("Updating network interface {0}.".format(self.name))
                    nic = NetworkInterface(
                        id=results['id'],
                        location=results['location'],
                        tags=results['tags'],
                        ip_configurations=[
                            NetworkInterfaceIPConfiguration(
                                private_ip_allocation_method=
                                results['ip_configuration']['private_ip_allocation_method']
                            )
                        ]
                    )
                    subnet = self.get_subnet(results['ip_configuration']['subnet']['virtual_network_name'],
                                             results['ip_configuration']['subnet']['name'])
                    nic.ip_configurations[0].subnet = Subnet(id=subnet.id)
                    nic.ip_configurations[0].name = results['ip_configuration']['name']
                    if results['ip_configuration'].get('private_ip_address'):
                        nic.ip_configurations[0].private_ip_address = results['ip_configuration']['private_ip_address']
                    if results['ip_configuration']['public_ip_address'].get('id'):
                        pip = \
                            self.get_public_ip_address(results['ip_configuration']['public_ip_address']['name'])
                        nic.ip_configurations[0].public_ip_address = PublicIPAddress(
                            id=pip.id,
                            location=pip.location,
                            resource_guid=pip.resource_guid)
                    if results['network_security_group'].get('id'):
                        nsg = self.get_security_group(results['network_security_group']['name'])
                        nic.network_security_group = NetworkSecurityGroup(id=nsg.id,
                                                                          location=nsg.location,
                                                                          resource_guid=nsg.resource_guid)

                # See what actually gets sent to the API
                request = self.serialize_obj(nic, 'NetworkInterface')
                self.log(request, pretty_print=True)

                self.results['state'] = self.create_or_update_nic(nic)
            elif self.state == 'absent':
                self.log('Deleting network interface {0}'.format(self.name))
                self.delete_nic()

        return self.results

    def create_or_update_nic(self, nic):
        """PUT the NIC model and return its serialized resulting state."""
        try:
            poller = self.network_client.network_interfaces.create_or_update(self.resource_group, self.name, nic)
            new_nic = self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error creating or updating network interface {0} - {1}".format(self.name, str(exc)))
        return nic_to_dict(new_nic)

    def delete_nic(self):
        """Delete the NIC; on success set state.status to 'Deleted'."""
        try:
            poller = self.network_client.network_interfaces.delete(self.resource_group, self.name)
            self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error deleting network interface {0} - {1}".format(self.name, str(exc)))
        # Delete doesn't return anything. If we get this far, assume success
        self.results['state']['status'] = 'Deleted'
        return True

    def get_public_ip_address(self, name):
        """Fetch an existing public IP address object or fail the module."""
        self.log("Fetching public ip address {0}".format(name))
        try:
            public_ip = self.network_client.public_ip_addresses.get(self.resource_group, name)
        except Exception as exc:
            self.fail("Error: fetching public ip address {0} - {1}".format(self.name, str(exc)))
        return public_ip

    def get_subnet(self, vnet_name, subnet_name):
        """Fetch an existing subnet or fail the module."""
        self.log("Fetching subnet {0} in virtual network {1}".format(subnet_name, vnet_name))
        try:
            subnet = self.network_client.subnets.get(self.resource_group, vnet_name, subnet_name)
        except Exception as exc:
            self.fail("Error: fetching subnet {0} in virtual network {1} - {2}".format(subnet_name,
                                                                                       vnet_name,
                                                                                       str(exc)))
        return subnet

    def get_security_group(self, name):
        """Fetch an existing network security group or fail the module."""
        self.log("Fetching security group {0}".format(name))
        try:
            nsg = self.network_client.network_security_groups.get(self.resource_group, name)
        except Exception as exc:
            self.fail("Error: fetching network security group {0} - {1}.".format(name, str(exc)))
        return nsg
def main():
    """Module entry point; instantiating the class runs the module."""
    AzureRMNetworkInterface()

if __name__ == '__main__':
    main()
| gpl-3.0 |
saurabhjn76/sympy | sympy/polys/tests/test_polyclasses.py | 93 | 12631 | """Tests for OO layer of several polynomial representations. """
from sympy.polys.polyclasses import DMP, DMF, ANP
from sympy.polys.domains import ZZ, QQ
from sympy.polys.specialpolys import f_polys
from sympy.polys.polyerrors import ExactQuotientFailed
from sympy.core.compatibility import long
from sympy.utilities.pytest import raises
# Dense-list representations of the shared specialpolys fixtures.
f_0, f_1, f_2, f_3, f_4, f_5, f_6 = [ f.to_dense() for f in f_polys() ]

def test_DMP___init__():
    # Leading zero terms are stripped during normalization.
    f = DMP([[0], [], [0, 1, 2], [3]], ZZ)

    assert f.rep == [[1, 2], [3]]
    assert f.dom == ZZ
    assert f.lev == 1

    # Explicit level argument.
    f = DMP([[1, 2], [3]], ZZ, 1)

    assert f.rep == [[1, 2], [3]]
    assert f.dom == ZZ
    assert f.lev == 1

    # Construction from a monomial->coefficient dict.
    f = DMP({(1, 1): 1, (0, 0): 2}, ZZ, 1)

    assert f.rep == [[1, 0], [2]]
    assert f.dom == ZZ
    assert f.lev == 1

def test_DMP___eq__():
    assert DMP([[ZZ(1), ZZ(2)], [ZZ(3)]], ZZ) == \
        DMP([[ZZ(1), ZZ(2)], [ZZ(3)]], ZZ)

    # Equality unifies ZZ and QQ ground domains in both directions.
    assert DMP([[ZZ(1), ZZ(2)], [ZZ(3)]], ZZ) == \
        DMP([[QQ(1), QQ(2)], [QQ(3)]], QQ)
    assert DMP([[QQ(1), QQ(2)], [QQ(3)]], QQ) == \
        DMP([[ZZ(1), ZZ(2)], [ZZ(3)]], ZZ)

    # Different number of levels -> never equal.
    assert DMP([[[ZZ(1)]]], ZZ) != DMP([[ZZ(1)]], ZZ)
    assert DMP([[ZZ(1)]], ZZ) != DMP([[[ZZ(1)]]], ZZ)

def test_DMP___bool__():
    # Zero polynomial is falsy; anything else is truthy.
    assert bool(DMP([[]], ZZ)) is False
    assert bool(DMP([[1]], ZZ)) is True

def test_DMP_to_dict():
    f = DMP([[3], [], [2], [], [8]], ZZ)

    assert f.to_dict() == \
        {(4, 0): 3, (2, 0): 2, (0, 0): 8}
    assert f.to_sympy_dict() == \
        {(4, 0): ZZ.to_sympy(3), (2, 0): ZZ.to_sympy(2), (0, 0):
         ZZ.to_sympy(8)}

def test_DMP_properties():
    # Structural predicates: zero/one/ground/square-free/monic/primitive.
    assert DMP([[]], ZZ).is_zero is True
    assert DMP([[1]], ZZ).is_zero is False

    assert DMP([[1]], ZZ).is_one is True
    assert DMP([[2]], ZZ).is_one is False

    assert DMP([[1]], ZZ).is_ground is True
    assert DMP([[1], [2], [1]], ZZ).is_ground is False

    assert DMP([[1], [2, 0], [1, 0]], ZZ).is_sqf is True
    assert DMP([[1], [2, 0], [1, 0, 0]], ZZ).is_sqf is False

    assert DMP([[1, 2], [3]], ZZ).is_monic is True
    assert DMP([[2, 2], [3]], ZZ).is_monic is False

    assert DMP([[1, 2], [3]], ZZ).is_primitive is True
    assert DMP([[2, 4], [6]], ZZ).is_primitive is False
def test_DMP_arithmetics():
    # Ground (scalar) operations.
    f = DMP([[2], [2, 0]], ZZ)

    assert f.mul_ground(2) == DMP([[4], [4, 0]], ZZ)
    assert f.quo_ground(2) == DMP([[1], [1, 0]], ZZ)

    raises(ExactQuotientFailed, lambda: f.exquo_ground(3))

    # Unary ops and the dunder operators that mirror the named methods.
    f = DMP([[-5]], ZZ)
    g = DMP([[5]], ZZ)

    assert f.abs() == g
    assert abs(f) == g

    assert g.neg() == f
    assert -g == f

    h = DMP([[]], ZZ)

    assert f.add(g) == h
    assert f + g == h
    assert g + f == h
    assert f + 5 == h
    assert 5 + f == h

    h = DMP([[-10]], ZZ)

    assert f.sub(g) == h
    assert f - g == h
    assert g - f == -h
    assert f - 5 == h
    assert 5 - f == -h

    h = DMP([[-25]], ZZ)

    assert f.mul(g) == h
    assert f * g == h
    assert g * f == h
    assert f * 5 == h
    assert 5 * f == h

    h = DMP([[25]], ZZ)

    assert f.sqr() == h
    assert f.pow(2) == h
    assert f**2 == h

    raises(TypeError, lambda: f.pow('x'))

    # Pseudo-division family (pdiv/pquo/prem/pexquo).
    f = DMP([[1], [], [1, 0, 0]], ZZ)
    g = DMP([[2], [-2, 0]], ZZ)

    q = DMP([[2], [2, 0]], ZZ)
    r = DMP([[8, 0, 0]], ZZ)

    assert f.pdiv(g) == (q, r)
    assert f.pquo(g) == q
    assert f.prem(g) == r

    raises(ExactQuotientFailed, lambda: f.pexquo(g))

    # Ordinary division family, plus divmod/floordiv/mod operators.
    f = DMP([[1], [], [1, 0, 0]], ZZ)
    g = DMP([[1], [-1, 0]], ZZ)

    q = DMP([[1], [1, 0]], ZZ)
    r = DMP([[2, 0, 0]], ZZ)

    assert f.div(g) == (q, r)
    assert f.quo(g) == q
    assert f.rem(g) == r

    assert divmod(f, g) == (q, r)
    assert f // g == q
    assert f % g == r

    raises(ExactQuotientFailed, lambda: f.exquo(g))
def test_DMP_functionality():
    # Degrees, coefficients and norms.
    f = DMP([[1], [2, 0], [1, 0, 0]], ZZ)
    g = DMP([[1], [1, 0]], ZZ)
    h = DMP([[1]], ZZ)

    assert f.degree() == 2
    assert f.degree_list() == (2, 2)
    assert f.total_degree() == 2

    assert f.LC() == ZZ(1)
    assert f.TC() == ZZ(0)
    assert f.nth(1, 1) == ZZ(2)

    raises(TypeError, lambda: f.nth(0, 'x'))

    assert f.max_norm() == 2
    assert f.l1_norm() == 4

    # Differentiation in either variable (f is symmetric in x, y).
    u = DMP([[2], [2, 0]], ZZ)

    assert f.diff(m=1, j=0) == u
    assert f.diff(m=1, j=1) == u

    raises(TypeError, lambda: f.diff(m='x', j=0))

    # Evaluation; evaluating one variable drops a level.
    u = DMP([1, 2, 1], ZZ)
    v = DMP([1, 2, 1], ZZ)

    assert f.eval(a=1, j=0) == u
    assert f.eval(a=1, j=1) == v

    assert f.eval(1).eval(1) == ZZ(4)

    # GCD / LCM; f = (x + y)**2 and g = x + y.
    assert f.cofactors(g) == (g, g, h)
    assert f.gcd(g) == g
    assert f.lcm(g) == f

    u = DMP([[QQ(45), QQ(30), QQ(5)]], QQ)
    v = DMP([[QQ(1), QQ(2, 3), QQ(1, 9)]], QQ)

    assert u.monic() == v

    assert (4*f).content() == ZZ(4)
    assert (4*f).primitive() == (ZZ(4), f)

    # Coefficient truncation modulo 3.
    f = DMP([[1], [2], [3], [4], [5], [6]], ZZ)

    assert f.trunc(3) == DMP([[1], [-1], [], [1], [-1], []], ZZ)

    # Square-free part / decomposition of a specialpolys fixture.
    f = DMP(f_4, ZZ)

    assert f.sqf_part() == -f
    assert f.sqf_list() == (ZZ(-1), [(-f, 1)])

    # Subresultant PRS and resultant.
    f = DMP([[-1], [], [], [5]], ZZ)
    g = DMP([[3, 1], [], []], ZZ)
    h = DMP([[45, 30, 5]], ZZ)

    r = DMP([675, 675, 225, 25], ZZ)

    assert f.subresultants(g) == [f, g, h]
    assert f.resultant(g) == r

    f = DMP([1, 3, 9, -13], ZZ)

    assert f.discriminant() == -11664

    # Extended Euclidean algorithm over QQ[x].
    f = DMP([QQ(2), QQ(0)], QQ)
    g = DMP([QQ(1), QQ(0), QQ(-16)], QQ)

    s = DMP([QQ(1, 32), QQ(0)], QQ)
    t = DMP([QQ(-1, 16)], QQ)
    h = DMP([QQ(1)], QQ)

    assert f.half_gcdex(g) == (s, h)
    assert f.gcdex(g) == (s, t, h)

    assert f.invert(g) == s

    # gcdex/invert are univariate-only: multivariate input raises.
    f = DMP([[1], [2], [3]], QQ)

    raises(ValueError, lambda: f.half_gcdex(f))
    raises(ValueError, lambda: f.gcdex(f))
    raises(ValueError, lambda: f.invert(f))

    # Functional composition and decomposition.
    f = DMP([1, 0, 20, 0, 150, 0, 500, 0, 625, -2, 0, -10, 9], ZZ)
    g = DMP([1, 0, 0, -2, 9], ZZ)
    h = DMP([1, 0, 5, 0], ZZ)

    assert g.compose(h) == f
    assert f.decompose() == [g, h]

    # decompose/sturm are univariate-only as well.
    f = DMP([[1], [2], [3]], QQ)

    raises(ValueError, lambda: f.decompose())
    raises(ValueError, lambda: f.sturm())

def test_DMP_exclude():
    # Deeply nested rep in which all but two variables are superfluous;
    # exclude() returns the dropped indices and the reduced polynomial.
    f = [[[[[[[[[[[[[[[[[[[[[[[[[[1]], [[]]]]]]]]]]]]]]]]]]]]]]]]]]
    J = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
         18, 19, 20, 21, 22, 24, 25]

    assert DMP(f, ZZ).exclude() == (J, DMP([1, 0], ZZ))
    assert DMP([[1], [1, 0]], ZZ).exclude() == ([], DMP([[1], [1, 0]], ZZ))
def test_DMF__init__():
f = DMF(([[0], [], [0, 1, 2], [3]], [[1, 2, 3]]), ZZ)
assert f.num == [[1, 2], [3]]
assert f.den == [[1, 2, 3]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[1, 2], [3]], [[1, 2, 3]]), ZZ, 1)
assert f.num == [[1, 2], [3]]
assert f.den == [[1, 2, 3]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[-1], [-2]], [[3], [-4]]), ZZ)
assert f.num == [[-1], [-2]]
assert f.den == [[3], [-4]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[1], [2]], [[-3], [4]]), ZZ)
assert f.num == [[-1], [-2]]
assert f.den == [[3], [-4]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[1], [2]], [[-3], [4]]), ZZ)
assert f.num == [[-1], [-2]]
assert f.den == [[3], [-4]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[]], [[-3], [4]]), ZZ)
assert f.num == [[]]
assert f.den == [[1]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(17, ZZ, 1)
assert f.num == [[17]]
assert f.den == [[1]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[1], [2]]), ZZ)
assert f.num == [[1], [2]]
assert f.den == [[1]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF([[0], [], [0, 1, 2], [3]], ZZ)
assert f.num == [[1, 2], [3]]
assert f.den == [[1]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF({(1, 1): 1, (0, 0): 2}, ZZ, 1)
assert f.num == [[1, 0], [2]]
assert f.den == [[1]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[QQ(1)], [QQ(2)]], [[-QQ(3)], [QQ(4)]]), QQ)
assert f.num == [[-QQ(1)], [-QQ(2)]]
assert f.den == [[QQ(3)], [-QQ(4)]]
assert f.lev == 1
assert f.dom == QQ
f = DMF(([[QQ(1, 5)], [QQ(2, 5)]], [[-QQ(3, 7)], [QQ(4, 7)]]), QQ)
assert f.num == [[-QQ(7)], [-QQ(14)]]
assert f.den == [[QQ(15)], [-QQ(20)]]
assert f.lev == 1
assert f.dom == QQ
raises(ValueError, lambda: DMF(([1], [[1]]), ZZ))
raises(ZeroDivisionError, lambda: DMF(([1], []), ZZ))
def test_DMF__bool__():
    """A DMF is falsy exactly when its numerator is zero."""
    assert bool(DMF([[]], ZZ)) is False
    assert bool(DMF([[1]], ZZ)) is True
def test_DMF_properties():
    """``is_zero`` and ``is_one`` flags on DMF fractions."""
    assert DMF([[]], ZZ).is_zero is True
    assert DMF([[]], ZZ).is_one is False
    assert DMF([[1]], ZZ).is_zero is False
    assert DMF([[1]], ZZ).is_one is True
    # The fraction 1/2 is neither zero nor one.
    assert DMF(([[1]], [[2]]), ZZ).is_one is False
def test_DMF_arithmetics():
    """Arithmetic on DMF fractions: negation, +, -, *, /, **.

    Every operation is checked both through the explicit method (add, sub,
    mul, quo, pow) and the corresponding operator overload.
    """
    f = DMF([[7], [-9]], ZZ)
    g = DMF([[-7], [9]], ZZ)
    assert f.neg() == -f == g
    # f = 1/x and g = 1/y (bivariate, level 1).
    f = DMF(([[1]], [[1], []]), ZZ)
    g = DMF(([[1]], [[1, 0]]), ZZ)
    h = DMF(([[1], [1, 0]], [[1, 0], []]), ZZ)
    assert f.add(g) == f + g == h
    assert g.add(f) == g + f == h
    h = DMF(([[-1], [1, 0]], [[1, 0], []]), ZZ)
    assert f.sub(g) == f - g == h
    h = DMF(([[1]], [[1, 0], []]), ZZ)
    assert f.mul(g) == f*g == h
    assert g.mul(f) == g*f == h
    h = DMF(([[1, 0]], [[1], []]), ZZ)
    assert f.quo(g) == f/g == h
    # (1/x)**3 == 1/x**3
    h = DMF(([[1]], [[1], [], [], []]), ZZ)
    assert f.pow(3) == f**3 == h
    h = DMF(([[1]], [[1, 0, 0, 0]]), ZZ)
    assert g.pow(3) == g**3 == h
def test_ANP___init__():
    """Constructor tests for ANP (algebraic number polynomials).

    An ANP stores a coefficient list ``rep`` reduced modulo the minimal
    polynomial ``mod`` over the ground domain ``dom``; list, dict and
    scalar inputs are all accepted.
    """
    rep = [QQ(1), QQ(1)]
    mod = [QQ(1), QQ(0), QQ(1)]
    f = ANP(rep, mod, QQ)
    assert f.rep == [QQ(1), QQ(1)]
    assert f.mod == [QQ(1), QQ(0), QQ(1)]
    assert f.dom == QQ
    # Dict input maps exponent -> coefficient.
    rep = {1: QQ(1), 0: QQ(1)}
    mod = {2: QQ(1), 0: QQ(1)}
    f = ANP(rep, mod, QQ)
    assert f.rep == [QQ(1), QQ(1)]
    assert f.mod == [QQ(1), QQ(0), QQ(1)]
    assert f.dom == QQ
    # Plain scalars are coerced into the ground domain.
    f = ANP(1, mod, QQ)
    assert f.rep == [QQ(1)]
    assert f.mod == [QQ(1), QQ(0), QQ(1)]
    assert f.dom == QQ
def test_ANP___eq__():
    """Equality of ANPs requires both rep and mod to match."""
    a = ANP([QQ(1), QQ(1)], [QQ(1), QQ(0), QQ(1)], QQ)
    b = ANP([QQ(1), QQ(1)], [QQ(1), QQ(0), QQ(2)], QQ)
    assert (a == a) is True
    assert (a != a) is False
    # Same rep, different modulus -> not equal.
    assert (a == b) is False
    assert (a != b) is True
    # Same modulus, different rep -> not equal.
    b = ANP([QQ(1), QQ(2)], [QQ(1), QQ(0), QQ(1)], QQ)
    assert (a == b) is False
    assert (a != b) is True
def test_ANP___bool__():
    """An ANP is falsy exactly when its representation is empty (zero)."""
    assert bool(ANP([], [QQ(1), QQ(0), QQ(1)], QQ)) is False
    assert bool(ANP([QQ(1)], [QQ(1), QQ(0), QQ(1)], QQ)) is True
def test_ANP_properties():
    """``is_zero`` and ``is_one`` flags on ANP elements."""
    mod = [QQ(1), QQ(0), QQ(1)]
    assert ANP([QQ(0)], mod, QQ).is_zero is True
    assert ANP([QQ(1)], mod, QQ).is_zero is False
    assert ANP([QQ(1)], mod, QQ).is_one is True
    assert ANP([QQ(2)], mod, QQ).is_one is False
def test_ANP_arithmetics():
    """Arithmetic on ANPs modulo x**3 - 2 (i.e. in QQ(2**(1/3)))."""
    mod = [QQ(1), QQ(0), QQ(0), QQ(-2)]
    a = ANP([QQ(2), QQ(-1), QQ(1)], mod, QQ)
    b = ANP([QQ(1), QQ(2)], mod, QQ)
    c = ANP([QQ(-2), QQ(1), QQ(-1)], mod, QQ)
    assert a.neg() == -a == c
    c = ANP([QQ(2), QQ(0), QQ(3)], mod, QQ)
    assert a.add(b) == a + b == c
    assert b.add(a) == b + a == c
    c = ANP([QQ(2), QQ(-2), QQ(-1)], mod, QQ)
    assert a.sub(b) == a - b == c
    c = ANP([QQ(-2), QQ(2), QQ(1)], mod, QQ)
    assert b.sub(a) == b - a == c
    # Products are reduced modulo the minimal polynomial.
    c = ANP([QQ(3), QQ(-1), QQ(6)], mod, QQ)
    assert a.mul(b) == a*b == c
    assert b.mul(a) == b*a == c
    # a**(-1) is the modular (multiplicative) inverse of a.
    c = ANP([QQ(-1, 43), QQ(9, 43), QQ(5, 43)], mod, QQ)
    assert a.pow(0) == a**(0) == ANP(1, mod, QQ)
    assert a.pow(1) == a**(1) == a
    assert a.pow(-1) == a**(-1) == c
    assert a.quo(a) == a.mul(a.pow(-1)) == a*a**(-1) == ANP(1, mod, QQ)
def test_ANP_unify():
    """unify() promotes mixed ground domains to the wider one (ZZ+QQ -> QQ)."""
    mod = [QQ(1), QQ(0), QQ(-2)]
    a = ANP([QQ(1)], mod, QQ)
    b = ANP([ZZ(1)], mod, ZZ)
    assert a.unify(b)[0] == QQ
    assert b.unify(a)[0] == QQ
    assert a.unify(a)[0] == QQ
    assert b.unify(b)[0] == ZZ
def test___hash__():
    # issue 5571
    # Make sure int vs. long doesn't affect hashing with Python ground types
    # NOTE(review): this test is Python 2 only — the ``long`` builtin does
    # not exist on Python 3.
    assert DMP([[1, 2], [3]], ZZ) == DMP([[long(1), long(2)], [long(3)]], ZZ)
    assert hash(DMP([[1, 2], [3]], ZZ)) == hash(DMP([[long(1), long(2)], [long(3)]], ZZ))
    assert DMF(
        ([[1, 2], [3]], [[1]]), ZZ) == DMF(([[long(1), long(2)], [long(3)]], [[long(1)]]), ZZ)
    assert hash(DMF(([[1, 2], [3]], [[1]]), ZZ)) == hash(DMF(([[long(1),
        long(2)], [long(3)]], [[long(1)]]), ZZ))
    assert ANP([1, 1], [1, 0, 1], ZZ) == ANP([long(1), long(1)], [long(1), long(0), long(1)], ZZ)
    assert hash(
        ANP([1, 1], [1, 0, 1], ZZ)) == hash(ANP([long(1), long(1)], [long(1), long(0), long(1)], ZZ))
| bsd-3-clause |
aWhereAPI/API-Code-Samples | python/header.py | 1 | 7933 | from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import str
from builtins import bytes
from future import standard_library
standard_library.install_aliases()
from builtins import object
import requests as rq
import base64
import pprint
import json
import random
from menus import Menus
class AWhereAPI(object):
    """Thin client for the aWhere V2 REST API.

    Wraps the fields and weather endpoints with simple helper methods.
    Construction performs a live HTTP request: the OAuth2 access token is
    fetched immediately from the aWhere token endpoint.
    """
    def __init__(self, api_key, api_secret):
        """
        Initializes the AWhereAPI class, which is used to perform HTTP requests
        to the aWhere V2 API.
        Docs:
            http://developer.awhere.com/api/reference
        """
        self._fields_url = 'https://api.awhere.com/v2/fields'
        self._weather_url = 'https://api.awhere.com/v2/weather/fields'
        self.api_key = api_key
        self.api_secret = api_secret
        # Base64("key:secret") is required by aWhere's OAuth token endpoint.
        self.base_64_encoded_secret_key = self.encode_secret_and_key(
            self.api_key, self.api_secret)
        # Note: network call happens here, at construction time.
        self.auth_token = self.get_oauth_token(self.base_64_encoded_secret_key)
        self._menu = Menus()
    def create_test_field(self):
        """
        Performs a HTTP POST request to create and add a Field to your aWhere App.AWhereAPI
        Docs:
            http://developer.awhere.com/api/reference/fields/create-field
        """
        # Each Field requires a unique ID
        testField = 'TestField-'
        testField += str(random.randint(1, 999))
        # Next, we build the request body. Please refer to the docs above for
        # more info.
        fieldBody = {'id': testField,
                     'name': testField,
                     'farmId': 'Farm1Test',
                     'centerPoint': {'latitude': 39.82,
                                     'longitude': -98.56},
                     'acres': 100}
        # Setup the HTTP request headers
        auth_headers = {
            "Authorization": "Bearer %s" % self.auth_token,
            "Content-Type": 'application/json'
        }
        # Perform the POST request to create your Field
        print('Attempting to create new field....\n')
        response = rq.post(self._fields_url,
                           headers=auth_headers,
                           json=fieldBody)
        # A successful request will return a 201 status code
        print('The server responded with a status code of %d \n' %
              response.status_code)
        pprint.pprint(response.json())
        print('\n\n\n')
        if response.status_code == 201:
            print(
                'Your field "{0}" was successfully created!'.format(testField))
        else:
            # NOTE(review): "resonse" typo below is in the runtime message;
            # left untouched here since this is a documentation-only pass.
            print('An error occurred. Please review the above resonse and try again.')
    def delete_field_by_id(self, field_id):
        """
        Performs a HTTP DELETE request to delete a Field from your aWhere App.
        Docs: http://developer.awhere.com/api/reference/fields/delete-field
        Args:
            field_id: The field to be deleted
        """
        # Setup the HTTP request headers
        auth_headers = {
            "Authorization": "Bearer %s" % self.auth_token,
            "Content-Type": 'application/json'
        }
        # Perform the POST request to Delete your Field
        response = rq.delete(self._fields_url + '/{0}'.format(field_id),
                             headers=auth_headers)
        print('The server responded with a status code of %d' %
              response.status_code)
    def encode_secret_and_key(self, key, secret):
        """
        Docs:
            http://developer.awhere.com/api/authentication
        Returns:
            Returns the base64-encoded {key}:{secret} combination, seperated by a colon.
        """
        # Base64 Encode the Secret and Key
        key_secret = '%s:%s' % (key, secret)
        #print('\nKey and Secret before Base64 Encoding: %s' % key_secret)
        # b64encode wants bytes; decode back to ASCII for use in a header.
        encoded_key_secret = base64.b64encode(
            bytes(key_secret, 'utf-8')).decode('ascii')
        #print('Key and Secret after Base64 Encoding: %s' % encoded_key_secret)
        return encoded_key_secret
    def get_fields(self):
        """
        Performs a HTTP GET request to obtain all Fields you've created on your aWhere App.
        Docs:
            http://developer.awhere.com/api/reference/fields/get-fields
        """
        # Setup the HTTP request headers
        auth_headers = {
            "Authorization": "Bearer %s" % self.auth_token,
        }
        # Perform the HTTP request to obtain a list of all Fields
        fields_response = rq.get(self._fields_url,
                                 headers=auth_headers)
        responseJSON = fields_response.json()
        # Display the count of Fields the user has on their account
        print('You have %s fields registered on your account' %
              len(responseJSON["fields"]))
        # Iterate over the fields and display their name and ID
        print('{0} {1} \t\t {2}'.format('#', 'Field Name', 'Field ID'))
        print('-------------------------------------------')
        count = 0
        for field in responseJSON["fields"]:
            count += 1
            print('{0}. {1} \t {2}\r'.format(
                count, field["name"], field["id"]))
    def get_weather_by_id(self, field_id):
        """
        Performs a HTTP GET request to obtain Forecast, Historical Norms and Forecasts
        Docs:
            1. Forecast: http://developer.awhere.com/api/forecast-weather-api
            2. Historical Norms: http://developer.awhere.com/api/reference/weather/norms
            3. Observations: http://developer.awhere.com/api/reference/weather/observations
        """
        # Setup the HTTP request headers
        auth_headers = {
            "Authorization": "Bearer %s" % self.auth_token,
        }
        # Perform the HTTP request to obtain the Forecast for the Field
        response = rq.get(self._weather_url + '/{0}/forecasts?blockSize=24'.format(field_id),
                          headers=auth_headers)
        pprint.pprint(response.json())
        print('\nThe above response from the Forecast API endpoint shows the forecast for your field location ({0}).'.format(field_id))
        self._menu.os_pause()
        # Next, let's obtain the historic norms for a Field
        response = rq.get(self._weather_url + '/{0}/norms/04-04'.format(field_id),
                          headers=auth_headers)
        pprint.pprint(response.json())
        print('\nThe above response from the Norms API endpoint shows the averages of the last 10 for an arbitrary date, April 4th.')
        self._menu.os_pause()
        # Finally, display the observed weather. Returns the last 7 days of data for the provided Field.
        response = rq.get(self._weather_url + '/{0}/observations'.format(field_id),
                          headers=auth_headers)
        pprint.pprint(response.json())
        print('\nThe above response from the Observed Weather API endpoint shows the last 7 days of data for the provided field ({0})'.format(field_id))
    def get_oauth_token(self, encoded_key_secret):
        """
        Demonstrates how to make a HTTP POST request to obtain an OAuth Token
        Docs:
            http://developer.awhere.com/api/authentication
        Returns:
            The access token provided by the aWhere API
        """
        auth_url = 'https://api.awhere.com/oauth/token'
        auth_headers = {
            "Authorization": "Basic %s" % encoded_key_secret,
            'Content-Type': 'application/x-www-form-urlencoded'
        }
        body = "grant_type=client_credentials"
        response = rq.post(auth_url,
                           headers=auth_headers,
                           data=body)
        # .json method is a requests lib method that decodes the response
        return response.json()['access_token']
| mit |
RusDavies/ansible-modules-core | cloud/amazon/ec2_vpc_net.py | 88 | 9882 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc_net
short_description: Configure AWS virtual private clouds
description:
- Create or terminate AWS virtual private clouds. This module has a dependency on python-boto.
version_added: "2.0"
author: Jonathan Davila (@defionscode)
options:
name:
description:
- The name to give your VPC. This is used in combination with the cidr_block paramater to determine if a VPC already exists.
required: yes
cidr_block:
description:
- The CIDR of the VPC
required: yes
tenancy:
description:
- Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created.
required: false
default: default
choices: [ 'default', 'dedicated' ]
dns_support:
description:
- Whether to enable AWS DNS support.
required: false
default: yes
choices: [ 'yes', 'no' ]
dns_hostnames:
description:
- Whether to enable AWS hostname support.
required: false
default: yes
choices: [ 'yes', 'no' ]
dhcp_opts_id:
description:
- the id of the DHCP options to use for this vpc
default: null
required: false
tags:
description:
- The tags you want attached to the VPC. This is independent of the name value, note if you pass a 'Name' key it would override the Name of the VPC if it's different.
default: None
required: false
aliases: [ 'resource_tags' ]
state:
description:
- The state of the VPC. Either absent or present.
default: present
required: false
choices: [ 'present', 'absent' ]
multi_ok:
description:
- By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want duplicate VPCs created.
default: false
required: false
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create a VPC with dedicate tenancy and a couple of tags
- ec2_vpc_net:
name: Module_dev2
cidr_block: 10.10.0.0/16
region: us-east-1
tags:
module: ec2_vpc_net
this: works
tenancy: dedicated
'''
import time
import sys
try:
import boto
import boto.ec2
import boto.vpc
from boto.exception import BotoServerError
HAS_BOTO=True
except ImportError:
HAS_BOTO=False
def boto_exception(err):
    '''generic error message handler'''
    # Prefer the richest message attribute boto exposes, in order.
    for attr in ('error_message', 'message'):
        if hasattr(err, attr):
            return getattr(err, attr)
    return '%s: %s' % (Exception, err)
def vpc_exists(module, vpc, name, cidr_block, multi):
    """Return the existing VPC matching name/CIDR, or None.

    Looks VPCs up by their Name tag and CIDR block.  With exactly one
    match, that VPC is returned.  With several matches the module is
    aborted, unless the user opted in to duplicates via ``multi_ok``.

    Args:
        module: AnsibleModule; used to abort via fail_json on errors.
        vpc: boto VPC connection.
        name: value of the Name tag to match.
        cidr_block: CIDR block to match.
        multi: when True, duplicate name/CIDR matches are tolerated.

    Returns:
        The matching boto VPC object, or None when there is no single match.
    """
    matched_vpc = None
    try:
        matching_vpcs = vpc.get_all_vpcs(filters={'tag:Name': name, 'cidr-block': cidr_block})
    except Exception as e:
        e_msg = boto_exception(e)
        module.fail_json(msg=e_msg)
    if len(matching_vpcs) == 1:
        matched_vpc = matching_vpcs[0]
    elif len(matching_vpcs) > 1:
        # BUG FIX: the original tested ``if multi:``, which aborted exactly
        # when the user had *allowed* duplicates via multi_ok and silently
        # continued when duplicates were forbidden.  Abort only when the
        # user did NOT opt in, matching the error message's own advice.
        if not multi:
            module.fail_json(msg='Currently there are %d VPCs that have the same name and '
                                 'CIDR block you specified. If you would like to create '
                                 'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))
    return matched_vpc
def update_vpc_tags(vpc, module, vpc_obj, tags, name):
    """Ensure the VPC carries the requested tags plus a Name tag.

    Args:
        vpc: boto VPC connection.
        module: AnsibleModule; used to abort via fail_json on errors.
        vpc_obj: boto VPC object to tag.
        tags: dict of user-requested tags, or None.
        name: value for the Name tag (overrides any 'Name' key in tags).

    Returns:
        True when the tags were changed, False when they already matched.
    """
    # Work on a copy so the caller's dict is not mutated as a side effect.
    tags = dict(tags) if tags is not None else dict()
    tags.update({'Name': name})
    try:
        current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id}))
        # cmp() is Python-2 only and returned nonzero when the dicts
        # differed; a plain inequality test is the portable equivalent.
        if tags != current_tags:
            vpc.create_tags(vpc_obj.id, tags)
            return True
        else:
            return False
    except Exception as e:
        e_msg = boto_exception(e)
        module.fail_json(msg=e_msg)
def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
    """Attach the requested DHCP option set to the VPC if not already set.

    Returns True when an association was made, False when nothing changed.
    """
    # Nothing to do when the requested option set is already attached.
    if vpc_obj.dhcp_options_id == dhcp_id:
        return False
    connection.associate_dhcp_options(dhcp_id, vpc_obj.id)
    return True
def get_vpc_values(vpc_obj):
    """Return a plain dict of the VPC's attributes for exit_json output.

    Strips the boto bookkeeping entries (region, item, connection) that are
    not JSON-serializable and not useful to the caller.

    Args:
        vpc_obj: boto VPC object, or None.

    Returns:
        dict of the VPC's attributes, or None when vpc_obj is None.
    """
    if vpc_obj is None:
        return None
    # BUG FIX: the original popped keys from vpc_obj.__dict__ directly,
    # destroying the attributes on the live boto object.  Copy first so
    # the object is left intact for any later use.
    vpc_values = dict(vpc_obj.__dict__)
    for key in ("region", "item", "connection"):
        vpc_values.pop(key, None)
    return vpc_values
def main():
    """Entry point: create or delete a VPC according to the module params.

    NOTE(review): this module uses Python-2-only ``except E, e`` syntax
    throughout; it will not parse on Python 3.
    """
    argument_spec=ec2_argument_spec()
    argument_spec.update(dict(
            name = dict(type='str', default=None, required=True),
            cidr_block = dict(type='str', default=None, required=True),
            tenancy = dict(choices=['default', 'dedicated'], default='default'),
            dns_support = dict(type='bool', default=True),
            dns_hostnames = dict(type='bool', default=True),
            dhcp_opts_id = dict(type='str', default=None, required=False),
            tags = dict(type='dict', required=False, default=None, aliases=['resource_tags']),
            state = dict(choices=['present', 'absent'], default='present'),
            multi_ok = dict(type='bool', default=False)
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
    )
    if not HAS_BOTO:
        module.fail_json(msg='boto is required for this module')
    name=module.params.get('name')
    cidr_block=module.params.get('cidr_block')
    tenancy=module.params.get('tenancy')
    dns_support=module.params.get('dns_support')
    dns_hostnames=module.params.get('dns_hostnames')
    dhcp_id=module.params.get('dhcp_opts_id')
    tags=module.params.get('tags')
    state=module.params.get('state')
    multi=module.params.get('multi_ok')
    changed=False
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, StandardError), e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")
    # Hostnames require DNS resolution, so the two flags must be consistent.
    if dns_hostnames and not dns_support:
        module.fail_json('In order to enable DNS Hostnames you must also enable DNS support')
    if state == 'present':
        # Check if VPC exists
        vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)
        if vpc_obj is None:
            try:
                vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy)
                changed = True
            except BotoServerError, e:
                module.fail_json(msg=e)
        if dhcp_id is not None:
            try:
                if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
                    changed = True
            except BotoServerError, e:
                module.fail_json(msg=e)
        if tags is not None or name is not None:
            try:
                if update_vpc_tags(connection, module, vpc_obj, tags, name):
                    changed = True
            except BotoServerError, e:
                module.fail_json(msg=e)
        # Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute
        # which is needed in order to detect the current status of DNS options. For now we just update
        # the attribute each time and is not used as a changed-factor.
        try:
            connection.modify_vpc_attribute(vpc_obj.id, enable_dns_support=dns_support)
            connection.modify_vpc_attribute(vpc_obj.id, enable_dns_hostnames=dns_hostnames)
        except BotoServerError, e:
            e_msg=boto_exception(e)
            module.fail_json(msg=e_msg)
        # get the vpc obj again in case it has changed
        try:
            vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0]
        except BotoServerError, e:
            e_msg=boto_exception(e)
            module.fail_json(msg=e_msg)
        module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
    elif state == 'absent':
        # Check if VPC exists
        vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)
        if vpc_obj is not None:
            try:
                connection.delete_vpc(vpc_obj.id)
                vpc_obj = None
                changed = True
            except BotoServerError, e:
                e_msg = boto_exception(e)
                module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
                                     "and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg)
        module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
# Old-style Ansible modules invoke main() unconditionally (no __main__ guard).
main()
| gpl-3.0 |
bmya/tkobr-addons | tko_l10n_br_point_of_sale_print_cupom_fiscal/account_journal.py | 1 | 1240 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# Thinkopen Brasil
# Copyright (C) Thinkopen Solutions Brasil (<http://www.tkobr.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api, fields, _
class account_journal(models.Model):
    # Extend the core account.journal model with the fiscal code used when
    # printing Brazilian "cupom fiscal" receipts from the point of sale.
    _inherit = 'account.journal'
    # Numeric fiscal printer/device code associated with this journal.
    fiscal_code = fields.Integer('Fiscal Code')
| agpl-3.0 |
eerwitt/tensorflow | tensorflow/python/framework/gen_docs_combined.py | 17 | 13830 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Updates generated docs from Python doc comments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import os.path
import sys
import tensorflow as tf
from tensorflow.contrib import ffmpeg
from tensorflow.python import debug as tf_debug
from tensorflow.python.client import client_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import docs
from tensorflow.python.framework import framework_lib
# Populated by argparse in the __main__ block below; None until parsed.
FLAGS = None
# Boilerplate prepended to several generated doc pages.
PREFIX_TEXT = """
Note: Functions taking `Tensor` arguments can also take anything accepted by
@{tf.convert_to_tensor}.
"""
def module_names():
  """Returns the ordered list of module names to generate docs for."""
  core = [
      "tf",
      "tf.errors",
      "tf.image",
      "tf.nn",
      "tf.train",
      "tf.python_io",
      "tf.saved_model",
      "tf.summary",
      "tf.test",
  ]
  contrib = [
      "tf.contrib.bayesflow.entropy",
      "tf.contrib.bayesflow.monte_carlo",
      "tf.contrib.bayesflow.stochastic_graph",
      "tf.contrib.bayesflow.stochastic_tensor",
      "tf.contrib.bayesflow.variational_inference",
      "tf.contrib.copy_graph",
      "tf.contrib.crf",
      "tf.contrib.distributions",
      "tf.contrib.distributions.bijector",
      "tf.contrib.ffmpeg",
      "tf.contrib.framework",
      "tf.contrib.graph_editor",
      "tf.contrib.integrate",
      "tf.contrib.layers",
      "tf.contrib.learn",
      "tf.contrib.learn.monitors",
      "tf.contrib.legacy_seq2seq",
      "tf.contrib.linalg",
      "tf.contrib.losses",
      "tf.contrib.metrics",
      "tf.contrib.opt",
      "tf.contrib.rnn",
      "tf.contrib.solvers",
      "tf.contrib.training",
      "tf.contrib.util",
  ]
  # tf_debug lives outside the tf namespace but is documented alongside it.
  return core + contrib + ["tf_debug"]
def find_module(base_module, name):
  """Resolves a dotted module name against base_module.

  Raises ValueError for names that do not start with 'tf.' (other than the
  special cases) or whose submodule chain cannot be resolved.
  """
  if name == "tf":
    return base_module
  # Special case for ffmpeg is needed since it's not linked in by default due
  # to size concerns.
  if name == "tf.contrib.ffmpeg":
    return ffmpeg
  if name == "tf_debug":
    return tf_debug
  if not name.startswith("tf."):
    raise ValueError(
        "Invalid module name: {}. Module names must start with 'tf.'".format(
            name))
  # Walk the attribute chain below the leading "tf." prefix.
  parent_module = base_module
  for part in name[3:].split("."):
    if not hasattr(parent_module, part):
      raise ValueError(
          "Module not found: {}. Submodule {} not found in parent module {}."
          " Possible candidates are {}".format(
              name, part, parent_module.__name__, dir(parent_module)))
    parent_module = getattr(parent_module, part)
  return parent_module
def get_module_to_name(names):
  """Maps each resolved module object to its short display name, in order."""
  modules = (find_module(tf, name) for name in names)
  return collections.OrderedDict(zip(modules, names))
def all_libraries(module_to_name, members, documented):
  """Make a list of the individual files that we want to create.
  Args:
    module_to_name: Dictionary mapping modules to short names.
    members: Dictionary mapping member name to (fullname, member).
    documented: Set of documented names to update.
  Returns:
    An OrderedDict mapping output filename to its docs.Library (iterated as
    (filename, library) pairs by the caller).
  """
  # Helper: build one (filename, Library) entry.  When no module object is
  # given, the name is resolved lazily against tensorflow.python.ops.
  def library(name, title, module=None, **args):
    if module is None:
      module = sys.modules["tensorflow.python.ops." + name]
    return (name + ".md", docs.Library(title=title,
                                       module_to_name=module_to_name,
                                       members=members,
                                       documented=documented,
                                       module=module,
                                       **args))
  return collections.OrderedDict([
      # Splits of module 'tf'.
      library("framework", "Building Graphs", framework_lib),
      library("check_ops", "Asserts and boolean checks."),
      library("constant_op", "Constants, Sequences, and Random Values",
              constant_op, prefix=PREFIX_TEXT),
      library("state_ops",
              "Variables",
              exclude_symbols=["create_partitioned_variables"],
              prefix=PREFIX_TEXT),
      library("array_ops",
              "Tensor Transformations",
              exclude_symbols=["list_diff"],
              prefix=PREFIX_TEXT),
      library("math_ops",
              "Math",
              exclude_symbols=["sparse_matmul", "arg_min", "arg_max",
                               "lin_space", "sparse_segment_mean_grad"],
              prefix=PREFIX_TEXT),
      library("string_ops", "Strings",
              prefix=PREFIX_TEXT),
      library("histogram_ops", "Histograms"),
      library("control_flow_ops", "Control Flow", prefix=PREFIX_TEXT),
      library("functional_ops", "Higher Order Functions", prefix=PREFIX_TEXT),
      library("tensor_array_ops", "TensorArray Operations", prefix=PREFIX_TEXT),
      library("session_ops", "Tensor Handle Operations", prefix=PREFIX_TEXT),
      library("image", "Images", tf.image, exclude_symbols=["ResizeMethod"],
              prefix=PREFIX_TEXT),
      library("sparse_ops",
              "Sparse Tensors",
              exclude_symbols=["serialize_sparse", "serialize_many_sparse",
                               "deserialize_many_sparse"],
              prefix=PREFIX_TEXT),
      library("io_ops",
              "Inputs and Readers",
              exclude_symbols=["LookupTableBase", "HashTable",
                               "initialize_all_tables",
                               "tables_initializer",
                               "parse_single_sequence_example",
                               "string_to_hash_bucket"],
              prefix=PREFIX_TEXT),
      library("python_io", "Data IO (Python functions)", tf.python_io),
      library("nn",
              "Neural Network",
              tf.nn,
              exclude_symbols=["conv2d_backprop_input",
                               "conv2d_backprop_filter", "avg_pool_grad",
                               "max_pool_grad", "max_pool_grad_with_argmax",
                               "batch_norm_with_global_normalization_grad",
                               "lrn_grad", "relu6_grad", "softplus_grad",
                               "softsign_grad", "xw_plus_b", "relu_layer",
                               "lrn", "batch_norm_with_global_normalization",
                               "batch_norm_with_global_normalization_grad",
                               "all_candidate_sampler", "seq2seq"],
              prefix=PREFIX_TEXT),
      library("client", "Running Graphs", client_lib),
      library("train",
              "Training",
              tf.train,
              exclude_symbols=["Feature", "Features", "BytesList", "FloatList",
                               "Int64List", "Example", "InferenceExample",
                               "FeatureList", "FeatureLists", "RankingExample",
                               "SequenceExample"]),
      library("script_ops",
              "Wraps python functions",
              prefix=PREFIX_TEXT),
      library("summary", "Summary Operations", tf.summary),
      library("test", "Testing", tf.test),
      library("contrib.bayesflow.entropy",
              "BayesFlow Entropy (contrib)",
              tf.contrib.bayesflow.entropy),
      library("contrib.bayesflow.monte_carlo",
              "BayesFlow Monte Carlo (contrib)",
              tf.contrib.bayesflow.monte_carlo),
      library("contrib.bayesflow.stochastic_graph",
              "BayesFlow Stochastic Graph (contrib)",
              tf.contrib.bayesflow.stochastic_graph),
      library("contrib.bayesflow.stochastic_tensor",
              "BayesFlow Stochastic Tensors (contrib)",
              tf.contrib.bayesflow.stochastic_tensor),
      library("contrib.bayesflow.variational_inference",
              "BayesFlow Variational Inference (contrib)",
              tf.contrib.bayesflow.variational_inference),
      library("contrib.crf", "CRF (contrib)", tf.contrib.crf),
      library("contrib.distributions", "Statistical Distributions (contrib)",
              tf.contrib.distributions),
      library("contrib.distributions.bijector",
              "Random variable transformations (contrib)",
              tf.contrib.distributions.bijector),
      library("contrib.ffmpeg", "FFmpeg (contrib)", ffmpeg),
      library("contrib.framework", "Framework (contrib)", tf.contrib.framework),
      library("contrib.graph_editor", "Graph Editor (contrib)",
              tf.contrib.graph_editor),
      library("contrib.integrate", "Integrate (contrib)", tf.contrib.integrate),
      library("contrib.layers", "Layers (contrib)", tf.contrib.layers),
      library("contrib.learn", "Learn (contrib)", tf.contrib.learn),
      library("contrib.learn.monitors", "Monitors (contrib)",
              tf.contrib.learn.monitors),
      library("contrib.legacy_seq2seq", "Sequence to Sequence (contrib)",
              tf.contrib.legacy_seq2seq),
      library("contrib.linalg", "Linear Algebra (contrib)",
              tf.contrib.linalg),
      library("contrib.losses", "Losses (contrib)", tf.contrib.losses),
      library("contrib.opt", "Optimization (contrib)", tf.contrib.opt),
      library("contrib.rnn", "RNN and Cells (contrib)", tf.contrib.rnn),
      library("contrib.metrics", "Metrics (contrib)", tf.contrib.metrics),
      library("contrib.training", "Training (contrib)", tf.contrib.training),
      library("contrib.util", "Utilities (contrib)", tf.contrib.util),
      library("contrib.copy_graph", "Copying Graph Elements (contrib)",
              tf.contrib.copy_graph),
      library("tf_debug", "TensorFlow Debugger", tf_debug),
  ])
# Symbols excluded from every generated page (protos, infra classes, etc.).
_hidden_symbols = ["Event", "LogMessage", "Summary", "SessionLog", "xrange",
                   "HistogramProto", "ConfigProto", "NodeDef", "GraphDef",
                   "GPUOptions", "GraphOptions", "RunOptions", "RunMetadata",
                   "SessionInterface", "BaseSession", "NameAttrList",
                   "AttrValue", "OptimizerOptions",
                   "CollectionDef", "MetaGraphDef", "QueueRunnerDef",
                   "SaverDef", "VariableDef", "TestCase", "GrpcServer",
                   "ClusterDef", "JobDef", "ServerDef", "TensorInfo"]
# TODO(skleinfeld, deannarubin) Address shortname
# conflict between tf.contrib.learn.NanLossDuringTrainingError and
# tf.contrib.learn.monitors.NanLossDuringTrainingError, arising due
# to imports in learn/python/learn/__init__.py
# TODO(wicke): Remove contrib.layers.relu* after shortnames are
# disabled. These conflict with tf.nn.relu*
# Fully-qualified names that are skipped entirely during member collection.
EXCLUDE = frozenset(["tf.contrib.learn.monitors.NanLossDuringTrainingError",
                     "tf.contrib.layers.dropout",
                     "tf.contrib.layers.bias_add",
                     "tf.contrib.layers.conv2d",
                     "tf.contrib.layers.conv2d_transpose",
                     "tf.contrib.layers.separable_conv2d",
                     "tf.contrib.layers.softmax",
                     "tf.contrib.layers.relu", "tf.contrib.layers.relu6",
                     "tf.contrib.framework.assert_global_step",
                     "tf.contrib.framework.get_global_step",
                     "tf.contrib.learn.NanLossDuringTrainingError",
                     "tf.contrib.layers.stack",
                     "tf.contrib.layers.ProblemType",
                     "tf.confusion_matrix"])
def main(unused_argv):
  """Generates all API doc pages plus the index into FLAGS.out_dir."""
  if not FLAGS.out_dir:
    tf.logging.error("out_dir not specified")
    return -1
  # Document libraries
  documented = set()
  module_to_name = get_module_to_name(module_names())
  members = docs.collect_members(module_to_name, exclude=EXCLUDE)
  libraries = all_libraries(module_to_name, members, documented).items()
  # Define catch_all library before calling write_libraries to avoid complaining
  # about generically hidden symbols.
  catch_all = docs.Library(title="Catch All", module=None,
                           exclude_symbols=_hidden_symbols,
                           module_to_name=module_to_name, members=members,
                           documented=documented)
  # Write docs to files
  docs.write_libraries(FLAGS.out_dir, libraries)
  # Make it easy to search for hidden symbols
  if FLAGS.print_hidden_regex:
    hidden = set(_hidden_symbols)
    for _, lib in libraries:
      hidden.update(lib.exclude_symbols)
    print(r"hidden symbols regex = r'\b(%s)\b'" % "|".join(sorted(hidden)))
  # Verify that all symbols are mentioned in some library doc.
  catch_all.assert_no_leftovers()
  # Generate index
  with open(os.path.join(FLAGS.out_dir, "index.md"), "w") as f:
    docs.Index(module_to_name, members, libraries,
               "../../api_docs/python/").write_markdown_to_file(f)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--out_dir",
type=str,
default=None,
help="Directory to which docs should be written.")
parser.add_argument(
"--print_hidden_regex",
type="bool",
nargs="?",
const=True,
default=False,
help="Dump a regular expression matching any hidden symbol")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
xiaolonw/fast-rcnn_flow2 | tools/compress_net.py | 30 | 3804 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Compress a Fast R-CNN network using truncated SVD."""
import _init_paths
import caffe
import argparse
import numpy as np
import os, sys
def parse_args():
    """Parse input arguments."""
    parser = argparse.ArgumentParser(description='Compress a Fast R-CNN network')
    # (flags, keyword arguments) specs, added in display order.
    option_specs = [
        (('--def',),
         dict(dest='prototxt',
              help='prototxt file defining the uncompressed network; '
                   'e.g., models/VGG16/test.prototxt',
              default=None, type=str)),
        (('--def-svd',),
         dict(dest='prototxt_svd',
              help='prototxt file defining the SVD compressed network '
                   'e.g., models/VGG16/compressed/test.prototxt',
              default=None, type=str)),
        (('--net',),
         dict(dest='caffemodel',
              help='model to compress',
              default=None, type=str)),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)
    # With no arguments at all, show usage and bail out.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def compress_weights(W, l):
    r"""Compress the weight matrix W of an inner product (fully connected) layer
    using truncated SVD.

    Parameters:
      W: N x M weights matrix
      l: number of singular values to retain

    Returns:
      Ul, L: matrices such that W \approx Ul*L

    Note: the docstring is a raw string on purpose -- in the original,
    ``\approx`` inside a plain string embedded a BEL character (``\a``).
    """
    # numpy doesn't seem to have a fast truncated SVD algorithm...
    # this could be faster
    U, s, V = np.linalg.svd(W, full_matrices=False)

    # np.linalg.svd returns singular values in descending order, so the
    # first l columns/rows are exactly the dominant ones.
    Ul = U[:, :l]
    sl = s[:l]
    Vl = V[:l, :]

    # Fold the singular values into the second factor so the caller gets
    # two matrices: Ul is N x l and L is l x M.
    L = np.dot(np.diag(sl), Vl)
    return Ul, L
def main():
    """Compress the fc6/fc7 layers of a Fast R-CNN model with truncated SVD.

    Loads the uncompressed network and the SVD-factored network definition
    (both initialised from the same caffemodel), computes low-rank factors
    for each bottleneck layer declared in the compressed prototxt, and
    saves the compressed caffemodel next to the original one.
    """
    args = parse_args()

    net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
    net_svd = caffe.Net(args.prototxt_svd, args.caffemodel, caffe.TEST)

    print('Uncompressed network {} : {}'.format(args.prototxt, args.caffemodel))
    print('Compressed network prototxt {}'.format(args.prototxt_svd))

    out = os.path.splitext(os.path.basename(args.caffemodel))[0] + '_svd'
    out_dir = os.path.dirname(args.caffemodel)

    # Compress fc6.  (Was `net_svd.params.has_key(...)` -- has_key() is
    # Python 2 only and inconsistent with the print-function calls above.)
    if 'fc6_L' in net_svd.params:
        l_fc6 = net_svd.params['fc6_L'][0].data.shape[0]
        print(' fc6_L bottleneck size: {}'.format(l_fc6))

        # uncompressed weights and biases
        W_fc6 = net.params['fc6'][0].data
        B_fc6 = net.params['fc6'][1].data

        print(' compressing fc6...')
        Ul_fc6, L_fc6 = compress_weights(W_fc6, l_fc6)

        assert(len(net_svd.params['fc6_L']) == 1)

        # install compressed matrix factors (and original biases)
        net_svd.params['fc6_L'][0].data[...] = L_fc6
        net_svd.params['fc6_U'][0].data[...] = Ul_fc6
        net_svd.params['fc6_U'][1].data[...] = B_fc6

        out += '_fc6_{}'.format(l_fc6)

    # Compress fc7 (same procedure as fc6; was also a Python-2 print
    # statement and has_key() call).
    if 'fc7_L' in net_svd.params:
        l_fc7 = net_svd.params['fc7_L'][0].data.shape[0]
        print(' fc7_L bottleneck size: {}'.format(l_fc7))

        W_fc7 = net.params['fc7'][0].data
        B_fc7 = net.params['fc7'][1].data

        print(' compressing fc7...')
        Ul_fc7, L_fc7 = compress_weights(W_fc7, l_fc7)

        assert(len(net_svd.params['fc7_L']) == 1)

        net_svd.params['fc7_L'][0].data[...] = L_fc7
        net_svd.params['fc7_U'][0].data[...] = Ul_fc7
        net_svd.params['fc7_U'][1].data[...] = B_fc7

        out += '_fc7_{}'.format(l_fc7)

    filename = '{}/{}.caffemodel'.format(out_dir, out)
    net_svd.save(filename)
    print('Wrote svd model to: {:s}'.format(filename))
if __name__ == '__main__':
main()
| mit |
TeslaProject/external_chromium_org | chrome/app/PRESUBMIT.py | 80 | 1770 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting chrome/app/
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import os
def _CheckNoProductNameInGeneratedResources(input_api, output_api):
"""Check that no PRODUCT_NAME placeholders are found in resources files.
These kinds of strings prevent proper localization in some languages. For
more information, see the following chromium-dev thread:
https://groups.google.com/a/chromium.org/forum/#!msg/chromium-dev/PBs5JfR0Aoc/NOcIHII9u14J
"""
problems = []
filename_filter = lambda x: x.LocalPath().endswith('.grd')
for f, line_num, line in input_api.RightHandSideLines(filename_filter):
if 'PRODUCT_NAME' in line:
problems.append('%s:%d' % (f.LocalPath(), line_num))
if problems:
return [output_api.PresubmitPromptWarning(
"Don't use PRODUCT_NAME placeholders in string resources. Instead, add "
"separate strings to google_chrome_strings.grd and "
"chromium_strings.grd. See http://goo.gl/6614MQ for more information."
"Problems with this check? Contact dubroy@chromium.org.",
items=problems)]
return []
def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit."""
  # A single check today; the list() wrapper keeps the return type a fresh
  # list so further checks can be chained in later.
  return list(_CheckNoProductNameInGeneratedResources(input_api, output_api))
def CheckChangeOnUpload(input_api, output_api):
  # Presubmit entry point: run the shared checks when a change is uploaded.
  return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
  # Presubmit entry point: run the shared checks when a change is committed.
  return _CommonChecks(input_api, output_api)
| bsd-3-clause |
meego-tablet-ux/meego-app-browser | third_party/mesa/MesaLib/src/gallium/tests/graw/fragment-shader/fragment-shader.py | 32 | 7321 | #!/usr/bin/env python
##########################################################################
#
# Copyright 2009 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sub license, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice (including the
# next paragraph) shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
# IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
##########################################################################
import struct
from gallium import *
def make_image(surface):
    """Read back *surface* and wrap the pixels in a PIL image (RGBA order)."""
    raw = surface.get_tile_rgba8(0, 0, surface.width, surface.height)
    import Image
    return Image.fromstring('RGBA', (surface.width, surface.height), raw,
                            "raw", 'RGBA', 0, 1)
def save_image(filename, surface):
    """Convert *surface* with make_image() and write it out as a PNG file."""
    make_image(surface).save(filename, "PNG")
def test(dev, name):
    """Render one test triangle with fragment shader 'frag-<name>.sh' and
    save the 320x320 result as 'frag-<name>.png'.

    dev is a gallium Device; name selects which per-test TGSI fragment
    shader file to load from the current directory.
    """
    ctx = dev.context_create()
    width = 320
    height = 320
    minz = 0.0
    maxz = 1.0

    # disabled blending/masking
    blend = Blend()
    blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_ONE
    blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE
    blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ZERO
    blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO
    blend.rt[0].colormask = PIPE_MASK_RGBA
    ctx.set_blend(blend)

    # depth/stencil/alpha (depth test disabled)
    depth_stencil_alpha = DepthStencilAlpha()
    depth_stencil_alpha.depth.enabled = 0
    depth_stencil_alpha.depth.writemask = 1
    depth_stencil_alpha.depth.func = PIPE_FUNC_LESS
    ctx.set_depth_stencil_alpha(depth_stencil_alpha)

    # rasterizer
    rasterizer = Rasterizer()
    rasterizer.front_winding = PIPE_WINDING_CW
    rasterizer.cull_mode = PIPE_WINDING_NONE
    rasterizer.scissor = 1
    ctx.set_rasterizer(rasterizer)

    # viewport: map clip space to the window; y scale is negated so that
    # +y in clip space points up in the saved image
    viewport = Viewport()
    scale = FloatArray(4)
    scale[0] = width / 2.0
    scale[1] = -height / 2.0
    scale[2] = (maxz - minz) / 2.0
    scale[3] = 1.0
    viewport.scale = scale
    translate = FloatArray(4)
    translate[0] = width / 2.0
    translate[1] = height / 2.0
    translate[2] = (maxz - minz) / 2.0
    translate[3] = 0.0
    viewport.translate = translate
    ctx.set_viewport(viewport)

    # samplers
    sampler = Sampler()
    sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE
    sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE
    sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE
    sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE
    sampler.min_img_filter = PIPE_TEX_MIPFILTER_NEAREST
    sampler.mag_img_filter = PIPE_TEX_MIPFILTER_NEAREST
    sampler.normalized_coords = 1
    ctx.set_fragment_sampler(0, sampler)

    # scissor (covers the whole framebuffer)
    scissor = Scissor()
    scissor.minx = 0
    scissor.miny = 0
    scissor.maxx = width
    scissor.maxy = height
    ctx.set_scissor(scissor)

    # no user clip planes
    clip = Clip()
    clip.nr = 0
    ctx.set_clip(clip)

    # framebuffer: a single color render target
    cbuf = dev.resource_create(
        PIPE_FORMAT_B8G8R8X8_UNORM,
        width, height,
        bind=PIPE_BIND_RENDER_TARGET,
    ).get_surface()
    fb = Framebuffer()
    fb.width = width
    fb.height = height
    fb.nr_cbufs = 1
    fb.set_cbuf(0, cbuf)
    ctx.set_framebuffer(fb)
    # clear to mid-grey so untouched pixels are easy to spot in the output
    rgba = FloatArray(4);
    rgba[0] = 0.5
    rgba[1] = 0.5
    rgba[2] = 0.5
    rgba[3] = 0.5
    ctx.clear(PIPE_CLEAR_COLOR, rgba, 0.0, 0)

    # vertex shader: pass position and color straight through
    vs = Shader('''
    VERT
    DCL IN[0], POSITION
    DCL IN[1], COLOR
    DCL OUT[0], POSITION
    DCL OUT[1], COLOR
    MOV OUT[0], IN[0]
    MOV OUT[1], IN[1]
    END
    ''')
    ctx.set_vertex_shader(vs)

    # fragment shader under test, loaded from the per-test file
    # (file() is the Python 2 builtin; this script predates Python 3)
    fs = Shader(file('frag-' + name + '.sh', 'rt').read())
    ctx.set_fragment_shader(fs)

    # constant buffer 0: four vec4 constants the test shaders may read
    constbuf0 = dev.buffer_create(64,
                                  (PIPE_BUFFER_USAGE_CONSTANT |
                                   PIPE_BUFFER_USAGE_GPU_READ |
                                   PIPE_BUFFER_USAGE_CPU_WRITE),
                                  4 * 4 * 4)
    cbdata = ''
    cbdata += struct.pack('4f', 0.4, 0.0, 0.0, 1.0)
    cbdata += struct.pack('4f', 1.0, 1.0, 1.0, 1.0)
    cbdata += struct.pack('4f', 2.0, 2.0, 2.0, 2.0)
    cbdata += struct.pack('4f', 4.0, 8.0, 16.0, 32.0)
    constbuf0.write(cbdata, 0)
    ctx.set_constant_buffer(PIPE_SHADER_FRAGMENT,
                            0,
                            constbuf0)
    # constant buffer 1: a second set of four vec4 constants
    constbuf1 = dev.buffer_create(64,
                                  (PIPE_BUFFER_USAGE_CONSTANT |
                                   PIPE_BUFFER_USAGE_GPU_READ |
                                   PIPE_BUFFER_USAGE_CPU_WRITE),
                                  4 * 4 * 4)
    cbdata = ''
    cbdata += struct.pack('4f', 0.1, 0.1, 0.1, 0.1)
    cbdata += struct.pack('4f', 0.25, 0.25, 0.25, 0.25)
    cbdata += struct.pack('4f', 0.5, 0.5, 0.5, 0.5)
    cbdata += struct.pack('4f', 0.75, 0.75, 0.75, 0.75)
    constbuf1.write(cbdata, 0)
    ctx.set_constant_buffer(PIPE_SHADER_FRAGMENT,
                            1,
                            constbuf1)

    # one triangle with a distinct color per vertex
    xy = [
        -0.8, -0.8,
        0.8, -0.8,
        0.0, 0.8,
    ]
    color = [
        1.0, 0.0, 0.0,
        0.0, 1.0, 0.0,
        0.0, 0.0, 1.0,
    ]
    # interleave the two vec4 attributes (xyzw position, rgba color)
    nverts = 3
    nattrs = 2
    verts = FloatArray(nverts * nattrs * 4)
    for i in range(0, nverts):
        verts[i * nattrs * 4 + 0] = xy[i * 2 + 0] # x
        verts[i * nattrs * 4 + 1] = xy[i * 2 + 1] # y
        verts[i * nattrs * 4 + 2] = 0.5 # z
        verts[i * nattrs * 4 + 3] = 1.0 # w
        verts[i * nattrs * 4 + 4] = color[i * 3 + 0] # r
        verts[i * nattrs * 4 + 5] = color[i * 3 + 1] # g
        verts[i * nattrs * 4 + 6] = color[i * 3 + 2] # b
        verts[i * nattrs * 4 + 7] = 1.0 # a

    ctx.draw_vertices(PIPE_PRIM_TRIANGLES,
                      nverts,
                      nattrs,
                      verts)

    ctx.flush()

    save_image('frag-' + name + '.png', cbuf)
def main():
    """Run every fragment-shader test case against a fresh gallium device.

    Each name corresponds to a 'frag-<name>.sh' TGSI shader file and
    produces a 'frag-<name>.png' output image.
    """
    test_names = [
        'abs',
        'add',
        'cb-1d',
        'cb-2d',
        'dp3',
        'dp4',
        'dst',
        'ex2',
        'flr',
        'frc',
        'lg2',
        'lit',
        'lrp',
        'mad',
        'max',
        'min',
        'mov',
        'mul',
        'rcp',
        'rsq',
        'sge',
        'slt',
        'srcmod-abs',
        'srcmod-absneg',
        'srcmod-neg',
        'srcmod-swz',
        'sub',
        'xpd',
    ]

    dev = Device()
    for test_name in test_names:
        test(dev, test_name)
if __name__ == '__main__':
main()
| bsd-3-clause |
Thhhza/XlsxWriter | xlsxwriter/test/comparison/test_repeat04.py | 8 | 1357 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.maxDiff = None

        base_dir = 'xlsxwriter/test/comparison/'
        fname = 'repeat04.xlsx'

        self.got_filename = base_dir + '_test_' + fname
        self.exp_filename = base_dir + 'xlsx_files/' + fname

        # Printer settings are binary and writer-specific, so they (and the
        # rels file that references them) are excluded from the comparison.
        self.ignore_files = ['xl/printerSettings/printerSettings1.bin',
                             'xl/worksheets/_rels/sheet1.xml.rels']
        self.ignore_elements = {'[Content_Types].xml': ['<Default Extension="bin"'],
                                'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup']}

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with repeat rows when the sheet name contains a space."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet('Sheet 1')

        # Repeat the first row on every printed page.
        worksheet.repeat_rows(0)
        worksheet.write('A1', 'Foo')

        workbook.close()

        self.assertExcelEqual()
| bsd-2-clause |
dongjoon-hyun/spark | examples/src/main/python/mllib/word2vec.py | 27 | 1789 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This example uses text8 file from http://mattmahoney.net/dc/text8.zip
# The file was downloaded, unzipped and split into multiple lines using
#
# wget http://mattmahoney.net/dc/text8.zip
# unzip text8.zip
# grep -o -E '\w+(\W+\w+){0,15}' text8 > text8_lines
# This was done so that the example can be run in local mode
import sys
from pyspark import SparkContext
from pyspark.mllib.feature import Word2Vec
# Usage string shown when the input-file argument is missing.
USAGE = ("bin/spark-submit --driver-memory 4g "
         "examples/src/main/python/mllib/word2vec.py text8_lines")

if __name__ == "__main__":
    if len(sys.argv) < 2:
        print(USAGE)
        sys.exit("Argument for file not provided")
    file_path = sys.argv[1]
    sc = SparkContext(appName='Word2Vec')
    # Each line of the input file becomes one "sentence": a list of tokens
    # obtained by splitting the line on single spaces.
    inp = sc.textFile(file_path).map(lambda row: row.split(" "))

    word2vec = Word2Vec()
    model = word2vec.fit(inp)

    # Look up the 40 words closest to 'china' in the learned vector space.
    # NOTE(review): the loop variable is named cosine_distance, but MLlib's
    # findSynonyms reports cosine *similarity* -- confirm against the docs.
    synonyms = model.findSynonyms('china', 40)

    for word, cosine_distance in synonyms:
        print("{}: {}".format(word, cosine_distance))
    sc.stop()
| apache-2.0 |
strazzere/pfp | pfp/native/compat_math.py | 3 | 2377 | #!/usr/bin/env python
# encoding: utf-8
"""
This module of native functions is implemented for
compatability with 010 editor functions. Some of these functions
are nops, some are fully implemented.
"""
import sys
from pfp.native import native
import pfp.fields
# http://www.sweetscape.com/010editor/manual/FuncMath.htm
# Each stub below mirrors one function of 010 Editor's math interface
# (prototype shown in the comment above each definition).  All of them share
# the interpreter's native-function signature imposed by the @native
# decorator -- (params, ctxt, scope, stream, coord) -- and all are
# unimplemented: calling any of them raises NotImplementedError.

#double Abs( double x )
@native(name="Abs", ret=pfp.fields.Double)
def Abs(params, ctxt, scope, stream, coord):
    """Stub for 010's ``Abs(x)``; always raises NotImplementedError."""
    raise NotImplementedError()

#double Ceil( double x )
@native(name="Ceil", ret=pfp.fields.Double)
def Ceil(params, ctxt, scope, stream, coord):
    """Stub for 010's ``Ceil(x)``; always raises NotImplementedError."""
    raise NotImplementedError()

#double Cos( double a )
@native(name="Cos", ret=pfp.fields.Double)
def Cos(params, ctxt, scope, stream, coord):
    """Stub for 010's ``Cos(a)``; always raises NotImplementedError."""
    raise NotImplementedError()

#double Exp( double x )
@native(name="Exp", ret=pfp.fields.Double)
def Exp(params, ctxt, scope, stream, coord):
    """Stub for 010's ``Exp(x)``; always raises NotImplementedError."""
    raise NotImplementedError()

#double Floor( double x)
@native(name="Floor", ret=pfp.fields.Double)
def Floor(params, ctxt, scope, stream, coord):
    """Stub for 010's ``Floor(x)``; always raises NotImplementedError."""
    raise NotImplementedError()

#double Log( double x )
@native(name="Log", ret=pfp.fields.Double)
def Log(params, ctxt, scope, stream, coord):
    """Stub for 010's ``Log(x)``; always raises NotImplementedError."""
    raise NotImplementedError()

#double Max( double a, double b )
@native(name="Max", ret=pfp.fields.Double)
def Max(params, ctxt, scope, stream, coord):
    """Stub for 010's ``Max(a, b)``; always raises NotImplementedError."""
    raise NotImplementedError()

#double Min( double a, double b)
@native(name="Min", ret=pfp.fields.Double)
def Min(params, ctxt, scope, stream, coord):
    """Stub for 010's ``Min(a, b)``; always raises NotImplementedError."""
    raise NotImplementedError()

#double Pow( double x, double y)
@native(name="Pow", ret=pfp.fields.Double)
def Pow(params, ctxt, scope, stream, coord):
    """Stub for 010's ``Pow(x, y)``; always raises NotImplementedError."""
    raise NotImplementedError()

#int Random( int maximum )
@native(name="Random", ret=pfp.fields.Int)
def Random(params, ctxt, scope, stream, coord):
    """Stub for 010's ``Random(maximum)``; always raises NotImplementedError."""
    raise NotImplementedError()

#double Sin( double a )
@native(name="Sin", ret=pfp.fields.Double)
def Sin(params, ctxt, scope, stream, coord):
    """Stub for 010's ``Sin(a)``; always raises NotImplementedError."""
    raise NotImplementedError()

#double Sqrt( double x )
@native(name="Sqrt", ret=pfp.fields.Double)
def Sqrt(params, ctxt, scope, stream, coord):
    """Stub for 010's ``Sqrt(x)``; always raises NotImplementedError."""
    raise NotImplementedError()

#data_type SwapBytes( data_type x )
@native(name="SwapBytes", ret=pfp.fields.Int)
def SwapBytes(params, ctxt, scope, stream, coord):
    """Stub for 010's ``SwapBytes(x)``; always raises NotImplementedError."""
    raise NotImplementedError()

#double Tan( double a )
@native(name="Tan", ret=pfp.fields.Double)
def Tan(params, ctxt, scope, stream, coord):
    """Stub for 010's ``Tan(a)``; always raises NotImplementedError."""
    raise NotImplementedError()
| mit |
clucas111/delineating-linear-elements | Code/clf_preprocessing.py | 1 | 1569 | # -*- coding: utf-8 -*-
"""
@author: Chris Lucas
"""
import numpy as np
import pandas as pd
def merge_dataframes(dfs, key_field_name):
    """
    Merges dataframes containing data of one class into one dataframe with
    the class in a column.

    Parameters
    ----------
    dfs : dict of DataFrames
        Dictionary with the class as key and the value as the dataframes
        to be merged.
    key_field_name : string
        Name of the column the class label is written to.  As before, the
        label column is also added to the input dataframes in place.

    Returns
    -------
    df : DataFrame
        The merged dataframe.
    """
    # The original used dict.iteritems() (Python 2 only) and repeated
    # DataFrame.append() (removed in pandas 2.x, and quadratic); label each
    # frame and concatenate them in a single pass instead.
    labelled = []
    for key, frame in dfs.items():
        frame[key_field_name] = key
        labelled.append(frame)
    if not labelled:
        return pd.DataFrame()
    return pd.concat(labelled)
def correlated_features(df, features, corr_th=0.98):
    """
    Determines highly correlated features which can consequently be dropped.

    Parameters
    ----------
    df : DataFrame
        The feature values.
    features : list of strings
        The names of the features (column names in the dataframe)
        to be checked.
    corr_th : float
        The correlation coefficient threshold to determine what is highly
        correlated.

    Returns
    -------
    drops : list of strings
        The names of the features which can be dropped.
    """
    corr = df[features].astype(np.float64).corr(method='pearson')
    # Zero the diagonal so a feature is never flagged against itself.
    off_diag = np.ones(corr.columns.size) - np.eye(corr.columns.size)
    corr = off_diag * corr

    drops = []
    for column in corr.columns.values:
        # Anything already marked for dropping needs no further checks.
        if np.in1d([column], drops):
            continue
        highly_correlated = corr[abs(corr[column]) > corr_th].index
        drops = np.union1d(drops, highly_correlated)
    return drops
| apache-2.0 |
etuna-SBF-kog/Stadsparken | env/lib/python2.7/site-packages/django/contrib/localflavor/pl/pl_administrativeunits.py | 433 | 13194 | # -*- coding: utf-8 -*-
"""
Polish administrative units as in http://pl.wikipedia.org/wiki/Podzia%C5%82_administracyjny_Polski
"""
ADMINISTRATIVE_UNIT_CHOICES = (
('wroclaw', u'Wrocław'),
('jeleniagora', u'Jelenia Góra'),
('legnica', u'Legnica'),
('boleslawiecki', u'bolesławiecki'),
('dzierzoniowski', u'dzierżoniowski'),
('glogowski', u'głogowski'),
('gorowski', u'górowski'),
('jaworski', u'jaworski'),
('jeleniogorski', u'jeleniogórski'),
('kamiennogorski', u'kamiennogórski'),
('klodzki', u'kłodzki'),
('legnicki', u'legnicki'),
('lubanski', u'lubański'),
('lubinski', u'lubiński'),
('lwowecki', u'lwówecki'),
('milicki', u'milicki'),
('olesnicki', u'oleśnicki'),
('olawski', u'oławski'),
('polkowicki', u'polkowicki'),
('strzelinski', u'strzeliński'),
('sredzki', u'średzki'),
('swidnicki', u'świdnicki'),
('trzebnicki', u'trzebnicki'),
('walbrzyski', u'wałbrzyski'),
('wolowski', u'wołowski'),
('wroclawski', u'wrocławski'),
('zabkowicki', u'ząbkowicki'),
('zgorzelecki', u'zgorzelecki'),
('zlotoryjski', u'złotoryjski'),
('bydgoszcz', u'Bydgoszcz'),
('torun', u'Toruń'),
('wloclawek', u'Włocławek'),
('grudziadz', u'Grudziądz'),
('aleksandrowski', u'aleksandrowski'),
('brodnicki', u'brodnicki'),
('bydgoski', u'bydgoski'),
('chelminski', u'chełmiński'),
('golubsko-dobrzynski', u'golubsko-dobrzyński'),
('grudziadzki', u'grudziądzki'),
('inowroclawski', u'inowrocławski'),
('lipnowski', u'lipnowski'),
('mogilenski', u'mogileński'),
('nakielski', u'nakielski'),
('radziejowski', u'radziejowski'),
('rypinski', u'rypiński'),
('sepolenski', u'sępoleński'),
('swiecki', u'świecki'),
('torunski', u'toruński'),
('tucholski', u'tucholski'),
('wabrzeski', u'wąbrzeski'),
('wloclawski', u'wrocławski'),
('zninski', u'źniński'),
('lublin', u'Lublin'),
('biala-podlaska', u'Biała Podlaska'),
('chelm', u'Chełm'),
('zamosc', u'Zamość'),
('bialski', u'bialski'),
('bilgorajski', u'biłgorajski'),
('chelmski', u'chełmski'),
('hrubieszowski', u'hrubieszowski'),
('janowski', u'janowski'),
('krasnostawski', u'krasnostawski'),
('krasnicki', u'kraśnicki'),
('lubartowski', u'lubartowski'),
('lubelski', u'lubelski'),
('leczynski', u'łęczyński'),
('lukowski', u'łukowski'),
('opolski', u'opolski'),
('parczewski', u'parczewski'),
('pulawski', u'puławski'),
('radzynski', u'radzyński'),
('rycki', u'rycki'),
('swidnicki', u'świdnicki'),
('tomaszowski', u'tomaszowski'),
('wlodawski', u'włodawski'),
('zamojski', u'zamojski'),
('gorzow-wielkopolski', u'Gorzów Wielkopolski'),
('zielona-gora', u'Zielona Góra'),
('gorzowski', u'gorzowski'),
('krosnienski', u'krośnieński'),
('miedzyrzecki', u'międzyrzecki'),
('nowosolski', u'nowosolski'),
('slubicki', u'słubicki'),
('strzelecko-drezdenecki', u'strzelecko-drezdenecki'),
('sulecinski', u'suleńciński'),
('swiebodzinski', u'świebodziński'),
('wschowski', u'wschowski'),
('zielonogorski', u'zielonogórski'),
('zaganski', u'żagański'),
('zarski', u'żarski'),
('lodz', u'Łódź'),
('piotrkow-trybunalski', u'Piotrków Trybunalski'),
('skierniewice', u'Skierniewice'),
('belchatowski', u'bełchatowski'),
('brzezinski', u'brzeziński'),
('kutnowski', u'kutnowski'),
('laski', u'łaski'),
('leczycki', u'łęczycki'),
('lowicki', u'łowicki'),
('lodzki wschodni', u'łódzki wschodni'),
('opoczynski', u'opoczyński'),
('pabianicki', u'pabianicki'),
('pajeczanski', u'pajęczański'),
('piotrkowski', u'piotrkowski'),
('poddebicki', u'poddębicki'),
('radomszczanski', u'radomszczański'),
('rawski', u'rawski'),
('sieradzki', u'sieradzki'),
('skierniewicki', u'skierniewicki'),
('tomaszowski', u'tomaszowski'),
('wielunski', u'wieluński'),
('wieruszowski', u'wieruszowski'),
('zdunskowolski', u'zduńskowolski'),
('zgierski', u'zgierski'),
('krakow', u'Kraków'),
('tarnow', u'Tarnów'),
('nowy-sacz', u'Nowy Sącz'),
('bochenski', u'bocheński'),
('brzeski', u'brzeski'),
('chrzanowski', u'chrzanowski'),
('dabrowski', u'dąbrowski'),
('gorlicki', u'gorlicki'),
('krakowski', u'krakowski'),
('limanowski', u'limanowski'),
('miechowski', u'miechowski'),
('myslenicki', u'myślenicki'),
('nowosadecki', u'nowosądecki'),
('nowotarski', u'nowotarski'),
('olkuski', u'olkuski'),
('oswiecimski', u'oświęcimski'),
('proszowicki', u'proszowicki'),
('suski', u'suski'),
('tarnowski', u'tarnowski'),
('tatrzanski', u'tatrzański'),
('wadowicki', u'wadowicki'),
('wielicki', u'wielicki'),
('warszawa', u'Warszawa'),
('ostroleka', u'Ostrołęka'),
('plock', u'Płock'),
('radom', u'Radom'),
('siedlce', u'Siedlce'),
('bialobrzeski', u'białobrzeski'),
('ciechanowski', u'ciechanowski'),
('garwolinski', u'garwoliński'),
('gostyninski', u'gostyniński'),
('grodziski', u'grodziski'),
('grojecki', u'grójecki'),
('kozienicki', u'kozenicki'),
('legionowski', u'legionowski'),
('lipski', u'lipski'),
('losicki', u'łosicki'),
('makowski', u'makowski'),
('minski', u'miński'),
('mlawski', u'mławski'),
('nowodworski', u'nowodworski'),
('ostrolecki', u'ostrołęcki'),
('ostrowski', u'ostrowski'),
('otwocki', u'otwocki'),
('piaseczynski', u'piaseczyński'),
('plocki', u'płocki'),
('plonski', u'płoński'),
('pruszkowski', u'pruszkowski'),
('przasnyski', u'przasnyski'),
('przysuski', u'przysuski'),
('pultuski', u'pułtuski'),
('radomski', u'radomski'),
('siedlecki', u'siedlecki'),
('sierpecki', u'sierpecki'),
('sochaczewski', u'sochaczewski'),
('sokolowski', u'sokołowski'),
('szydlowiecki', u'szydłowiecki'),
('warszawski-zachodni', u'warszawski zachodni'),
('wegrowski', u'węgrowski'),
('wolominski', u'wołomiński'),
('wyszkowski', u'wyszkowski'),
('zwolenski', u'zwoleński'),
('zurominski', u'żuromiński'),
('zyrardowski', u'żyrardowski'),
('opole', u'Opole'),
('brzeski', u'brzeski'),
('glubczycki', u'głubczyski'),
('kedzierzynsko-kozielski', u'kędzierzyński-kozielski'),
('kluczborski', u'kluczborski'),
('krapkowicki', u'krapkowicki'),
('namyslowski', u'namysłowski'),
('nyski', u'nyski'),
('oleski', u'oleski'),
('opolski', u'opolski'),
('prudnicki', u'prudnicki'),
('strzelecki', u'strzelecki'),
('rzeszow', u'Rzeszów'),
('krosno', u'Krosno'),
('przemysl', u'Przemyśl'),
('tarnobrzeg', u'Tarnobrzeg'),
('bieszczadzki', u'bieszczadzki'),
('brzozowski', u'brzozowski'),
('debicki', u'dębicki'),
('jaroslawski', u'jarosławski'),
('jasielski', u'jasielski'),
('kolbuszowski', u'kolbuszowski'),
('krosnienski', u'krośnieński'),
('leski', u'leski'),
('lezajski', u'leżajski'),
('lubaczowski', u'lubaczowski'),
('lancucki', u'łańcucki'),
('mielecki', u'mielecki'),
('nizanski', u'niżański'),
('przemyski', u'przemyski'),
('przeworski', u'przeworski'),
('ropczycko-sedziszowski', u'ropczycko-sędziszowski'),
('rzeszowski', u'rzeszowski'),
('sanocki', u'sanocki'),
('stalowowolski', u'stalowowolski'),
('strzyzowski', u'strzyżowski'),
('tarnobrzeski', u'tarnobrzeski'),
('bialystok', u'Białystok'),
('lomza', u'Łomża'),
('suwalki', u'Suwałki'),
('augustowski', u'augustowski'),
('bialostocki', u'białostocki'),
('bielski', u'bielski'),
('grajewski', u'grajewski'),
('hajnowski', u'hajnowski'),
('kolnenski', u'kolneński'),
('łomzynski', u'łomżyński'),
('moniecki', u'moniecki'),
('sejnenski', u'sejneński'),
('siemiatycki', u'siematycki'),
('sokolski', u'sokólski'),
('suwalski', u'suwalski'),
('wysokomazowiecki', u'wysokomazowiecki'),
('zambrowski', u'zambrowski'),
('gdansk', u'Gdańsk'),
('gdynia', u'Gdynia'),
('slupsk', u'Słupsk'),
('sopot', u'Sopot'),
('bytowski', u'bytowski'),
('chojnicki', u'chojnicki'),
('czluchowski', u'człuchowski'),
('kartuski', u'kartuski'),
('koscierski', u'kościerski'),
('kwidzynski', u'kwidzyński'),
('leborski', u'lęborski'),
('malborski', u'malborski'),
('nowodworski', u'nowodworski'),
('gdanski', u'gdański'),
('pucki', u'pucki'),
('slupski', u'słupski'),
('starogardzki', u'starogardzki'),
('sztumski', u'sztumski'),
('tczewski', u'tczewski'),
('wejherowski', u'wejcherowski'),
('katowice', u'Katowice'),
('bielsko-biala', u'Bielsko-Biała'),
('bytom', u'Bytom'),
('chorzow', u'Chorzów'),
('czestochowa', u'Częstochowa'),
('dabrowa-gornicza', u'Dąbrowa Górnicza'),
('gliwice', u'Gliwice'),
('jastrzebie-zdroj', u'Jastrzębie Zdrój'),
('jaworzno', u'Jaworzno'),
('myslowice', u'Mysłowice'),
('piekary-slaskie', u'Piekary Śląskie'),
('ruda-slaska', u'Ruda Śląska'),
('rybnik', u'Rybnik'),
('siemianowice-slaskie', u'Siemianowice Śląskie'),
('sosnowiec', u'Sosnowiec'),
('swietochlowice', u'Świętochłowice'),
('tychy', u'Tychy'),
('zabrze', u'Zabrze'),
('zory', u'Żory'),
('bedzinski', u'będziński'),
('bielski', u'bielski'),
('bierunsko-ledzinski', u'bieruńsko-lędziński'),
('cieszynski', u'cieszyński'),
('czestochowski', u'częstochowski'),
('gliwicki', u'gliwicki'),
('klobucki', u'kłobucki'),
('lubliniecki', u'lubliniecki'),
('mikolowski', u'mikołowski'),
('myszkowski', u'myszkowski'),
('pszczynski', u'pszczyński'),
('raciborski', u'raciborski'),
('rybnicki', u'rybnicki'),
('tarnogorski', u'tarnogórski'),
('wodzislawski', u'wodzisławski'),
('zawiercianski', u'zawierciański'),
('zywiecki', u'żywiecki'),
('kielce', u'Kielce'),
('buski', u'buski'),
('jedrzejowski', u'jędrzejowski'),
('kazimierski', u'kazimierski'),
('kielecki', u'kielecki'),
('konecki', u'konecki'),
('opatowski', u'opatowski'),
('ostrowiecki', u'ostrowiecki'),
('pinczowski', u'pińczowski'),
('sandomierski', u'sandomierski'),
('skarzyski', u'skarżyski'),
('starachowicki', u'starachowicki'),
('staszowski', u'staszowski'),
('wloszczowski', u'włoszczowski'),
('olsztyn', u'Olsztyn'),
('elblag', u'Elbląg'),
('bartoszycki', u'bartoszycki'),
('braniewski', u'braniewski'),
('dzialdowski', u'działdowski'),
('elblaski', u'elbląski'),
('elcki', u'ełcki'),
('gizycki', u'giżycki'),
('goldapski', u'gołdapski'),
('ilawski', u'iławski'),
('ketrzynski', u'kętrzyński'),
('lidzbarski', u'lidzbarski'),
('mragowski', u'mrągowski'),
('nidzicki', u'nidzicki'),
('nowomiejski', u'nowomiejski'),
('olecki', u'olecki'),
('olsztynski', u'olsztyński'),
('ostrodzki', u'ostródzki'),
('piski', u'piski'),
('szczycienski', u'szczycieński'),
('wegorzewski', u'węgorzewski'),
('poznan', u'Poznań'),
('kalisz', u'Kalisz'),
('konin', u'Konin'),
('leszno', u'Leszno'),
('chodzieski', u'chodziejski'),
('czarnkowsko-trzcianecki', u'czarnkowsko-trzcianecki'),
('gnieznienski', u'gnieźnieński'),
('gostynski', u'gostyński'),
('grodziski', u'grodziski'),
('jarocinski', u'jarociński'),
('kaliski', u'kaliski'),
('kepinski', u'kępiński'),
('kolski', u'kolski'),
('koninski', u'koniński'),
('koscianski', u'kościański'),
('krotoszynski', u'krotoszyński'),
('leszczynski', u'leszczyński'),
('miedzychodzki', u'międzychodzki'),
('nowotomyski', u'nowotomyski'),
('obornicki', u'obornicki'),
('ostrowski', u'ostrowski'),
('ostrzeszowski', u'ostrzeszowski'),
('pilski', u'pilski'),
('pleszewski', u'pleszewski'),
('poznanski', u'poznański'),
('rawicki', u'rawicki'),
('slupecki', u'słupecki'),
('szamotulski', u'szamotulski'),
('sredzki', u'średzki'),
('sremski', u'śremski'),
('turecki', u'turecki'),
('wagrowiecki', u'wągrowiecki'),
('wolsztynski', u'wolsztyński'),
('wrzesinski', u'wrzesiński'),
('zlotowski', u'złotowski'),
('bialogardzki', u'białogardzki'),
('choszczenski', u'choszczeński'),
('drawski', u'drawski'),
('goleniowski', u'goleniowski'),
('gryficki', u'gryficki'),
('gryfinski', u'gryfiński'),
('kamienski', u'kamieński'),
('kolobrzeski', u'kołobrzeski'),
('koszalinski', u'koszaliński'),
('lobeski', u'łobeski'),
('mysliborski', u'myśliborski'),
('policki', u'policki'),
('pyrzycki', u'pyrzycki'),
('slawienski', u'sławieński'),
('stargardzki', u'stargardzki'),
('szczecinecki', u'szczecinecki'),
('swidwinski', u'świdwiński'),
('walecki', u'wałecki'),
)
| gpl-3.0 |
praekelt/rapidpro | temba/schedules/migrations/0001_initial.py | 7 | 2320 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial migration: creates the Schedule model.

    NOTE: migration files are frozen history — field options (including the
    Python-2-era b'...' byte-string help_texts) must not be edited, or
    Django will detect a state drift against later migrations.
    """

    dependencies = [
        # The user model is swappable, so depend on whatever AUTH_USER_MODEL
        # resolves to rather than a hard-coded app/model pair.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Schedule',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('is_active', models.BooleanField(default=True, help_text=b'Whether this item is active, use this instead of deleting')),
                ('created_on', models.DateTimeField(help_text=b'When this item was originally created', auto_now_add=True)),
                ('modified_on', models.DateTimeField(help_text=b'When this item was last modified', auto_now=True)),
                ('status', models.CharField(default='U', max_length=1, choices=[('U', 'Unscheduled'), ('S', 'Scheduled')])),
                ('repeat_hour_of_day', models.IntegerField(help_text='The hour of the day', null=True)),
                ('repeat_day_of_month', models.IntegerField(help_text='The day of the month to repeat on', null=True)),
                ('repeat_period', models.CharField(help_text='When this schedule repeats', max_length=1, null=True, choices=[('O', 'Never'), ('D', 'Daily'), ('W', 'Weekly'), ('M', 'Monthly')])),
                # Bit mask: which weekdays a weekly schedule fires on.
                ('repeat_days', models.IntegerField(default=0, help_text='bit mask of days of the week', null=True, blank=True)),
                ('last_fire', models.DateTimeField(default=None, help_text='When this schedule last fired', null=True, blank=True)),
                ('next_fire', models.DateTimeField(default=None, help_text='When this schedule fires next', null=True, blank=True)),
                ('created_by', models.ForeignKey(related_name=b'schedules_schedule_creations', to=settings.AUTH_USER_MODEL, help_text=b'The user which originally created this item')),
                ('modified_by', models.ForeignKey(related_name=b'schedules_schedule_modifications', to=settings.AUTH_USER_MODEL, help_text=b'The user which last modified this item')),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
    ]
| agpl-3.0 |
petemounce/ansible | lib/ansible/modules/source_control/gitlab_project.py | 38 | 15438 | #!/usr/bin/python
# (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gitlab_project
short_description: Creates/updates/deletes Gitlab Projects
description:
    - When the project does not exist in Gitlab, it will be created.
    - When the project does exist and state=absent, the project will be deleted.
- When changes are made to the project, the project will be updated.
version_added: "2.1"
author: "Werner Dijkerman (@dj-wasabi)"
requirements:
- pyapi-gitlab python module
options:
server_url:
description:
- Url of Gitlab server, with protocol (http or https).
required: true
validate_certs:
description:
- When using https if SSL certificate needs to be verified.
required: false
default: true
aliases:
- verify_ssl
login_user:
description:
- Gitlab user name.
required: false
default: null
login_password:
description:
- Gitlab password for login_user
required: false
default: null
login_token:
description:
- Gitlab token for logging in.
required: false
default: null
group:
description:
- The name of the group of which this projects belongs to.
- When not provided, project will belong to user which is configured in 'login_user' or 'login_token'
- When provided with username, project will be created for this user. 'login_user' or 'login_token' needs admin rights.
required: false
default: null
name:
description:
- The name of the project
required: true
path:
description:
- The path of the project you want to create, this will be server_url/<group>/path
- If not supplied, name will be used.
required: false
default: null
description:
description:
            - A description for the project.
required: false
default: null
issues_enabled:
description:
- Whether you want to create issues or not.
- Possible values are true and false.
required: false
default: true
merge_requests_enabled:
description:
- If merge requests can be made or not.
- Possible values are true and false.
required: false
default: true
wiki_enabled:
description:
            - If a wiki for this project should be available or not.
- Possible values are true and false.
required: false
default: true
snippets_enabled:
description:
- If creating snippets should be available or not.
- Possible values are true and false.
required: false
default: true
public:
description:
- If the project is public available or not.
- Setting this to true is same as setting visibility_level to 20.
- Possible values are true and false.
required: false
default: false
visibility_level:
description:
- Private. visibility_level is 0. Project access must be granted explicitly for each user.
- Internal. visibility_level is 10. The project can be cloned by any logged in user.
- Public. visibility_level is 20. The project can be cloned without any authentication.
- Possible values are 0, 10 and 20.
required: false
default: 0
import_url:
description:
            - Git repository which will be imported into gitlab.
- Gitlab server needs read access to this git repository.
required: false
default: false
state:
description:
- create or delete project.
- Possible values are present and absent.
required: false
default: "present"
choices: ["present", "absent"]
'''
EXAMPLES = '''
- name: Delete Gitlab Project
gitlab_project:
server_url: http://gitlab.example.com
validate_certs: False
login_token: WnUzDsxjy8230-Dy_k
name: my_first_project
state: absent
delegate_to: localhost
- name: Create Gitlab Project in group Ansible
gitlab_project:
server_url: https://gitlab.example.com
validate_certs: True
login_user: dj-wasabi
login_password: MySecretPassword
name: my_first_project
group: ansible
issues_enabled: False
wiki_enabled: True
snippets_enabled: True
import_url: http://git.example.com/example/lab.git
state: present
delegate_to: localhost
'''
RETURN = '''# '''
try:
import gitlab
HAS_GITLAB_PACKAGE = True
except:
HAS_GITLAB_PACKAGE = False
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
class GitLabProject(object):
    """Thin wrapper around a pyapi-gitlab connection for project CRUD.

    Method names are camelCase to preserve the module's public interface.
    """

    def __init__(self, module, git):
        self._module = module   # AnsibleModule, for check_mode / exit_json
        self._gitlab = git      # authenticated gitlab.Gitlab connection

    def createOrUpdateProject(self, project_exists, group_name, import_url, arguments):
        """Create the project when absent, otherwise update it.

        When group_name matches no group it is treated as a user name,
        which requires admin rights on the login credentials.
        """
        is_user = False
        group_id = self.getGroupId(group_name)
        if not group_id:
            group_id = self.getUserId(group_name)
            is_user = True

        if project_exists:
            # Edit existing project
            return self.updateProject(group_name, arguments)
        else:
            # Create new project
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            return self.createProject(is_user, group_id, import_url, arguments)

    def createProject(self, is_user, user_id, import_url, arguments):
        """Create a project owned by a user (is_user) or by a group."""
        if is_user:
            return self._gitlab.createprojectuser(user_id=user_id, import_url=import_url, **arguments)
        else:
            group_id = user_id
            return self._gitlab.createproject(namespace_id=group_id, import_url=import_url, **arguments)

    def _projectOwner(self, group_name):
        """Namespace that owns the project: the group when it exists,
        otherwise the currently authenticated user."""
        if self.existsGroup(group_name):
            return group_name
        return self._gitlab.currentuser()['username']

    def _findProject(self, group_name, project_name):
        """Return the search result for project_name in its owner's
        namespace, or None when no such project exists."""
        project_owner = self._projectOwner(group_name)
        for result in self._gitlab.searchproject(search=project_name):
            if result['namespace']['name'] == project_owner:
                return result
        return None

    def deleteProject(self, group_name, project_name):
        """Delete the project; returns the API result, or None if absent."""
        project = self._findProject(group_name, project_name)
        if project is not None:
            return self._gitlab.deleteproject(project['id'])

    def existsProject(self, group_name, project_name):
        """Return True when the project exists in its owner's namespace."""
        return self._findProject(group_name, project_name) is not None

    def existsGroup(self, group_name):
        """Return True when group_name names an existing group or user."""
        if group_name is not None:
            # Find the group; if the group does not exist we try for a user.
            for group in self._gitlab.getall(self._gitlab.getgroups):
                if group['name'] == group_name:
                    return True

            user_name = group_name
            user_data = self._gitlab.getusers(search=user_name)
            for data in user_data:
                # BUG FIX: the original tested `'id' in user_data` (the list
                # of results), which is never true; test each user record.
                if 'id' in data:
                    return True
        return False

    def getGroupId(self, group_name):
        """Return the id of the named group, or None when not found."""
        if group_name is not None:
            # Find the group; if the group does not exist we try for a user.
            for group in self._gitlab.getall(self._gitlab.getgroups):
                if group['name'] == group_name:
                    return group['id']

    def getProjectId(self, group_name, project_name):
        """Return the project's id, or None when not found."""
        project = self._findProject(group_name, project_name)
        if project is not None:
            return project['id']

    def getUserId(self, user_name):
        """Return the id of the named user, falling back to the current
        authenticated user when the search yields nothing usable."""
        user_data = self._gitlab.getusers(search=user_name)
        for data in user_data:
            if 'id' in data:
                return data['id']
        return self._gitlab.currentuser()['id']

    def to_bool(self, value):
        """Map a truthy/falsy value onto the 1/0 ints the API expects."""
        if value:
            return 1
        else:
            return 0

    def updateProject(self, group_name, arguments):
        """Update the project when any argument differs from the server's
        current value; returns the API result, or False when unchanged."""
        project_changed = False
        project_name = arguments['name']
        project_id = self.getProjectId(group_name, project_name)
        project_data = self._gitlab.getproject(project_id=project_id)

        for arg_key, arg_value in arguments.items():
            project_data_value = project_data[arg_key]
            if isinstance(project_data_value, bool) or project_data_value is None:
                # Server reports booleans; our arguments carry 1/0.
                to_bool = self.to_bool(project_data_value)
                if to_bool != arg_value:
                    project_changed = True
                    continue
            else:
                if project_data_value != arg_value:
                    project_changed = True

        if project_changed:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            return self._gitlab.editproject(project_id=project_id, **arguments)
        else:
            return False
def main():
    """Entry point for the gitlab_project Ansible module.

    Parses module parameters, connects to the Gitlab server (credentials or
    token), then creates/updates/deletes the requested project and exits via
    module.exit_json / module.fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(required=True),
            validate_certs=dict(required=False, default=True, type='bool', aliases=['verify_ssl']),
            login_user=dict(required=False, no_log=True),
            login_password=dict(required=False, no_log=True),
            login_token=dict(required=False, no_log=True),
            group=dict(required=False),
            name=dict(required=True),
            path=dict(required=False),
            description=dict(required=False),
            issues_enabled=dict(default=True, type='bool'),
            merge_requests_enabled=dict(default=True, type='bool'),
            wiki_enabled=dict(default=True, type='bool'),
            snippets_enabled=dict(default=True, type='bool'),
            public=dict(default=False, type='bool'),
            visibility_level=dict(default="0", choices=["0", "10", "20"]),
            import_url=dict(required=False),
            state=dict(default="present", choices=["present", 'absent']),
        ),
        supports_check_mode=True
    )

    if not HAS_GITLAB_PACKAGE:
        # BUG FIX: the original message was missing the closing parenthesis.
        module.fail_json(msg="Missing required gitlab module (check docs or install with: pip install pyapi-gitlab)")

    server_url = module.params['server_url']
    verify_ssl = module.params['validate_certs']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    login_token = module.params['login_token']
    group_name = module.params['group']
    project_name = module.params['name']
    project_path = module.params['path']
    description = module.params['description']
    issues_enabled = module.params['issues_enabled']
    merge_requests_enabled = module.params['merge_requests_enabled']
    wiki_enabled = module.params['wiki_enabled']
    snippets_enabled = module.params['snippets_enabled']
    public = module.params['public']
    visibility_level = module.params['visibility_level']
    import_url = module.params['import_url']
    state = module.params['state']

    # We need both login_user and login_password or login_token, otherwise we fail.
    if login_user is not None and login_password is not None:
        use_credentials = True
    elif login_token is not None:
        use_credentials = False
    else:
        module.fail_json(msg="No login credentials are given. Use login_user with login_password, or login_token")

    # Set project_path to project_name if it is empty.
    if project_path is None:
        project_path = project_name.replace(" ", "_")

    # Gitlab API makes no difference between upper and lower cases, so we lower them.
    project_name = project_name.lower()
    project_path = project_path.lower()
    if group_name is not None:
        group_name = group_name.lower()

    # Connect to the Gitlab server_url, with either login_user/login_password
    # or with login_token.
    try:
        if use_credentials:
            git = gitlab.Gitlab(host=server_url, verify_ssl=verify_ssl)
            git.login(user=login_user, password=login_password)
        else:
            git = gitlab.Gitlab(server_url, token=login_token, verify_ssl=verify_ssl)
    except Exception:
        e = get_exception()
        module.fail_json(msg="Failed to connect to Gitlab server: %s " % e)

    # Check if user is authorized or not before proceeding to any operations;
    # if not, exit from here.
    auth_msg = git.currentuser().get('message', None)
    if auth_msg is not None and auth_msg == '401 Unauthorized':
        module.fail_json(msg='User unauthorized',
                         details="User is not allowed to access Gitlab server "
                                 "using login_token. Please check login_token")

    # Validate if project exists and take action based on "state"
    project = GitLabProject(module, git)
    project_exists = project.existsProject(group_name, project_name)

    # Creating the project dict (the API expects 1/0 rather than True/False).
    arguments = {"name": project_name,
                 "path": project_path,
                 "description": description,
                 "issues_enabled": project.to_bool(issues_enabled),
                 "merge_requests_enabled": project.to_bool(merge_requests_enabled),
                 "wiki_enabled": project.to_bool(wiki_enabled),
                 "snippets_enabled": project.to_bool(snippets_enabled),
                 "public": project.to_bool(public),
                 "visibility_level": int(visibility_level)}

    if project_exists and state == "absent":
        project.deleteProject(group_name, project_name)
        module.exit_json(changed=True, result="Successfully deleted project %s" % project_name)
    else:
        if state == "absent":
            module.exit_json(changed=False, result="Project deleted or does not exists")
        else:
            if project.createOrUpdateProject(project_exists, group_name, import_url, arguments):
                module.exit_json(changed=True, result="Successfully created or updated the project %s" % project_name)
            else:
                module.exit_json(changed=False)
if __name__ == '__main__':
main()
| gpl-3.0 |
tmhm/scikit-learn | sklearn/cluster/setup.py | 263 | 1449 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils Configuration for sklearn.cluster.

    Registers the Cython-generated extensions (_dbscan_inner, _hierarchical,
    _k_means), linking the k-means one against the detected CBLAS.
    """
    from numpy.distutils.misc_util import Configuration

    cblas_libs, blas_info = get_blas_info()

    libraries = []
    if os.name == 'posix':
        # libm is needed explicitly on POSIX platforms.
        cblas_libs.append('m')
        libraries.append('m')

    np_include = numpy.get_include()
    config = Configuration('cluster', parent_package, top_path)

    config.add_extension('_dbscan_inner',
                         language="c++",
                         sources=['_dbscan_inner.cpp'],
                         include_dirs=[np_include])

    config.add_extension('_hierarchical',
                         language="c++",
                         sources=['_hierarchical.cpp'],
                         include_dirs=[np_include],
                         libraries=libraries)

    config.add_extension(
        '_k_means',
        libraries=cblas_libs,
        sources=['_k_means.c'],
        include_dirs=[join('..', 'src', 'cblas'),
                      np_include,
                      blas_info.pop('include_dirs', [])],
        extra_compile_args=blas_info.pop('extra_compile_args', []),
        **blas_info
    )

    return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
jeffery-do/Vizdoombot | examples/python/scenarios.py | 1 | 2862 | #!/usr/bin/env python
#####################################################################
# This script presents how to run some scenarios.
# Configuration is loaded from "../../examples/config/<SCENARIO_NAME>.cfg" file.
# <episodes> number of episodes are played.
# Random combination of buttons is chosen for every action.
# Game variables from state and last reward are printed.
#
# To see the scenario description go to "../../scenarios/README.md"
#####################################################################
from __future__ import print_function
import itertools as it
from random import choice
from time import sleep
from vizdoom import DoomGame, ScreenResolution
game = DoomGame()
# Choose scenario config file you wish to watch.
# Don't load two configs cause the second will overrite the first one.
# Multiple config files are ok but combining these ones doesn't make much sense.
game.load_config("../../examples/config/basic.cfg")
# game.load_config("../../examples/config/simpler_basic.cfg")
# game.load_config("../../examples/config/rocket_basic.cfg")
# game.load_config("../../examples/config/deadly_corridor.cfg")
# game.load_config("../../examples/config/deathmatch.cfg")
# game.load_config("../../examples/config/defend_the_center.cfg")
# game.load_config("../../examples/config/defend_the_line.cfg")
# game.load_config("../../examples/config/health_gathering.cfg")
# game.load_config("../../examples/config/my_way_home.cfg")
# game.load_config("../../examples/config/predict_position.cfg")
# game.load_config("../../examples/config/take_cover.cfg")
# Makes the screen bigger to see more details.
game.set_screen_resolution(ScreenResolution.RES_640X480)
game.set_window_visible(True)
game.init()
# Creates all possible actions depending on how many buttons there are.
actions_num = game.get_available_buttons_size()
actions = []
for perm in it.product([False, True], repeat=actions_num):
actions.append(list(perm))
episodes = 10
sleep_time = 0.028
for i in range(episodes):
print("Episode #" + str(i + 1))
# Not needed for the first episode but the loop is nicer.
game.new_episode()
while not game.is_episode_finished():
# Gets the state and possibly to something with it
state = game.get_state()
# Makes a random action and save the reward.
reward = game.make_action(choice(actions))
print("State #" + str(state.number))
print("Game Variables:", state.game_variables)
print("Performed action:", game.get_last_action())
print("Last Reward:", reward)
print("=====================")
# Sleep some time because processing is too fast to watch.
if sleep_time > 0:
sleep(sleep_time)
print("Episode finished!")
print("total reward:", game.get_total_reward())
print("************************")
| mit |
BeATz-UnKNoWN/python-for-android | python3-alpha/python3-src/Lib/pdb.py | 47 | 56534 | #! /usr/bin/env python3
"""
The Python Debugger Pdb
=======================
To use the debugger in its simplest form:
>>> import pdb
>>> pdb.run('<a statement>')
The debugger's prompt is '(Pdb) '. This will stop in the first
function call in <a statement>.
Alternatively, if a statement terminated with an unhandled exception,
you can use pdb's post-mortem facility to inspect the contents of the
traceback:
>>> <a statement>
<exception traceback>
>>> import pdb
>>> pdb.pm()
The commands recognized by the debugger are listed in the next
section. Most can be abbreviated as indicated; e.g., h(elp) means
that 'help' can be typed as 'h' or 'help' (but not as 'he' or 'hel',
nor as 'H' or 'Help' or 'HELP'). Optional arguments are enclosed in
square brackets. Alternatives in the command syntax are separated
by a vertical bar (|).
A blank line repeats the previous command literally, except for
'list', where it lists the next 11 lines.
Commands that the debugger doesn't recognize are assumed to be Python
statements and are executed in the context of the program being
debugged. Python statements can also be prefixed with an exclamation
point ('!'). This is a powerful way to inspect the program being
debugged; it is even possible to change variables or call functions.
When an exception occurs in such a statement, the exception name is
printed but the debugger's state is not changed.
The debugger supports aliases, which can save typing. And aliases can
have parameters (see the alias help entry) which allows one a certain
level of adaptability to the context under examination.
Multiple commands may be entered on a single line, separated by the
pair ';;'. No intelligence is applied to separating the commands; the
input is split at the first ';;', even if it is in the middle of a
quoted string.
If a file ".pdbrc" exists in your home directory or in the current
directory, it is read in and executed as if it had been typed at the
debugger prompt. This is particularly useful for aliases. If both
files exist, the one in the home directory is read first and aliases
defined there can be overridden by the local file.
Aside from aliases, the debugger is not directly programmable; but it
is implemented as a class from which you can derive your own debugger
class, which you can make as fancy as you like.
Debugger commands
=================
"""
# NOTE: the actual command documentation is collected from docstrings of the
# commands and is appended to __doc__ after the class has been defined.
import os
import re
import sys
import cmd
import bdb
import dis
import code
import pprint
import signal
import inspect
import traceback
import linecache
class Restart(Exception):
    """Raised to signal that the debugged program should be restarted."""
__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace",
"post_mortem", "help"]
def find_function(funcname, filename):
    """Return (funcname, filename, lineno) for the first line of *filename*
    that looks like a definition of *funcname*, or None when the file
    cannot be opened or contains no match.

    Line numbers are 1-based, as the consumer of this info expects.
    """
    cre = re.compile(r'def\s+%s\s*[(]' % re.escape(funcname))
    try:
        fp = open(filename)
    except IOError:
        return None
    # 'with' guarantees the file is closed even if reading raises, which
    # the original open()/close() pair did not.
    with fp:
        for lineno, line in enumerate(fp, start=1):
            if cre.match(line):
                return funcname, filename, lineno
    return None
def getsourcelines(obj):
    """Return (lines, start_lineno) of source for *obj*.

    Modules — and frames executing at module level (f_globals is f_locals)
    — get the whole file starting at line 1 rather than a cut-out block.
    """
    lines, lnum = inspect.findsource(obj)
    whole_file = inspect.ismodule(obj) or (
        inspect.isframe(obj) and obj.f_globals is obj.f_locals)
    if whole_file:
        return lines, 1
    return inspect.getblock(lines[lnum:]), lnum + 1
def lasti2lineno(code, lasti):
    """Map a bytecode offset *lasti* to its source line in *code*.

    Walks the line-start table backwards and returns the line of the last
    instruction-start at or before *lasti*; 0 when there is none.
    """
    for offset, lineno in reversed(list(dis.findlinestarts(code))):
        if lasti >= offset:
            return lineno
    return 0
class _rstr(str):
    """String that doesn't quote its repr."""
    # NOTE(review): returning self is valid because _rstr IS a str;
    # presumably used where already-formatted text (e.g. an error summary)
    # must be shown without repr()'s surrounding quotes — confirm at call
    # sites later in this file.
    def __repr__(self):
        return self
# Interaction prompt line will separate file and call info from code
# text using value of line_prefix string. A newline and arrow may
# be to your liking. You can set it once pdb is imported using the
# command "pdb.line_prefix = '\n% '".
# line_prefix = ': ' # Use this to get the old situation back
line_prefix = '\n-> ' # Probably a better default
class Pdb(bdb.Bdb, cmd.Cmd):
    def __init__(self, completekey='tab', stdin=None, stdout=None, skip=None,
                 nosigint=False):
        """Initialize the debugger.

        completekey/stdin/stdout are forwarded to cmd.Cmd; skip is bdb.Bdb's
        module-skip pattern list.  nosigint presumably suppresses installing
        the SIGINT handler when execution resumes — confirm against the
        resume commands defined later in this class.
        """
        # Initialize both bases: Bdb drives tracing, Cmd drives the REPL.
        bdb.Bdb.__init__(self, skip=skip)
        cmd.Cmd.__init__(self, completekey, stdin, stdout)
        if stdout:
            # An explicit stdout implies non-interactive use: disable input().
            self.use_rawinput = 0
        self.prompt = '(Pdb) '
        self.aliases = {}
        # Maps frame -> {expression: last value} for the 'display' machinery.
        self.displaying = {}
        self.mainpyfile = ''
        self._wait_for_mainpyfile = False
        # Maps frame -> original traceback line number (see setup()).
        self.tb_lineno = {}
        # Try to load readline if it exists
        try:
            import readline
        except ImportError:
            pass
        self.allow_kbdint = False
        self.nosigint = nosigint

        # Read $HOME/.pdbrc and ./.pdbrc
        self.rcLines = []
        if 'HOME' in os.environ:
            envHome = os.environ['HOME']
            try:
                with open(os.path.join(envHome, ".pdbrc")) as rcFile:
                    self.rcLines.extend(rcFile)
            except IOError:
                pass
        try:
            with open(".pdbrc") as rcFile:
                self.rcLines.extend(rcFile)
        except IOError:
            pass

        self.commands = {} # associates a command list to breakpoint numbers
        self.commands_doprompt = {} # for each bp num, tells if the prompt
                                    # must be disp. after execing the cmd list
        self.commands_silent = {} # for each bp num, tells if the stack trace
                                  # must be disp. after execing the cmd list
        self.commands_defining = False # True while in the process of defining
                                       # a command list
        self.commands_bnum = None # The breakpoint number for which we are
                                  # defining a list
    def sigint_handler(self, signum, frame):
        """SIGINT handler: break back into the debugger at *frame*.

        While interactive input is being read (allow_kbdint), re-raise
        KeyboardInterrupt so only the current command is cancelled.
        """
        if self.allow_kbdint:
            raise KeyboardInterrupt
        self.message("\nProgram interrupted. (Use 'cont' to resume).")
        self.set_step()
        self.set_trace(frame)
        # restore previous signal handler
        signal.signal(signal.SIGINT, self._previous_sigint_handler)
    def reset(self):
        """Reset Bdb's tracing state and drop the cached stack info."""
        bdb.Bdb.reset(self)
        self.forget()
def forget(self):
self.lineno = None
self.stack = []
self.curindex = 0
self.curframe = None
self.tb_lineno.clear()
    def setup(self, f, tb):
        """Prepare interaction state for frame *f* and traceback *tb*.

        Returns True when leftover .pdbrc commands requested skipping the
        interaction (see execRcLines).
        """
        self.forget()
        self.stack, self.curindex = self.get_stack(f, tb)
        while tb:
            # when setting up post-mortem debugging with a traceback, save all
            # the original line numbers to be displayed along the current line
            # numbers (which can be different, e.g. due to finally clauses)
            lineno = lasti2lineno(tb.tb_frame.f_code, tb.tb_lasti)
            self.tb_lineno[tb.tb_frame] = lineno
            tb = tb.tb_next
        self.curframe = self.stack[self.curindex][0]
        # The f_locals dictionary is updated from the actual frame
        # locals whenever the .f_locals accessor is called, so we
        # cache it here to ensure that modifications are not overwritten.
        self.curframe_locals = self.curframe.f_locals
        return self.execRcLines()
# Can be executed earlier than 'setup' if desired
    def execRcLines(self):
        """Execute the queued .pdbrc lines, each at most once.

        Returns True when a command resumed execution (onecmd returned
        True); in that case the remaining lines are re-queued for the next
        interaction.
        """
        if not self.rcLines:
            return
        # local copy because of recursion
        rcLines = self.rcLines
        rcLines.reverse()
        # execute every line only once
        self.rcLines = []
        while rcLines:
            line = rcLines.pop().strip()
            # Skip blanks and '#' comment lines.
            if line and line[0] != '#':
                if self.onecmd(line):
                    # if onecmd returns True, the command wants to exit
                    # from the interaction, save leftover rc lines
                    # to execute before next interaction
                    self.rcLines += reversed(rcLines)
                    return True
# Override Bdb methods
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
if self._wait_for_mainpyfile:
return
if self.stop_here(frame):
self.message('--Call--')
self.interaction(frame, None)
def user_line(self, frame):
"""This function is called when we stop or break at this line."""
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
or frame.f_lineno <= 0):
return
self._wait_for_mainpyfile = False
if self.bp_commands(frame):
self.interaction(frame, None)
    def bp_commands(self, frame):
        """Call every command that was set for the current active breakpoint
        (if there is one).

        Returns True if the normal interaction function must be called,
        False otherwise."""
        # self.currentbp is set in bdb in Bdb.break_here if a breakpoint was hit
        if getattr(self, "currentbp", False) and \
               self.currentbp in self.commands:
            currentbp = self.currentbp
            self.currentbp = 0
            # Preserve lastcmd across the scripted commands so blank-line
            # repetition still refers to the user's own last command.
            lastcmd_back = self.lastcmd
            self.setup(frame, None)
            for line in self.commands[currentbp]:
                self.onecmd(line)
            self.lastcmd = lastcmd_back
            if not self.commands_silent[currentbp]:
                self.print_stack_entry(self.stack[self.curindex])
            if self.commands_doprompt[currentbp]:
                self._cmdloop()
            self.forget()
            # Falsy return: the command list already handled the interaction.
            return
        # No command list for this breakpoint: proceed with normal interaction.
        return 1
    def user_return(self, frame, return_value):
        """This function is called when a return trap is set here."""
        if self._wait_for_mainpyfile:
            return
        # Expose the value being returned to debugger commands as __return__.
        frame.f_locals['__return__'] = return_value
        self.message('--Return--')
        self.interaction(frame, None)
    def user_exception(self, frame, exc_info):
        """This function is called if an exception occurs,
        but only if we are to stop at or just below this level."""
        if self._wait_for_mainpyfile:
            return
        exc_type, exc_value, exc_traceback = exc_info
        # Expose the exception to debugger commands as __exception__, then
        # show only its one-line summary before entering interaction.
        frame.f_locals['__exception__'] = exc_type, exc_value
        self.message(traceback.format_exception_only(exc_type,
            exc_value)[-1].strip())
        self.interaction(frame, exc_traceback)
# General interaction function
    def _cmdloop(self):
        """Run cmdloop(), turning KeyboardInterrupt into a message and a
        retry so Ctrl-C cancels the current command, not the session."""
        while True:
            try:
                # keyboard interrupts allow for an easy way to cancel
                # the current command, so allow them during interactive input
                self.allow_kbdint = True
                self.cmdloop()
                self.allow_kbdint = False
                break
            except KeyboardInterrupt:
                self.message('--KeyboardInterrupt--')
# Called before loop, handles display expressions
    def preloop(self):
        """Before each prompt: re-evaluate 'display' expressions for the
        current frame and report those whose value changed."""
        displaying = self.displaying.get(self.curframe)
        if displaying:
            for expr, oldvalue in displaying.items():
                newvalue = self._getval_except(expr)
                # check for identity first; this prevents custom __eq__ to
                # be called at every loop, and also prevents instances whose
                # fields are changed to be displayed
                if newvalue is not oldvalue and newvalue != oldvalue:
                    displaying[expr] = newvalue
                    self.message('display %s: %r [old: %r]' %
                                 (expr, newvalue, oldvalue))
    def interaction(self, frame, traceback):
        """Enter the interactive command loop for *frame* (optionally with
        a post-mortem *traceback*)."""
        if self.setup(frame, traceback):
            # no interaction desired at this time (happens if .pdbrc contains
            # a command like "continue")
            self.forget()
            return
        self.print_stack_entry(self.stack[self.curindex])
        self._cmdloop()
        self.forget()
def displayhook(self, obj):
"""Custom displayhook for the exec in default(), which prevents
assignment of the _ variable in the builtins.
"""
# reproduce the behavior of the standard displayhook, not printing None
if obj is not None:
self.message(repr(obj))
    def default(self, line):
        """Execute an unrecognized command line as a Python statement in the
        current frame's namespace (a leading '!' is stripped first).

        Exceptions are reported as a one-line message; debugger state is
        left unchanged.
        """
        if line[:1] == '!': line = line[1:]
        locals = self.curframe_locals
        globals = self.curframe.f_globals
        try:
            code = compile(line + '\n', '<stdin>', 'single')
            # Redirect the interpreter's I/O and displayhook to the
            # debugger's streams while the user statement runs; restore in
            # 'finally' so a raising statement cannot leave them swapped.
            save_stdout = sys.stdout
            save_stdin = sys.stdin
            save_displayhook = sys.displayhook
            try:
                sys.stdin = self.stdin
                sys.stdout = self.stdout
                sys.displayhook = self.displayhook
                exec(code, globals, locals)
            finally:
                sys.stdout = save_stdout
                sys.stdin = save_stdin
                sys.displayhook = save_displayhook
        except:
            exc_info = sys.exc_info()[:2]
            self.error(traceback.format_exception_only(*exc_info)[-1].strip())
    def precmd(self, line):
        """Handle alias expansion and ';;' separator."""
        if not line.strip():
            return line
        args = line.split()
        # Expand aliases repeatedly, so an alias may refer to another alias.
        while args[0] in self.aliases:
            line = self.aliases[args[0]]
            ii = 1
            # %1, %2, ... expand to positional arguments; %* to all of them.
            for tmpArg in args[1:]:
                line = line.replace("%" + str(ii),
                                    tmpArg)
                ii += 1
            line = line.replace("%*", ' '.join(args[1:]))
            args = line.split()
        # split into ';;' separated commands
        # unless it's an alias command
        if args[0] != 'alias':
            marker = line.find(';;')
            if marker >= 0:
                # queue up everything after marker
                next = line[marker+2:].lstrip()
                self.cmdqueue.append(next)
                line = line[:marker].rstrip()
        return line
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
Checks whether this line is typed at the normal prompt or in
a breakpoint command list definition.
"""
if not self.commands_defining:
return cmd.Cmd.onecmd(self, line)
else:
return self.handle_command_def(line)
    def handle_command_def(self, line):
        """Handles one command line during command list definition.

        Returns a truthy value (1) to terminate the definition loop, either
        on an explicit 'end' or when a resuming command is entered.
        """
        # NOTE: the local name 'cmd' shadows the imported 'cmd' module
        # inside this method.
        cmd, arg, line = self.parseline(line)
        if not cmd:
            return
        if cmd == 'silent':
            self.commands_silent[self.commands_bnum] = True
            return # continue to handle other cmd def in the cmd list
        elif cmd == 'end':
            self.cmdqueue = []
            return 1 # end of cmd list
        cmdlist = self.commands[self.commands_bnum]
        if arg:
            cmdlist.append(cmd+' '+arg)
        else:
            cmdlist.append(cmd)
        # Determine if we must stop
        try:
            func = getattr(self, 'do_' + cmd)
        except AttributeError:
            func = self.default
        # one of the resuming commands
        if func.__name__ in self.commands_resuming:
            self.commands_doprompt[self.commands_bnum] = False
            self.cmdqueue = []
            return 1
        return
# interface abstraction functions
    def message(self, msg):
        """Write msg plus a newline to the debugger's output stream."""
        print(msg, file=self.stdout)
    def error(self, msg):
        """Write an error message, prefixed with '***', to the output stream."""
        print('***', msg, file=self.stdout)
# Command definitions, called by cmdloop()
# The argument is the remaining string on the command line
# Return true to exit from the command loop
def do_commands(self, arg):
"""commands [bpnumber]
(com) ...
(com) end
(Pdb)
Specify a list of commands for breakpoint number bpnumber.
The commands themselves are entered on the following lines.
Type a line containing just 'end' to terminate the commands.
The commands are executed when the breakpoint is hit.
To remove all commands from a breakpoint, type commands and
follow it immediately with end; that is, give no commands.
With no bpnumber argument, commands refers to the last
breakpoint set.
You can use breakpoint commands to start your program up
again. Simply use the continue command, or step, or any other
command that resumes execution.
Specifying any command resuming execution (currently continue,
step, next, return, jump, quit and their abbreviations)
terminates the command list (as if that command was
immediately followed by end). This is because any time you
resume execution (even with a simple next or step), you may
encounter another breakpoint -- which could have its own
command list, leading to ambiguities about which list to
execute.
If you use the 'silent' command in the command list, the usual
message about stopping at a breakpoint is not printed. This
may be desirable for breakpoints that are to print a specific
message and then continue. If none of the other commands
print anything, you will see no sign that the breakpoint was
reached.
"""
if not arg:
bnum = len(bdb.Breakpoint.bpbynumber) - 1
else:
try:
bnum = int(arg)
except:
self.error("Usage: commands [bnum]\n ...\n end")
return
self.commands_bnum = bnum
# Save old definitions for the case of a keyboard interrupt.
if bnum in self.commands:
old_command_defs = (self.commands[bnum],
self.commands_doprompt[bnum],
self.commands_silent[bnum])
else:
old_command_defs = None
self.commands[bnum] = []
self.commands_doprompt[bnum] = True
self.commands_silent[bnum] = False
prompt_back = self.prompt
self.prompt = '(com) '
self.commands_defining = True
try:
self.cmdloop()
except KeyboardInterrupt:
# Restore old definitions.
if old_command_defs:
self.commands[bnum] = old_command_defs[0]
self.commands_doprompt[bnum] = old_command_defs[1]
self.commands_silent[bnum] = old_command_defs[2]
else:
del self.commands[bnum]
del self.commands_doprompt[bnum]
del self.commands_silent[bnum]
self.error('command definition aborted, old commands restored')
finally:
self.commands_defining = False
self.prompt = prompt_back
def do_break(self, arg, temporary = 0):
"""b(reak) [ ([filename:]lineno | function) [, condition] ]
Without argument, list all breaks.
With a line number argument, set a break at this line in the
current file. With a function name, set a break at the first
executable line of that function. If a second argument is
present, it is a string specifying an expression which must
evaluate to true before the breakpoint is honored.
The line number may be prefixed with a filename and a colon,
to specify a breakpoint in another file (probably one that
hasn't been loaded yet). The file is searched for on
sys.path; the .py suffix may be omitted.
"""
if not arg:
if self.breaks: # There's at least one
self.message("Num Type Disp Enb Where")
for bp in bdb.Breakpoint.bpbynumber:
if bp:
self.message(bp.bpformat())
return
# parse arguments; comma has lowest precedence
# and cannot occur in filename
filename = None
lineno = None
cond = None
comma = arg.find(',')
if comma > 0:
# parse stuff after comma: "condition"
cond = arg[comma+1:].lstrip()
arg = arg[:comma].rstrip()
# parse stuff before comma: [filename:]lineno | function
colon = arg.rfind(':')
funcname = None
if colon >= 0:
filename = arg[:colon].rstrip()
f = self.lookupmodule(filename)
if not f:
self.error('%r not found from sys.path' % filename)
return
else:
filename = f
arg = arg[colon+1:].lstrip()
try:
lineno = int(arg)
except ValueError:
self.error('Bad lineno: %s' % arg)
return
else:
# no colon; can be lineno or function
try:
lineno = int(arg)
except ValueError:
try:
func = eval(arg,
self.curframe.f_globals,
self.curframe_locals)
except:
func = arg
try:
if hasattr(func, '__func__'):
func = func.__func__
code = func.__code__
#use co_name to identify the bkpt (function names
#could be aliased, but co_name is invariant)
funcname = code.co_name
lineno = code.co_firstlineno
filename = code.co_filename
except:
# last thing to try
(ok, filename, ln) = self.lineinfo(arg)
if not ok:
self.error('The specified object %r is not a function '
'or was not found along sys.path.' % arg)
return
funcname = ok # ok contains a function name
lineno = int(ln)
if not filename:
filename = self.defaultFile()
# Check for reasonable breakpoint
line = self.checkline(filename, lineno)
if line:
# now set the break point
err = self.set_break(filename, line, temporary, cond, funcname)
if err:
self.error(err, file=self.stdout)
else:
bp = self.get_breaks(filename, line)[-1]
self.message("Breakpoint %d at %s:%d" %
(bp.number, bp.file, bp.line))
# To be overridden in derived debuggers
def defaultFile(self):
"""Produce a reasonable default."""
filename = self.curframe.f_code.co_filename
if filename == '<string>' and self.mainpyfile:
filename = self.mainpyfile
return filename
do_b = do_break
def do_tbreak(self, arg):
"""tbreak [ ([filename:]lineno | function) [, condition] ]
Same arguments as break, but sets a temporary breakpoint: it
is automatically deleted when first hit.
"""
self.do_break(arg, 1)
    def lineinfo(self, identifier):
        """Resolve *identifier* to (funcname, filename, lineno).

        Returns (None, None, None) when the identifier cannot be parsed
        or located.  Relies on the module-level find_function() helper.
        """
        failed = (None, None, None)
        # Input is identifier, may be in single quotes
        idstring = identifier.split("'")
        if len(idstring) == 1:
            # not in single quotes
            id = idstring[0].strip()
        elif len(idstring) == 3:
            # quoted
            id = idstring[1].strip()
        else:
            return failed
        if id == '': return failed
        parts = id.split('.')
        # Protection for derived debuggers
        if parts[0] == 'self':
            del parts[0]
            if len(parts) == 0:
                return failed
        # Best first guess at file to look at
        fname = self.defaultFile()
        if len(parts) == 1:
            item = parts[0]
        else:
            # More than one part.
            # First is module, second is method/class
            f = self.lookupmodule(parts[0])
            if f:
                fname = f
            item = parts[1]
        # Scan the candidate file for a def of that name.
        answer = find_function(item, fname)
        return answer or failed
def checkline(self, filename, lineno):
"""Check whether specified line seems to be executable.
Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank
line or EOF). Warning: testing is not comprehensive.
"""
# this method should be callable before starting debugging, so default
# to "no globals" if there is no current frame
globs = self.curframe.f_globals if hasattr(self, 'curframe') else None
line = linecache.getline(filename, lineno, globs)
if not line:
self.message('End of file')
return 0
line = line.strip()
# Don't allow setting breakpoint at a blank line
if (not line or (line[0] == '#') or
(line[:3] == '"""') or line[:3] == "'''"):
self.error('Blank or comment')
return 0
return lineno
def do_enable(self, arg):
"""enable bpnumber [bpnumber ...]
Enables the breakpoints given as a space separated list of
breakpoint numbers.
"""
args = arg.split()
for i in args:
try:
bp = self.get_bpbynumber(i)
except ValueError as err:
self.error(err)
else:
bp.enable()
self.message('Enabled %s' % bp)
def do_disable(self, arg):
"""disable bpnumber [bpnumber ...]
Disables the breakpoints given as a space separated list of
breakpoint numbers. Disabling a breakpoint means it cannot
cause the program to stop execution, but unlike clearing a
breakpoint, it remains in the list of breakpoints and can be
(re-)enabled.
"""
args = arg.split()
for i in args:
try:
bp = self.get_bpbynumber(i)
except ValueError as err:
self.error(err)
else:
bp.disable()
self.message('Disabled %s' % bp)
def do_condition(self, arg):
"""condition bpnumber [condition]
Set a new condition for the breakpoint, an expression which
must evaluate to true before the breakpoint is honored. If
condition is absent, any existing condition is removed; i.e.,
the breakpoint is made unconditional.
"""
args = arg.split(' ', 1)
try:
cond = args[1]
except IndexError:
cond = None
try:
bp = self.get_bpbynumber(args[0].strip())
except ValueError as err:
self.error(err)
else:
bp.cond = cond
if not cond:
self.message('Breakpoint %d is now unconditional.' % bp.number)
else:
self.message('New condition set for breakpoint %d.' % bp.number)
def do_ignore(self, arg):
"""ignore bpnumber [count]
Set the ignore count for the given breakpoint number. If
count is omitted, the ignore count is set to 0. A breakpoint
becomes active when the ignore count is zero. When non-zero,
the count is decremented each time the breakpoint is reached
and the breakpoint is not disabled and any associated
condition evaluates to true.
"""
args = arg.split()
try:
count = int(args[1].strip())
except:
count = 0
try:
bp = self.get_bpbynumber(args[0].strip())
except ValueError as err:
self.error(err)
else:
bp.ignore = count
if count > 0:
if count > 1:
countstr = '%d crossings' % count
else:
countstr = '1 crossing'
self.message('Will ignore next %s of breakpoint %d.' %
(countstr, bp.number))
else:
self.message('Will stop next time breakpoint %d is reached.'
% bp.number)
    def do_clear(self, arg):
        """cl(ear) filename:lineno\ncl(ear) [bpnumber [bpnumber...]]
        With a space separated list of breakpoint numbers, clear
        those breakpoints. Without argument, clear all breaks (but
        first ask confirmation). With a filename:lineno argument,
        clear all breaks at that line in that file.
        """
        if not arg:
            # Interactive confirmation before wiping every breakpoint.
            try:
                reply = input('Clear all breaks? ')
            except EOFError:
                reply = 'no'
            reply = reply.strip().lower()
            if reply in ('y', 'yes'):
                # Snapshot first so the deletions can be reported afterwards.
                bplist = [bp for bp in bdb.Breakpoint.bpbynumber if bp]
                self.clear_all_breaks()
                for bp in bplist:
                    self.message('Deleted %s' % bp)
            return
        if ':' in arg:
            # Make sure it works for "clear C:\foo\bar.py:12"
            i = arg.rfind(':')
            filename = arg[:i]
            arg = arg[i+1:]
            try:
                lineno = int(arg)
            except ValueError:
                err = "Invalid line number (%s)" % arg
            else:
                bplist = self.get_breaks(filename, lineno)
                err = self.clear_break(filename, lineno)
            if err:
                self.error(err)
            else:
                for bp in bplist:
                    self.message('Deleted %s' % bp)
            return
        # Remaining form: space-separated breakpoint numbers.
        numberlist = arg.split()
        for i in numberlist:
            try:
                bp = self.get_bpbynumber(i)
            except ValueError as err:
                self.error(err)
            else:
                self.clear_bpbynumber(i)
                self.message('Deleted %s' % bp)
do_cl = do_clear # 'c' is already an abbreviation for 'continue'
def do_where(self, arg):
"""w(here)
Print a stack trace, with the most recent frame at the bottom.
An arrow indicates the "current frame", which determines the
context of most commands. 'bt' is an alias for this command.
"""
self.print_stack_trace()
do_w = do_where
do_bt = do_where
def _select_frame(self, number):
assert 0 <= number < len(self.stack)
self.curindex = number
self.curframe = self.stack[self.curindex][0]
self.curframe_locals = self.curframe.f_locals
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
def do_up(self, arg):
"""u(p) [count]
Move the current frame count (default one) levels up in the
stack trace (to an older frame).
"""
if self.curindex == 0:
self.error('Oldest frame')
return
try:
count = int(arg or 1)
except ValueError:
self.error('Invalid frame count (%s)' % arg)
return
if count < 0:
newframe = 0
else:
newframe = max(0, self.curindex - count)
self._select_frame(newframe)
do_u = do_up
def do_down(self, arg):
"""d(own) [count]
Move the current frame count (default one) levels down in the
stack trace (to a newer frame).
"""
if self.curindex + 1 == len(self.stack):
self.error('Newest frame')
return
try:
count = int(arg or 1)
except ValueError:
self.error('Invalid frame count (%s)' % arg)
return
if count < 0:
newframe = len(self.stack) - 1
else:
newframe = min(len(self.stack) - 1, self.curindex + count)
self._select_frame(newframe)
do_d = do_down
def do_until(self, arg):
"""unt(il) [lineno]
Without argument, continue execution until the line with a
number greater than the current one is reached. With a line
number, continue execution until a line with a number greater
or equal to that is reached. In both cases, also stop when
the current frame returns.
"""
if arg:
try:
lineno = int(arg)
except ValueError:
self.error('Error in argument: %r' % arg)
return
if lineno <= self.curframe.f_lineno:
self.error('"until" line number is smaller than current '
'line number')
return
else:
lineno = None
self.set_until(self.curframe, lineno)
return 1
do_unt = do_until
def do_step(self, arg):
"""s(tep)
Execute the current line, stop at the first possible occasion
(either in a function that is called or in the current
function).
"""
self.set_step()
return 1
do_s = do_step
def do_next(self, arg):
"""n(ext)
Continue execution until the next line in the current function
is reached or it returns.
"""
self.set_next(self.curframe)
return 1
do_n = do_next
    def do_run(self, arg):
        """run [args...]
        Restart the debugged python program. If a string is supplied
        it is split with "shlex", and the result is used as the new
        sys.argv. History, breakpoints, actions and debugger options
        are preserved. "restart" is an alias for "run".
        """
        if arg:
            import shlex
            # Keep argv[0] (the script name) and replace the rest.
            argv0 = sys.argv[0:1]
            sys.argv = shlex.split(arg)
            sys.argv[:0] = argv0
        # this is caught in the main debugger loop
        raise Restart
do_restart = do_run
def do_return(self, arg):
"""r(eturn)
Continue execution until the current function returns.
"""
self.set_return(self.curframe)
return 1
do_r = do_return
def do_continue(self, arg):
"""c(ont(inue))
Continue execution, only stop when a breakpoint is encountered.
"""
if not self.nosigint:
self._previous_sigint_handler = \
signal.signal(signal.SIGINT, self.sigint_handler)
self.set_continue()
return 1
do_c = do_cont = do_continue
def do_jump(self, arg):
"""j(ump) lineno
Set the next line that will be executed. Only available in
the bottom-most frame. This lets you jump back and execute
code again, or jump forward to skip code that you don't want
to run.
It should be noted that not all jumps are allowed -- for
instance it is not possible to jump into the middle of a
for loop or out of a finally clause.
"""
if self.curindex + 1 != len(self.stack):
self.error('You can only jump within the bottom frame')
return
try:
arg = int(arg)
except ValueError:
self.error("The 'jump' command requires a line number")
else:
try:
# Do the jump, fix up our copy of the stack, and display the
# new position
self.curframe.f_lineno = arg
self.stack[self.curindex] = self.stack[self.curindex][0], arg
self.print_stack_entry(self.stack[self.curindex])
except ValueError as e:
self.error('Jump failed: %s' % e)
do_j = do_jump
    def do_debug(self, arg):
        """debug code
        Enter a recursive debugger that steps through the code
        argument (which is an arbitrary expression or statement to be
        executed in the current environment).
        """
        # Suspend this debugger's tracing while the nested one runs.
        sys.settrace(None)
        globals = self.curframe.f_globals
        locals = self.curframe_locals
        p = Pdb(self.completekey, self.stdin, self.stdout)
        # Nest the prompt, e.g. '((Pdb)) ', so the user can see the depth.
        p.prompt = "(%s) " % self.prompt.strip()
        self.message("ENTERING RECURSIVE DEBUGGER")
        sys.call_tracing(p.run, (arg, globals, locals))
        self.message("LEAVING RECURSIVE DEBUGGER")
        # Restore our own trace function and remember the nested last command.
        sys.settrace(self.trace_dispatch)
        self.lastcmd = p.lastcmd
def do_quit(self, arg):
"""q(uit)\nexit
Quit from the debugger. The program being executed is aborted.
"""
self._user_requested_quit = True
self.set_quit()
return 1
do_q = do_quit
do_exit = do_quit
def do_EOF(self, arg):
"""EOF
Handles the receipt of EOF as a command.
"""
self.message('')
self._user_requested_quit = True
self.set_quit()
return 1
def do_args(self, arg):
"""a(rgs)
Print the argument list of the current function.
"""
co = self.curframe.f_code
dict = self.curframe_locals
n = co.co_argcount
if co.co_flags & 4: n = n+1
if co.co_flags & 8: n = n+1
for i in range(n):
name = co.co_varnames[i]
if name in dict:
self.message('%s = %r' % (name, dict[name]))
else:
self.message('%s = *** undefined ***' % (name,))
do_a = do_args
def do_retval(self, arg):
"""retval
Print the return value for the last return of a function.
"""
if '__return__' in self.curframe_locals:
self.message(repr(self.curframe_locals['__return__']))
else:
self.error('Not yet returned!')
do_rv = do_retval
def _getval(self, arg):
try:
return eval(arg, self.curframe.f_globals, self.curframe_locals)
except:
exc_info = sys.exc_info()[:2]
self.error(traceback.format_exception_only(*exc_info)[-1].strip())
raise
    def _getval_except(self, arg, frame=None):
        """Evaluate *arg*, returning a printable error marker on failure.

        Unlike _getval(), never raises: failures come back as an _rstr
        (a str subclass whose repr is itself), so 'display' output stays
        readable.
        """
        try:
            if frame is None:
                return eval(arg, self.curframe.f_globals, self.curframe_locals)
            else:
                return eval(arg, frame.f_globals, frame.f_locals)
        except:
            # Broad on purpose: any evaluation error becomes a marker value.
            exc_info = sys.exc_info()[:2]
            err = traceback.format_exception_only(*exc_info)[-1].strip()
            return _rstr('** raised %s **' % err)
def do_p(self, arg):
"""p(rint) expression
Print the value of the expression.
"""
try:
self.message(repr(self._getval(arg)))
except:
pass
# make "print" an alias of "p" since print isn't a Python statement anymore
do_print = do_p
def do_pp(self, arg):
"""pp expression
Pretty-print the value of the expression.
"""
try:
self.message(pprint.pformat(self._getval(arg)))
except:
pass
    def do_list(self, arg):
        """l(ist) [first [,last] | .]
        List source code for the current file. Without arguments,
        list 11 lines around the current line or continue the previous
        listing. With . as argument, list 11 lines around the current
        line. With one argument, list 11 lines starting at that line.
        With two arguments, list the given range; if the second
        argument is less than the first, it is a count.
        The current line in the current frame is indicated by "->".
        If an exception is being debugged, the line where the
        exception was originally raised or propagated is indicated by
        ">>", if it differs from the current line.
        """
        # Remember the command so a bare Enter repeats/continues the listing.
        self.lastcmd = 'list'
        last = None
        if arg and arg != '.':
            try:
                if ',' in arg:
                    first, last = arg.split(',')
                    first = int(first.strip())
                    last = int(last.strip())
                    if last < first:
                        # assume it's a count
                        last = first + last
                else:
                    # Single number: center an 11-line window on it.
                    first = int(arg.strip())
                    first = max(1, first - 5)
            except ValueError:
                self.error('Error in argument: %r' % arg)
                return
        elif self.lineno is None or arg == '.':
            first = max(1, self.curframe.f_lineno - 5)
        else:
            # Continue listing from where the previous 'list' stopped.
            first = self.lineno + 1
        if last is None:
            last = first + 10
        filename = self.curframe.f_code.co_filename
        breaklist = self.get_file_breaks(filename)
        try:
            lines = linecache.getlines(filename, self.curframe.f_globals)
            self._print_lines(lines[first-1:last], first, breaklist,
                             self.curframe)
            # Record the position for the next continuation listing.
            self.lineno = min(last, len(lines))
            if len(lines) < last:
                self.message('[EOF]')
        except KeyboardInterrupt:
            pass
do_l = do_list
    def do_longlist(self, arg):
        """longlist | ll
        List the whole source code for the current function or frame.
        """
        filename = self.curframe.f_code.co_filename
        breaklist = self.get_file_breaks(filename)
        try:
            # Module-level helper wrapping inspect.getsourcelines().
            lines, lineno = getsourcelines(self.curframe)
        except IOError as err:
            self.error(err)
            return
        self._print_lines(lines, lineno, breaklist, self.curframe)
do_ll = do_longlist
    def do_source(self, arg):
        """source expression
        Try to get source code for the given object and display it.
        """
        try:
            obj = self._getval(arg)
        except:
            # _getval() already reported the evaluation error.
            return
        try:
            lines, lineno = getsourcelines(obj)
        except (IOError, TypeError) as err:
            self.error(err)
            return
        self._print_lines(lines, lineno)
def _print_lines(self, lines, start, breaks=(), frame=None):
"""Print a range of lines."""
if frame:
current_lineno = frame.f_lineno
exc_lineno = self.tb_lineno.get(frame, -1)
else:
current_lineno = exc_lineno = -1
for lineno, line in enumerate(lines, start):
s = str(lineno).rjust(3)
if len(s) < 4:
s += ' '
if lineno in breaks:
s += 'B'
else:
s += ' '
if lineno == current_lineno:
s += '->'
elif lineno == exc_lineno:
s += '>>'
self.message(s + '\t' + line.rstrip())
def do_whatis(self, arg):
"""whatis arg
Print the type of the argument.
"""
try:
value = self._getval(arg)
except:
# _getval() already printed the error
return
code = None
# Is it a function?
try:
code = value.__code__
except Exception:
pass
if code:
self.message('Function %s' % code.co_name)
return
# Is it an instance method?
try:
code = value.__func__.__code__
except Exception:
pass
if code:
self.message('Method %s' % code.co_name)
return
# Is it a class?
if value.__class__ is type:
self.message('Class %s.%s' % (value.__module__, value.__name__))
return
# None of the above...
self.message(type(value))
def do_display(self, arg):
"""display [expression]
Display the value of the expression if it changed, each time execution
stops in the current frame.
Without expression, list all display expressions for the current frame.
"""
if not arg:
self.message('Currently displaying:')
for item in self.displaying.get(self.curframe, {}).items():
self.message('%s: %r' % item)
else:
val = self._getval_except(arg)
self.displaying.setdefault(self.curframe, {})[arg] = val
self.message('display %s: %r' % (arg, val))
def do_undisplay(self, arg):
"""undisplay [expression]
Do not display the expression any more in the current frame.
Without expression, clear all display expressions for the current frame.
"""
if arg:
try:
del self.displaying.get(self.curframe, {})[arg]
except KeyError:
self.error('not displaying %s' % arg)
else:
self.displaying.pop(self.curframe, None)
    def do_interact(self, arg):
        """interact
        Start an interactive interpreter whose global namespace
        contains all the (global and local) names found in the current scope.
        """
        # Locals shadow globals on key collisions, mirroring normal scoping.
        ns = self.curframe.f_globals.copy()
        ns.update(self.curframe_locals)
        code.interact("*interactive*", local=ns)
def do_alias(self, arg):
"""alias [name [command [parameter parameter ...] ]]
Create an alias called 'name' that executes 'command'. The
command must *not* be enclosed in quotes. Replaceable
parameters can be indicated by %1, %2, and so on, while %* is
replaced by all the parameters. If no command is given, the
current alias for name is shown. If no name is given, all
aliases are listed.
Aliases may be nested and can contain anything that can be
legally typed at the pdb prompt. Note! You *can* override
internal pdb commands with aliases! Those internal commands
are then hidden until the alias is removed. Aliasing is
recursively applied to the first word of the command line; all
other words in the line are left alone.
As an example, here are two useful aliases (especially when
placed in the .pdbrc file):
# Print instance variables (usage "pi classInst")
alias pi for k in %1.__dict__.keys(): print "%1.",k,"=",%1.__dict__[k]
# Print instance variables in self
alias ps pi self
"""
args = arg.split()
if len(args) == 0:
keys = sorted(self.aliases.keys())
for alias in keys:
self.message("%s = %s" % (alias, self.aliases[alias]))
return
if args[0] in self.aliases and len(args) == 1:
self.message("%s = %s" % (args[0], self.aliases[args[0]]))
else:
self.aliases[args[0]] = ' '.join(args[1:])
def do_unalias(self, arg):
"""unalias name
Delete the specified alias.
"""
args = arg.split()
if len(args) == 0: return
if args[0] in self.aliases:
del self.aliases[args[0]]
# List of all the commands making the program resume execution.
commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return',
'do_quit', 'do_jump']
# Print a traceback starting at the top stack frame.
# The most recently entered frame is printed last;
# this is different from dbx and gdb, but consistent with
# the Python interpreter's stack trace.
# It is also consistent with the up/down commands (which are
# compatible with dbx and gdb: up moves towards 'main()'
# and down moves towards the most recent stack frame).
def print_stack_trace(self):
try:
for frame_lineno in self.stack:
self.print_stack_entry(frame_lineno)
except KeyboardInterrupt:
pass
    def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix):
        """Print one (frame, lineno) stack entry.

        The current frame is flagged with '>'; prompt_prefix defaults to
        the module-level line_prefix separator.
        """
        frame, lineno = frame_lineno
        if frame is self.curframe:
            prefix = '> '
        else:
            prefix = ' '
        self.message(prefix +
                     self.format_stack_entry(frame_lineno, prompt_prefix))
# Provide help
    def do_help(self, arg):
        """h(elp)
        Without argument, print the list of available commands.
        With a command name as argument, print help about that command.
        "help pdb" shows the full pdb documentation.
        "help exec" gives help on the ! command.
        """
        if not arg:
            return cmd.Cmd.do_help(self, arg)
        try:
            try:
                # Prefer an explicit help_<topic> method if one exists.
                topic = getattr(self, 'help_' + arg)
                return topic()
            except AttributeError:
                command = getattr(self, 'do_' + arg)
        except AttributeError:
            self.error('No help for %r' % arg)
        else:
            # Under -OO docstrings are stripped, so there is nothing to show.
            if sys.flags.optimize >= 2:
                self.error('No help for %r; please do not run Python with -OO '
                           'if you need command help' % arg)
                return
            self.message(command.__doc__.rstrip())
do_h = do_help
    def help_exec(self):
        """(!) statement
        Execute the (one-line) statement in the context of the current
        stack frame. The exclamation point can be omitted unless the
        first word of the statement resembles a debugger command. To
        assign to a global variable you must always prefix the command
        with a 'global' command, e.g.:
        (Pdb) global list_options; list_options = ['-l']
        (Pdb)
        """
        # The help text IS this method's own docstring.
        self.message((self.help_exec.__doc__ or '').strip())
    def help_pdb(self):
        # Delegate to the module-level help(), which pages the full docs.
        help()
# other helper functions
    def lookupmodule(self, filename):
        """Helper function for break/clear parsing -- may be overridden.

        lookupmodule() translates (possibly incomplete) file or module name
        into an absolute file name.  Returns None when nothing matches.
        """
        if os.path.isabs(filename) and os.path.exists(filename):
            return filename
        # Relative to the script's own directory (sys.path[0]).
        f = os.path.join(sys.path[0], filename)
        if os.path.exists(f) and self.canonic(f) == self.mainpyfile:
            return f
        # Allow the .py suffix to be omitted.
        root, ext = os.path.splitext(filename)
        if ext == '':
            filename = filename + '.py'
        if os.path.isabs(filename):
            return filename
        for dirname in sys.path:
            # Resolve symlinked sys.path entries before joining.
            while os.path.islink(dirname):
                dirname = os.readlink(dirname)
            fullname = os.path.join(dirname, filename)
            if os.path.exists(fullname):
                return fullname
        return None
    def _runscript(self, filename):
        """Run *filename* under the debugger in a pristine __main__ namespace."""
        # The script has to run in __main__ namespace (or imports from
        # __main__ will break).
        #
        # So we clear up the __main__ and set several special variables
        # (this gets rid of pdb's globals and cleans old variables on restarts).
        import __main__
        __main__.__dict__.clear()
        __main__.__dict__.update({"__name__"    : "__main__",
                                  "__file__"    : filename,
                                  "__builtins__": __builtins__,
                                 })
        # When bdb sets tracing, a number of call and line events happens
        # BEFORE debugger even reaches user's code (and the exact sequence of
        # events depends on python version). So we take special measures to
        # avoid stopping before we reach the main script (see user_line and
        # user_call for details).
        self._wait_for_mainpyfile = True
        self.mainpyfile = self.canonic(filename)
        self._user_requested_quit = False
        # Compile from the raw bytes so the source's own encoding cookie wins.
        with open(filename, "rb") as fp:
            statement = "exec(compile(%r, %r, 'exec'))" % \
                        (fp.read(), self.mainpyfile)
        self.run(statement)
# Collect all command help into docstring, if not run with -OO
if __doc__ is not None:
    # unfortunately we can't guess this order from the class definition
    _help_order = [
        'help', 'where', 'down', 'up', 'break', 'tbreak', 'clear', 'disable',
        'enable', 'ignore', 'condition', 'commands', 'step', 'next', 'until',
        'jump', 'return', 'retval', 'run', 'continue', 'list', 'longlist',
        'args', 'print', 'pp', 'whatis', 'source', 'display', 'undisplay',
        'interact', 'alias', 'unalias', 'debug', 'quit',
    ]
    # Append each command docstring to the module docs shown by 'help pdb'.
    for _command in _help_order:
        __doc__ += getattr(Pdb, 'do_' + _command).__doc__.strip() + '\n\n'
    __doc__ += Pdb.help_exec.__doc__
    del _help_order, _command
# Simplified interface
def run(statement, globals=None, locals=None):
    """Execute *statement* (a string) under control of a fresh Pdb instance."""
    Pdb().run(statement, globals, locals)
def runeval(expression, globals=None, locals=None):
    """Evaluate *expression* under the debugger and return its value."""
    return Pdb().runeval(expression, globals, locals)
def runctx(statement, globals, locals):
    """Deprecated alias for run() kept for backward compatibility."""
    # B/W compatibility
    run(statement, globals, locals)
def runcall(*args, **kwds):
    """Call args[0](*args[1:], **kwds) under the debugger; return the result."""
    return Pdb().runcall(*args, **kwds)
def set_trace():
    """Enter the debugger at the caller's frame."""
    Pdb().set_trace(sys._getframe().f_back)
# Post-Mortem interface
def post_mortem(t=None):
    """Inspect traceback *t* interactively; default to the one being handled."""
    # handling the default
    if t is None:
        # sys.exc_info() returns (type, value, traceback) if an exception is
        # being handled, otherwise it returns None
        t = sys.exc_info()[2]
    if t is None:
        raise ValueError("A valid traceback must be passed if no "
                         "exception is being handled")
    p = Pdb()
    p.reset()
    p.interaction(None, t)
def pm():
    """Post-mortem debug the traceback of the last unhandled exception."""
    post_mortem(sys.last_traceback)
# Main program for testing
TESTCMD = 'import x; x.main()'
def test():
    """Run the canned TESTCMD under debugger control (manual smoke test)."""
    run(TESTCMD)
# print help
def help():
    """Page the module documentation (shadows builtins.help inside pdb)."""
    import pydoc
    pydoc.pager(__doc__)
_usage = """\
usage: pdb.py [-c command] ... pyfile [arg] ...
Debug the Python program given by pyfile.
Initial commands are read from .pdbrc files in your home directory
and in the current directory, if they exist. Commands supplied with
-c are executed after commands from .pdbrc files.
To let the script run until an exception occurs, use "-c continue".
To let the script run up to a given line X in the debugged file, use
"-c 'until X'"."""
def main():
    """Command-line entry point: run the script named in sys.argv under pdb."""
    import getopt

    # Bug fix: getopt long option names must be given WITHOUT the leading
    # '--' (e.g. 'help', 'command='); with dashes included, a user typing
    # '--help' or '--command' would get a GetoptError instead of a match.
    opts, args = getopt.getopt(sys.argv[1:], 'hc:', ['help', 'command='])

    if not args:
        print(_usage)
        sys.exit(2)

    commands = []
    for opt, optarg in opts:
        if opt in ['-h', '--help']:
            print(_usage)
            sys.exit()
        elif opt in ['-c', '--command']:
            commands.append(optarg)

    mainpyfile = args[0]     # Get script filename
    if not os.path.exists(mainpyfile):
        print('Error:', mainpyfile, 'does not exist')
        sys.exit(1)

    sys.argv[:] = args      # Hide "pdb.py" and pdb options from argument list

    # Replace pdb's dir with script's dir in front of module search path.
    sys.path[0] = os.path.dirname(mainpyfile)

    # Note on saving/restoring sys.argv: it's a good idea when sys.argv was
    # modified by the script being debugged. It's a bad idea when it was
    # changed by the user from the command line. There is a "restart" command
    # which allows explicit specification of command line arguments.
    pdb = Pdb()
    pdb.rcLines.extend(commands)
    while True:
        try:
            pdb._runscript(mainpyfile)
            if pdb._user_requested_quit:
                break
            print("The program finished and will be restarted")
        except Restart:
            print("Restarting", mainpyfile, "with arguments:")
            print("\t" + " ".join(args))
        except SystemExit:
            # In most cases SystemExit does not warrant a post-mortem session.
            print("The program exited via sys.exit(). Exit status:", end=' ')
            print(sys.exc_info()[1])
        except:
            traceback.print_exc()
            print("Uncaught exception. Entering post mortem debugging")
            print("Running 'cont' or 'step' will restart the program")
            t = sys.exc_info()[2]
            pdb.interaction(None, t)
            print("Post mortem debugger finished. The " + mainpyfile +
                  " will be restarted")
# When invoked as main program, invoke the debugger on a script
if __name__ == '__main__':
    # Import the installed pdb module and run its main(), so the 'restart'
    # machinery operates on the module rather than this script's globals.
    import pdb
    pdb.main()
| apache-2.0 |
dannyperry571/theapprentice | script.module.nanscrapers/lib/nanscrapers/scraperplugins/sezonlukdizi.py | 1 | 4411 | import json
import re
import urlparse
import requests
from BeautifulSoup import BeautifulSoup
from nanscrapers.common import random_agent, replaceHTMLCodes
from ..scraper import Scraper
import xbmc
class Sezonluldizi(Scraper):
    """Scraper for sezonlukdizi.com TV episode pages.

    Locates the embedded player iframe on an episode page and extracts
    playable video sources from it: pcloud CDN renditions and inline
    jwplayer-style ``"file"/"label"`` links.
    """
    domains = ['sezonlukdizi.com']
    name = "sezonlukdizi"

    def __init__(self):
        self.base_link = 'http://sezonlukdizi.com'

    def scrape_episode(self, title, show_year, year, season, episode, imdb, tvdb):
        """Build the site's episode URL for (title, season, episode) and
        return the sources found on that page."""
        url_title = title.replace(' ', '-').replace('.', '-') \
            .replace(":", "").replace("!", "").replace("?", "").lower()
        episode_url = '/%s/%01d-sezon-%01d-bolum.html' % (
            url_title, int(season), int(episode))
        return self.sources(replaceHTMLCodes(episode_url))

    def sources(self, url):
        """Return a list of source dicts scraped from the episode page.

        Best effort: network or parsing failures are swallowed and
        whatever sources were collected so far are returned (possibly an
        empty list).
        """
        sources = []
        try:
            if url is None:  # fixed: was 'url == None'
                return sources
            absolute_url = urlparse.urljoin(self.base_link, url)
            headers = {'User-Agent': random_agent()}
            html = BeautifulSoup(
                requests.get(absolute_url, headers=headers, timeout=30).content)
            # The episode page embeds the actual player in an iframe.
            pages = []
            embed = html.findAll('div', attrs={'id': 'embed'})[0]
            pages.append(embed.findAll('iframe')[0]["src"])
            for page in pages:
                try:
                    if not page.startswith('http'):
                        page = 'http:%s' % page
                    html = BeautifulSoup(
                        requests.get(page, headers=headers, timeout=30).content)
                    # Style 1: a pcloud API link whose JSON response lists
                    # the available renditions ('variants').
                    try:
                        pcloud_re = r"url\s*:\s*'(http(?:s|)://api.pcloud.com/.+?)'"
                        link_text = html.findAll(text=re.compile(pcloud_re))[0]
                        link = re.findall(pcloud_re, link_text)[0]
                        variants = json.loads(
                            requests.get(link, headers=headers, timeout=30).content)['variants']
                        for variant in variants:
                            if 'hosts' in variant and 'path' in variant and 'height' in variant:
                                video_url = '%s%s' % (variant['hosts'][0], variant['path'])
                                height = variant['height']  # fixed typo: was 'heigth'
                                if not video_url.startswith('http'):
                                    video_url = 'http://%s' % video_url
                                sources.append(
                                    {'source': 'cdn', 'quality': str(height),
                                     'scraper': self.name, 'url': video_url,
                                     'direct': False})
                    except Exception:
                        pass
                    # Style 2: inline "file"/"label" pairs in the player setup.
                    try:
                        links_text = html.findAll(
                            text=re.compile(r'"?file"?\s*:\s*"(.+?)"\s*,\s*"?label"?\s*:\s*"(.+?)"'))
                        if len(links_text) > 0:
                            for link_text in links_text:
                                try:
                                    links = re.findall(
                                        r'"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?[^"]*"',
                                        link_text)
                                    for link in links:
                                        video_url = link[0]
                                        if not video_url.startswith('http'):
                                            video_url = 'http:%s' % video_url
                                        try:
                                            # Resolve one level of redirect; HEAD
                                            # does not follow redirects by default.
                                            req = requests.head(video_url, headers=headers)
                                            if req.headers['Location'] != "":
                                                video_url = req.headers['Location']
                                        except Exception:
                                            pass
                                        quality = link[1]
                                        sources.append(
                                            {'source': 'google video', 'quality': quality,
                                             'scraper': self.name, 'url': video_url,
                                             'direct': True})
                                except Exception:
                                    continue
                    except Exception:
                        pass
                except Exception:
                    pass
        except Exception:
            pass
        return sources
| gpl-2.0 |
koomik/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/playvid.py | 19 | 2554 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse,
)
class PlayvidIE(InfoExtractor):
    """youtube-dl extractor for playvid.com watch pages.

    Most metadata and all format URLs are carried in the page's
    ``flashvars`` attribute, which is a URL-encoded ``&``-separated
    key/value list.
    """
    _VALID_URL = r'^https?://www\.playvid\.com/watch(\?v=|/)(?P<id>.+?)(?:#|$)'
    _TEST = {
        'url': 'http://www.playvid.com/watch/agbDDi7WZTV',
        'md5': '44930f8afa616efdf9482daf4fe53e1e',
        'info_dict': {
            'id': 'agbDDi7WZTV',
            'ext': 'mp4',
            'title': 'Michelle Lewin in Miami Beach',
            'duration': 240,
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        """Parse the flashvars block of the watch page into an info dict."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)
        video_title = None
        duration = None
        video_thumbnail = None
        formats = []
        # most of the information is stored in the flashvars
        flashvars = self._html_search_regex(
            r'flashvars="(.+?)"', webpage, 'flashvars')
        infos = compat_urllib_parse.unquote(flashvars).split(r'&')
        for info in infos:
            # video_vars[...] entries hold metadata (title, duration, thumb).
            videovars_match = re.match(r'^video_vars\[(.+?)\]=(.+?)$', info)
            if videovars_match:
                key = videovars_match.group(1)
                val = videovars_match.group(2)
                if key == 'title':
                    video_title = compat_urllib_parse.unquote_plus(val)
                if key == 'duration':
                    try:
                        duration = int(val)
                    except ValueError:
                        pass
                if key == 'big_thumb':
                    video_thumbnail = val
                # video_urls][NNNp keys carry one format URL per resolution.
                videourl_match = re.match(
                    r'^video_urls\]\[(?P<resolution>[0-9]+)p', key)
                if videourl_match:
                    height = int(videourl_match.group('resolution'))
                    formats.append({
                        'height': height,
                        'url': val,
                    })
        self._sort_formats(formats)
        # Extract title - should be in the flashvars; if not, look elsewhere
        if video_title is None:
            video_title = self._html_search_regex(
                r'<title>(.*?)</title', webpage, 'title')
        return {
            'id': video_id,
            'formats': formats,
            'title': video_title,
            'thumbnail': video_thumbnail,
            'duration': duration,
            'description': None,
            'age_limit': 18
        }
| gpl-3.0 |
hortonworks/hortonworks-sandbox | desktop/core/ext-py/Twisted/doc/web/howto/listings/PicturePile/picturepile.py | 1 | 1917 | """Run this with twistd -y."""
import os
from twisted.application import service, internet
from twisted.web.woven import page
from twisted.web import server, static
rootDirectory = os.path.expanduser("~/Pictures")
class DirectoryListing(page.Page):
    """Woven page that lists the contents of a filesystem directory.

    Child lookups descend into subdirectories (as further
    DirectoryListing pages) or display files (as ImageDisplay pages).
    """
    templateFile = "directory-listing.html"
    templateDirectory = os.path.split(os.path.abspath(__file__))[0]

    def initialize(self, *args, **kwargs):
        # Path of the directory being listed (required keyword argument).
        self.directory = kwargs['directory']

    def wmfactory_title(self, request):
        """Model factory: the page title is the directory path."""
        return self.directory

    def wmfactory_directory(self, request):
        """Model factory: directory entries, subdirectories marked with '/'.

        Rewritten from an index-based xrange mutation loop to a
        comprehension (same result, Python 2/3 neutral).
        """
        return [name + '/' if os.path.isdir(os.path.join(self.directory, name))
                else name
                for name in os.listdir(self.directory)]

    def getDynamicChild(self, name, request):
        """Resolve the child resource for *name* under this directory."""
        # Protect against malicious URLs like '..'
        if static.isDangerous(name):
            return static.dangerousPathError
        # Return a DirectoryListing or an ImageDisplay resource, depending on
        # whether the path corresponds to a directory or to a file
        path = os.path.join(self.directory, name)
        if os.path.exists(path):
            if os.path.isdir(path):
                return DirectoryListing(directory=path)
            else:
                return ImageDisplay(image=path)
        # Unknown names fall through and return None (no such child),
        # matching the original behavior.
class ImageDisplay(page.Page):
    """Woven page that shows a single image; the raw file is served at
    the 'preview' child path."""

    templateFile = "image-display.html"
    templateDirectory = os.path.split(os.path.abspath(__file__))[0]

    def initialize(self, *args, **kwargs):
        # Filesystem path of the image to display (required keyword).
        self.image = kwargs['image']

    def wchild_preview(self, request):
        # Serve the raw image file itself.
        return static.File(self.image)

    def wmfactory_image(self, request):
        # Model factory: expose the image path to the template.
        return self.image
# Wire everything together as a twistd application: serve the picture
# directory listing over HTTP on port 8088.
site = server.Site(DirectoryListing(directory=rootDirectory))
application = service.Application("ImagePool")
parent = service.IServiceCollection(application)
internet.TCPServer(8088, site).setServiceParent(parent)
| apache-2.0 |
kernc/networkx | networkx/algorithms/connectivity/kcomponents.py | 30 | 8208 | # -*- coding: utf-8 -*-
"""
Moody and White algorithm for k-components
"""
from collections import defaultdict
from itertools import combinations
from operator import itemgetter
import networkx as nx
from networkx.utils import not_implemented_for
# Define the default maximum flow function.
from networkx.algorithms.flow import edmonds_karp
default_flow_func = edmonds_karp
__author__ = '\n'.join(['Jordi Torrents <jtorrents@milnou.net>'])
__all__ = ['k_components']
@not_implemented_for('directed')
def k_components(G, flow_func=None):
    r"""Returns the k-component structure of a graph G.

    A `k`-component is a maximal subgraph of a graph G that has, at least,
    node connectivity `k`: we need to remove at least `k` nodes to break it
    into more components. `k`-components have an inherent hierarchical
    structure because they are nested in terms of connectivity: a connected
    graph can contain several 2-components, each of which can contain
    one or more 3-components, and so forth.

    Parameters
    ----------
    G : NetworkX graph

    flow_func : function
        Function to perform the underlying flow computations. Default value
        :meth:`edmonds_karp`. This function performs better in sparse graphs with
        right tailed degree distributions. :meth:`shortest_augmenting_path` will
        perform better in denser graphs.

    Returns
    -------
    k_components : dict
        Dictionary with all connectivity levels `k` in the input Graph as keys
        and a list of sets of nodes that form a k-component of level `k` as
        values.

    Raises
    ------
    NetworkXNotImplemented:
        If the input graph is directed.

    Examples
    --------
    >>> # Petersen graph has 10 nodes and it is triconnected, thus all
    >>> # nodes are in a single component on all three connectivity levels
    >>> G = nx.petersen_graph()
    >>> k_components = nx.k_components(G)

    Notes
    -----
    Moody and White [1]_ (appendix A) provide an algorithm for identifying
    k-components in a graph, which is based on Kanevsky's algorithm [2]_
    for finding all minimum-size node cut-sets of a graph (implemented in
    :meth:`all_node_cuts` function):

    1. Compute node connectivity, k, of the input graph G.
    2. Identify all k-cutsets at the current level of connectivity using
       Kanevsky's algorithm.
    3. Generate new graph components based on the removal of
       these cutsets. Nodes in a cutset belong to both sides
       of the induced cut.
    4. If the graph is neither complete nor trivial, return to 1;
       else end.

    This implementation also uses some heuristics (see [3]_ for details)
    to speed up the computation.

    See also
    --------
    node_connectivity
    all_node_cuts

    References
    ----------
    .. [1] Moody, J. and D. White (2003). Social cohesion and embeddedness:
       A hierarchical conception of social groups.
       American Sociological Review 68(1), 103--28.
       http://www2.asanet.org/journals/ASRFeb03MoodyWhite.pdf
    .. [2] Kanevsky, A. (1993). Finding all minimum-size separating vertex
       sets in a graph. Networks 23(6), 533--541.
       http://onlinelibrary.wiley.com/doi/10.1002/net.3230230604/abstract
    .. [3] Torrents, J. and F. Ferraro (2015). Structural Cohesion:
       Visualization and Heuristics for Fast Computation.
       http://arxiv.org/pdf/1503.04476v1
    """
    # Dictionary with connectivity level (k) as keys and a list of
    # sets of nodes that form a k-component as values. Note that
    # k-components can overlap (but only k - 1 nodes).
    k_components = defaultdict(list)
    # Define default flow function
    if flow_func is None:
        flow_func = default_flow_func
    # Bicomponents as a base to check for higher order k-components
    for component in nx.connected_components(G):
        # isolated nodes have connectivity 0
        comp = set(component)
        if len(comp) > 1:
            k_components[1].append(comp)
    bicomponents = list(nx.biconnected_component_subgraphs(G))
    for bicomponent in bicomponents:
        bicomp = set(bicomponent)
        # avoid considering dyads as bicomponents
        if len(bicomp) > 2:
            k_components[2].append(bicomp)
    # Refine each non-trivial bicomponent: compute its connectivity, then
    # recursively partition it along its minimum node cut-sets, tracking
    # the pending partitions on an explicit stack of generators.
    for B in bicomponents:
        if len(B) <= 2:
            continue
        k = nx.node_connectivity(B, flow_func=flow_func)
        if k > 2:
            k_components[k].append(set(B.nodes_iter()))
        # Perform cuts in a DFS like order.
        cuts = list(nx.all_node_cuts(B, k=k, flow_func=flow_func))
        stack = [(k, _generate_partition(B, cuts, k))]
        while stack:
            (parent_k, partition) = stack[-1]
            try:
                nodes = next(partition)
                C = B.subgraph(nodes)
                this_k = nx.node_connectivity(C, flow_func=flow_func)
                if this_k > parent_k and this_k > 2:
                    k_components[this_k].append(set(C.nodes_iter()))
                cuts = list(nx.all_node_cuts(C, k=this_k, flow_func=flow_func))
                if cuts:
                    stack.append((this_k, _generate_partition(C, cuts, this_k)))
            except StopIteration:
                # Current partition exhausted: backtrack to the parent level.
                stack.pop()
    # This is necessary because k-components may only be reported at their
    # maximum k level. But we want to return a dictionary in which keys are
    # connectivity levels and values list of sets of components, without
    # skipping any connectivity level. Also, it's possible that subsets of
    # an already detected k-component appear at a level k. Checking for this
    # in the while loop above penalizes the common case. Thus we also have to
    # _consolidate all connectivity levels in _reconstruct_k_components.
    return _reconstruct_k_components(k_components)
def _consolidate(sets, k):
    """Yield the unions of groups of sets that (transitively) share at
    least ``k`` elements.

    An auxiliary graph is built with one node per input set and an edge
    between two sets whenever their intersection has ``k`` or more
    elements; each connected component of that graph is merged into a
    single set. See: http://rosettacode.org/wiki/Set_consolidation

    The iterative python implementation posted there is faster than this
    because of the overhead of building a Graph and calling
    nx.connected_components, but it's not clear for us if we can use it
    in NetworkX because there is no licence for the code.
    """
    indexed = dict(enumerate(sets))
    overlap_graph = nx.Graph()
    overlap_graph.add_nodes_from(indexed)
    edges = ((i, j) for i, j in combinations(indexed, 2)
             if len(indexed[i] & indexed[j]) >= k)
    overlap_graph.add_edges_from(edges)
    for component in nx.connected_components(overlap_graph):
        merged = set()
        for idx in component:
            merged |= indexed[idx]
        yield merged
def _generate_partition(G, cuts, k):
    """Yield candidate node subsets of G induced by removing the nodes of
    the minimum cut-sets in *cuts*, with the cut nodes re-attached to every
    side they border; overlapping candidates are consolidated first."""
    def has_nbrs_in_partition(G, node, partition):
        # True if *node* has at least one neighbor inside *partition*.
        for n in G[node]:
            if n in partition:
                return True
        return False
    components = []
    # Start from nodes of degree > k that belong to no cut-set.
    nodes = ({n for n, d in G.degree().items() if d > k} -
             {n for cut in cuts for n in cut})
    H = G.subgraph(nodes)
    for cc in nx.connected_components(H):
        component = set(cc)
        # Cut nodes belong to both sides of the induced cut, so add each
        # cut node to every component it has a neighbor in.
        for cut in cuts:
            for node in cut:
                if has_nbrs_in_partition(G, node, cc):
                    component.add(node)
        # Skip partitions that cover the whole graph (no refinement).
        if len(component) < G.order():
            components.append(component)
    for component in _consolidate(components, k+1):
        yield component
def _reconstruct_k_components(k_comps):
    """Fill in every connectivity level from 1 up to the maximum detected.

    k-components may only have been recorded at their maximum level, so
    each level ``k`` also inherits the components of level ``k + 1`` that
    are not already covered by the sets recorded at level ``k``; the
    candidates for each level are consolidated before being stored.
    """
    result = {}
    top = max(k_comps)
    for k in range(top, 0, -1):
        if k == top:
            candidates = k_comps[k]
        elif k not in k_comps:
            # Nothing recorded at this level: inherit from the level above.
            candidates = result[k + 1]
        else:
            covered = set.union(*k_comps[k])
            extra = [comp for comp in result[k + 1]
                     if any(node not in covered for node in comp)]
            candidates = k_comps[k] + extra if extra else k_comps[k]
        result[k] = list(_consolidate(candidates, k))
    return result
def build_k_number_dict(kcomps):
    """Return a mapping node -> highest connectivity level ``k`` at which
    the node appears in *kcomps* (a dict of k -> list of node sets)."""
    k_number = {}
    # Walk the levels in ascending order so that a higher k overwrites
    # any lower k previously stored for the same node.
    for level in sorted(kcomps):
        for comp in kcomps[level]:
            k_number.update(dict.fromkeys(comp, level))
    return k_number
| bsd-3-clause |
HyperBaton/ansible | lib/ansible/modules/cloud/azure/azure_rm_iotdevicemodule.py | 18 | 13431 | #!/usr/bin/python
#
# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_iotdevicemodule
version_added: "2.9"
short_description: Manage Azure IoT hub device module
description:
- Create, delete an Azure IoT hub device module.
options:
hub:
description:
- Name of IoT Hub.
type: str
required: true
hub_policy_name:
description:
- Policy name of the IoT Hub which will be used to query from IoT hub.
- This policy should have at least 'Registry Read' access.
type: str
required: true
hub_policy_key:
description:
- Key of the I(hub_policy_name).
type: str
required: true
name:
description:
- Name of the IoT hub device identity.
type: str
required: true
device:
description:
- Device name the module associate with.
required: true
type: str
state:
description:
- State of the IoT hub. Use C(present) to create or update an IoT hub device and C(absent) to delete an IoT hub device.
type: str
default: present
choices:
- absent
- present
auth_method:
description:
- The authorization type an entity is to be created with.
type: str
choices:
- sas
- certificate_authority
- self_signed
default: sas
primary_key:
description:
- Explicit self-signed certificate thumbprint to use for primary key.
- Explicit Shared Private Key to use for primary key.
type: str
aliases:
- primary_thumbprint
secondary_key:
description:
- Explicit self-signed certificate thumbprint to use for secondary key.
- Explicit Shared Private Key to use for secondary key.
type: str
aliases:
- secondary_thumbprint
twin_tags:
description:
- A section that the solution back end can read from and write to.
- Tags are not visible to device apps.
- "The tag can be nested dictionary, '.', '$', '#', ' ' is not allowed in the key."
- List is not supported.
type: dict
desired:
description:
- Used along with reported properties to synchronize device configuration or conditions.
- "The tag can be nested dictionary, '.', '$', '#', ' ' is not allowed in the key."
- List is not supported.
type: dict
extends_documentation_fragment:
- azure
- azure_tags
author:
- Yuwei Zhou (@yuwzho)
'''
EXAMPLES = '''
- name: Create simplest Azure IoT Hub device module
azure_rm_iotdevicemodule:
hub: myHub
name: Testing
device: mydevice
hub_policy_name: iothubowner
hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- name: Create Azure IoT Edge device module
azure_rm_iotdevice:
hub: myHub
device: mydevice
name: Testing
hub_policy_name: iothubowner
hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
edge_enabled: yes
- name: Create Azure IoT Hub device module with module twin properties and tag
azure_rm_iotdevice:
hub: myHub
name: Testing
device: mydevice
hub_policy_name: iothubowner
hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
twin_tags:
location:
country: US
city: Redmond
sensor: humidity
desired:
period: 100
'''
RETURN = '''
module:
description:
- IoT Hub device.
returned: always
type: dict
sample: {
"authentication": {
"symmetricKey": {
"primaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"secondaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
},
"type": "sas",
"x509Thumbprint": {
"primaryThumbprint": null,
"secondaryThumbprint": null
}
},
"cloudToDeviceMessageCount": 0,
"connectionState": "Disconnected",
"connectionStateUpdatedTime": "0001-01-01T00:00:00",
"deviceId": "mydevice",
"etag": "ODM2NjI3ODg=",
"generationId": "636904759703045768",
"lastActivityTime": "0001-01-01T00:00:00",
"managedBy": null,
"moduleId": "Testing"
}
''' # NOQA
import json
import copy
import re
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
from ansible.module_utils.common.dict_transformations import _snake_to_camel
try:
from msrestazure.tools import parse_resource_id
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMIoTDeviceModule(AzureRMModuleBase):
    """Ansible module implementation that manages a module identity on an
    Azure IoT hub device: create/update/delete of the identity plus
    synchronization of its twin tags and desired properties."""

    def __init__(self):
        self.module_arg_spec = dict(
            name=dict(type='str', required=True),
            hub_policy_name=dict(type='str', required=True),
            hub_policy_key=dict(type='str', required=True),
            hub=dict(type='str', required=True),
            device=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            twin_tags=dict(type='dict'),
            desired=dict(type='dict'),
            auth_method=dict(type='str', choices=['self_signed', 'sas', 'certificate_authority'], default='sas'),
            primary_key=dict(type='str', no_log=True, aliases=['primary_thumbprint']),
            secondary_key=dict(type='str', no_log=True, aliases=['secondary_thumbprint'])
        )
        self.results = dict(
            changed=False,
            id=None
        )
        # Populated from module parameters in exec_module().
        self.name = None
        self.hub = None
        self.device = None
        self.hub_policy_key = None
        self.hub_policy_name = None
        self.state = None
        self.twin_tags = None
        self.desired = None
        self.auth_method = None
        self.primary_key = None
        self.secondary_key = None
        # NOTE(review): 'certificate_authority' is a value of auth_method,
        # not a module parameter, so this required_if entry cannot refer to
        # a real option as written — presumably the thumbprint/key
        # parameters were intended; confirm against the documented options.
        required_if = [
            ['auth_method', 'self_signed', ['certificate_authority']]
        ]
        self._base_url = None
        self._mgmt_client = None
        self.query_parameters = {
            'api-version': '2018-06-30'
        }
        self.header_parameters = {
            'Content-Type': 'application/json; charset=utf-8',
            'accept-language': 'en-US'
        }
        super(AzureRMIoTDeviceModule, self).__init__(self.module_arg_spec, supports_check_mode=True, required_if=required_if)

    def exec_module(self, **kwargs):
        """Entry point invoked with the resolved module parameters; returns
        the results dict (module representation plus 'changed')."""
        for key in self.module_arg_spec.keys():
            setattr(self, key, kwargs[key])
        self._base_url = '{0}.azure-devices.net'.format(self.hub)
        config = {
            'base_url': self._base_url,
            'key': self.hub_policy_key,
            'policy': self.hub_policy_name
        }
        self._mgmt_client = self.get_data_svc_client(**config)
        changed = False
        module = self.get_module()
        if self.state == 'present':
            # Create the identity if it does not exist yet.
            if not module:
                changed = True
                auth = {'type': _snake_to_camel(self.auth_method)}
                if self.auth_method == 'self_signed':
                    auth['x509Thumbprint'] = {
                        'primaryThumbprint': self.primary_key,
                        'secondaryThumbprint': self.secondary_key
                    }
                elif self.auth_method == 'sas':
                    auth['symmetricKey'] = {
                        'primaryKey': self.primary_key,
                        'secondaryKey': self.secondary_key
                    }
                module = {
                    'deviceId': self.device,
                    'moduleId': self.name,
                    'authentication': auth
                }
            if changed and not self.check_mode:
                module = self.create_or_update_module(module)
            # Synchronize the module twin; is_equal() merges our values
            # into the fetched twin as a side effect.
            twin = self.get_twin()
            if not twin.get('tags'):
                twin['tags'] = dict()
            twin_change = False
            if self.twin_tags and not self.is_equal(self.twin_tags, twin['tags']):
                twin_change = True
            if self.desired and not self.is_equal(self.desired, twin['properties']['desired']):
                # NOTE(review): looks like leftover debug output — confirm.
                self.module.warn('desired')
                twin_change = True
            if twin_change and not self.check_mode:
                twin = self.update_twin(twin)
            changed = changed or twin_change
            module['tags'] = twin.get('tags') or dict()
            module['properties'] = twin['properties']
        elif module:
            # state == 'absent' and the module exists: delete it.
            if not self.check_mode:
                self.delete_module(module['etag'])
            changed = True
            module = None
        self.results = module or dict()
        self.results['changed'] = changed
        return self.results

    def is_equal(self, updated, original):
        """Return True if every key/value of *updated* is already present
        in *original* (recursively for nested dicts).

        Side effect: any missing or differing value is copied into
        *original*, so after the call *original* holds the merged document
        ready to be written back to the twin.
        """
        changed = False
        if not isinstance(updated, dict):
            self.fail('The Property or Tag should be a dict')
        for key in updated.keys():
            # NOTE(review): inside a character class '|' is a literal, so
            # keys containing '|' are rejected here as well as '.', '$',
            # '#' and whitespace.
            if re.search(r'[.|$|#|\s]', key):
                self.fail("Property or Tag name has invalid characters: '.', '$', '#' or ' '. Got '{0}'".format(key))
            original_value = original.get(key)
            updated_value = updated[key]
            if isinstance(updated_value, dict):
                if not isinstance(original_value, dict):
                    changed = True
                    original[key] = updated_value
                elif not self.is_equal(updated_value, original_value):
                    changed = True
            elif original_value != updated_value:
                changed = True
                original[key] = updated_value
        return not changed

    def create_or_update_module(self, module):
        """PUT the module identity; uses If-Match for optimistic
        concurrency when an etag is present. Returns the parsed response."""
        try:
            url = '/devices/{0}/modules/{1}'.format(self.device, self.name)
            headers = copy.copy(self.header_parameters)
            if module.get('etag'):
                headers['If-Match'] = '"{0}"'.format(module['etag'])
            request = self._mgmt_client.put(url, self.query_parameters)
            response = self._mgmt_client.send(request=request, headers=headers, content=module)
            if response.status_code not in [200, 201]:
                raise CloudError(response)
            return json.loads(response.text)
        except Exception as exc:
            self.fail('Error when creating or updating IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc)))

    def delete_module(self, etag):
        """DELETE the module identity, guarded by the given etag."""
        try:
            url = '/devices/{0}/modules/{1}'.format(self.device, self.name)
            headers = copy.copy(self.header_parameters)
            headers['If-Match'] = '"{0}"'.format(etag)
            request = self._mgmt_client.delete(url, self.query_parameters)
            response = self._mgmt_client.send(request=request, headers=headers)
            if response.status_code not in [204]:
                raise CloudError(response)
        except Exception as exc:
            self.fail('Error when deleting IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc)))

    def get_module(self):
        """Fetch the module identity; returns None when it does not exist
        (any error is treated as 'not found')."""
        try:
            url = '/devices/{0}/modules/{1}'.format(self.device, self.name)
            return self._https_get(url, self.query_parameters, self.header_parameters)
        except Exception:
            return None

    def get_twin(self):
        """Fetch the module twin document; failures abort the module run."""
        try:
            url = '/twins/{0}/modules/{1}'.format(self.device, self.name)
            return self._https_get(url, self.query_parameters, self.header_parameters)
        except Exception as exc:
            self.fail('Error when getting IoT Hub device {0} module twin {1}: {2}'.format(self.device, self.name, exc.message or str(exc)))

    def update_twin(self, twin):
        """PATCH the module twin (etag-guarded); returns the parsed
        response document."""
        try:
            url = '/twins/{0}/modules/{1}'.format(self.device, self.name)
            headers = copy.copy(self.header_parameters)
            headers['If-Match'] = twin['etag']
            request = self._mgmt_client.patch(url, self.query_parameters)
            response = self._mgmt_client.send(request=request, headers=headers, content=twin)
            if response.status_code not in [200]:
                raise CloudError(response)
            return json.loads(response.text)
        except Exception as exc:
            self.fail('Error when creating or updating IoT Hub device {0} module twin {1}: {2}'.format(self.device, self.name, exc.message or str(exc)))

    def _https_get(self, url, query_parameters, header_parameters):
        """GET *url* from the hub's data service and return the parsed
        JSON body; raises CloudError for non-200 responses."""
        request = self._mgmt_client.get(url, query_parameters)
        response = self._mgmt_client.send(request=request, headers=header_parameters, content=None)
        if response.status_code not in [200]:
            raise CloudError(response)
        return json.loads(response.text)
def main():
    # Constructing the module object runs it (execution is driven,
    # presumably, by the AzureRMModuleBase constructor — the standard
    # pattern for these Azure modules).
    AzureRMIoTDeviceModule()
if __name__ == '__main__':
    main()
| gpl-3.0 |
nickpack/django-oscar | src/oscar/apps/analytics/receivers.py | 33 | 3932 | import logging
from django.db.models import F
from django.dispatch import receiver
from django.db import IntegrityError
from oscar.core.loading import get_class, get_classes
from oscar.apps.search.signals import user_search
# Dynamically load the analytics models so that overridden apps are
# picked up.
UserSearch, UserRecord, ProductRecord, UserProductView = get_classes(
    'analytics.models', ['UserSearch', 'UserRecord', 'ProductRecord',
                         'UserProductView'])
# Use get_class (not get_classes) for single names: get_classes returns a
# list, which previously left ``product_viewed`` bound to a one-element
# list and only worked because @receiver accepts lists of signals.
product_viewed = get_class('catalogue.signals', 'product_viewed')
basket_addition = get_class('basket.signals', 'basket_addition')
order_placed = get_class('order.signals', 'order_placed')
# Helpers
logger = logging.getLogger('oscar.analytics')
def _update_counter(model, field_name, filter_kwargs, increment=1):
    """
    Efficiently updates a counter field by a given increment. Uses Django's
    update() call to fetch and update in one query.

    :param model: The model class of the recording model
    :param field_name: The name of the field to update
    :param filter_kwargs: Parameters to the ORM's filter() function to get the
        correct instance
    :param increment: Amount to add to the counter (defaults to 1)
    """
    try:
        record = model.objects.filter(**filter_kwargs)
        affected = record.update(**{field_name: F(field_name) + increment})
        if not affected:
            # No existing row matched: create one seeded with the increment.
            filter_kwargs[field_name] = increment
            model.objects.create(**filter_kwargs)
    except IntegrityError:
        # get_or_create sometimes fails due to MySQL's weird transactions, fail
        # silently
        logger.error(
            "IntegrityError when updating analytics counter for %s", model)
def _record_products_in_order(order):
    """Increment the purchase counter of every product in *order* by the
    quantity ordered.

    NOTE(review): this issues one counter update per order line; a bulk
    update may be possible.
    """
    for order_line in order.lines.all():
        _update_counter(ProductRecord, 'num_purchases',
                        {'product': order_line.product}, order_line.quantity)
def _record_user_order(user, order):
    """Update the per-user aggregate analytics record with a newly placed
    order (order/line/item counts, total spent, last order date)."""
    try:
        record = UserRecord.objects.filter(user=user)
        affected = record.update(
            num_orders=F('num_orders') + 1,
            num_order_lines=F('num_order_lines') + order.num_lines,
            num_order_items=F('num_order_items') + order.num_items,
            total_spent=F('total_spent') + order.total_incl_tax,
            date_last_order=order.date_placed)
        if not affected:
            # First recorded order for this user: create the record.
            UserRecord.objects.create(
                user=user, num_orders=1, num_order_lines=order.num_lines,
                num_order_items=order.num_items,
                total_spent=order.total_incl_tax,
                date_last_order=order.date_placed)
    except IntegrityError:
        # Concurrent creation can race; log and carry on.
        logger.error(
            "IntegrityError in analytics when recording a user order.")
# Receivers
@receiver(product_viewed)
def receive_product_view(sender, product, user, **kwargs):
    """Signal handler: record a product detail-page view."""
    if kwargs.get('raw', False):
        # raw=True indicates fixture loading; skip analytics then.
        return
    _update_counter(ProductRecord, 'num_views', {'product': product})
    if user and user.is_authenticated():
        _update_counter(UserRecord, 'num_product_views', {'user': user})
        UserProductView.objects.create(product=product, user=user)
@receiver(user_search)
def receive_product_search(sender, query, user, **kwargs):
    """Signal handler: store the search query of an authenticated user."""
    if kwargs.get('raw', False):
        # Skip analytics while fixtures are being loaded.
        return
    if user and user.is_authenticated():
        UserSearch._default_manager.create(user=user, query=query)
@receiver(basket_addition)
def receive_basket_addition(sender, product, user, **kwargs):
    """Signal handler: record a product being added to a basket."""
    if kwargs.get('raw', False):
        # raw=True indicates fixture loading; skip analytics then.
        return
    _update_counter(
        ProductRecord, 'num_basket_additions', {'product': product})
    if user and user.is_authenticated():
        _update_counter(UserRecord, 'num_basket_additions', {'user': user})
@receiver(order_placed)
def receive_order_placed(sender, order, user, **kwargs):
    """Signal handler: record a placed order against its products and,
    when known, the ordering user."""
    if kwargs.get('raw', False):
        # raw=True indicates fixture loading; skip analytics then.
        return
    _record_products_in_order(order)
    if user and user.is_authenticated():
        _record_user_order(user, order)
| bsd-3-clause |
idem2lyon/persomov | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/azubu.py | 143 | 3350 | from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import float_or_none
class AzubuIE(InfoExtractor):
    """youtube-dl extractor for azubu.tv VOD pages.

    Video metadata comes from the site's JSON API; the available
    renditions are described by the embedded 'stream_params' JSON blob.
    """
    _VALID_URL = r'https?://(?:www\.)?azubu\.tv/[^/]+#!/play/(?P<id>\d+)'
    _TESTS = [
        {
            'url': 'http://www.azubu.tv/GSL#!/play/15575/2014-hot6-cup-last-big-match-ro8-day-1',
            'md5': 'a88b42fcf844f29ad6035054bd9ecaf4',
            'info_dict': {
                'id': '15575',
                'ext': 'mp4',
                'title': '2014 HOT6 CUP LAST BIG MATCH Ro8 Day 1',
                'description': 'md5:d06bdea27b8cc4388a90ad35b5c66c01',
                'thumbnail': 're:^https?://.*\.jpe?g',
                'timestamp': 1417523507.334,
                'upload_date': '20141202',
                'duration': 9988.7,
                'uploader': 'GSL',
                'uploader_id': 414310,
                'view_count': int,
            },
        },
        {
            'url': 'http://www.azubu.tv/FnaticTV#!/play/9344/-fnatic-at-worlds-2014:-toyz---%22i-love-rekkles,-he-has-amazing-mechanics%22-',
            'md5': 'b72a871fe1d9f70bd7673769cdb3b925',
            'info_dict': {
                'id': '9344',
                'ext': 'mp4',
                'title': 'Fnatic at Worlds 2014: Toyz - "I love Rekkles, he has amazing mechanics"',
                'description': 'md5:4a649737b5f6c8b5c5be543e88dc62af',
                'thumbnail': 're:^https?://.*\.jpe?g',
                'timestamp': 1410530893.320,
                'upload_date': '20140912',
                'duration': 172.385,
                'uploader': 'FnaticTV',
                'uploader_id': 272749,
                'view_count': int,
            },
        },
    ]

    def _real_extract(self, url):
        """Fetch the API metadata for the video and build the info dict."""
        video_id = self._match_id(url)
        data = self._download_json(
            'http://www.azubu.tv/api/video/%s' % video_id, video_id)['data']
        title = data['title'].strip()
        description = data['description']
        thumbnail = data['thumbnail']
        view_count = data['view_count']
        uploader = data['user']['username']
        uploader_id = data['user']['id']
        # 'stream_params' is a JSON string embedded in the API response;
        # timestamps and durations in it are scaled from milliseconds.
        stream_params = json.loads(data['stream_params'])
        timestamp = float_or_none(stream_params['creationDate'], 1000)
        duration = float_or_none(stream_params['length'], 1000)
        renditions = stream_params.get('renditions') or []
        # The full-length rendition is listed separately from 'renditions'.
        video = stream_params.get('FLVFullLength') or stream_params.get('videoFullLength')
        if video:
            renditions.append(video)
        formats = [{
            'url': fmt['url'],
            'width': fmt['frameWidth'],
            'height': fmt['frameHeight'],
            'vbr': float_or_none(fmt['encodingRate'], 1000),
            'filesize': fmt['size'],
            'vcodec': fmt['videoCodec'],
            'container': fmt['videoContainer'],
        } for fmt in renditions if fmt['url']]
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'duration': duration,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'view_count': view_count,
            'formats': formats,
        }
| gpl-3.0 |
jspargo/AneMo | thermo/flask/lib/python2.7/site-packages/flask/logging.py | 838 | 1398 | # -*- coding: utf-8 -*-
"""
flask.logging
~~~~~~~~~~~~~
Implements the logging support for Flask.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from logging import getLogger, StreamHandler, Formatter, getLoggerClass, DEBUG
def create_logger(app):
    """Creates a logger for the given application. This logger works
    similar to a regular Python logger but changes the effective logging
    level based on the application's debug flag. Furthermore this
    function also removes all attached handlers in case there was a
    logger with the log name before.
    """
    base = getLoggerClass()

    class DebugLogger(base):
        # Pretend to be at DEBUG level while the app is debugging and no
        # explicit level has been set on the logger itself.
        def getEffectiveLevel(self):
            if self.level == 0 and app.debug:
                return DEBUG
            return base.getEffectiveLevel(self)

    class DebugHandler(StreamHandler):
        # Only emit records while the application is in debug mode.
        def emit(self, record):
            if app.debug:
                StreamHandler.emit(self, record)

    handler = DebugHandler()
    handler.setLevel(DEBUG)
    handler.setFormatter(Formatter(app.debug_log_format))
    logger = getLogger(app.logger_name)
    # Clear any handlers left over from a previous logger with this name.
    del logger.handlers[:]
    logger.__class__ = DebugLogger
    logger.addHandler(handler)
    return logger
| gpl-2.0 |
varunchitre15/thunderzap_tomato | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Command-line handling: optional [comm] filter and refresh [interval].
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
    sys.exit(usage)
if len(sys.argv) > 2:
    # Both a command filter and an interval were given.
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    # A single argument is an interval if numeric, otherwise a command
    # name to filter on.
    try:
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval
def trace_begin():
    # Called by perf before event processing starts: spawn the background
    # thread that periodically prints and resets the syscall totals.
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, args):
    # Per-event callback: count one syscall entry, optionally restricted
    # to the command name given on the command line.
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        # First occurrence of this id: the autodict leaf is not an int yet.
        syscalls[id] = 1
def print_syscall_totals(interval):
	# Refresh loop, run forever in the background thread started by
	# trace_begin(): clears the terminal, prints per-syscall counts sorted
	# by frequency, then resets the counters so each display shows only
	# the events of the last `interval` seconds (top-like behaviour).
	while 1:
		clear_term()
		if for_comm is not None:
			print "\nsyscall events for %s:\n\n" % (for_comm),
		else:
			print "\nsyscall events:\n\n",
		print "%-40s %10s\n" % ("event", "count"),
		print "%-40s %10s\n" % ("----------------------------------------", \
					 "----------"),
		for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
				      reverse = True):
			try:
				print "%-40s %10d\n" % (syscall_name(id), val),
			except TypeError:
				# syscall_name() may fail for unknown ids; skip the row.
				pass
		syscalls.clear()
		time.sleep(interval)
| gpl-2.0 |
abhijeet9920/python_project | develop/lib/python3.4/site-packages/pkg_resources/_vendor/six.py | 2715 | 30098 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.10.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves its target only on first attribute access.

    Subclasses implement ``_resolve()``; the result is cached on the
    owning instance and the descriptor then removes itself from the class
    so later lookups hit the cached value directly.
    """
    def __init__(self, name):
        # name: attribute under which the resolved object will be cached.
        self.name = name
    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result)  # Invokes __set__.
        try:
            # This is a bit ugly, but it avoids running this again by
            # removing this descriptor.
            delattr(obj.__class__, self.name)
        except AttributeError:
            pass
        return result
class MovedModule(_LazyDescr):
    """Lazy placeholder for a module that was renamed between Python 2 and 3.

    ``old`` is the Python 2 module name; ``new`` the Python 3 name
    (defaulting to ``name`` itself when not given).
    """
    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name  # same name on Python 3 unless overridden
            self.mod = new
        else:
            self.mod = old
    def _resolve(self):
        return _import_module(self.mod)
    def __getattr__(self, attr):
        _module = self._resolve()
        value = getattr(_module, attr)
        # Cache on the instance so __getattr__ fires only once per attribute.
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):
    """Module subclass whose attributes are lazily-resolved _LazyDescr objects."""
    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        self.__doc__ = self.__class__.__doc__
    def __dir__(self):
        # Advertise the lazily provided attributes without resolving them.
        attrs = ["__doc__", "__name__"]
        attrs += [attr.name for attr in self._moved_attributes]
        return attrs
    # Subclasses should override this
    _moved_attributes = []
class MovedAttribute(_LazyDescr):
    """Lazy placeholder for an attribute that moved between py2/py3 modules.

    Stores the module and attribute name appropriate for the running
    Python version; ``_resolve()`` imports the module and fetches the
    attribute on first access.
    """
    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # Attribute name falls back: new_attr -> old_attr -> name.
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr
    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _SixMetaPathImporter(object):
    """
    A meta path importer to import six.moves and its submodules.
    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python3
    """
    def __init__(self, six_module_name):
        # known_modules maps "<six>.<alias>" -> module or MovedModule
        # placeholder registered via _add_module().
        self.name = six_module_name
        self.known_modules = {}
    def _add_module(self, mod, *fullnames):
        # Register `mod` under one or more aliases below this package.
        for fullname in fullnames:
            self.known_modules[self.name + "." + fullname] = mod
    def _get_module(self, fullname):
        # Lookup by alias relative to this package (raises KeyError if absent).
        return self.known_modules[self.name + "." + fullname]
    def find_module(self, fullname, path=None):
        # PEP 302 finder: claim only the modules we registered.
        if fullname in self.known_modules:
            return self
        return None
    def __get_module(self, fullname):
        try:
            return self.known_modules[fullname]
        except KeyError:
            raise ImportError("This loader does not know module " + fullname)
    def load_module(self, fullname):
        # PEP 302 loader: return the cached module, resolving lazy
        # MovedModule placeholders to the real renamed module on demand.
        try:
            # in case of a reload
            return sys.modules[fullname]
        except KeyError:
            pass
        mod = self.__get_module(fullname)
        if isinstance(mod, MovedModule):
            mod = mod._resolve()
        else:
            mod.__loader__ = self
        sys.modules[fullname] = mod
        return mod
    def is_package(self, fullname):
        """
        Return true, if the named module is a package.
        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        """
        return hasattr(self.__get_module(fullname), "__path__")
    def get_code(self, fullname):
        """Return None
        Required, if is_package is implemented"""
        self.__get_module(fullname)  # eventually raises ImportError
        return None
    get_source = get_code  # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
    """Add an item to six.moves."""
    # `move` is a MovedAttribute/MovedModule descriptor; attaching it to
    # the lazy module class makes it resolve on first access.
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # Not on the class; it may have been resolved and cached directly
        # in the moves module's dict.
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
if PY3:
    # Python 3 spellings of the function/method introspection attributes,
    # consumed by the operator.attrgetter helpers defined below.
    _meth_func = "__func__"
    _meth_self = "__self__"
    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"
else:
    # Legacy Python 2 spellings of the same attributes.
    _meth_func = "im_func"
    _meth_self = "im_self"
    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"
try:
    # next() exists as a builtin on Python 2.6+.
    advance_iterator = next
except NameError:
    # Python < 2.6: call the iterator protocol method directly.
    def advance_iterator(it):
        return it.next()
next = advance_iterator
try:
    callable = callable
except NameError:
    # callable() was removed in Python 3.0/3.1; emulate it by checking
    # for __call__ anywhere in the type's MRO.
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
    def get_unbound_function(unbound):
        # Python 3 has no unbound methods; the plain function is the answer.
        return unbound
    create_bound_method = types.MethodType
    def create_unbound_method(func, cls):
        # On Python 3 an "unbound method" is just the function itself.
        return func
    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func
    def create_bound_method(func, obj):
        return types.MethodType(func, obj, obj.__class__)
    def create_unbound_method(func, cls):
        # Python 2 unbound method: a MethodType with no instance bound.
        return types.MethodType(func, None, cls)
    # Mix-in base that maps Python 2's next() onto the py3-style __next__.
    class Iterator(object):
        def next(self):
            return type(self).__next__(self)
    callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
    """Dispatch to the version-appropriate unittest count-equality assertion."""
    method = getattr(self, _assertCountEqual)
    return method(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
    """Dispatch to the version-appropriate unittest raises-regex assertion."""
    method = getattr(self, _assertRaisesRegex)
    return method(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
    """Dispatch to the version-appropriate unittest regex-match assertion."""
    method = getattr(self, _assertRegex)
    return method(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
    # functools.wraps before Python 3.4 did not set __wrapped__; emulate
    # it so introspection tools can still find the original function.
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        def wrapper(f):
            f = functools.wraps(wrapped, assigned, updated)(f)
            f.__wrapped__ = wrapped
            return f
        return wrapper
else:
    wraps = functools.wraps
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(meta):
        def __new__(cls, name, this_bases, d):
            # this_bases is the temporary class; rebuild the real class
            # from the bases originally passed to with_metaclass().
            return meta(name, bases, d)
    return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator that re-creates the decorated class with *metaclass*.

    The class body is copied into a fresh class built by *metaclass*,
    dropping the descriptors (``__dict__``/``__weakref__``) and slot
    members that the new class will create for itself.
    """
    def wrapper(cls):
        body = dict(cls.__dict__)
        slots = body.get('__slots__')
        if slots is not None:
            # A bare string names a single slot.
            slot_names = [slots] if isinstance(slots, str) else slots
            for member in slot_names:
                body.pop(member)
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.
    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    if PY2:
        if '__str__' not in klass.__dict__:
            raise ValueError("@python_2_unicode_compatible cannot be applied "
                             "to %s because it doesn't define __str__()." %
                             klass.__name__)
        # On Python 2, route unicode() through the text-returning __str__
        # and make str() return its UTF-8 encoded bytes.
        klass.__unicode__ = klass.__str__
        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| mit |
davidwilson-85/easymap | graphic_output/Pillow-4.2.1/Tests/test_file_icns.py | 1 | 2781 | from helper import unittest, PillowTestCase
from PIL import Image
import sys
# sample icon file
TEST_FILE = "Tests/images/pillow.icns"
enable_jpeg2k = hasattr(Image.core, 'jp2klib_version')
class TestFileIcns(PillowTestCase):
    """Tests for reading (and, on macOS, writing) Apple .icns icon files."""
    def test_sanity(self):
        # Loading this icon by default should result in the largest size
        # (512x512@2x) being loaded
        im = Image.open(TEST_FILE)
        im.load()
        self.assertEqual(im.mode, "RGBA")
        self.assertEqual(im.size, (1024, 1024))
        self.assertEqual(im.format, "ICNS")
    @unittest.skipIf(sys.platform != 'darwin',
                     "requires MacOS")
    def test_save(self):
        # Round-trip: save to a temp .icns and verify the reread metadata.
        im = Image.open(TEST_FILE)
        temp_file = self.tempfile("temp.icns")
        im.save(temp_file)
        reread = Image.open(temp_file)
        self.assertEqual(reread.mode, "RGBA")
        self.assertEqual(reread.size, (1024, 1024))
        self.assertEqual(reread.format, "ICNS")
    def test_sizes(self):
        # Check that we can load all of the sizes, and that the final pixel
        # dimensions are as expected
        im = Image.open(TEST_FILE)
        for w, h, r in im.info['sizes']:
            # (width, height, scale): pixel dimensions are w*r by h*r.
            wr = w * r
            hr = h * r
            im2 = Image.open(TEST_FILE)
            im2.size = (w, h, r)
            im2.load()
            self.assertEqual(im2.mode, 'RGBA')
            self.assertEqual(im2.size, (wr, hr))
    def test_older_icon(self):
        # This icon was made with Icon Composer rather than iconutil; it still
        # uses PNG rather than JP2, however (since it was made on 10.9).
        im = Image.open('Tests/images/pillow2.icns')
        for w, h, r in im.info['sizes']:
            wr = w * r
            hr = h * r
            im2 = Image.open('Tests/images/pillow2.icns')
            im2.size = (w, h, r)
            im2.load()
            self.assertEqual(im2.mode, 'RGBA')
            self.assertEqual(im2.size, (wr, hr))
    def test_jp2_icon(self):
        # This icon was made by using Uli Kusterer's oldiconutil to replace
        # the PNG images with JPEG 2000 ones. The advantage of doing this is
        # that OS X 10.5 supports JPEG 2000 but not PNG; some commercial
        # software therefore does just this.
        # (oldiconutil is here: https://github.com/uliwitness/oldiconutil)
        if not enable_jpeg2k:
            # Pillow built without JPEG 2000 support; nothing to test.
            return
        im = Image.open('Tests/images/pillow3.icns')
        for w, h, r in im.info['sizes']:
            wr = w * r
            hr = h * r
            im2 = Image.open('Tests/images/pillow3.icns')
            im2.size = (w, h, r)
            im2.load()
            self.assertEqual(im2.mode, 'RGBA')
            self.assertEqual(im2.size, (wr, hr))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
JulienMcJay/eclock | windows/Python27/Lib/site-packages/pip/_vendor/html5lib/treewalkers/_base.py | 169 | 6718 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
import gettext
_ = gettext.gettext
from ..constants import voidElements, spaceCharacters
spaceCharacters = "".join(spaceCharacters)
class TreeWalker(object):
    """Base class for walkers that emit a stream of serialization tokens.

    Subclasses implement ``__iter__`` to traverse a concrete tree type;
    the helper methods below build the individual token dicts consumed by
    the serializer.
    """
    def __init__(self, tree):
        self.tree = tree
    def __iter__(self):
        raise NotImplementedError
    def error(self, msg):
        """Build a SerializeError token carrying *msg*."""
        return {"type": "SerializeError", "data": msg}
    def emptyTag(self, namespace, name, attrs, hasChildren=False):
        """Yield an EmptyTag token, plus an error token if it claims children."""
        assert namespace is None or isinstance(namespace, text_type), type(namespace)
        assert isinstance(name, text_type), type(name)
        assert all((namespace is None or isinstance(namespace, text_type)) and
                   isinstance(name, text_type) and
                   isinstance(value, text_type)
                   for (namespace, name), value in attrs.items())
        yield {"type": "EmptyTag", "name": name,
               "namespace": namespace,
               "data": attrs}
        if hasChildren:
            # Void elements can never legally have children.
            yield self.error(_("Void element has children"))
    def startTag(self, namespace, name, attrs):
        """Build a StartTag token."""
        assert namespace is None or isinstance(namespace, text_type), type(namespace)
        assert isinstance(name, text_type), type(name)
        assert all((namespace is None or isinstance(namespace, text_type)) and
                   isinstance(name, text_type) and
                   isinstance(value, text_type)
                   for (namespace, name), value in attrs.items())
        return {"type": "StartTag",
                "name": name,
                "namespace": namespace,
                "data": attrs}
    def endTag(self, namespace, name):
        """Build an EndTag token."""
        assert namespace is None or isinstance(namespace, text_type), type(namespace)
        # BUG FIX: the assertion message previously reported type(namespace)
        # for a failed type check on `name`.
        assert isinstance(name, text_type), type(name)
        return {"type": "EndTag",
                "name": name,
                "namespace": namespace,
                "data": {}}
    def text(self, data):
        """Split *data* into SpaceCharacters / Characters tokens.

        Leading and trailing whitespace are emitted as separate
        SpaceCharacters tokens around the (possibly absent) Characters core.
        """
        assert isinstance(data, text_type), type(data)
        # (removed a no-op `data = data` statement here)
        middle = data.lstrip(spaceCharacters)
        left = data[:len(data) - len(middle)]
        if left:
            yield {"type": "SpaceCharacters", "data": left}
        data = middle
        middle = data.rstrip(spaceCharacters)
        right = data[len(middle):]
        if middle:
            yield {"type": "Characters", "data": middle}
        if right:
            yield {"type": "SpaceCharacters", "data": right}
    def comment(self, data):
        """Build a Comment token."""
        assert isinstance(data, text_type), type(data)
        return {"type": "Comment", "data": data}
    def doctype(self, name, publicId=None, systemId=None, correct=True):
        """Build a Doctype token; a missing name becomes the empty string."""
        assert name is None or isinstance(name, text_type), type(name)
        assert publicId is None or isinstance(publicId, text_type), type(publicId)
        assert systemId is None or isinstance(systemId, text_type), type(systemId)
        return {"type": "Doctype",
                "name": name if name is not None else "",
                "publicId": publicId,
                "systemId": systemId,
                "correct": correct}
    def entity(self, name):
        """Build an Entity token."""
        assert isinstance(name, text_type), type(name)
        return {"type": "Entity", "name": name}
    def unknown(self, nodeType):
        """Report an unrecognized node type as a SerializeError token."""
        return self.error(_("Unknown node type: ") + nodeType)
class RecursiveTreeWalker(TreeWalker):
    """Tree walker skeleton for recursive traversal.

    Subclasses implement ``walkChildren``; ``element`` yields the token
    stream for a single element and (recursively) its children.
    """
    def walkChildren(self, node):
        raise NotImplementedError
    def element(self, node, namespace, name, attrs, hasChildren):
        if name in voidElements:
            for token in self.emptyTag(namespace, name, attrs, hasChildren):
                yield token
        else:
            # BUG FIX: startTag/endTag take (namespace, name[, attrs]);
            # previously the namespace argument was dropped, so attrs was
            # passed where name belongs and the asserts in TreeWalker fired.
            yield self.startTag(namespace, name, attrs)
            if hasChildren:
                for token in self.walkChildren(node):
                    yield token
            yield self.endTag(namespace, name)
from xml.dom import Node

# DOM node-type codes returned by getNodeDetails() implementations;
# UNKNOWN is a sentinel for node types with no DOM equivalent.
DOCUMENT = Node.DOCUMENT_NODE
DOCTYPE = Node.DOCUMENT_TYPE_NODE
TEXT = Node.TEXT_NODE
ELEMENT = Node.ELEMENT_NODE
COMMENT = Node.COMMENT_NODE
ENTITY = Node.ENTITY_NODE
UNKNOWN = "<#UNKNOWN#>"
class NonRecursiveTreeWalker(TreeWalker):
    # Iterative (stack-free) walker: subclasses describe the tree through the
    # four navigation hooks below and __iter__ emits tokens in document order.

    def getNodeDetails(self, node):
        # Return a tuple (type, ...) for *node*; type is one of DOCUMENT,
        # DOCTYPE, TEXT, ELEMENT, COMMENT, ENTITY or anything else (UNKNOWN).
        raise NotImplementedError

    def getFirstChild(self, node):
        raise NotImplementedError

    def getNextSibling(self, node):
        raise NotImplementedError

    def getParentNode(self, node):
        raise NotImplementedError

    def __iter__(self):
        # Depth-first traversal starting at the tree root (self.tree).
        currentNode = self.tree
        while currentNode is not None:
            details = self.getNodeDetails(currentNode)
            type, details = details[0], details[1:]
            hasChildren = False
            if type == DOCTYPE:
                yield self.doctype(*details)
            elif type == TEXT:
                for token in self.text(*details):
                    yield token
            elif type == ELEMENT:
                namespace, name, attributes, hasChildren = details
                if name in voidElements:
                    # Void elements become a single EmptyTag token; any
                    # (invalid) children are deliberately not descended into.
                    for token in self.emptyTag(namespace, name, attributes,
                                               hasChildren):
                        yield token
                    hasChildren = False
                else:
                    yield self.startTag(namespace, name, attributes)
            elif type == COMMENT:
                yield self.comment(details[0])
            elif type == ENTITY:
                yield self.entity(details[0])
            elif type == DOCUMENT:
                hasChildren = True
            else:
                yield self.unknown(details[0])
            if hasChildren:
                firstChild = self.getFirstChild(currentNode)
            else:
                firstChild = None
            if firstChild is not None:
                currentNode = firstChild
            else:
                # No descent possible: emit EndTag tokens while climbing back
                # up until a next sibling (or the root itself) is reached.
                while currentNode is not None:
                    details = self.getNodeDetails(currentNode)
                    type, details = details[0], details[1:]
                    if type == ELEMENT:
                        namespace, name, attributes, hasChildren = details
                        if name not in voidElements:
                            yield self.endTag(namespace, name)
                    if self.tree is currentNode:
                        # Back at the root: traversal is complete.
                        currentNode = None
                        break
                    nextSibling = self.getNextSibling(currentNode)
                    if nextSibling is not None:
                        currentNode = nextSibling
                        break
                    else:
                        currentNode = self.getParentNode(currentNode)
| gpl-2.0 |
DinoCow/airflow | airflow/providers/amazon/aws/sensors/step_function_execution.py | 5 | 3125 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from typing import Optional
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.step_function import StepFunctionHook
from airflow.sensors.base import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class StepFunctionExecutionSensor(BaseSensorOperator):
    """
    Poll an AWS Step Functions State Machine Execution until it terminates.

    A failure state (FAILED/TIMED_OUT/ABORTED) fails the task by raising
    :class:`~airflow.exceptions.AirflowException`; on success the sensor
    pushes the state machine's output to XCom under the key ``output``.

    :param execution_arn: execution_arn to check the state of
    :type execution_arn: str
    :param aws_conn_id: aws connection to use, defaults to 'aws_default'
    :type aws_conn_id: str
    """

    INTERMEDIATE_STATES = ('RUNNING',)
    FAILURE_STATES = ('FAILED', 'TIMED_OUT', 'ABORTED')
    SUCCESS_STATES = ('SUCCEEDED',)

    template_fields = ['execution_arn']
    template_ext = ()
    ui_color = '#66c3ff'

    @apply_defaults
    def __init__(
        self,
        *,
        execution_arn: str,
        aws_conn_id: str = 'aws_default',
        region_name: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.execution_arn = execution_arn
        self.aws_conn_id = aws_conn_id
        self.region_name = region_name
        self.hook: Optional[StepFunctionHook] = None

    def poke(self, context):
        status_response = self.get_hook().describe_execution(self.execution_arn)
        state = status_response['status']
        output = json.loads(status_response['output']) if 'output' in status_response else None

        if state in self.FAILURE_STATES:
            raise AirflowException(f'Step Function sensor failed. State Machine Output: {output}')
        if state in self.INTERMEDIATE_STATES:
            return False

        # Terminal success: expose the state machine output to downstream tasks.
        self.log.info('Doing xcom_push of output')
        self.xcom_push(context, 'output', output)
        return True

    def get_hook(self) -> StepFunctionHook:
        """Create and return a StepFunctionHook"""
        if self.hook is None:
            self.hook = StepFunctionHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
        return self.hook
| apache-2.0 |
kmad1729/website | django/contrib/sessions/middleware.py | 323 | 1888 | import time
from django.conf import settings
from django.utils.cache import patch_vary_headers
from django.utils.http import cookie_date
from django.utils.importlib import import_module
class SessionMiddleware(object):
    def process_request(self, request):
        """Attach a SessionStore (keyed by the session cookie, if any)."""
        engine = import_module(settings.SESSION_ENGINE)
        session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME, None)
        request.session = engine.SessionStore(session_key)

    def process_response(self, request, response):
        """
        If request.session was modified, or if the configuration is to save the
        session every time, save the changes and set a session cookie.
        """
        try:
            accessed = request.session.accessed
            modified = request.session.modified
        except AttributeError:
            # No usable session on this request; nothing to persist.
            return response

        if accessed:
            patch_vary_headers(response, ('Cookie',))
        if modified or settings.SESSION_SAVE_EVERY_REQUEST:
            if request.session.get_expire_at_browser_close():
                max_age = None
                expires = None
            else:
                max_age = request.session.get_expiry_age()
                expires = cookie_date(time.time() + max_age)
            # Save the session data and refresh the client cookie.
            request.session.save()
            response.set_cookie(
                settings.SESSION_COOKIE_NAME,
                request.session.session_key,
                max_age=max_age,
                expires=expires,
                domain=settings.SESSION_COOKIE_DOMAIN,
                path=settings.SESSION_COOKIE_PATH,
                secure=settings.SESSION_COOKIE_SECURE or None,
                httponly=settings.SESSION_COOKIE_HTTPONLY or None,
            )
        return response
| bsd-3-clause |
40223236/40223236-50 | static/Brython3.1.1-20150328-091302/Lib/long_int1/__init__.py | 503 | 3858 | from browser import html, document, window
import javascript
#memorize/cache?
def _get_value(other):
    """Unwrap a LongInt to its underlying JS number; pass anything else through."""
    return other.value if isinstance(other, LongInt) else other
class BigInt:
    """Operator glue shared by the JS-backed big-integer wrappers.

    Subclasses (DecimalJS/BigNumberJS/BigJS) set ``self.value`` to a
    JavaScript arbitrary-precision number object; the dunder methods below
    translate Python operators into calls on that object.  Methods whose
    body is ``pass`` are unimplemented placeholders (bitwise ops).
    """

    def __init__(self):
        pass

    def __abs__(self):
        return LongInt(self.value.abs())

    def __add__(self, other):
        return LongInt(self.value.plus(_get_value(other)))

    def __and__(self, other):
        pass  # bitwise AND not supported by the JS backends

    def __divmod__(self, other):
        _value = _get_value(other)
        return LongInt(self.value.div(_value)), LongInt(self.value.mod(_value))

    def __div__(self, other):
        return LongInt(self.value.div(_get_value(other)))

    def __eq__(self, other):
        return bool(self.value.eq(_get_value(other)))

    def __floordiv__(self, other):
        return LongInt(self.value.div(_get_value(other)).floor())

    def __ge__(self, other):
        return bool(self.value.gte(_get_value(other)))

    def __gt__(self, other):
        return bool(self.value.gt(_get_value(other)))

    def __index__(self):
        # Allows use in slicing/hex()/etc. when the value is integral.
        if self.value.isInt():
            return int(self.value.toNumber())
        raise TypeError("This is not an integer")

    def __le__(self, other):
        return bool(self.value.lte(_get_value(other)))

    def __lt__(self, other):
        return bool(self.value.lt(_get_value(other)))

    def __lshift__(self, shift):
        # Emulated as multiplication by 2**shift (no native shift in JS libs).
        if isinstance(shift, int):
            _v = LongInt(2)**shift
            return LongInt(self.value.times(_v.value))

    def __mod__(self, other):
        return LongInt(self.value.mod(_get_value(other)))

    def __mul__(self, other):
        return LongInt(self.value.times(_get_value(other)))

    def __neg__(self):
        # Bug fix: __neg__ is unary -- the previous signature demanded an
        # ``other`` argument, so the ``-x`` operator always raised TypeError.
        return LongInt(self.value.neg())

    def __or__(self, other):
        pass  # bitwise OR not supported by the JS backends

    def __pow__(self, other):
        return LongInt(self.value.pow(_get_value(other)))

    def __rshift__(self, other):
        pass  # right shift not supported by the JS backends

    def __sub__(self, other):
        return LongInt(self.value.minus(_get_value(other)))

    def __repr__(self):
        # Bug fix: instances have no __name__ attribute; use the class name.
        return "%s(%s)" % (self.__class__.__name__, self.value.toString(10))

    def __str__(self):
        return "%s(%s)" % (self.__class__.__name__, self.value.toString(10))

    def __xor__(self, other):
        pass  # bitwise XOR not supported by the JS backends
_precision = 20


def get_precision(value):
    """Return the number of decimal digits needed to print *value*."""
    text = value.value.toString(10) if isinstance(value, LongInt) else value
    return len(str(text))
class DecimalJS(BigInt):
    # LongInt backend based on the decimal.js library.
    def __init__(self, value=0, base=10):
        global _precision
        # Grow the library-wide working precision so this value fits.
        # NOTE(review): indentation was lost in transit -- this assumes
        # Decimal.precision is only re-set when the required precision
        # actually grows; confirm against upstream Brython.
        _prec=get_precision(value)
        if _prec > _precision:
            _precision=_prec
            window.eval('Decimal.precision=%s' % _precision)
        self.value=javascript.JSConstructor(window.Decimal)(value, base)
class BigNumberJS(BigInt):
    """LongInt backend based on the bignumber.js library (the default)."""

    def __init__(self, value=0, base=10):
        ctor = javascript.JSConstructor(window.BigNumber)
        self.value = ctor(value, base)
class BigJS(BigInt):
    """LongInt backend based on the big.js library (base-10 only)."""

    def __init__(self, value=0, base=10):
        self.value = javascript.JSConstructor(window.Big)(value, base)

    def __floordiv__(self, other):
        # big.js has no floor(); emulate floor division by rounding the
        # quotient toward negative infinity.
        _v = LongInt(self.value.div(_get_value(other)))
        if _v >= 0:
            return LongInt(_v.value.round(0, 0))  # round down
        return LongInt(_v.value.round(0, 3))  # round up

    def __pow__(self, other):
        # Bug fix: a plain int exponent previously matched neither branch
        # and raised NameError on ``_value``; coerce anything that is not
        # a LongInt through int().
        if isinstance(other, LongInt):
            _value = int(other.value.toString(10))
        else:
            _value = int(other)
        return LongInt(self.value.pow(_value))
#_path = __file__[:__file__.rfind('/')]+'/'
_path = __BRYTHON__.brython_path + 'Lib/long_int1/'

# Exactly one JS backend library is loaded below and bound to LongInt;
# switch backends by (un)commenting the matching pair of lines.

#to use decimal.js library uncomment these 2 lines
#javascript.load(_path+'decimal.min.js', ['Decimal'])
#LongInt=DecimalJS

#to use bignumber.js library uncomment these 2 lines
javascript.load(_path+'bignumber.min.js', ['BigNumber'])
LongInt=BigNumberJS

#big.js does not have a "base" so only base 10 stuff works.
#to use big.js library uncomment these 2 lines
#javascript.load(_path+'big.min.js', ['Big'])
#LongInt=BigJS
| gpl-3.0 |
edry/edx-platform | common/djangoapps/student/cookies.py | 116 | 5305 | """
Utility functions for setting "logged in" cookies used by subdomains.
"""
import time
import json
from django.utils.http import cookie_date
from django.conf import settings
from django.core.urlresolvers import reverse, NoReverseMatch
def set_logged_in_cookies(request, response, user):
    """
    Set cookies indicating that the user is logged in.

    Some installations have an external marketing site configured
    that displays a different UI when the user is logged in
    (e.g. a link to the student dashboard instead of to the login page)

    Currently, two cookies are set:

    * EDXMKTG_LOGGED_IN_COOKIE_NAME: Set to 'true' if the user is logged in.
    * EDXMKTG_USER_INFO_COOKIE_VERSION: JSON-encoded dictionary with user information (see below).

    The user info cookie has the following format:
    {
        "version": 1,
        "username": "test-user",
        "email": "test-user@example.com",
        "header_urls": {
            "account_settings": "https://example.com/account/settings",
            "learner_profile": "https://example.com/u/test-user",
            "logout": "https://example.com/logout"
        }
    }

    Arguments:
        request (HttpRequest): The request to the view, used to calculate
            the cookie's expiration date based on the session expiration date.
        response (HttpResponse): The response on which the cookie will be set.
        user (User): The currently logged in user.

    Returns:
        HttpResponse
    """
    # Match the cookie lifetime to the Django session: a browser-session
    # cookie when the session ends at browser close, otherwise the
    # session's remaining expiry age.
    if request.session.get_expire_at_browser_close():
        max_age = None
        expires = None
    else:
        max_age = request.session.get_expiry_age()
        expires_time = time.time() + max_age
        expires = cookie_date(expires_time)

    cookie_settings = {
        'max_age': max_age,
        'expires': expires,
        'domain': settings.SESSION_COOKIE_DOMAIN,
        'path': '/',
        'httponly': None,
    }

    # Backwards compatibility: set the cookie indicating that the user
    # is logged in.  This is just a boolean value, so it's not very useful.
    # In the future, we should be able to replace this with the "user info"
    # cookie set below.
    response.set_cookie(
        settings.EDXMKTG_LOGGED_IN_COOKIE_NAME.encode('utf-8'),
        'true',
        secure=None,
        **cookie_settings
    )

    # Set a cookie with user info.  This can be used by external sites
    # to customize content based on user information.  Currently,
    # we include information that's used to customize the "account"
    # links in the header of subdomain sites (such as the marketing site).
    header_urls = {'logout': reverse('logout')}

    # Unfortunately, this app is currently used by both the LMS and Studio login pages.
    # If we're in Studio, we won't be able to reverse the account/profile URLs.
    # To handle this, we don't add the URLs if we can't reverse them.
    # External sites will need to have fallback mechanisms to handle this case
    # (most likely just hiding the links).
    try:
        header_urls['account_settings'] = reverse('account_settings')
        header_urls['learner_profile'] = reverse('learner_profile', kwargs={'username': user.username})
    except NoReverseMatch:
        pass

    # Convert relative URL paths to absolute URIs
    for url_name, url_path in header_urls.iteritems():
        header_urls[url_name] = request.build_absolute_uri(url_path)

    user_info = {
        'version': settings.EDXMKTG_USER_INFO_COOKIE_VERSION,
        'username': user.username,
        'email': user.email,
        'header_urls': header_urls,
    }

    # In production, TLS should be enabled so that this cookie is encrypted
    # when we send it.  We also need to set "secure" to True so that the browser
    # will transmit it only over secure connections.
    #
    # In non-production environments (acceptance tests, devstack, and sandboxes),
    # we still want to set this cookie.  However, we do NOT want to set it to "secure"
    # because the browser won't send it back to us.  This can cause an infinite redirect
    # loop in the third-party auth flow, which calls `is_logged_in_cookie_set` to determine
    # whether it needs to set the cookie or continue to the next pipeline stage.
    user_info_cookie_is_secure = request.is_secure()

    response.set_cookie(
        settings.EDXMKTG_USER_INFO_COOKIE_NAME.encode('utf-8'),
        json.dumps(user_info),
        secure=user_info_cookie_is_secure,
        **cookie_settings
    )

    return response
def delete_logged_in_cookies(response):
    """
    Delete cookies indicating that the user is logged in.

    Arguments:
        response (HttpResponse): The response sent to the client.

    Returns:
        HttpResponse
    """
    cookies_to_clear = (
        settings.EDXMKTG_LOGGED_IN_COOKIE_NAME,
        settings.EDXMKTG_USER_INFO_COOKIE_NAME,
    )
    for cookie_name in cookies_to_clear:
        response.delete_cookie(
            cookie_name.encode('utf-8'),
            path='/',
            domain=settings.SESSION_COOKIE_DOMAIN,
        )
    return response
def is_logged_in_cookie_set(request):
    """Check whether the request has logged in cookies set. """
    cookies = request.COOKIES
    return (settings.EDXMKTG_LOGGED_IN_COOKIE_NAME in cookies
            and settings.EDXMKTG_USER_INFO_COOKIE_NAME in cookies)
| agpl-3.0 |
felixfontein/ansible | lib/ansible/modules/package.py | 15 | 2823 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# User-facing module documentation rendered by ansible-doc.
# Fixes two typos: "This modules manages" -> "This module manages",
# "an heterogeneous" -> "a heterogeneous".
DOCUMENTATION = '''
---
module: package
version_added: 2.0
author:
    - Ansible Core Team
short_description: Generic OS package manager
description:
    - This module manages packages on a target without specifying a package manager module (like M(ansible.builtin.yum), M(ansible.builtin.apt), ...).
      It is convenient to use in a heterogeneous environment of machines without having to create a specific task for
      each package manager. `package` calls behind the module for the package manager used by the operating system
      discovered by the module M(ansible.builtin.setup). If `setup` was not yet run, `package` will run it.
    - This module acts as a proxy to the underlying package manager module. While all arguments will be passed to the
      underlying module, not all modules support the same arguments. This documentation only covers the minimum intersection
      of module arguments that all packaging modules support.
    - For Windows targets, use the M(ansible.windows.win_package) module instead.
options:
  name:
    description:
      - Package name, or package specifier with version.
      - Syntax varies with package manager. For example C(name-1.0) or C(name=1.0).
      - Package names also vary with package manager; this module will not "translate" them per distro. For example C(libyaml-dev), C(libyaml-devel).
    required: true
  state:
    description:
      - Whether to install (C(present)), or remove (C(absent)) a package.
      - You can use other states like C(latest) ONLY if they are supported by the underlying package module(s) executed.
    required: true
  use:
    description:
      - The required package manager module to use (`yum`, `apt`, and so on). The default 'auto' will use existing facts or try to autodetect it.
      - You should only use this field if the automatic selection is not working for some reason.
    default: auto
requirements:
    - Whatever is required for the package plugins specific for each system.
notes:
    - While `package` abstracts package managers to ease dealing with multiple distributions, package name often differs for the same software.
'''
EXAMPLES = '''
- name: Install ntpdate
ansible.builtin.package:
name: ntpdate
state: present
# This uses a variable as this changes per distribution.
- name: Remove the apache package
ansible.builtin.package:
name: "{{ apache }}"
state: absent
- name: Install the latest version of Apache and MariaDB
ansible.builtin.package:
name:
- httpd
- mariadb-server
state: latest
'''
| gpl-3.0 |
vighneshbirodkar/scikit-image | skimage/setup.py | 44 | 1484 | import os
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the skimage package.

    Registers every subpackage, then adds each ``tests`` directory found
    directly under a subpackage as package data.
    """
    from numpy.distutils.misc_util import Configuration

    config = Configuration('skimage', parent_package, top_path)

    config.add_subpackage('_shared')
    config.add_subpackage('color')
    config.add_subpackage('data')
    config.add_subpackage('draw')
    config.add_subpackage('exposure')
    config.add_subpackage('feature')
    config.add_subpackage('restoration')
    config.add_subpackage('filters')
    config.add_subpackage('future')
    config.add_subpackage('graph')
    config.add_subpackage('io')
    config.add_subpackage('measure')
    config.add_subpackage('morphology')
    config.add_subpackage('transform')
    config.add_subpackage('util')
    config.add_subpackage('segmentation')
    config.add_subpackage('external')

    # Removed the dead nested ``add_test_directories`` callback: it was
    # defined but never called anywhere in this function (leftover from
    # the removed os.path.walk API).

    # Add test directories
    from os.path import isdir, dirname, join

    def rel_isdir(d):
        # True if *d* (relative to this package) is a directory.
        return isdir(join(curpath, d))

    curpath = join(dirname(__file__), './')
    subdirs = [join(d, 'tests') for d in os.listdir(curpath) if rel_isdir(d)]
    subdirs = [d for d in subdirs if rel_isdir(d)]
    for test_dir in subdirs:
        config.add_data_dir(test_dir)
    return config
if __name__ == "__main__":
    # Allow building this subpackage standalone: python setup.py <command>
    from numpy.distutils.core import setup
    config = configuration(top_path='').todict()
    setup(**config)
| bsd-3-clause |
duducosmos/pgs4a | python-install/lib/python2.7/ftplib.py | 75 | 35728 | """An FTP client class and some helper functions.
Based on RFC 959: File Transfer Protocol (FTP), by J. Postel and J. Reynolds
Example:
>>> from ftplib import FTP
>>> ftp = FTP('ftp.python.org') # connect to host, default port
>>> ftp.login() # default, i.e.: user anonymous, passwd anonymous@
'230 Guest login ok, access restrictions apply.'
>>> ftp.retrlines('LIST') # list directory contents
total 9
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 ..
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc
d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming
drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib
drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub
drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr
-rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg
'226 Transfer complete.'
>>> ftp.quit()
'221 Goodbye.'
>>>
A nice test that reveals some of the network dialogue would be:
python ftplib.py -d localhost -l -p -l
"""
#
# Changes and improvements suggested by Steve Majewski.
# Modified by Jack to work on the mac.
# Modified by Siebren to support docstrings and PASV.
# Modified by Phil Schwartz to add storbinary and storlines callbacks.
# Modified by Giampaolo Rodola' to add TLS support.
#
import os
import sys
# Import SOCKS module if it exists, else standard socket module socket
try:
import SOCKS; socket = SOCKS; del SOCKS # import SOCKS as socket
from socket import getfqdn; socket.getfqdn = getfqdn; del getfqdn
except ImportError:
import socket
from socket import _GLOBAL_DEFAULT_TIMEOUT
__all__ = ["FTP","Netrc"]
# Magic number from <socket.h>
MSG_OOB = 0x1                           # Process data out of band

# The standard FTP server control port
FTP_PORT = 21

# Exception raised when an error or invalid response is received

# Error is the base class for all FTP protocol errors raised here; the
# subclasses map onto the reply-code classes of RFC 959.
class Error(Exception): pass
class error_reply(Error): pass          # unexpected [123]xx reply
class error_temp(Error): pass           # 4xx errors
class error_perm(Error): pass           # 5xx errors
class error_proto(Error): pass          # response does not begin with [1-5]

# All exceptions (hopefully) that may be raised here and that aren't
# (always) programming errors on our side
all_errors = (Error, IOError, EOFError)

# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = '\r\n'
# The class itself
class FTP:
'''An FTP client class.
To create a connection, call the class using these arguments:
host, user, passwd, acct, timeout
The first four arguments are all strings, and have default value ''.
timeout must be numeric and defaults to None if not passed,
meaning that no timeout will be set on any ftp socket(s)
If a timeout is passed, then this is now the default timeout for all ftp
socket operations for this instance.
Then use self.connect() with optional host and port argument.
To download a file, use ftp.retrlines('RETR ' + filename),
or ftp.retrbinary() with slightly different arguments.
To upload a file, use ftp.storlines() or ftp.storbinary(),
which have an open file as argument (see their definitions
below for details).
The download/upload functions first issue appropriate TYPE
and PORT or PASV commands.
'''
    # Class-level defaults; instances override these as needed.
    debugging = 0                # debug level set via set_debuglevel()
    host = ''
    port = FTP_PORT
    sock = None                  # control-connection socket
    file = None                  # buffered file wrapper around sock
    welcome = None               # server greeting, stored by connect()
    passiveserver = 1            # use PASV data connections by default

    # Initialization method (called by class instantiation).
    # Initialize host to localhost, port to standard ftp port
    # Optional arguments are host (for connect()),
    # and user, passwd, acct (for login())
    def __init__(self, host='', user='', passwd='', acct='',
                 timeout=_GLOBAL_DEFAULT_TIMEOUT):
        # Eager connect/login mirrors the documented FTP(host, user, passwd)
        # convenience form; login only happens if a host was given.
        self.timeout = timeout
        if host:
            self.connect(host)
            if user:
                self.login(user, passwd, acct)
    def connect(self, host='', port=0, timeout=-999):
        '''Connect to host.  Arguments are:
        - host: hostname to connect to (string, default previous host)
        - port: port to connect to (integer, default previous port)
        '''
        # -999 is a sentinel meaning "keep the timeout set in __init__".
        if host != '':
            self.host = host
        if port > 0:
            self.port = port
        if timeout != -999:
            self.timeout = timeout
        self.sock = socket.create_connection((self.host, self.port), self.timeout)
        self.af = self.sock.family
        self.file = self.sock.makefile('rb')
        # The server sends a greeting (usually 220) on connect.
        self.welcome = self.getresp()
        return self.welcome

    def getwelcome(self):
        '''Get the welcome message from the server.
        (this is read and squirreled away by connect())'''
        if self.debugging:
            print '*welcome*', self.sanitize(self.welcome)
        return self.welcome

    def set_debuglevel(self, level):
        '''Set the debugging level.
        The required argument level means:
        0: no debugging output (default)
        1: print commands and responses but not body text etc.
        2: also print raw lines read and sent before stripping CR/LF'''
        self.debugging = level
    debug = set_debuglevel

    def set_pasv(self, val):
        '''Use passive or active mode for data transfers.
        With a false argument, use the normal PORT mode,
        With a true argument, use the PASV command.'''
        self.passiveserver = val
    # Internal: "sanitize" a string for printing
    def sanitize(self, s):
        # Mask the argument of PASS commands so passwords never reach
        # debug output.
        if s[:5] == 'pass ' or s[:5] == 'PASS ':
            i = len(s)
            while i > 5 and s[i-1] in '\r\n':
                i = i-1
            s = s[:5] + '*'*(i-5) + s[i:]
        return repr(s)

    # Internal: send one line to the server, appending CRLF
    def putline(self, line):
        line = line + CRLF
        if self.debugging > 1: print '*put*', self.sanitize(line)
        self.sock.sendall(line)

    # Internal: send one command to the server (through putline())
    def putcmd(self, line):
        if self.debugging: print '*cmd*', self.sanitize(line)
        self.putline(line)

    # Internal: return one line from the server, stripping CRLF.
    # Raise EOFError if the connection is closed
    def getline(self):
        line = self.file.readline()
        if self.debugging > 1:
            print '*get*', self.sanitize(line)
        if not line: raise EOFError
        # Accept any of CRLF, CR or LF as the terminator.
        if line[-2:] == CRLF: line = line[:-2]
        elif line[-1:] in CRLF: line = line[:-1]
        return line
    # Internal: get a response from the server, which may possibly
    # consist of multiple lines.  Return a single string with no
    # trailing CRLF.  If the response consists of multiple lines,
    # these are separated by '\n' characters in the string
    def getmultiline(self):
        line = self.getline()
        if line[3:4] == '-':
            # Multi-line reply (RFC 959): keep reading until a line repeats
            # the same 3-digit code without the continuation dash.
            code = line[:3]
            while 1:
                nextline = self.getline()
                line = line + ('\n' + nextline)
                if nextline[:3] == code and \
                   nextline[3:4] != '-':
                    break
        return line

    # Internal: get a response from the server.
    # Raise various errors if the response indicates an error
    def getresp(self):
        resp = self.getmultiline()
        if self.debugging: print '*resp*', self.sanitize(resp)
        self.lastresp = resp[:3]
        # First digit classifies the reply: 1-3 OK, 4 transient error,
        # 5 permanent error, anything else is a protocol violation.
        c = resp[:1]
        if c in ('1', '2', '3'):
            return resp
        if c == '4':
            raise error_temp, resp
        if c == '5':
            raise error_perm, resp
        raise error_proto, resp

    def voidresp(self):
        """Expect a response beginning with '2'."""
        resp = self.getresp()
        if resp[:1] != '2':
            raise error_reply, resp
        return resp
    def abort(self):
        '''Abort a file transfer.  Uses out-of-band data.
        This does not follow the procedure from the RFC to send Telnet
        IP and Synch; that doesn't seem to work with the servers I've
        tried.  Instead, just send the ABOR command as OOB data.'''
        line = 'ABOR' + CRLF
        if self.debugging > 1: print '*put urgent*', self.sanitize(line)
        self.sock.sendall(line, MSG_OOB)
        resp = self.getmultiline()
        # 426 = transfer aborted, 225/226 = data connection closed.
        if resp[:3] not in ('426', '225', '226'):
            raise error_proto, resp

    def sendcmd(self, cmd):
        '''Send a command and return the response.'''
        self.putcmd(cmd)
        return self.getresp()

    def voidcmd(self, cmd):
        """Send a command and expect a response beginning with '2'."""
        self.putcmd(cmd)
        return self.voidresp()

    def sendport(self, host, port):
        '''Send a PORT command with the current host and the given
        port number.
        '''
        # PORT arguments are six comma-separated decimal bytes:
        # four for the IPv4 address, two for the port (high, low).
        hbytes = host.split('.')
        pbytes = [repr(port//256), repr(port%256)]
        bytes = hbytes + pbytes
        cmd = 'PORT ' + ','.join(bytes)
        return self.voidcmd(cmd)

    def sendeprt(self, host, port):
        '''Send a EPRT command with the current host and the given port number.'''
        # EPRT (RFC 2428) protocol numbers: 1 = IPv4, 2 = IPv6.
        af = 0
        if self.af == socket.AF_INET:
            af = 1
        if self.af == socket.AF_INET6:
            af = 2
        if af == 0:
            raise error_proto, 'unsupported address family'
        fields = ['', repr(af), host, repr(port), '']
        cmd = 'EPRT ' + '|'.join(fields)
        return self.voidcmd(cmd)
    def makeport(self):
        '''Create a new socket and send a PORT command for it.'''
        msg = "getaddrinfo returns an empty list"
        sock = None
        # Bind a listening socket on an ephemeral port, trying each
        # address returned for our address family until one works.
        for res in socket.getaddrinfo(None, 0, self.af, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
            af, socktype, proto, canonname, sa = res
            try:
                sock = socket.socket(af, socktype, proto)
                sock.bind(sa)
            except socket.error, msg:
                if sock:
                    sock.close()
                sock = None
                continue
            break
        if not sock:
            raise socket.error, msg
        sock.listen(1)
        port = sock.getsockname()[1] # Get proper port
        host = self.sock.getsockname()[0] # Get proper host
        # Advertise the listening address: PORT for IPv4, EPRT otherwise.
        if self.af == socket.AF_INET:
            resp = self.sendport(host, port)
        else:
            resp = self.sendeprt(host, port)
        if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:
            sock.settimeout(self.timeout)
        return sock

    def makepasv(self):
        # Ask the server for a passive-mode address: PASV (IPv4, RFC 959)
        # or EPSV (any family, RFC 2428).
        if self.af == socket.AF_INET:
            host, port = parse227(self.sendcmd('PASV'))
        else:
            host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername())
        return host, port
    def ntransfercmd(self, cmd, rest=None):
        """Initiate a transfer over the data connection.

        If the transfer is active, send a port command and the
        transfer command, and accept the connection.  If the server is
        passive, send a pasv command, connect to it, and start the
        transfer command.  Either way, return the socket for the
        connection and the expected size of the transfer.  The
        expected size may be None if it could not be determined.

        Optional `rest' argument can be a string that is sent as the
        argument to a REST command.  This is essentially a server
        marker used to tell the server to skip over any data up to the
        given marker.
        """
        size = None
        if self.passiveserver:
            host, port = self.makepasv()
            conn = socket.create_connection((host, port), self.timeout)
            if rest is not None:
                self.sendcmd("REST %s" % rest)
            resp = self.sendcmd(cmd)
            # Some servers apparently send a 200 reply to
            # a LIST or STOR command, before the 150 reply
            # (and way before the 226 reply). This seems to
            # be in violation of the protocol (which only allows
            # 1xx or error messages for LIST), so we just discard
            # this response.
            if resp[0] == '2':
                resp = self.getresp()
            if resp[0] != '1':
                raise error_reply, resp
        else:
            # Active mode: we listen, the server connects to us.
            sock = self.makeport()
            if rest is not None:
                self.sendcmd("REST %s" % rest)
            resp = self.sendcmd(cmd)
            # See above.
            if resp[0] == '2':
                resp = self.getresp()
            if resp[0] != '1':
                raise error_reply, resp
            conn, sockaddr = sock.accept()
            if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                conn.settimeout(self.timeout)
        if resp[:3] == '150':
            # this is conditional in case we received a 125
            size = parse150(resp)
        return conn, size

    def transfercmd(self, cmd, rest=None):
        """Like ntransfercmd() but returns only the socket."""
        return self.ntransfercmd(cmd, rest)[0]
    def login(self, user = '', passwd = '', acct = ''):
        '''Login, default anonymous.'''
        if not user: user = 'anonymous'
        if not passwd: passwd = ''
        if not acct: acct = ''
        if user == 'anonymous' and passwd in ('', '-'):
            # If there is no anonymous ftp password specified
            # then we'll just use anonymous@
            # We don't send any other thing because:
            # - We want to remain anonymous
            # - We want to stop SPAM
            # - We don't want to let ftp sites to discriminate by the user,
            #   host or country.
            passwd = passwd + 'anonymous@'
        # USER -> (331) PASS -> (332) ACCT, per RFC 959; a leading '3'
        # means the server wants the next credential.
        resp = self.sendcmd('USER ' + user)
        if resp[0] == '3': resp = self.sendcmd('PASS ' + passwd)
        if resp[0] == '3': resp = self.sendcmd('ACCT ' + acct)
        if resp[0] != '2':
            raise error_reply, resp
        return resp

    def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
        """Retrieve data in binary mode.  A new port is created for you.

        Args:
          cmd: A RETR command.
          callback: A single parameter callable to be called on each
                    block of data read.
          blocksize: The maximum number of bytes to read from the
                     socket at one time.  [default: 8192]
          rest: Passed to transfercmd().  [default: None]

        Returns:
          The response code.
        """
        self.voidcmd('TYPE I')
        conn = self.transfercmd(cmd, rest)
        while 1:
            data = conn.recv(blocksize)
            if not data:
                break
            callback(data)
        conn.close()
        return self.voidresp()
def retrlines(self, cmd, callback = None):
"""Retrieve data in line mode. A new port is created for you.
Args:
cmd: A RETR, LIST, NLST, or MLSD command.
callback: An optional single parameter callable that is called
for each line with the trailing CRLF stripped.
[default: print_line()]
Returns:
The response code.
"""
if callback is None: callback = print_line
resp = self.sendcmd('TYPE A')
conn = self.transfercmd(cmd)
fp = conn.makefile('rb')
while 1:
line = fp.readline()
if self.debugging > 2: print '*retr*', repr(line)
if not line:
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] == '\n':
line = line[:-1]
callback(line)
fp.close()
conn.close()
return self.voidresp()
def storbinary(self, cmd, fp, blocksize=8192, callback=None, rest=None):
    """Store a file in binary mode.  A new port is created for you.

    Args:
      cmd: A STOR command.
      fp: A file-like object with a read(num_bytes) method.
      blocksize: The maximum data size to read from fp and send over
                 the connection at once.  [default: 8192]
      callback: An optional single parameter callable that is called on
                each block of data after it is sent.  [default: None]
      rest: Passed to transfercmd().  [default: None]

    Returns:
      The response code.
    """
    self.voidcmd('TYPE I')
    conn = self.transfercmd(cmd, rest)
    while 1:
        buf = fp.read(blocksize)
        if not buf: break
        # sendall() guarantees the whole block is written before returning.
        conn.sendall(buf)
        if callback: callback(buf)
    conn.close()
    return self.voidresp()
def storlines(self, cmd, fp, callback=None):
    """Store a file in line mode.  A new port is created for you.

    Args:
      cmd: A STOR command.
      fp: A file-like object with a readline() method.
      callback: An optional single parameter callable that is called on
                each line after it is sent.  [default: None]

    Returns:
      The response code.
    """
    self.voidcmd('TYPE A')
    conn = self.transfercmd(cmd)
    while 1:
        buf = fp.readline()
        if not buf: break
        # Normalise the terminator to CRLF as required by the protocol:
        # strip a lone CR or LF, then append a proper CRLF pair.
        if buf[-2:] != CRLF:
            if buf[-1] in CRLF: buf = buf[:-1]
            buf = buf + CRLF
        conn.sendall(buf)
        if callback: callback(buf)
    conn.close()
    return self.voidresp()
def acct(self, password):
    '''Send new account name.'''
    cmd = 'ACCT ' + password
    return self.voidcmd(cmd)
def nlst(self, *args):
    '''Return a list of files in a given directory (default the current).'''
    cmd = 'NLST'
    # Each positional argument is appended to the command (usually a path).
    for arg in args:
        cmd = cmd + (' ' + arg)
    files = []
    # Collect the server's line-by-line listing into a Python list.
    self.retrlines(cmd, files.append)
    return files
def dir(self, *args):
    '''List a directory in long form.
    By default list current directory to stdout.
    Optional last argument is callback function; all
    non-empty arguments before it are concatenated to the
    LIST command.  (This *should* only be used for a pathname.)'''
    cmd = 'LIST'
    func = None
    # A trailing non-string argument is taken as the per-line callback.
    if args[-1:] and type(args[-1]) != type(''):
        args, func = args[:-1], args[-1]
    for arg in args:
        if arg:
            cmd = cmd + (' ' + arg)
    self.retrlines(cmd, func)
def rename(self, fromname, toname):
    '''Rename a file.'''
    # RNFR must be answered with a 3xx reply before RNTO may follow.
    resp = self.sendcmd('RNFR ' + fromname)
    if resp[0] != '3':
        raise error_reply, resp
    return self.voidcmd('RNTO ' + toname)
def delete(self, filename):
    '''Delete a file.'''
    resp = self.sendcmd('DELE ' + filename)
    # 250 is the standard success code; some servers reply 200 instead.
    if resp[:3] in ('250', '200'):
        return resp
    else:
        raise error_reply, resp
def cwd(self, dirname):
    '''Change to a directory.'''
    if dirname == '..':
        # Prefer the dedicated CDUP command; fall back to 'CWD ..' only
        # when the server rejects CDUP as unimplemented (500).
        try:
            return self.voidcmd('CDUP')
        except error_perm, msg:
            if msg.args[0][:3] != '500':
                raise
    elif dirname == '':
        dirname = '.'  # does nothing, but could return error
    cmd = 'CWD ' + dirname
    return self.voidcmd(cmd)
def size(self, filename):
    '''Retrieve the size of a file.'''
    # The SIZE command is defined in RFC-3659
    resp = self.sendcmd('SIZE ' + filename)
    if resp[:3] == '213':
        s = resp[3:].strip()
        # Fall back to an arbitrary-precision long when the size
        # overflows a platform int (Python 2 only distinction).
        try:
            return int(s)
        except (OverflowError, ValueError):
            return long(s)
def mkd(self, dirname):
    '''Make a directory, return its full pathname.'''
    resp = self.sendcmd('MKD ' + dirname)
    # The 257 reply carries the created directory's quoted pathname.
    return parse257(resp)
def rmd(self, dirname):
    '''Remove a directory.'''
    return self.voidcmd('RMD ' + dirname)
def pwd(self):
    '''Return current working directory.'''
    resp = self.sendcmd('PWD')
    # The 257 reply carries the quoted current directory name.
    return parse257(resp)
def quit(self):
    '''Quit, and close the connection.'''
    # Polite shutdown: send QUIT first, then tear down the socket.
    resp = self.voidcmd('QUIT')
    self.close()
    return resp
def close(self):
    '''Close the connection without assuming anything about it.'''
    # Safe to call more than once: both handles are reset to None so a
    # second call is a no-op.
    if self.file:
        self.file.close()
        self.sock.close()
        self.file = self.sock = None
# TLS support is optional: when the ssl module is unavailable the plain FTP
# class above still works and FTP_TLS is simply not defined.
try:
    import ssl
except ImportError:
    pass
else:
    class FTP_TLS(FTP):
        '''A FTP subclass which adds TLS support to FTP as described
        in RFC-4217.

        Connect as usual to port 21 implicitly securing the FTP control
        connection before authenticating.

        Securing the data connection requires user to explicitly ask
        for it by calling prot_p() method.

        Usage example:
        >>> from ftplib import FTP_TLS
        >>> ftps = FTP_TLS('ftp.python.org')
        >>> ftps.login()  # login anonymously previously securing control channel
        '230 Guest login ok, access restrictions apply.'
        >>> ftps.prot_p()  # switch to secure data connection
        '200 Protection level set to P'
        >>> ftps.retrlines('LIST')  # list directory content securely
        total 9
        drwxr-xr-x   8 root     wheel        1024 Jan  3  1994 .
        drwxr-xr-x   8 root     wheel        1024 Jan  3  1994 ..
        drwxr-xr-x   2 root     wheel        1024 Jan  3  1994 bin
        drwxr-xr-x   2 root     wheel        1024 Jan  3  1994 etc
        d-wxrwxr-x   2 ftp      wheel        1024 Sep  5 13:43 incoming
        drwxr-xr-x   2 root     wheel        1024 Nov 17  1993 lib
        drwxr-xr-x   6 1094     wheel        1024 Sep 13 19:07 pub
        drwxr-xr-x   3 root     wheel        1024 Jan  3  1994 usr
        -rw-r--r--   1 root     root          312 Aug  1  1994 welcome.msg
        '226 Transfer complete.'
        >>> ftps.quit()
        '221 Goodbye.'
        >>>
        '''
        # Protocol version used for both the control and data channels.
        ssl_version = ssl.PROTOCOL_TLSv1

        def __init__(self, host='', user='', passwd='', acct='', keyfile=None,
                     certfile=None, timeout=_GLOBAL_DEFAULT_TIMEOUT):
            self.keyfile = keyfile
            self.certfile = certfile
            # Data-channel protection stays off until prot_p() is called.
            self._prot_p = False
            FTP.__init__(self, host, user, passwd, acct, timeout)

        def login(self, user='', passwd='', acct='', secure=True):
            # Upgrade the control channel to TLS before sending credentials,
            # unless it has already been secured.
            if secure and not isinstance(self.sock, ssl.SSLSocket):
                self.auth()
            return FTP.login(self, user, passwd, acct)

        def auth(self):
            '''Set up secure control connection by using TLS/SSL.'''
            if isinstance(self.sock, ssl.SSLSocket):
                raise ValueError("Already using TLS")
            # RFC 4217: 'AUTH TLS' for TLS, 'AUTH SSL' for the older variants.
            if self.ssl_version == ssl.PROTOCOL_TLSv1:
                resp = self.voidcmd('AUTH TLS')
            else:
                resp = self.voidcmd('AUTH SSL')
            self.sock = ssl.wrap_socket(self.sock, self.keyfile, self.certfile,
                                        ssl_version=self.ssl_version)
            # Rebuild the buffered reader over the wrapped socket.
            self.file = self.sock.makefile(mode='rb')
            return resp

        def prot_p(self):
            '''Set up secure data connection.'''
            # PROT defines whether or not the data channel is to be protected.
            # Though RFC-2228 defines four possible protection levels,
            # RFC-4217 only recommends two, Clear and Private.
            # Clear (PROT C) means that no security is to be used on the
            # data-channel, Private (PROT P) means that the data-channel
            # should be protected by TLS.
            # PBSZ command MUST still be issued, but must have a parameter of
            # '0' to indicate that no buffering is taking place and the data
            # connection should not be encapsulated.
            self.voidcmd('PBSZ 0')
            resp = self.voidcmd('PROT P')
            self._prot_p = True
            return resp

        def prot_c(self):
            '''Set up clear text data connection.'''
            resp = self.voidcmd('PROT C')
            self._prot_p = False
            return resp

        # --- Overridden FTP methods

        def ntransfercmd(self, cmd, rest=None):
            # Same as the base class, but wraps the data socket in TLS when
            # PROT P is active.
            conn, size = FTP.ntransfercmd(self, cmd, rest)
            if self._prot_p:
                conn = ssl.wrap_socket(conn, self.keyfile, self.certfile,
                                       ssl_version=self.ssl_version)
            return conn, size

        def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
            self.voidcmd('TYPE I')
            conn = self.transfercmd(cmd, rest)
            try:
                while 1:
                    data = conn.recv(blocksize)
                    if not data:
                        break
                    callback(data)
                # shutdown ssl layer
                if isinstance(conn, ssl.SSLSocket):
                    conn.unwrap()
            finally:
                conn.close()
            return self.voidresp()

        def retrlines(self, cmd, callback = None):
            if callback is None: callback = print_line
            resp = self.sendcmd('TYPE A')
            conn = self.transfercmd(cmd)
            fp = conn.makefile('rb')
            try:
                while 1:
                    line = fp.readline()
                    if self.debugging > 2: print '*retr*', repr(line)
                    if not line:
                        break
                    if line[-2:] == CRLF:
                        line = line[:-2]
                    elif line[-1:] == '\n':
                        line = line[:-1]
                    callback(line)
                # shutdown ssl layer
                if isinstance(conn, ssl.SSLSocket):
                    conn.unwrap()
            finally:
                fp.close()
                conn.close()
            return self.voidresp()

        def storbinary(self, cmd, fp, blocksize=8192, callback=None, rest=None):
            self.voidcmd('TYPE I')
            conn = self.transfercmd(cmd, rest)
            try:
                while 1:
                    buf = fp.read(blocksize)
                    if not buf: break
                    conn.sendall(buf)
                    if callback: callback(buf)
                # shutdown ssl layer
                if isinstance(conn, ssl.SSLSocket):
                    conn.unwrap()
            finally:
                conn.close()
            return self.voidresp()

        def storlines(self, cmd, fp, callback=None):
            self.voidcmd('TYPE A')
            conn = self.transfercmd(cmd)
            try:
                while 1:
                    buf = fp.readline()
                    if not buf: break
                    if buf[-2:] != CRLF:
                        if buf[-1] in CRLF: buf = buf[:-1]
                        buf = buf + CRLF
                    conn.sendall(buf)
                    if callback: callback(buf)
                # shutdown ssl layer
                if isinstance(conn, ssl.SSLSocket):
                    conn.unwrap()
            finally:
                conn.close()
            return self.voidresp()

    # Expose the TLS class and extend the error tuple with SSL errors.
    __all__.append('FTP_TLS')
    all_errors = (Error, IOError, EOFError, ssl.SSLError)
# Compiled lazily on first use by parse150().
_150_re = None


def parse150(resp):
    '''Parse the '150' response for a RETR request.
    Returns the expected transfer size or None; size is not guaranteed to
    be present in the 150 message.
    '''
    if resp[:3] != '150':
        raise error_reply, resp
    global _150_re
    if _150_re is None:
        import re
        _150_re = re.compile("150 .* \((\d+) bytes\)", re.IGNORECASE)
    m = _150_re.match(resp)
    if not m:
        return None
    s = m.group(1)
    # Promote to long only if the value overflows a platform int.
    try:
        return int(s)
    except (OverflowError, ValueError):
        return long(s)
# Compiled lazily on first use by parse227().
_227_re = None


def parse227(resp):
    '''Parse the '227' response for a PASV request.
    Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)'
    Return ('host.addr.as.numbers', port#) tuple.'''
    if resp[:3] != '227':
        raise error_reply, resp
    global _227_re
    if _227_re is None:
        import re
        _227_re = re.compile(r'(\d+),(\d+),(\d+),(\d+),(\d+),(\d+)')
    m = _227_re.search(resp)
    if not m:
        raise error_proto, resp
    numbers = m.groups()
    # First four numbers form the IPv4 address; last two encode the port
    # as (high_byte, low_byte).
    host = '.'.join(numbers[:4])
    port = (int(numbers[4]) << 8) + int(numbers[5])
    return host, port
def parse229(resp, peer):
    '''Parse the '229' response for a EPSV request.
    Raises error_proto if it does not contain '(|||port|)'
    Return ('host.addr.as.numbers', port#) tuple.'''
    if resp[:3] != '229':
        raise error_reply, resp
    left = resp.find('(')
    if left < 0: raise error_proto, resp
    right = resp.find(')', left + 1)
    if right < 0:
        raise error_proto, resp  # should contain '(|||port|)'
    # The delimiter character must be identical at both ends.
    if resp[left + 1] != resp[right - 1]:
        raise error_proto, resp
    parts = resp[left + 1:right].split(resp[left+1])
    if len(parts) != 5:
        raise error_proto, resp
    # EPSV (RFC 2428) replies never carry an address: reuse the peer's.
    host = peer[0]
    port = int(parts[3])
    return host, port
def parse257(resp):
    '''Parse the '257' response for a MKD or PWD request.
    This is a response to a MKD or PWD request: a directory name.
    Returns the directoryname in the 257 reply.'''
    if resp[:3] != '257':
        raise error_reply, resp
    if resp[3:5] != ' "':
        return ''  # Not compliant to RFC 959, but UNIX ftpd does this
    dirname = ''
    i = 5
    n = len(resp)
    # Scan the quoted name; a doubled '"' inside the quotes is an escaped
    # literal quote, a single '"' terminates the name (RFC 959).
    while i < n:
        c = resp[i]
        i = i+1
        if c == '"':
            if i >= n or resp[i] != '"':
                break
            i = i+1
        dirname = dirname + c
    return dirname
def print_line(line):
    '''Default retrlines callback to print a line.'''
    print line
def ftpcp(source, sourcename, target, targetname = '', type = 'I'):
    '''Copy file from one FTP-instance to another.'''
    if not targetname: targetname = sourcename
    type = 'TYPE ' + type
    # Put both ends into the same transfer mode.
    source.voidcmd(type)
    target.voidcmd(type)
    # Make the source passive and point the target's data port at it, so
    # the two servers transfer directly (FXP-style server-to-server copy).
    sourcehost, sourceport = parse227(source.sendcmd('PASV'))
    target.sendport(sourcehost, sourceport)
    # RFC 959: the user must "listen" [...] BEFORE sending the
    # transfer request.
    # So: STOR before RETR, because here the target is a "user".
    treply = target.sendcmd('STOR ' + targetname)
    if treply[:3] not in ('125', '150'): raise error_proto  # RFC 959
    sreply = source.sendcmd('RETR ' + sourcename)
    if sreply[:3] not in ('125', '150'): raise error_proto  # RFC 959
    # Drain the end-of-transfer replies on both control connections.
    source.voidresp()
    target.voidresp()
class Netrc:
    """Class to parse & provide access to 'netrc' format files.

    See the netrc(4) man page for information on the file format.

    WARNING: This class is obsolete -- use module netrc instead.
    """
    # Credentials from a 'default' entry, used as fall-backs per field.
    __defuser = None
    __defpasswd = None
    __defacct = None

    def __init__(self, filename=None):
        if filename is None:
            if "HOME" in os.environ:
                filename = os.path.join(os.environ["HOME"],
                                        ".netrc")
            else:
                raise IOError, \
                      "specify file to load or set $HOME"
        self.__hosts = {}
        self.__macros = {}
        fp = open(filename, "r")
        in_macro = 0
        while 1:
            line = fp.readline()
            if not line: break
            # A macro body ('macdef') runs until the first blank line.
            if in_macro and line.strip():
                macro_lines.append(line)
                continue
            elif in_macro:
                self.__macros[macro_name] = tuple(macro_lines)
                in_macro = 0
            # Tokenise the entry line; keywords are followed by their value.
            words = line.split()
            host = user = passwd = acct = None
            default = 0
            i = 0
            while i < len(words):
                w1 = words[i]
                if i+1 < len(words):
                    w2 = words[i + 1]
                else:
                    w2 = None
                if w1 == 'default':
                    default = 1
                elif w1 == 'machine' and w2:
                    host = w2.lower()
                    i = i + 1
                elif w1 == 'login' and w2:
                    user = w2
                    i = i + 1
                elif w1 == 'password' and w2:
                    passwd = w2
                    i = i + 1
                elif w1 == 'account' and w2:
                    acct = w2
                    i = i + 1
                elif w1 == 'macdef' and w2:
                    macro_name = w2
                    macro_lines = []
                    in_macro = 1
                    break
                i = i + 1
            if default:
                # Later defaults override earlier ones field by field.
                self.__defuser = user or self.__defuser
                self.__defpasswd = passwd or self.__defpasswd
                self.__defacct = acct or self.__defacct
            if host:
                # Merge with any earlier entry for the same host.
                if host in self.__hosts:
                    ouser, opasswd, oacct = \
                           self.__hosts[host]
                    user = user or ouser
                    passwd = passwd or opasswd
                    acct = acct or oacct
                self.__hosts[host] = user, passwd, acct
        fp.close()

    def get_hosts(self):
        """Return a list of hosts mentioned in the .netrc file."""
        return self.__hosts.keys()

    def get_account(self, host):
        """Returns login information for the named host.

        The return value is a triple containing userid,
        password, and the accounting field.
        """
        host = host.lower()
        user = passwd = acct = None
        if host in self.__hosts:
            user, passwd, acct = self.__hosts[host]
        # Missing fields fall back to the 'default' entry values.
        user = user or self.__defuser
        passwd = passwd or self.__defpasswd
        acct = acct or self.__defacct
        return user, passwd, acct

    def get_macros(self):
        """Return a list of all defined macro names."""
        return self.__macros.keys()

    def get_macro(self, macro):
        """Return a sequence of lines which define a named macro."""
        return self.__macros[macro]
def test():
    '''Test program.
    Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ...

    -d dir
    -l list
    -p password
    '''
    if len(sys.argv) < 2:
        print test.__doc__
        sys.exit(0)

    debugging = 0
    rcfile = None
    # Each leading '-d' raises the debug level by one.
    while sys.argv[1] == '-d':
        debugging = debugging+1
        del sys.argv[1]
    if sys.argv[1][:2] == '-r':
        # get name of alternate ~/.netrc file:
        rcfile = sys.argv[1][2:]
        del sys.argv[1]
    host = sys.argv[1]
    ftp = FTP(host)
    ftp.set_debuglevel(debugging)
    userid = passwd = acct = ''
    # Try to pull credentials from (an alternate) .netrc; fall back to
    # anonymous login when the file is missing or has no entry for host.
    try:
        netrc = Netrc(rcfile)
    except IOError:
        if rcfile is not None:
            sys.stderr.write("Could not open account file"
                             " -- using anonymous login.")
    else:
        try:
            userid, passwd, acct = netrc.get_account(host)
        except KeyError:
            # no account for host
            sys.stderr.write(
                    "No account -- using anonymous login.")
    ftp.login(userid, passwd, acct)
    # Remaining arguments are either option flags or files to retrieve.
    for file in sys.argv[2:]:
        if file[:2] == '-l':
            ftp.dir(file[2:])
        elif file[:2] == '-d':
            cmd = 'CWD'
            if file[2:]: cmd = cmd + ' ' + file[2:]
            resp = ftp.sendcmd(cmd)
        elif file == '-p':
            # Toggle between passive and active transfer mode.
            ftp.set_pasv(not ftp.passiveserver)
        else:
            ftp.retrbinary('RETR ' + file, \
                           sys.stdout.write, 1024)
    ftp.quit()


if __name__ == '__main__':
    test()
| lgpl-2.1 |
xiahei/Daily_scripts | Baidu/baidu_count_login.py | 2 | 4106 | #!/usr/bin/env python
# coding:utf-8
try:
import cookielib
except:
import http.cookiejar as cookielib
import re
import requests
from prettytable import PrettyTable
from bs4 import BeautifulSoup
def get_cookies():
    """Fetch Baidu Tieba cookies and persist them to 'cookies.txt'.

    Returns the populated LWPCookieJar.
    """
    headers = {
        "User-Agent": (
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36"
            "(KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36"
        )
    }
    save_cookies_file = 'cookies.txt'
    jar = cookielib.LWPCookieJar(save_cookies_file)
    sess = requests.session()
    sess.headers = headers
    sess.cookies = jar
    # Visiting the site fills the jar, which is then written to disk.
    sess.get('http://tieba.baidu.com/')
    jar.save(ignore_expires=True, ignore_discard=True)
    return jar
class Baidu(object):
    """Baidu Tieba (forum) auto sign-in client."""

    # Sign-in endpoint; the forum name (kw) is appended per request.
    # NOTE(review): tbs/fid are hard-coded session-like tokens -- presumably
    # they expire; confirm against the live service.
    sign_url = (
        "http://tieba.baidu.com/mo/m/sign?"
        "tbs=79f03dacf896e9fc1466052875&fid=552164&kw="
    )  # sign-in url

    def __init__(self, cookies):
        # All requests reuse one session carrying the raw cookie header.
        self.sess = requests.Session()
        self.sess.headers = {"Cookie": cookies}

    def get_token(self):
        """Fetch the login token parameter from the passport API."""
        url_to_token = 'https://passport.baidu.com/v2/api/?getapi&tpl=tb&apiver=v3'
        response = self.sess.get(url_to_token)
        json = response.text
        token = re.findall('token\" : "(\w+)\",', json)[0]
        return token

    def login(self, token, usrname, pswd, cookie):
        """Log in and return the authenticated session (exits on failure)."""
        form_data = {
            "token": token,
            "tpl": 'tb',
            "loginmerge": "true",  # required parameter -- login fails without it
            "username": usrname,
            "password": pswd
        }
        login_url = 'https://passport.baidu.com/v2/api/?login'
        sess = requests.session()
        sess.post(login_url, data=form_data, cookies=cookie)
        # 'null' user info means the credentials were rejected.
        usr_info = sess.get('http://tieba.baidu.com/f/user/json_userinfo').text
        if usr_info == 'null':
            print('登录失败!')
            exit(0)
        else:
            print('登录成功!')
        return sess

    def sign_single_ba(self, kw):
        """Sign in to a single forum `kw` and return the status text."""
        url = self.sign_url + kw
        html = self.sess.get(url, timeout=30).text
        soup = BeautifulSoup(html, 'html.parser')
        status = soup.select('body > div > span')[0].text
        return status

    def get_info(self, sess):
        """Return [forum names, levels, experience] for the followed forums."""
        myFavor = 'http://tieba.baidu.com/mo/m?tn=bdFBW&tab=favorite'
        html = self.sess.get(myFavor).text
        soup = BeautifulSoup(html, 'html.parser')
        allLabel = soup.find_all('td')
        # Table cells repeat in groups of three: name, level, experience.
        kws = [item.text.split('.')[-1] for
               item in allLabel[::3]]
        levels = [item.text for item in allLabel[1::3]]
        exercises = [item.text for item in allLabel[2::3]]
        return [kws, levels, exercises]

    def sign_all_ba(self):
        """Sign in to every followed forum and print a summary table."""
        table = PrettyTable([u'贴吧', u'签到状态'])
        table.padding_width = 2
        kws = self.get_info(self.sess)[0]
        for index, kw in enumerate(kws):
            try:
                status = self.sign_single_ba(kw)
            except IndexError:
                # Status <span> missing from the response page.
                status = u'签到异常.'
            print(u'{0} {1}'.format(kw, status))
            table.add_row([kw, status])
        # Re-fetch after signing so levels/experience reflect the new state.
        temp = self.get_info(self.sess)
        levels = temp[1]
        exercises = temp[2]
        table.add_column(u'经验', exercises)
        table.add_column(u'等级', levels)
        print(table)
        print(u'共{0}个吧'.format(len(levels)))
def start(usrname, pswd):
    """Entry point: authenticate via the saved 'cookies' file and sign in.

    The username/password login path is disabled (kept commented out), so
    the arguments are currently unused placeholders.
    """
    # cookie = get_cookies()
    cookie = open("cookies", 'r').read()  # NOTE(review): handle never closed
    tieba = Baidu(cookie)
    # token = tieba.get_token()
    # res = tieba.login(token, usrname, pswd, cookie)
    tieba.sign_all_ba()


if __name__ == '__main__':
    # Interactive credential prompts are disabled; cookie-based auth is
    # used, so placeholder values are passed through.
    # try:
    #     usrname = raw_input('手机/邮箱/用户名: ')
    #     pswd = raw_input('密码: ')
    # except:
    #     usrname = input('手机/邮箱/用户名: ')
    #     pswd = input('密码: ')
    start(1, 0)
| mit |
aljscott/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/thirdparty/ordered_dict.py | 131 | 2984 | # Copyright (c) 2009 Raymond Hettinger.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This code is obtained from http://code.activestate.com/recipes/576669/
from collections import MutableMapping
class OrderedDict(dict, MutableMapping):
    """Dictionary that remembers insertion order (pre-2.7 backport recipe).

    The inherited dict gives O(1) lookup; a parallel ``_keys`` list records
    insertion order for iteration, ``popitem`` and ``repr``.
    """

    # Methods with direct access to underlying attributes

    def __init__(self, *args, **kwds):
        """Initialise like dict(): at most one positional mapping/iterable."""
        if len(args) > 1:
            # Bug fix: the message is now actually %-formatted (the original
            # passed len(args) as a second exception argument, so '%d' was
            # never substituted) and reads "at most 1" like dict's own error.
            raise TypeError('expected at most 1 argument, got %d' % len(args))
        # Guard so re-calling __init__ (e.g. via __reduce__) keeps order state.
        if not hasattr(self, '_keys'):
            self._keys = []
        self.update(*args, **kwds)

    def clear(self):
        """Remove all items, resetting insertion order."""
        del self._keys[:]
        dict.clear(self)

    def __set_item__ignore(self):  # pragma: no cover - placeholder removed below
        pass

    def __setitem__(self, key, value):
        # Only record the key the first time it is inserted; overwriting an
        # existing key keeps its original position.
        if key not in self:
            self._keys.append(key)
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._keys.remove(key)

    def __iter__(self):
        return iter(self._keys)

    def __reversed__(self):
        return reversed(self._keys)

    def popitem(self):
        """Remove and return the most recently inserted (key, value) pair."""
        if not self:
            raise KeyError
        key = self._keys.pop()
        value = dict.pop(self, key)
        return key, value

    def __reduce__(self):
        # Pickle as (class, items) so order is rebuilt through __init__.
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        inst_dict.pop('_keys', None)
        return (self.__class__, (items,), inst_dict)

    # Methods with indirect access via the above methods

    setdefault = MutableMapping.setdefault
    update = MutableMapping.update
    pop = MutableMapping.pop
    keys = MutableMapping.keys
    values = MutableMapping.values
    items = MutableMapping.items

    def __repr__(self):
        pairs = ', '.join(map('%r: %r'.__mod__, self.items()))
        return '%s({%s})' % (self.__class__.__name__, pairs)

    def copy(self):
        """Return a shallow copy preserving order."""
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        """Create an ordered dict with keys from *iterable*, all set to *value*."""
        d = cls()
        for key in iterable:
            d[key] = value
        return d
| bsd-3-clause |
richardcs/ansible | lib/ansible/modules/network/avi/avi_systemconfiguration.py | 31 | 6594 | #!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Module metadata consumed by Ansible's doc tooling: community-supported,
# still in preview status.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_systemconfiguration
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of SystemConfiguration Avi RESTful Object
description:
- This module is used to configure SystemConfiguration object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
admin_auth_configuration:
description:
- Adminauthconfiguration settings for systemconfiguration.
default_license_tier:
description:
- Specifies the default license tier which would be used by new clouds.
- Enum options - ENTERPRISE_16, ENTERPRISE_18.
- Field introduced in 17.2.5.
- Default value when not specified in API or module is interpreted by Avi Controller as ENTERPRISE_18.
version_added: "2.5"
dns_configuration:
description:
- Dnsconfiguration settings for systemconfiguration.
dns_virtualservice_refs:
description:
- Dns virtualservices hosting fqdn records for applications across avi vantage.
- If no virtualservices are provided, avi vantage will provide dns services for configured applications.
- Switching back to avi vantage from dns virtualservices is not allowed.
- It is a reference to an object of type virtualservice.
docker_mode:
description:
- Boolean flag to set docker_mode.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
email_configuration:
description:
- Emailconfiguration settings for systemconfiguration.
global_tenant_config:
description:
- Tenantconfiguration settings for systemconfiguration.
linux_configuration:
description:
- Linuxconfiguration settings for systemconfiguration.
mgmt_ip_access_control:
description:
- Configure ip access control for controller to restrict open access.
ntp_configuration:
description:
- Ntpconfiguration settings for systemconfiguration.
portal_configuration:
description:
- Portalconfiguration settings for systemconfiguration.
proxy_configuration:
description:
- Proxyconfiguration settings for systemconfiguration.
snmp_configuration:
description:
- Snmpconfiguration settings for systemconfiguration.
ssh_ciphers:
description:
- Allowed ciphers list for ssh to the management interface on the controller and service engines.
- If this is not specified, all the default ciphers are allowed.
- Ssh -q cipher provides the list of default ciphers supported.
ssh_hmacs:
description:
- Allowed hmac list for ssh to the management interface on the controller and service engines.
- If this is not specified, all the default hmacs are allowed.
- Ssh -q mac provides the list of default hmacs supported.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create SystemConfiguration object
avi_systemconfiguration:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_systemconfiguration
"""
RETURN = '''
obj:
description: SystemConfiguration (api/systemconfiguration) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Entry point: declare the module's argument spec and delegate the
    create/update/delete logic to the shared Avi API helper."""
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        avi_api_update_method=dict(default='put',
                                   choices=['put', 'patch']),
        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
        admin_auth_configuration=dict(type='dict',),
        default_license_tier=dict(type='str',),
        dns_configuration=dict(type='dict',),
        dns_virtualservice_refs=dict(type='list',),
        docker_mode=dict(type='bool',),
        email_configuration=dict(type='dict',),
        global_tenant_config=dict(type='dict',),
        linux_configuration=dict(type='dict',),
        mgmt_ip_access_control=dict(type='dict',),
        ntp_configuration=dict(type='dict',),
        portal_configuration=dict(type='dict',),
        proxy_configuration=dict(type='dict',),
        snmp_configuration=dict(type='dict',),
        ssh_ciphers=dict(type='list',),
        ssh_hmacs=dict(type='list',),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    # Add the common Avi connection options (controller, username, ...).
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    # Fail gracefully when the Avi SDK import at module load time failed.
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'systemconfiguration',
                           set([]))


if __name__ == '__main__':
    main()
| gpl-3.0 |
jmankoff/data | Assignments/jmankoff-mobile/lib/flask/config.py | 781 | 6234 | # -*- coding: utf-8 -*-
"""
flask.config
~~~~~~~~~~~~
Implements the configuration related objects.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import imp
import os
import errno
from werkzeug.utils import import_string
from ._compat import string_types
class ConfigAttribute(object):
    """Descriptor that proxies attribute access to the owner's ``config``.

    Reading looks the configured key up on ``obj.config`` (optionally
    passing the value through ``get_converter``); assigning writes the
    value straight back into that mapping.
    """

    def __init__(self, name, get_converter=None):
        self.__name__ = name
        self.get_converter = get_converter

    def __get__(self, obj, type=None):
        # Class-level access returns the descriptor itself, as is customary.
        if obj is None:
            return self
        value = obj.config[self.__name__]
        converter = self.get_converter
        return value if converter is None else converter(value)

    def __set__(self, obj, value):
        obj.config[self.__name__] = value
class Config(dict):
    """Works exactly like a dict but provides ways to fill it from files
    or special dictionaries.  There are two common patterns to populate the
    config.

    Either you can fill the config from a config file::

        app.config.from_pyfile('yourconfig.cfg')

    Or alternatively you can define the configuration options in the
    module that calls :meth:`from_object` or provide an import path to
    a module that should be loaded.  It is also possible to tell it to
    use the same module and with that provide the configuration values
    just before the call::

        DEBUG = True
        SECRET_KEY = 'development key'
        app.config.from_object(__name__)

    In both cases (loading from any Python file or loading from modules),
    only uppercase keys are added to the config.  This makes it possible to use
    lowercase values in the config file for temporary values that are not added
    to the config or to define the config keys in the same file that implements
    the application.

    Probably the most interesting way to load configurations is from an
    environment variable pointing to a file::

        app.config.from_envvar('YOURAPPLICATION_SETTINGS')

    In this case before launching the application you have to set this
    environment variable to the file you want to use.  On Linux and OS X
    use the export statement::

        export YOURAPPLICATION_SETTINGS='/path/to/config/file'

    On windows use `set` instead.

    :param root_path: path to which files are read relative from.  When the
                      config object is created by the application, this is
                      the application's :attr:`~flask.Flask.root_path`.
    :param defaults: an optional dictionary of default values
    """

    def __init__(self, root_path, defaults=None):
        dict.__init__(self, defaults or {})
        self.root_path = root_path

    def from_envvar(self, variable_name, silent=False):
        """Loads a configuration from an environment variable pointing to
        a configuration file.  This is basically just a shortcut with nicer
        error messages for this line of code::

            app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])

        :param variable_name: name of the environment variable
        :param silent: set to `True` if you want silent failure for missing
                       files.
        :return: bool. `True` if able to load config, `False` otherwise.
        """
        rv = os.environ.get(variable_name)
        if not rv:
            if silent:
                return False
            raise RuntimeError('The environment variable %r is not set '
                               'and as such configuration could not be '
                               'loaded. Set this variable and make it '
                               'point to a configuration file' %
                               variable_name)
        return self.from_pyfile(rv, silent=silent)

    def from_pyfile(self, filename, silent=False):
        """Updates the values in the config from a Python file.  This function
        behaves as if the file was imported as module with the
        :meth:`from_object` function.

        :param filename: the filename of the config.  This can either be an
                         absolute filename or a filename relative to the
                         root path.
        :param silent: set to `True` if you want silent failure for missing
                       files.

        .. versionadded:: 0.7
           `silent` parameter.
        """
        filename = os.path.join(self.root_path, filename)
        # Execute the file inside a throwaway module namespace so only the
        # names it defines (not builtins of this module) are harvested.
        d = imp.new_module('config')
        d.__file__ = filename
        try:
            with open(filename) as config_file:
                exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
        except IOError as e:
            # Missing file / directory is only an error when not silenced.
            if silent and e.errno in (errno.ENOENT, errno.EISDIR):
                return False
            e.strerror = 'Unable to load configuration file (%s)' % e.strerror
            raise
        self.from_object(d)
        return True

    def from_object(self, obj):
        """Updates the values from the given object.  An object can be of one
        of the following two types:

        -   a string: in this case the object with that name will be imported
        -   an actual object reference: that object is used directly

        Objects are usually either modules or classes.

        Just the uppercase variables in that object are stored in the config.
        Example usage::

            app.config.from_object('yourapplication.default_config')
            from yourapplication import default_config
            app.config.from_object(default_config)

        You should not use this function to load the actual configuration but
        rather configuration defaults.  The actual config should be loaded
        with :meth:`from_pyfile` and ideally from a location not within the
        package because the package might be installed system wide.

        :param obj: an import name or object
        """
        if isinstance(obj, string_types):
            obj = import_string(obj)
        # Only UPPERCASE attributes are treated as configuration keys.
        for key in dir(obj):
            if key.isupper():
                self[key] = getattr(obj, key)

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
| gpl-3.0 |
MeteorKepler/RICGA | ricga/ops/image_processing.py | 1 | 8197 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for image preprocessing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from ricga.reference.tf2keras_image_process import tf2keras_image
def distort_image(image, thread_id):
    """Perform random distortions on an image.

    Args:
      image: A float32 Tensor of shape [height, width, 3] with values in [0, 1).
      thread_id: Preprocessing thread id used to select the ordering of color
        distortions. There should be a multiple of 2 preprocessing threads.

    Returns:
      distorted_image: A float32 Tensor of shape [height, width, 3] with values in
        [0, 1].
    """
    # Randomly flip horizontally.
    with tf.name_scope("flip_horizontal", values=[image]):
        image = tf.image.random_flip_left_right(image)

    # Randomly distort the colors based on thread id. Two orderings are used
    # so that different preprocessing threads apply the color ops in a
    # different sequence (the ops do not commute exactly).
    color_ordering = thread_id % 2
    with tf.name_scope("distort_color", values=[image]):
        if color_ordering == 0:
            image = tf.image.random_brightness(image, max_delta=32. / 255.)
            image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
            image = tf.image.random_hue(image, max_delta=0.032)
            image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
        elif color_ordering == 1:
            image = tf.image.random_brightness(image, max_delta=32. / 255.)
            image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
            image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
            image = tf.image.random_hue(image, max_delta=0.032)

        # The random_* ops do not necessarily clamp, so force back into [0, 1].
        image = tf.clip_by_value(image, 0.0, 1.0)

    return image
def process_image(encoded_image,
                  is_training,
                  height,
                  width,
                  ssd_model,
                  resize_height=346,
                  resize_width=346,
                  thread_id=0,
                  image_format="jpeg"):
    """Decode an image, resize, apply random distortions, and also crop out
    the single most confident SSD-detected region.

    In training, images are distorted slightly differently depending on thread_id.

    Args:
      encoded_image: String Tensor containing the image.
      is_training: Boolean; whether preprocessing for training or eval.
      height: Height of the output image.
      width: Width of the output image.
      ssd_model: SSD300 model. It is called on a [1, 300, 300, 3] batch and
        each output row is assumed to be laid out as
        [loc(4), class confidences, priorbox(4), variances(4)]
        -- TODO confirm against the SSD model definition.
      resize_height: If > 0, resize height before crop to final dimensions.
      resize_width: If > 0, resize width before crop to final dimensions.
      thread_id: Preprocessing thread id used to select the ordering of color
        distortions. There should be a multiple of 2 preprocessing threads.
      image_format: "jpeg" or "png".

    Returns:
      A tuple (image, region_image) of float32 Tensors of shape
      [height, width, 3] with values in [-1, 1].

    Raises:
      ValueError: If image_format is invalid.
    """

    # Helper function to log an image summary to the visualizer. Summaries are
    # only logged in half of the threads.
    def image_summary(name, image_to_sum):
        if thread_id % 2 == 0:
            tf.summary.image(name, tf.expand_dims(image_to_sum, 0))

    # Decode image into a float32 Tensor of shape [?, ?, 3] with values in [0, 1).
    with tf.name_scope("decode", values=[encoded_image]):
        if image_format == "jpeg":
            image = tf.image.decode_jpeg(encoded_image, channels=3)
        elif image_format == "png":
            image = tf.image.decode_png(encoded_image, channels=3)
        else:
            raise ValueError("Invalid image format: %s" % image_format)
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # Keep the undistorted image around: SSD detection runs on it below.
    original_image = image
    image_summary("original_image", image)

    # Resize image.
    assert (resize_height > 0) == (resize_width > 0)
    if resize_height:
        image = tf.image.resize_images(image,
                                       size=[resize_height, resize_width],
                                       method=tf.image.ResizeMethod.BILINEAR)

    # Crop to final dimensions.
    if is_training:
        image = tf.random_crop(image, [height, width, 3])
    else:
        # Central crop, assuming resize_height > height, resize_width > width.
        image = tf.image.resize_image_with_crop_or_pad(image, height, width)
    image_summary("resized_image", image)

    # Randomly distort the image.
    if is_training:
        image = distort_image(image, thread_id)
    image_summary("final_image", image)

    # Rescale to [-1,1] instead of [0, 1]
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)

    # SSD detection runs on a fixed 300x300 version of the undistorted image.
    image_300x300 = tf.image.resize_images(original_image, [300, 300])
    image_300x300_ssd_input = tf2keras_image(image_300x300)
    ssd_output = ssd_model(tf.expand_dims(image_300x300_ssd_input, 0))[0]
    with tf.variable_scope("ssd_out_processing"):
        # Split each per-prior row into localization offsets, class
        # confidences, prior box corners and decode variances.
        mbox_loc = ssd_output[:, :4]
        variances = ssd_output[:, -4:]
        mbox_priorbox = ssd_output[:, -8:-4]
        mbox_conf = ssd_output[:, 4:-8]

        prior_width = mbox_priorbox[:, 2] - mbox_priorbox[:, 0]
        prior_height = mbox_priorbox[:, 3] - mbox_priorbox[:, 1]
        prior_center_x = 0.5 * (mbox_priorbox[:, 2] + mbox_priorbox[:, 0])
        prior_center_y = 0.5 * (mbox_priorbox[:, 3] + mbox_priorbox[:, 1])

        decode_bbox_center_x = mbox_loc[:, 0] * prior_width * variances[:, 0]
        decode_bbox_center_x += prior_center_x
        # BUG FIX: the y offset must be scaled by the prior *height*
        # (was prior_width), per standard SSD box decoding.
        decode_bbox_center_y = mbox_loc[:, 1] * prior_height * variances[:, 1]
        decode_bbox_center_y += prior_center_y
        decode_bbox_width = tf.exp(mbox_loc[:, 2] * variances[:, 2])
        decode_bbox_width *= prior_width
        decode_bbox_height = tf.exp(mbox_loc[:, 3] * variances[:, 3])
        decode_bbox_height *= prior_height

        decode_bbox_xmin = tf.expand_dims(decode_bbox_center_x - 0.5 * decode_bbox_width, -1)
        decode_bbox_ymin = tf.expand_dims(decode_bbox_center_y - 0.5 * decode_bbox_height, -1)
        decode_bbox_xmax = tf.expand_dims(decode_bbox_center_x + 0.5 * decode_bbox_width, -1)
        decode_bbox_ymax = tf.expand_dims(decode_bbox_center_y + 0.5 * decode_bbox_height, -1)
        # BUG FIX: tf.image.crop_and_resize requires boxes as [y1, x1, y2, x2].
        # The previous order (ymin, xmax, ymax, xmin) swapped the x corners,
        # which produced a horizontally mirrored crop.
        decode_bbox = tf.concat((decode_bbox_ymin,
                                 decode_bbox_xmin,
                                 decode_bbox_ymax,
                                 decode_bbox_xmax), axis=-1)
        decode_bbox = tf.minimum(tf.maximum(decode_bbox, 0.0), 1.0)

        # Keep the single highest-confidence non-background box
        # (column 0 of the confidences is assumed to be background).
        mbox_conf_without_background = tf.slice(mbox_conf, [0, 1], [-1, -1])
        mbox_conf_max = tf.reduce_max(mbox_conf_without_background, 1)
        idx = tf.image.non_max_suppression(decode_bbox, mbox_conf_max, max_output_size=1)
        idx = tf.reshape(idx, [1])
        good_box = decode_bbox[idx[0]]
        region_image = tf.image.crop_and_resize(tf.expand_dims(image_300x300, 0),
                                                boxes=tf.expand_dims(good_box, 0),
                                                box_ind=tf.constant([0], dtype=tf.int32),
                                                crop_size=[height, width],
                                                name="region_images")[0]
    image_summary("region_image", region_image)
    # Rescale to [-1,1] instead of [0, 1]
    region_image = tf.subtract(region_image, 0.5)
    region_image = tf.multiply(region_image, 2.0)
    return image, region_image
| apache-2.0 |
fracting/depot_tools | recipes/naclports.py | 25 | 1198 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import recipe_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class Naclports(recipe_util.Recipe):
  """Basic Recipe class for naclports."""

  @staticmethod
  def fetch_spec(props):
    """Build the gclient fetch spec for naclports from fetch properties."""
    solution = {
        'name': 'src',
        'url': 'https://chromium.googlesource.com/external/naclports.git',
        'deps_file': 'DEPS',
        'managed': False,
        'custom_deps': {},
        'safesync_url': '',
    }
    spec = {'solutions': [solution]}
    # Optional comma-separated OS list, plus the restrict-to-those-OSes flag.
    target_os = props.get('target_os')
    if target_os:
      spec['target_os'] = target_os.split(',')
    if props.get('target_os_only'):
      spec['target_os_only'] = props['target_os_only']
    return {
        'type': 'gclient_git',
        'gclient_git_spec': spec,
    }

  @staticmethod
  def expected_root(_props):
    """Directory the checkout is expected to land in."""
    return 'src'
def main(argv=None):
  # Command-line entry point: delegate argument handling to the recipe base.
  return Naclports().handle_args(argv)


if __name__ == '__main__':
  sys.exit(main(sys.argv))
| bsd-3-clause |
sparkslabs/kamaelia | Code/Python/Kamaelia/Kamaelia/Apps/SpeakNWrite/Gestures/PreProcessing.py | 3 | 6871 | # -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pygame
from Axon.Component import component
# Stroke pre-processing
# NOTE(review): DOT_SEPARATION_THRESHOLD appears unused in this module as far
# as visible here -- confirm before removing.
DOT_SEPARATION_THRESHOLD = 5
# Quantisation cap: strokes are thinned down to at most this many points.
MAX_STROKE_POINTS = 20
class QuantiseStroke(component):
    """Quantises a stroke into a reduced set of points.

    Receives (x, y) point messages on "inbox" terminated by an "ENDSTROKE"
    sentinel; emits the thinned point list on "outbox" and draw commands for
    the collected points on "drawing".
    """
    Outboxes = { "outbox":"",
                 "drawing":"",
                 "signal":"",
               }

    def main(self):
        # Generator-style Axon component main loop: one iteration per stroke.
        while 1:
            # wait for first point on path (ignore a leading "ENDSTROKE")
            ox,oy = None,None
            while (ox,oy) == (None,None):
                if self.dataReady("inbox"):
                    msg = self.recv("inbox")
                    if msg != "ENDSTROKE":
                        ox,oy = msg
                if not self.anyReady():
                    self.pause()
                yield 1
            # subsequent points on path, until the stroke terminator arrives
            points = [(ox,oy)]
            while 1:
                if self.dataReady("inbox"):
                    msg = self.recv("inbox")
                    if msg == "ENDSTROKE":
                        break
                    points.append(msg)
                if not self.anyReady():
                    self.pause()
                yield 1
            # reduce path to a limited number of points: accumulate the
            # fractional keep-ratio and keep a point each time it wraps,
            # giving an evenly spaced subsample of MAX_STROKE_POINTS points
            if len(points) > MAX_STROKE_POINTS:
                step = float(MAX_STROKE_POINTS) / float(len(points))
                v = 0.0
                newpoints = []
                for point in points:
                    v=v-step
                    if v<=0.0:
                        v=v+1.0
                        newpoints.append(point)
                points = newpoints
            # visualise the kept points as small red circles
            for (x,y) in points:
                self.send([["CIRCLE", "255","0","0",str(x),str(y),"1"]], "drawing")
            self.send( points, "outbox" )
            yield 1
def nearest45DegreeStep( step ):
    """Return (in degrees) the nearest 45 degree angle match to vector *step*.

    Result is one of 0, 45, 90, 135, 180, 225, 270, 315; the zero vector
    (0, 0) maps to 0.
    """
    dx, dy = step
    if dx == 0 and dy == 0:
        return 0

    # Rotate the vector by +22.5 degrees so the boundaries between the
    # 45-degree regions line up with 0, 45, 90, ... instead of 22.5, 67.5, ...
    cos = 0.92387953251128674  # math.cos(math.radians(22.5))
    sin = 0.38268343236508978  # math.sin(math.radians(22.5))
    rx = dx * cos - dy * sin
    ry = dy * cos + dx * sin

    # Classify by octant of the rotated vector:
    # key is (ry > 0, rx > 0, abs(ry) > abs(rx)).
    lookup = {
        (True, True, False): 0,
        (True, True, True): 45,
        (True, False, True): 90,
        (True, False, False): 135,
        (False, False, False): 180,
        (False, False, True): 225,
        (False, True, True): 270,
        (False, True, False): 315,
    }
    return lookup[(ry > 0, rx > 0, abs(ry) > abs(rx))]
# Octant lookup used by nearest45DegreeStep(): key is the boolean triple
# (dy > 0, dx > 0, abs(dy) > abs(dx)) of the +22.5-degree-rotated vector.
angleMappings = { (True, True, False) : 0,
                  (True, True, True ) : 45,
                  (True, False, True ) : 90,
                  (True, False, False) : 135,
                  (False, False, False) : 180,
                  (False, False, True ) : 225,
                  (False, True, True ) : 270,
                  (False, True, False) : 315 }
class SegmentStroke(component):
    """Takes a quantised stroke and breaks it into line segments.

    Receives a list of (x, y) points on "inbox"; emits a list of
    ((startx, starty), (endx, endy), direction) segments on "outbox" plus
    blue draw commands on "drawing". Direction is a 45-degree-quantised
    angle from nearest45DegreeStep().
    """
    Outboxes = { "outbox":"",
                 "drawing":"",
                 "signal":"",
               }

    def main(self):
        while 1:
            if self.dataReady("inbox"):
                points = self.recv("inbox")
                # quantise the direction of travel between successive points
                directions = []
                ox,oy = points[0]
                for (x,y) in points[1:]:
                    dx = x-ox
                    dy = y-oy
                    directions.append( nearest45DegreeStep((dx,dy)) )
                    ox,oy = x,y
                # smooth directions: flatten single-sample direction blips
                # (A B A -> A A A)
                if len(directions) >= 3:
                    for i in range(1,len(directions)-1):
                        if directions[i] != directions[i-1] and directions[i] != directions[i+1] and directions[i-1] == directions[i+1]:
                            directions[i] = directions[i-1]
                # extract line segments: start a new segment wherever the
                # quantised direction changes
                segments = []
                sx,sy = points[0]
                sd = directions[0]
                for i in range(1,len(directions)):
                    ex,ey = points[i]
                    if directions[i] != sd:
                        segments.append( ((sx,sy),(ex,ey),sd) )
                        self.send( [["LINE","0","0","255",str(sx),str(sy),str(ex),str(ey)]], "drawing")
                        sx,sy = ex,ey
                        sd = directions[i]
                        # draw a blob too to signify where the join is
                        self.send( [["CIRCLE","0","0","255",str(ex),str(ey),1]], "drawing")
                # close off the final segment at the last point
                ex,ey = points[-1]
                segments.append( ((sx,sy),(ex,ey),sd) )
                self.send( [["LINE","0","0","255",str(sx),str(sy),str(ex),str(ey)]], "drawing")
                self.send(segments,"outbox")
            if not self.anyReady():
                self.pause()
            yield 1
class Normalise(component):
    """Takes a path and normalises it to a bounding box width and height 1, and also notes the aspect ratio.

    Emits (npath, left, top, width, height, aspect) on "outbox", where npath
    is the path rescaled into the unit square.
    """

    def main(self):
        while 1:
            while self.dataReady("inbox"):
                path = self.recv("inbox")
                # bounding box of the incoming path
                xs = [x for (x,y) in path]
                ys = [y for (x,y) in path]
                left = min(xs)
                right = max(xs)
                top = min(ys)
                bottom = max(ys)
                # clamp to >= 1.0 so degenerate (flat) paths don't divide by zero
                width = max(1.0,float(right-left))
                height = max(1.0,float(bottom-top))
                aspect = height/width
                # rescale into the unit square; the 1.0-(...) flips y so larger
                # values are "up" (presumably because input y grows downward,
                # screen-style -- confirm against the upstream producer)
                npath = [ ( (x-left)/width, 1.0-(y-top)/height ) for (x,y) in path ]
                self.send( (npath,left,top,width,height,aspect), "outbox" )
            self.pause()
            yield 1
| apache-2.0 |
NMGRL/pychron | pychron/managers/motion_controller_managers/motion_controller_manager.py | 2 | 7513 | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
from __future__ import absolute_import
from __future__ import print_function
from threading import Thread
from traits.api import Instance, Enum, DelegatesTo, Property, Button, Any, Float
from traitsui.api import View, Item, HGroup, spring, \
ListEditor, VGroup, UItem
# =============standard library imports ========================
# =============local library imports ==========================
from pychron.managers.manager import Manager
from pychron.hardware.motion_controller import MotionController
from pychron.paths import paths
from pychron.core.helpers.filetools import parse_file
import six
class MotionControllerManager(Manager):
    """Manager UI for configuring a MotionController's axis parameters and
    for jogging individual axes to absolute positions.
    """
    motion_controller = Instance(MotionController)
    _axes = DelegatesTo('motion_controller', prefix='axes')
    axes = Property
    apply_button = Button('Apply')
    read_button = Button('Read')
    load_button = Button('Load')
    motion_group = DelegatesTo('motion_controller', prefix='groupobj')
    view_style = Enum('simple_view', 'full_view')
    selected = Any
    xmove_to_button = Button('Move X')
    ymove_to_button = Button('Move Y')
    xtarget_position = Float
    ytarget_position = Float

    def kill(self, **kw):
        """Persist axis parameters before shutting down."""
        super(MotionControllerManager, self).kill(**kw)
        self.motion_controller.save_axes_parameters()

    def _get_axis_by_id(self, aid):
        """Return the axis whose numeric id equals ``aid``, or None.

        Raises ValueError (from int()) if ``aid`` is not numeric.
        """
        return next((a for a in six.itervalues(self._axes) if a.id == int(aid)), None)

    def _get_axes(self):
        """Property getter: axes sorted by key, plus the motion group if any."""
        keys = list(self._axes.keys())
        keys.sort()
        axs = [self._axes[k] for k in keys]
        if self.motion_group:
            axs += [self.motion_group]
        return axs

    def _get_selected(self):
        """Return the currently selected axis, defaulting to the first one."""
        ax = self.selected
        if ax is None:
            ax = self.axes[0]
        return ax

    # handlers
    def _xmove_to_button_fired(self):
        self._move_to('x', self.xtarget_position)

    def _ymove_to_button_fired(self):
        self._move_to('y', self.ytarget_position)

    def _move_to(self, k, v):
        """Move axis ``k`` ('x' or 'y') to absolute position ``v``.

        Runs in a background thread so the UI stays responsive.
        """
        def func():
            self.motion_controller.start_timer()
            ax = self.motion_controller.axes[k]
            self.motion_controller.destroy_group()
            # 'PA' = absolute position move; block receives the axis key --
            # presumably the controller blocks on that axis (confirm).
            self.motion_controller._axis_move('{}PA{}'.format(ax.id, v), block=k)
            self.motion_controller.update_axes()

        t = Thread(target=func)
        t.start()

    def _read_button_fired(self):
        """Re-read the selected axis' parameters from the device."""
        ax = self._get_selected()
        ax._read_parameters_fired()

    def _apply_button_fired(self):
        """Upload the selected axis' parameters to the device and save them."""
        # BUG FIX: removed leftover debug print(ax, ax.id).
        ax = self._get_selected()
        if ax is not None:
            ax.upload_parameters_to_device()
            self.motion_controller.save_axes_parameters(axis=ax)

    def _load_button_fired(self):
        """Load axis parameters from a user-chosen file.

        The first value in the file identifies the target axis; if it is not
        numeric (a plain txt file rather than a cfg), the parameters are
        loaded into the currently selected axis instead.
        """
        path = self.open_file_dialog(default_directory=paths.device_dir)
        if path is not None:
            # sniff the file to get the axis
            lines = parse_file(path)
            aid = lines[0][0]
            try:
                ax = self._get_axis_by_id(aid)
                func = ax.load_parameters_from_file
            except ValueError:
                # this is a txt file not a cfg
                ax = self._get_selected()
                if ax is not None:
                    func = ax.load
            if ax is not None:
                func(path)

    def traits_view(self):
        """Main window: per-axis notebook plus manual x/y move controls."""
        cgrp = VGroup(Item('axes',
                           style='custom',
                           show_label=False,
                           editor=ListEditor(use_notebook=True,
                                             dock_style='tab',
                                             page_name='.name',
                                             selected='selected',
                                             view='full_view')),
                      HGroup(spring, Item('load_button'),
                             Item('read_button'),
                             Item('apply_button'),
                             show_labels=False))
        tgrp = VGroup(HGroup(UItem('xmove_to_button'), UItem('xtarget_position')),
                      HGroup(UItem('ymove_to_button'), UItem('ytarget_position')))
        view = View(VGroup(tgrp, cgrp),
                    resizable=True,
                    handler=self.handler_klass,
                    title='Configure Motion Controller')
        return view

    def configure_view(self):
        """Alternate view whose per-axis page style follows ``view_style``."""
        v = View(Item('axes',
                      style='custom',
                      show_label=False,
                      editor=ListEditor(use_notebook=True,
                                        dock_style='tab',
                                        page_name='.name',
                                        view=self.view_style,
                                        selected='selected'
                                        )),
                 HGroup(spring, Item('load_button'),
                        Item('read_button'), Item('apply_button'), show_labels=False, ))
        return v
# print [self._axes[k] for k in keys] + [self.motion_group]
# return [self._axes[k] for k in keys] + [self.motion_group]
# def _restore_fired(self):
# '''
# '''
# self.motion_controller.axes_factory()
# self.trait_property_changed('axes', None)
# for a in self.axes:
# a.load_
# def _apply_all_fired(self):
# '''
# '''
# # for a in self.axes:
# # a.upload_parameters_to_device()
# if sele
# # self.motion_controller.save()
# def _print_param_table_fired(self):
# table = []
# for a in self.axes:
# attrs, codes, params = a.load_parameters()
# table.append(params)
#
# try:
# p = '/Users/ross/Sandbox/unidex_dump.txt'
# with open(p, 'w') as f:
# for attr, code, ri in zip(attrs, codes, zip(*table)):
# l = ''.join(map('{:<20s}'.format, map(str, ri)))
# l = '{:<20s} {} - {}'.format(attr, code, l)
# f.write(l + '\n')
# print l
# except Exception, e:
# print 'exception', e
| apache-2.0 |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/django/contrib/staticfiles/management/commands/collectstatic.py | 34 | 14419 | from __future__ import unicode_literals
import os
from collections import OrderedDict
from django.apps import apps
from django.contrib.staticfiles.finders import get_finders
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.files.storage import FileSystemStorage
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.utils.encoding import smart_text
from django.utils.functional import cached_property
from django.utils.six.moves import input
class Command(BaseCommand):
    """
    Command that allows to copy or symlink static files from different
    locations to the settings.STATIC_ROOT.
    """
    help = "Collect static files in a single location."
    requires_system_checks = False

    def __init__(self, *args, **kwargs):
        super(Command, self).__init__(*args, **kwargs)
        # Bookkeeping of everything done during collect(); reported by handle().
        self.copied_files = []
        self.symlinked_files = []
        self.unmodified_files = []
        self.post_processed_files = []
        self.storage = staticfiles_storage
        self.style = no_style()

    @cached_property
    def local(self):
        # True when the destination storage is filesystem-backed (supports
        # .path()); symlinking is only possible in that case.
        try:
            self.storage.path('')
        except NotImplementedError:
            return False
        return True

    def add_arguments(self, parser):
        """Register the collectstatic command-line options."""
        parser.add_argument(
            '--noinput', '--no-input',
            action='store_false', dest='interactive', default=True,
            help="Do NOT prompt the user for input of any kind.",
        )
        parser.add_argument(
            '--no-post-process',
            action='store_false', dest='post_process', default=True,
            help="Do NOT post process collected files.",
        )
        parser.add_argument(
            '-i', '--ignore', action='append', default=[],
            dest='ignore_patterns', metavar='PATTERN',
            help="Ignore files or directories matching this glob-style "
                 "pattern. Use multiple times to ignore more.",
        )
        parser.add_argument(
            '-n', '--dry-run',
            action='store_true', dest='dry_run', default=False,
            help="Do everything except modify the filesystem.",
        )
        parser.add_argument(
            '-c', '--clear',
            action='store_true', dest='clear', default=False,
            help="Clear the existing files using the storage "
                 "before trying to copy or link the original file.",
        )
        parser.add_argument(
            '-l', '--link',
            action='store_true', dest='link', default=False,
            help="Create a symbolic link to each file instead of copying.",
        )
        parser.add_argument(
            '--no-default-ignore', action='store_false',
            dest='use_default_ignore_patterns', default=True,
            help="Don't ignore the common private glob-style patterns (defaults to 'CVS', '.*' and '*~').",
        )

    def set_options(self, **options):
        """
        Set instance variables based on an options dict
        """
        self.interactive = options['interactive']
        self.verbosity = options['verbosity']
        self.symlink = options['link']
        self.clear = options['clear']
        self.dry_run = options['dry_run']
        ignore_patterns = options['ignore_patterns']
        if options['use_default_ignore_patterns']:
            ignore_patterns += apps.get_app_config('staticfiles').ignore_patterns
        self.ignore_patterns = list(set(ignore_patterns))
        self.post_process = options['post_process']

    def collect(self):
        """
        Perform the bulk of the work of collectstatic.
        Split off from handle() to facilitate testing.
        """
        if self.symlink and not self.local:
            raise CommandError("Can't symlink to a remote destination.")
        if self.clear:
            self.clear_dir('')
        if self.symlink:
            handler = self.link_file
        else:
            handler = self.copy_file
        # First finder wins: only the first file found for a given destination
        # path is collected; later duplicates are reported and skipped.
        found_files = OrderedDict()
        for finder in get_finders():
            for path, storage in finder.list(self.ignore_patterns):
                # Prefix the relative path if the source storage contains it
                if getattr(storage, 'prefix', None):
                    prefixed_path = os.path.join(storage.prefix, path)
                else:
                    prefixed_path = path
                if prefixed_path not in found_files:
                    found_files[prefixed_path] = (storage, path)
                    handler(path, prefixed_path, storage)
                else:
                    self.log(
                        "Found another file with the destination path '%s'. It "
                        "will be ignored since only the first encountered file "
                        "is collected. If this is not what you want, make sure "
                        "every static file has a unique path." % prefixed_path,
                        level=1,
                    )
        # Here we check if the storage backend has a post_process
        # method and pass it the list of modified files.
        if self.post_process and hasattr(self.storage, 'post_process'):
            processor = self.storage.post_process(found_files,
                                                  dry_run=self.dry_run)
            for original_path, processed_path, processed in processor:
                if isinstance(processed, Exception):
                    self.stderr.write("Post-processing '%s' failed!" % original_path)
                    # Add a blank line before the traceback, otherwise it's
                    # too easy to miss the relevant part of the error message.
                    self.stderr.write("")
                    raise processed
                if processed:
                    self.log("Post-processed '%s' as '%s'" %
                             (original_path, processed_path), level=1)
                    self.post_processed_files.append(original_path)
                else:
                    self.log("Skipped post-processing '%s'" % original_path)
        return {
            'modified': self.copied_files + self.symlinked_files,
            'unmodified': self.unmodified_files,
            'post_processed': self.post_processed_files,
        }

    def handle(self, **options):
        """Entry point: confirm with the user (unless --noinput), run
        collect() and return a human-readable summary string."""
        self.set_options(**options)
        message = ['\n']
        if self.dry_run:
            message.append(
                'You have activated the --dry-run option so no files will be modified.\n\n'
            )
        message.append(
            'You have requested to collect static files at the destination\n'
            'location as specified in your settings'
        )
        if self.is_local_storage() and self.storage.location:
            destination_path = self.storage.location
            message.append(':\n\n %s\n\n' % destination_path)
        else:
            destination_path = None
            message.append('.\n\n')
        if self.clear:
            message.append('This will DELETE ALL FILES in this location!\n')
        else:
            message.append('This will overwrite existing files!\n')
        message.append(
            'Are you sure you want to do this?\n\n'
            "Type 'yes' to continue, or 'no' to cancel: "
        )
        if self.interactive and input(''.join(message)) != 'yes':
            raise CommandError("Collecting static files cancelled.")
        collected = self.collect()
        modified_count = len(collected['modified'])
        unmodified_count = len(collected['unmodified'])
        post_processed_count = len(collected['post_processed'])
        if self.verbosity >= 1:
            template = ("\n%(modified_count)s %(identifier)s %(action)s"
                        "%(destination)s%(unmodified)s%(post_processed)s.\n")
            summary = template % {
                'modified_count': modified_count,
                'identifier': 'static file' + ('' if modified_count == 1 else 's'),
                'action': 'symlinked' if self.symlink else 'copied',
                'destination': (" to '%s'" % destination_path if destination_path else ''),
                'unmodified': (', %s unmodified' % unmodified_count if collected['unmodified'] else ''),
                'post_processed': (collected['post_processed'] and
                                   ', %s post-processed'
                                   % post_processed_count or ''),
            }
            return summary

    def log(self, msg, level=2):
        """
        Small log helper
        """
        if self.verbosity >= level:
            self.stdout.write(msg)

    def is_local_storage(self):
        # Note: checks the concrete storage class, unlike the `local`
        # property which probes for .path() support.
        return isinstance(self.storage, FileSystemStorage)

    def clear_dir(self, path):
        """
        Deletes the given relative path using the destination storage backend.
        """
        if not self.storage.exists(path):
            return
        dirs, files = self.storage.listdir(path)
        for f in files:
            fpath = os.path.join(path, f)
            if self.dry_run:
                self.log("Pretending to delete '%s'" %
                         smart_text(fpath), level=1)
            else:
                self.log("Deleting '%s'" % smart_text(fpath), level=1)
                try:
                    full_path = self.storage.path(fpath)
                except NotImplementedError:
                    self.storage.delete(fpath)
                else:
                    if not os.path.exists(full_path) and os.path.lexists(full_path):
                        # Delete broken symlinks
                        os.unlink(full_path)
                    else:
                        self.storage.delete(fpath)
        for d in dirs:
            # Recurse into subdirectories.
            self.clear_dir(os.path.join(path, d))

    def delete_file(self, path, prefixed_path, source_storage):
        """
        Checks if the target file should be deleted if it already exists.
        Returns False (and records the file as unmodified) when the existing
        target is up to date; otherwise deletes it and returns True.
        """
        if self.storage.exists(prefixed_path):
            try:
                # When was the target file modified last time?
                target_last_modified = self.storage.get_modified_time(prefixed_path)
            except (OSError, NotImplementedError, AttributeError):
                # The storage doesn't support get_modified_time() or failed
                pass
            else:
                try:
                    # When was the source file modified last time?
                    source_last_modified = source_storage.get_modified_time(path)
                except (OSError, NotImplementedError, AttributeError):
                    pass
                else:
                    # The full path of the target file
                    if self.local:
                        full_path = self.storage.path(prefixed_path)
                    else:
                        full_path = None
                    # Skip the file if the source file is younger
                    # Avoid sub-second precision (see #14665, #19540)
                    if (target_last_modified.replace(microsecond=0) >= source_last_modified.replace(microsecond=0) and
                            full_path and not (self.symlink ^ os.path.islink(full_path))):
                        if prefixed_path not in self.unmodified_files:
                            self.unmodified_files.append(prefixed_path)
                        self.log("Skipping '%s' (not modified)" % path)
                        return False
        # Then delete the existing file if really needed
        if self.dry_run:
            self.log("Pretending to delete '%s'" % path)
        else:
            self.log("Deleting '%s'" % path)
            self.storage.delete(prefixed_path)
        return True

    def link_file(self, path, prefixed_path, source_storage):
        """
        Attempt to link ``path``
        """
        # Skip this file if it was already copied earlier
        if prefixed_path in self.symlinked_files:
            return self.log("Skipping '%s' (already linked earlier)" % path)
        # Delete the target file if needed or break
        if not self.delete_file(path, prefixed_path, source_storage):
            return
        # The full path of the source file
        source_path = source_storage.path(path)
        # Finally link the file
        if self.dry_run:
            self.log("Pretending to link '%s'" % source_path, level=1)
        else:
            self.log("Linking '%s'" % source_path, level=1)
            full_path = self.storage.path(prefixed_path)
            try:
                os.makedirs(os.path.dirname(full_path))
            except OSError:
                pass
            try:
                if os.path.lexists(full_path):
                    os.unlink(full_path)
                os.symlink(source_path, full_path)
            except AttributeError:
                # os.symlink missing on this Python build
                import platform
                raise CommandError("Symlinking is not supported by Python %s." %
                                   platform.python_version())
            except NotImplementedError:
                import platform
                raise CommandError("Symlinking is not supported in this "
                                   "platform (%s)." % platform.platform())
            except OSError as e:
                raise CommandError(e)
        if prefixed_path not in self.symlinked_files:
            self.symlinked_files.append(prefixed_path)

    def copy_file(self, path, prefixed_path, source_storage):
        """
        Attempt to copy ``path`` with storage
        """
        # Skip this file if it was already copied earlier
        if prefixed_path in self.copied_files:
            return self.log("Skipping '%s' (already copied earlier)" % path)
        # Delete the target file if needed or break
        if not self.delete_file(path, prefixed_path, source_storage):
            return
        # The full path of the source file
        source_path = source_storage.path(path)
        # Finally start copying
        if self.dry_run:
            self.log("Pretending to copy '%s'" % source_path, level=1)
        else:
            self.log("Copying '%s'" % source_path, level=1)
            with source_storage.open(path) as source_file:
                self.storage.save(prefixed_path, source_file)
        self.copied_files.append(prefixed_path)
| mit |
MikeLing/shogun | examples/undocumented/python/preprocessor_prunevarsubmean.py | 5 | 1052 | #!/usr/bin/env python
from tools.load import LoadMatrix
# Load the demo train/test feature matrices shipped with the shogun examples.
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')

# Parameter sets consumed by the example runner:
# (train, test, width, size_cache).
# NOTE(review): width here is 1.5 while the function default below is 1.4 --
# confirm which value is intended.
parameter_list = [[traindat,testdat,1.5,10],[traindat,testdat,1.5,10]]
def preprocessor_prunevarsubmean (fm_train_real=traindat,fm_test_real=testdat,width=1.4,size_cache=10):
    """Demo the PruneVarSubMean preprocessor with a Chi2 kernel.

    Fits the preprocessor on the training features, applies it to both
    feature sets, and returns the train/test kernel matrices plus the kernel.
    """
    from shogun import Chi2Kernel, RealFeatures, PruneVarSubMean

    feats_train = RealFeatures(fm_train_real)
    feats_test = RealFeatures(fm_test_real)

    # Fit on the training data, then apply to train and test alike.
    preproc = PruneVarSubMean()
    preproc.init(feats_train)
    for feats in (feats_train, feats_test):
        feats.add_preprocessor(preproc)
        feats.apply_preprocessor()

    kernel = Chi2Kernel(feats_train, feats_train, width, size_cache)
    km_train = kernel.get_kernel_matrix()

    kernel.init(feats_train, feats_test)
    km_test = kernel.get_kernel_matrix()
    return km_train, km_test, kernel
if __name__=='__main__':
    # Run the first parameter set as a smoke test when executed directly.
    print('PruneVarSubMean')
    preprocessor_prunevarsubmean(*parameter_list[0])
| gpl-3.0 |
ajnirp/servo | tests/wpt/css-tests/tools/six/documentation/conf.py | 420 | 7015 | # -*- coding: utf-8 -*-
#
# six documentation build configuration file
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.0"
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.intersphinx"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
#source_encoding = "utf-8-sig"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"six"
copyright = u"2010-2014, Benjamin Peterson"
sys.path.append(os.path.abspath(os.path.join(".", "..")))
from six import __version__ as six_version
sys.path.pop()
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = six_version[:-2]
# The full version, including alpha/beta/rc tags.
release = six_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'sixdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "six.tex", u"six Documentation",
u"Benjamin Peterson", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
("index", "six", u"six Documentation",
[u"Benjamin Peterson"], 1)
]
# -- Intersphinx ---------------------------------------------------------------
intersphinx_mapping = {"py2" : ("https://docs.python.org/2/", None),
"py3" : ("https://docs.python.org/3/", None)}
| mpl-2.0 |
MSusik/invenio | invenio/legacy/bibauthorid/general_utils.py | 3 | 18048 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
'''
bibauthorid_general_utils
Bibauthorid utilities used by many parts of the framework
'''
from __future__ import print_function
from invenio.legacy.bibauthorid import config as bconfig
from datetime import datetime
import sys
from math import floor
from invenio.utils.crossref import get_marcxml_for_doi, CrossrefError
try:
import elementtree.ElementTree as ET
except ImportError:
from xml.etree import ElementTree as ET
from urllib import urlopen
from urllib2 import HTTPError
from collections import deque
import multiprocessing as mp
import time
import re
# Debug-output configuration, all driven by bibauthorid's config module.
PRINT_TS = bconfig.DEBUG_TIMESTAMPS
PRINT_TS_US = bconfig.DEBUG_TIMESTAMPS_UPDATE_STATUS and PRINT_TS
NEWLINE = bconfig.DEBUG_UPDATE_STATUS_THREAD_SAFE
FO = bconfig.DEBUG_LOG_TO_PIDFILE
# '\r' keeps progress updates on one console line; use '\n' when output
# goes to a file or must be thread safe.
TERMINATOR = '\r'
if NEWLINE or FO:
    TERMINATOR = '\n'
import os
PID = os.getpid
# Cache of per-process log file handles, keyed by pid (see set_stdout).
pidfiles = dict()
# Constants for classification
DOI_ID = "doi"
ARXIV_ID = "arxivid"
ORCID_ID = "orcid"
INSPIRE_ID = "inspire"
# Precompiled identifier patterns; all anchored at the end ('$') and matched
# with re.match, so they must cover the whole (stripped) input string.
orcid_regex = re.compile(r"((?:https?://)?(?:www.)?orcid.org/)?((?:\d{4}-){3}\d{3}[\dX]$)", re.IGNORECASE)
inspire_regex = re.compile(r"(INSPIRE-)(\d+)$", re.IGNORECASE)
arxiv_new_regex = re.compile(r"(arXiv:)?(\d{4})\.(\d{4,6})(v\d+)?$", re.IGNORECASE)
arxiv_old_regex = re.compile(r"(arXiv:)?((?:[a-zA-Z]|[a-zA-Z]-[a-zA-Z])+)(\.[a-zA-Z]{2})?/(\d{7})(v\d+)?$", re.IGNORECASE)
doi_regex = re.compile(r"((?:https?://)?(?:dx.)?doi.org/)?(10\.(\d+)(/|\.)\S.*)$", re.IGNORECASE)
def get_orcid_from_string(identifier, uri=False):
    """
    Extract and normalise an ORCID found in a string.

    The input may be a bare ORCID (dddd-dddd-dddd-dddC) or an orcid.org
    URL around it.  With uri=True the result is returned as a full
    http://orcid.org/... URI as per the specification, otherwise as the
    bare identifier.

    @param identifier: Target string to extract from.
    @param uri: True to return an ORCID URI instead of the bare ORCID.
    @return: the ORCID (or ORCID URI), or None if no ORCID is found.
    """
    match = re.match(
        r"((?:https?://)?(?:www.)?orcid.org/)?((?:\d{4}-){3}\d{3}[\dX]$)",
        identifier.strip(), re.IGNORECASE)
    if match is None:
        return None
    bare_orcid = match.group(2)
    return "http://orcid.org/" + bare_orcid if uri else bare_orcid
def calculate_orcid_checksum_digit(orcid):
    """
    Compute the ISO 7064 Mod 11-2 check character for an ORCID.

    The final character of the input is ignored; only the 15 leading
    digits take part in the computation.

    @param orcid: ORCID string in the format dddd-dddd-dddd-dddC.
    @return: the expected check character, "0"-"9" or "X".
    """
    base_digits = orcid[:-1].replace("-", "")
    assert len(base_digits) == 15
    running = 0
    for digit in base_digits:
        running = 2 * (running + int(digit))
    check = (12 - running % 11) % 11
    # 10 has no single-digit representation and is written as 'X'.
    return "X" if check == 10 else str(check)
def is_orcid_checksum_matching(orcid):
    """
    Validate an ORCID against its ISO 7064 Mod 11-2 check character.

    @param orcid: ORCID string in the format dddd-dddd-dddd-dddC.
    @return: True when the trailing character matches the checksum.
    """
    assert len(orcid) == 19
    # The check character may be a lowercase 'x'; compare case-insensitively.
    expected = calculate_orcid_checksum_digit(orcid)
    return expected == orcid.upper()[-1:]
def is_valid_orcid(identifier):
    """
    Complete ORCID validation: format check plus checksum verification.

    @param identifier: The string of an identifier to check.
    @return: True when the string contains a well-formed, checksummed ORCID.
    """
    orcid = get_orcid_from_string(identifier)
    return orcid is not None and is_orcid_checksum_matching(orcid)
def is_inspire_id(identifier):
    """
    Check whether a string is an INSPIRE ID (``INSPIRE-<digits>``).

    @param identifier: The string of an identifier to check.
    @return: Boolean representing the statement.
    """
    return re.match(r"(INSPIRE-)(\d+)$", identifier.strip(),
                    re.IGNORECASE) is not None
def is_orcid_or_inspire_id(identifier):
    """
    Classify an identifier string as an ORCID or an INSPIRE ID.

    @param identifier: The string of an identifier to check.
    @return: ORCID_ID or INSPIRE_ID constant, or None for anything else.
    """
    if is_valid_orcid(identifier):
        return ORCID_ID
    if is_inspire_id(identifier):
        return INSPIRE_ID
    return None
def is_arxiv_id_new(identifier):
    """
    Check for a post-2007 (new scheme) arXiv identifier.
    examples: arXiv:1234.1234, arXiv:1234.1234v2, 1234.12345

    @param identifier: The string of an identifier to check.
    @return: Boolean representing the statement.
    """
    return re.match(r"(arXiv:)?(\d{4})\.(\d{4,6})(v\d+)?$",
                    identifier.strip(), re.IGNORECASE) is not None
def is_arxiv_id_old(identifier):
    """
    Check for a pre-2007 (old scheme) arXiv identifier.
    examples: arXiv:hep-th/9901001, arXiv:hep-th/9901001v1

    @param identifier: The string of an identifier to check.
    @return: Boolean representing the statement.
    """
    pattern = r"(arXiv:)?((?:[a-zA-Z]|[a-zA-Z]-[a-zA-Z])+)(\.[a-zA-Z]{2})?/(\d{7})(v\d+)?$"
    return re.match(pattern, identifier.strip(), re.IGNORECASE) is not None
def is_arxiv_id(identifier):
    """
    Check for any arXiv identifier scheme (old or new).
    examples: arXiv:hep-th/9901001, arXiv:1234.1234

    @param identifier: The string of an identifier to check.
    @return: Boolean representing the statement.
    """
    return any(check(identifier)
               for check in (is_arxiv_id_new, is_arxiv_id_old))
def is_doi(identifier):
    """
    Check whether a string is a DOI (optionally as a doi.org URL).
    examples: 10.1016/S0735-1097(98)00347-7, 10.1007/978-3-642-28108-2_19

    @param identifier: The string of an identifier to check.
    @return: Boolean representing the statement.
    """
    pattern = r"((?:https?://)?(?:dx.)?doi.org/)?(10\.(\d+)(/|\.)\S.*)$"
    return re.match(pattern, identifier.strip(), re.IGNORECASE) is not None
def get_doi(identifier):
    """
    Extract the bare DOI from a string (which may be a doi.org URL).
    examples: 10.1016/S0735-1097(98)00347-7, 10.1007/978-3-642-28108-2_19

    @param identifier: The string of an identifier to check.
    @return: the DOI string, or None when the input is not a DOI.
    """
    pattern = r"((?:https?://)?(?:dx.)?doi.org/)?(10\.(\d+)(/|\.)\S.*)$"
    match = re.match(pattern, identifier.strip(), re.IGNORECASE)
    return match.group(2) if match is not None else None
def is_arxiv_id_or_doi(identifier):
    """
    Classify an identifier string as an arXiv ID or a DOI.

    @param identifier: The string of an identifier to check.
    @return: ARXIV_ID or DOI_ID constant, or None for anything else.
    """
    if is_arxiv_id(identifier):
        return ARXIV_ID
    if is_doi(identifier):
        return DOI_ID
    return None
def get_title_of_doi(doi):
    """Resolve a DOI to a record title via the CrossRef service.

    Falls back to returning the DOI string itself whenever the lookup
    fails or no title field is present in the returned MARCXML.

    @param doi: DOI string to resolve.
    @return: the title text, or the DOI itself on failure.
    """
    try:
        xml = get_marcxml_for_doi(doi)
    except CrossrefError:
        # Best effort: keep the raw DOI as display value.
        return doi
    root = ET.fromstring(xml)
    for datafield in root.findall('datafield'):
        tag = datafield.get('tag')
        if tag == '245':
            # MARC datafield 245 carries the title; the first subfield is used.
            title = datafield.find('subfield').text
            return title
    return doi
def get_xml_referer_of_arxiv_pubid(arxiv_pubid):
    """Build the arXiv OAI2 GetRecord URL for an arXiv identifier.

    @param arxiv_pubid: arXiv id, old or new scheme, with or without
        the 'arXiv:' prefix.
    @return: the GetRecord URL, or None when the input is not a
        recognised arXiv identifier.
    """
    arxiv_id = None
    if is_arxiv_id_new(arxiv_pubid):
        result = arxiv_new_regex.match(arxiv_pubid)
        # group(1) is the optional 'arXiv:' prefix and is None when the
        # prefix is absent; len(None) would raise TypeError, so default
        # to the empty string before stripping it off.
        prefix = result.group(1) or ''
        arxiv_id = arxiv_pubid[len(prefix):]
    elif is_arxiv_id_old(arxiv_pubid):
        # NOTE(review): old-scheme ids are used verbatim, so an 'arXiv:'
        # prefix (accepted by the regex) ends up inside the OAI id --
        # confirm whether it should be stripped like the new scheme.
        arxiv_id = arxiv_pubid
    if arxiv_id is None:
        return None
    # TODO: the below url should be configurable
    referer = 'http://export.arxiv.org/oai2?verb=GetRecord&identifier=oai:arXiv.org:%s&metadataPrefix=oai_dc' % arxiv_id
    return referer
def get_title_of_arxiv_pubid(arxiv_pubid):
    """Resolve an arXiv identifier to its title via the arXiv OAI2 API.

    Returns the identifier itself when it is not a valid arXiv id, the
    HTTP request fails, or no title element can be found.

    @param arxiv_pubid: arXiv identifier (old or new scheme).
    @return: the record title, or the identifier itself on failure.
    """
    def get_title_from_arxiv_xml(tree, tags):
        # Walk down the XML tree following the tag path in 'tags'
        # (namespace-agnostic: matched with endswith); the text of the
        # final element is the title.
        try:
            tag = tags.popleft()
        except IndexError:
            return tree.text
        for descendant in tree:
            if descendant.tag.endswith(tag):
                return get_title_from_arxiv_xml(descendant, tags)
        return None
    xml_referer = get_xml_referer_of_arxiv_pubid(arxiv_pubid)
    if xml_referer is None:
        return arxiv_pubid
    try:
        # Blocking network call to export.arxiv.org.
        fxml = urlopen(xml_referer)
        xml = fxml.read()
        fxml.close()
        root = ET.fromstring(xml)
    except HTTPError:
        return arxiv_pubid
    title = get_title_from_arxiv_xml(root, deque(['GetRecord', 'record', 'metadata', 'dc', 'title']))
    if title:
        return title
    return arxiv_pubid
def schedule_workers(function, args, max_processes=mp.cpu_count()):
    """Run *function* once per element of *args* in child processes,
    keeping at most *max_processes* workers alive at any time.

    @param function: callable executed in a child process; it receives
        one element of *args* as its single positional argument.
    @param args: iterable of job arguments, dispatched in given order.
    @param max_processes: maximum number of concurrent worker processes.
    """
    # One slot per allowed worker; None marks a free slot.
    processes = dict((slot, None) for slot in range(max_processes))
    jobs = list(args)
    jobs.reverse()  # pop() takes from the end, so reverse to keep order.
    while jobs:
        # .items() (not the py2-only .iteritems()) keeps this loop
        # working on python 3 as well.
        for slot, proc in processes.items():
            if not jobs:
                # The job list can empty mid-sweep; without this guard
                # jobs.pop() below would raise IndexError.
                break
            if not proc or not proc.is_alive():
                if proc:
                    proc.join()
                    proc.terminate()
                new_proc = mp.Process(target=function, args=(jobs.pop(),))
                new_proc.start()
                processes[slot] = new_proc
        time.sleep(1)
    # Reap workers that have already finished.
    # NOTE(review): workers still running at this point are not joined,
    # so the function may return while children are active -- confirm
    # this best-effort semantic is intended.
    for slot, proc in processes.items():
        if not proc or not proc.is_alive():
            if proc:
                proc.join()
                proc.terminate()
class defaultdict(dict):
    '''
    Minimal backport of collections.defaultdict for python <= 2.4.

    Unlike the stdlib version the factory argument is mandatory; a
    non-callable factory (e.g. None) makes missing keys raise KeyError,
    mimicking a plain dict.
    '''
    def __init__(self, default_factory, *args, **kwargs):
        super(defaultdict, self).__init__(*args, **kwargs)
        self.default_factory = default_factory
    def __missing__(self, key):
        try:
            self[key] = self.default_factory()
        except TypeError:
            # Factory is not callable: behave like a plain dict lookup miss.
            raise KeyError("Missing key %s" % (key,))
        return self[key]
    def __getitem__(self, key):
        # Fall back to the factory only when the key is truly absent.
        if key in self:
            return super(defaultdict, self).__getitem__(key)
        return self.__missing__(key)
def override_stdout_config(fileout=False, stdout=True):
    """Choose where debug output goes for the rest of the run.

    Exactly one of *fileout* and *stdout* must be True (enforced by the
    xor assert): *fileout* routes output to per-process pid files,
    *stdout* to the console.
    """
    global FO
    assert fileout ^ stdout
    # Given the xor above, 'fileout' alone determines the mode.
    FO = bool(fileout)
def set_stdout():
    # When file-output mode (FO) is enabled, redirect sys.stdout to a
    # per-process log file so concurrent workers do not interleave output.
    if FO:
        try:
            sys.stdout = pidfiles[PID()]
        except KeyError:
            # First call in this process: open /tmp/bibauthorid_log_pid_<pid>
            # and cache the handle for subsequent calls.
            pidfiles[PID()] = open('/tmp/bibauthorid_log_pid_'+str(PID()),'w')
            sys.stdout = pidfiles[PID()]
            print('REDIRECTING TO PIDFILE ')
# python2.4 compatibility layer: the any()/all() builtins only appeared in
# python 2.5, so provide pure-python fallbacks on older interpreters.
try:
    any([True])
except NameError:
    # Only a missing builtin should trigger the fallback; a bare 'except'
    # would also swallow unrelated errors such as KeyboardInterrupt.
    def any(x):
        """Return True if at least one element of x is truthy."""
        for element in x:
            if element:
                return True
        return False
bai_any = any
try:
    all([True])
except NameError:
    def all(x):
        """Return True if every element of x is truthy."""
        for element in x:
            if not element:
                return False
        return True
bai_all = all
#end of python2.4 compatibility. Please remove this horror as soon as all systems will have
#been ported to python2.6+
def __print_func(*args):
    # Debug printer: optionally timestamped, space-separated, flushed
    # immediately so interleaved process output stays readable.
    set_stdout()
    if PRINT_TS:
        print(datetime.now(), end=' ')
    for arg in args:
        print(arg, end=' ')
    print("")
    sys.stdout.flush()
def __dummy_print(*args):
    # No-op stand-in used when the corresponding debug flag is off.
    pass
def __create_conditional_print(cond):
    # Factory: returns the real printer when 'cond' is true, else the no-op.
    if cond:
        return __print_func
    else:
        return __dummy_print
# Per-subsystem debug printers, each enabled by its own bconfig flag.
bibauthor_print = __create_conditional_print(bconfig.DEBUG_OUTPUT)
name_comparison_print = __create_conditional_print(bconfig.DEBUG_NAME_COMPARISON_OUTPUT)
metadata_comparison_print = __create_conditional_print(bconfig.DEBUG_METADATA_COMPARISON_OUTPUT)
wedge_print = __create_conditional_print(bconfig.DEBUG_WEDGE_OUTPUT)
if bconfig.DEBUG_OUTPUT:
    # Width of the '#'/'-' progress bar and of the comment column.
    status_len = 18
    comment_len = 40
    def padd(stry, l):
        # Truncate or right-pad 'stry' to exactly 'l' characters.
        return stry[:l].ljust(l)
    def update_status(percent, comment="", print_ts=False):
        # Render a progress line '[####--...] 42.00% done <comment>';
        # TERMINATOR ('\r' or '\n') decides whether it overwrites in place.
        set_stdout()
        filled = max(0,int(floor(percent * status_len)))
        bar = "[%s%s] " % ("#" * filled, "-" * (status_len - filled))
        percent = ("%.2f%% done" % (percent * 100))
        progress = padd(bar + percent, status_len+2)
        comment = padd(comment, comment_len)
        if print_ts or PRINT_TS_US:
            print(datetime.now(), end=' ')
            print('pid:',PID(), end=' ')
        print(progress, comment, TERMINATOR, end=' ')
        sys.stdout.flush()
    def update_status_final(comment=""):
        # Print a terminal 100% line followed by a newline.
        set_stdout()
        update_status(1., comment, print_ts=PRINT_TS)
        print("")
        sys.stdout.flush()
else:
    # Debug output disabled: progress reporting becomes a no-op.
    # NOTE(review): this variant lacks the 'print_ts' keyword the debug
    # variant accepts -- callers passing it would fail; confirm none do.
    def update_status(percent, comment=""):
        pass
    def update_status_final(comment=""):
        pass
def print_tortoise_memory_log(summary, fp):
    """Write one tab-separated memory-log record for *summary* to *fp*.

    @param summary: dict with 'pid', 'peak1', 'peak2', 'est' and 'bibs' keys.
    @param fp: writable file-like object.
    """
    record = "PID:\t{0}\tPEAK:\t{1},{2}\tEST:\t{3}\tBIBS:\t{4}\n".format(
        summary['pid'], summary['peak1'], summary['peak2'],
        summary['est'], summary['bibs'])
    fp.write(record)
def parse_tortoise_memory_log(memfile_path):
    """Parse a memory log file written by print_tortoise_memory_log.

    @param memfile_path: path of the log file.
    @return: list of dicts with 'mem1', 'mem2', 'est' and 'bibs' keys,
        one per log line.
    """
    # 'with' guarantees the handle is closed even if a line fails to parse
    # (the original left the file open on error).
    with open(memfile_path) as f:
        lines = f.readlines()
    def line_2_dict(line):
        # Tab-separated fields: PID: <pid> PEAK: <m1>,<m2> EST: <est> BIBS: <bibs>
        fields = line.split('\t')
        peaks = fields[3].split(",")
        ret = { 'mem1' : int(peaks[0]),
                'mem2' : int(peaks[1]),
                'est' : float(fields[5]),
                'bibs' : int(fields[7])
              }
        return ret
    # Return a concrete list so the result is reusable under python 3,
    # where map() would yield a one-shot iterator.
    return [line_2_dict(line) for line in lines]
# Absolute tolerance used by is_eq.
eps = 1e-6
def is_eq(v1, v2):
    """Approximate float equality: true iff |v1 - v2| < eps (1e-6)."""
    return v1 + eps > v2 and v2 + eps > v1
# External merge sort over text files: files larger than the split size are
# chunked, each chunk sorted in memory, then the chunks are k-way merged.
class FileSort(object):
    # NOTE(review): with sort(reverse=True) on a file large enough to be
    # split, _mergeFiles still advances via min(keys), which appears to
    # produce ascending output -- confirm before relying on reverse sorts
    # of large files.
    def __init__(self, inFile, outFile=None, splitSize=20):
        """ split size (in MB) """
        self._inFile = inFile
        # Default to sorting in place (output overwrites the input file).
        if outFile is None:
            self._outFile = inFile
        else:
            self._outFile = outFile
        self._splitSize = splitSize * 1000000
        self.setKeyExtractMethod()
        self.reverse = False
    def setKeyExtractMethod(self, keyExtractMethod=None):
        """ key extract from line for sort method:
            def f(line):
                return line[1:3], line[5:10]
        """
        # Default key is the whole line (plain lexicographic sort).
        if keyExtractMethod is None:
            self._getKey = lambda line: line
        else:
            self._getKey = keyExtractMethod
    def sort(self, reverse=False):
        # Entry point: split if needed, sort each chunk, merge, clean up.
        self.reverse=reverse
        files = self._splitFile()
        if files is None:
            """ file size <= self._splitSize """
            self._sortFile(self._inFile, self._outFile)
            return
        for fn in files:
            self._sortFile(fn)
        self._mergeFiles(files)
        self._deleteFiles(files)
    def _sortFile(self, fileName, outFile=None):
        # In-memory sort of one file; empty lines are dropped.
        lines = open(fileName).readlines()
        get_key = self._getKey
        data = [(get_key(line), line) for line in lines if line!='']
        data.sort(reverse=self.reverse)
        lines = [line[1] for line in data]
        if outFile is not None:
            open(outFile, 'w').write(''.join(lines))
        else:
            open(fileName, 'w').write(''.join(lines))
    def _splitFile(self):
        # Split the input into numbered chunk files of ~_splitSize bytes.
        # Returns the list of chunk names, or None when no split is needed.
        totalSize = os.path.getsize(self._inFile)
        if totalSize <= self._splitSize:
            # do not split file, the file isn't so big.
            return None
        fileNames = []
        fn,e = os.path.splitext(self._inFile)
        f = open(self._inFile)
        try:
            i = size = 0
            lines = []
            for line in f:
                size += len(line)
                lines.append(line)
                if size >= self._splitSize:
                    i += 1
                    tmpFile = fn + '.%03d' % i
                    fileNames.append(tmpFile)
                    open(tmpFile,'w').write(''.join(lines))
                    del lines[:]
                    size = 0
            # Flush the final partial chunk, if any.
            if size > 0:
                tmpFile = fn + '.%03d' % (i+1)
                fileNames.append(tmpFile)
                open(tmpFile,'w').write(''.join(lines))
            return fileNames
        finally:
            f.close()
    def _mergeFiles(self, files):
        # K-way merge of the sorted chunk files into the output file.
        # One pending line (and its key) is held per open chunk; output is
        # buffered and flushed once it exceeds half the split size.
        files = [open(f) for f in files]
        lines = []
        keys = []
        for f in files:
            l = f.readline()
            lines.append(l)
            keys.append(self._getKey(l))
        buff = []
        buffSize = self._splitSize/2
        append = buff.append
        output = open(self._outFile,'w')
        try:
            key = min(keys)
            index = keys.index(key)
            get_key = self._getKey
            while 1:
                # Drain the current chunk while it still holds the minimum.
                while key == min(keys):
                    append(lines[index])
                    if len(buff) > buffSize:
                        output.write(''.join(buff))
                        del buff[:]
                    line = files[index].readline()
                    if not line:
                        # Chunk exhausted: close it and drop its slot.
                        files[index].close()
                        del files[index]
                        del keys[index]
                        del lines[index]
                        break
                    key = get_key(line)
                    keys[index] = key
                    lines[index] = line
                if len(files)==0:
                    break
                # key != min(keys), see for new index (file)
                key = min(keys)
                index = keys.index(key)
            if len(buff)>0:
                output.write(''.join(buff))
        finally:
            output.close()
    def _deleteFiles(self, files):
        # Remove the temporary chunk files created by _splitFile.
        for fn in files:
            os.remove(fn)
def sortFileInPlace(inFileName, outFileName=None, getKeyMethod=None, reverse=False):
    """Sort a text file line-by-line using FileSort.

    @param inFileName: file to sort.
    @param outFileName: destination file; defaults to sorting in place.
    @param getKeyMethod: optional callable mapping a line to its sort key.
    @param reverse: sort in descending order when True.
    """
    sorter = FileSort(inFileName, outFileName)
    if getKeyMethod is not None:
        sorter.setKeyExtractMethod(getKeyMethod)
    sorter.sort(reverse=reverse)
| gpl-2.0 |
manisandro/QGIS | python/plugins/MetaSearch/link_types.py | 55 | 1923 | # -*- coding: utf-8 -*-
###############################################################################
#
# MetaSearch Catalog Client
#
# Copyright (C) 2014 Tom Kralidis (tomkralidis@gmail.com)
#
# This source is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This code is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
###############################################################################
# Metadata link-type identifiers recognised as WMS/WMTS map services.
WMSWMST_LINK_TYPES = [
    'OGC:WMS',
    'OGC:WMTS',
    'OGC:WMS-1.1.1-http-get-map',
    'OGC:WMS-1.1.1-http-get-capabilities',
    'OGC:WMS-1.3.0-http-get-map',
    'OGC:WMS-1.3.0-http-get-capabilities',
    'urn:x-esri:specification:ServiceType:wms:url',
    'urn:x-esri:specification:ServiceType:Gmd:URL.wms'
]
# Link types for OGC Web Feature Services.
WFS_LINK_TYPES = [
    'OGC:WFS',
    'OGC:WFS-1.0.0-http-get-capabilities',
    'OGC:WFS-1.1.0-http-get-capabilities',
    'urn:x-esri:specification:ServiceType:wfs:url',
    'urn:x-esri:specification:ServiceType:Gmd:URL.wfs'
]
# Link types for OGC Web Coverage Services.
WCS_LINK_TYPES = [
    'OGC:WCS',
    'OGC:WCS-1.1.0-http-get-capabilities',
    'urn:x-esri:specification:ServiceType:wcs:url',
    'urn:x-esri:specification:ServiceType:Gmd:URL.wcs'
]
# Link types for Esri ArcGIS REST map services.
AMS_LINK_TYPES = [
    'ESRI:ArcGIS:MapServer',
    'Esri REST: Map Service'
]
# Link types for Esri ArcGIS REST feature services.
AFS_LINK_TYPES = [
    'ESRI:ArcGIS:FeatureServer',
    'Esri REST: Feature Service'
]
# Link types for direct GIS file downloads.
GIS_FILE_LINK_TYPES = [
    'FILE:GEO'
]
| gpl-2.0 |
loop1024/pymo-global | android/pgs4a-0.9.6/python-install/lib/python2.7/ctypes/test/test_memfunctions.py | 79 | 3251 | import sys
import unittest
from ctypes import *
class MemFunctionsTest(unittest.TestCase):
    """Tests for the ctypes memory helpers: memmove, memset, cast,
    string_at and wstring_at.

    NOTE: this module targets python 2 -- the expected values are native
    ``str`` objects, which ctypes returns for char pointers there.
    """
##    def test_overflow(self):
##        # string_at and wstring_at must use the Python calling
##        # convention (which acquires the GIL and checks the Python
##        # error flag).  Provoke an error and catch it; see also issue
##        # #3554: <http://bugs.python.org/issue3554>
##        self.assertRaises((OverflowError, MemoryError, SystemError),
##                          lambda: wstring_at(u"foo", sys.maxint - 1))
##        self.assertRaises((OverflowError, MemoryError, SystemError),
##                          lambda: string_at("foo", sys.maxint - 1))
    def test_memmove(self):
        """memmove copies bytes and returns the destination address."""
        # large buffers apparently increase the chance that the memory
        # is allocated in high address space.
        a = create_string_buffer(1000000)
        p = "Hello, World"
        result = memmove(a, p, len(p))
        self.assertEqual(a.value, "Hello, World")
        self.assertEqual(string_at(result), "Hello, World")
        self.assertEqual(string_at(result, 5), "Hello")
        self.assertEqual(string_at(result, 16), "Hello, World\0\0\0\0")
        self.assertEqual(string_at(result, 0), "")
    def test_memset(self):
        """memset fills bytes and returns the destination address."""
        a = create_string_buffer(1000000)
        result = memset(a, ord('x'), 16)
        self.assertEqual(a.value, "xxxxxxxxxxxxxxxx")
        self.assertEqual(string_at(result), "xxxxxxxxxxxxxxxx")
        self.assertEqual(string_at(a), "xxxxxxxxxxxxxxxx")
        self.assertEqual(string_at(a, 20), "xxxxxxxxxxxxxxxx\0\0\0\0")
    def test_cast(self):
        """cast reinterprets a byte array as char/byte pointers, including
        slicing with start/stop/step."""
        a = (c_ubyte * 32)(*map(ord, "abcdef"))
        self.assertEqual(cast(a, c_char_p).value, "abcdef")
        self.assertEqual(cast(a, POINTER(c_byte))[:7],
                             [97, 98, 99, 100, 101, 102, 0])
        self.assertEqual(cast(a, POINTER(c_byte))[:7:],
                             [97, 98, 99, 100, 101, 102, 0])
        self.assertEqual(cast(a, POINTER(c_byte))[6:-1:-1],
                             [0, 102, 101, 100, 99, 98, 97])
        self.assertEqual(cast(a, POINTER(c_byte))[:7:2],
                             [97, 99, 101, 0])
        self.assertEqual(cast(a, POINTER(c_byte))[:7:7],
                             [97])
    def test_string_at(self):
        """string_at reads a C string, optionally with an explicit size."""
        s = string_at("foo bar")
        # XXX The following may be wrong, depending on how Python
        # manages string instances
        self.assertEqual(2, sys.getrefcount(s))
        # The original assertTrue(s, "foo bar") passed the expected value
        # as the failure *message*, so it only checked truthiness; compare
        # the actual contents instead.
        self.assertEqual(s, "foo bar")
        self.assertEqual(string_at("foo bar", 8), "foo bar\0")
        self.assertEqual(string_at("foo bar", 3), "foo")
    try:
        create_unicode_buffer
    except NameError:
        pass
    else:
        def test_wstring_at(self):
            """wstring_at mirrors string_at for wide-character strings."""
            p = create_unicode_buffer("Hello, World")
            a = create_unicode_buffer(1000000)
            result = memmove(a, p, len(p) * sizeof(c_wchar))
            self.assertEqual(a.value, "Hello, World")
            self.assertEqual(wstring_at(a), "Hello, World")
            self.assertEqual(wstring_at(a, 5), "Hello")
            self.assertEqual(wstring_at(a, 16), "Hello, World\0\0\0\0")
            self.assertEqual(wstring_at(a, 0), "")
if __name__ == "__main__":
    unittest.main()
| mit |
dparnell/rethinkdb | test/rql_test/connections/http_support/werkzeug/wrappers.py | 146 | 76379 | # -*- coding: utf-8 -*-
"""
werkzeug.wrappers
~~~~~~~~~~~~~~~~~
The wrappers are simple request and response objects which you can
subclass to do whatever you want them to do. The request object contains
the information transmitted by the client (webbrowser) and the response
object contains all the information sent back to the browser.
An important detail is that the request object is created with the WSGI
environ and will act as high-level proxy whereas the response object is an
actual WSGI application.
Like everything else in Werkzeug these objects will work correctly with
unicode data. Incoming form data parsed by the response object will be
decoded into an unicode object if possible and if it makes sense.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from datetime import datetime, timedelta
from werkzeug.http import HTTP_STATUS_CODES, \
parse_accept_header, parse_cache_control_header, parse_etags, \
parse_date, generate_etag, is_resource_modified, unquote_etag, \
quote_etag, parse_set_header, parse_authorization_header, \
parse_www_authenticate_header, remove_entity_headers, \
parse_options_header, dump_options_header, http_date, \
parse_if_range_header, parse_cookie, dump_cookie, \
parse_range_header, parse_content_range_header, dump_header
from werkzeug.urls import url_decode, iri_to_uri, url_join
from werkzeug.formparser import FormDataParser, default_stream_factory
from werkzeug.utils import cached_property, environ_property, \
header_property, get_content_type
from werkzeug.wsgi import get_current_url, get_host, \
ClosingIterator, get_input_stream, get_content_length
from werkzeug.datastructures import MultiDict, CombinedMultiDict, Headers, \
EnvironHeaders, ImmutableMultiDict, ImmutableTypeConversionDict, \
ImmutableList, MIMEAccept, CharsetAccept, LanguageAccept, \
ResponseCacheControl, RequestCacheControl, CallbackDict, \
ContentRange, iter_multi_items
from werkzeug._internal import _get_environ
from werkzeug._compat import to_bytes, string_types, text_type, \
integer_types, wsgi_decoding_dance, wsgi_get_bytes, \
to_unicode, to_native, BytesIO
def _run_wsgi_app(*args):
    """This function replaces itself to ensure that the test module is not
    imported unless required.  DO NOT USE!
    """
    # Lazy-import shim: on first call, rebind the module-level name to the
    # real implementation from werkzeug.test, then delegate.  Later calls
    # hit the real function directly and skip this wrapper.
    global _run_wsgi_app
    from werkzeug.test import run_wsgi_app as _run_wsgi_app
    return _run_wsgi_app(*args)
def _warn_if_string(iterable):
    """Helper for the response objects to check if the iterable returned
    to the WSGI server is not a string.
    """
    if not isinstance(iterable, string_types):
        return
    from warnings import warn
    warn(Warning('response iterable was set to a string. This appears '
                 'to work but means that the server will send the '
                 'data to the client char, by char. This is almost '
                 'never intended behavior, use response.data to assign '
                 'strings to the response object.'), stacklevel=2)
def _assert_not_shallow(request):
    """Raise a RuntimeError when a shallow request is about to consume
    form data (shallow requests must opt in explicitly).
    """
    if not request.shallow:
        return
    raise RuntimeError('A shallow request tried to consume '
                       'form data. If you really want to do '
                       'that, set `shallow` to False.')
def _iter_encoded(iterable, charset):
    """Yield the items of *iterable* as bytes, encoding text items with
    *charset* and passing byte items through unchanged.
    """
    for item in iterable:
        yield item.encode(charset) if isinstance(item, text_type) else item
class BaseRequest(object):
"""Very basic request object. This does not implement advanced stuff like
entity tag parsing or cache controls. The request object is created with
the WSGI environment as first argument and will add itself to the WSGI
environment as ``'werkzeug.request'`` unless it's created with
`populate_request` set to False.
There are a couple of mixins available that add additional functionality
to the request object, there is also a class called `Request` which
subclasses `BaseRequest` and all the important mixins.
It's a good idea to create a custom subclass of the :class:`BaseRequest`
and add missing functionality either via mixins or direct implementation.
Here an example for such subclasses::
from werkzeug.wrappers import BaseRequest, ETagRequestMixin
class Request(BaseRequest, ETagRequestMixin):
pass
Request objects are **read only**. As of 0.5 modifications are not
allowed in any place. Unlike the lower level parsing functions the
request object will use immutable objects everywhere possible.
Per default the request object will assume all the text data is `utf-8`
encoded. Please refer to `the unicode chapter <unicode.txt>`_ for more
details about customizing the behavior.
Per default the request object will be added to the WSGI
environment as `werkzeug.request` to support the debugging system.
If you don't want that, set `populate_request` to `False`.
If `shallow` is `True` the environment is initialized as shallow
object around the environ. Every operation that would modify the
environ in any way (such as consuming form data) raises an exception
unless the `shallow` attribute is explicitly set to `False`. This
is useful for middlewares where you don't want to consume the form
data by accident. A shallow request is not populated to the WSGI
environment.
.. versionchanged:: 0.5
read-only mode was enforced by using immutables classes for all
data.
"""
#: the charset for the request, defaults to utf-8
charset = 'utf-8'
#: the error handling procedure for errors, defaults to 'replace'
encoding_errors = 'replace'
#: the maximum content length. This is forwarded to the form data
#: parsing function (:func:`parse_form_data`). When set and the
#: :attr:`form` or :attr:`files` attribute is accessed and the
#: parsing fails because more than the specified value is transmitted
#: a :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
#:
#: Have a look at :ref:`dealing-with-request-data` for more details.
#:
#: .. versionadded:: 0.5
max_content_length = None
#: the maximum form field size. This is forwarded to the form data
#: parsing function (:func:`parse_form_data`). When set and the
#: :attr:`form` or :attr:`files` attribute is accessed and the
#: data in memory for post data is longer than the specified value a
#: :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
#:
#: Have a look at :ref:`dealing-with-request-data` for more details.
#:
#: .. versionadded:: 0.5
max_form_memory_size = None
#: the class to use for `args` and `form`. The default is an
#: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports
#: multiple values per key. alternatively it makes sense to use an
#: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which
#: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict`
#: which is the fastest but only remembers the last key. It is also
#: possible to use mutable structures, but this is not recommended.
#:
#: .. versionadded:: 0.6
parameter_storage_class = ImmutableMultiDict
#: the type to be used for list values from the incoming WSGI environment.
#: By default an :class:`~werkzeug.datastructures.ImmutableList` is used
#: (for example for :attr:`access_list`).
#:
#: .. versionadded:: 0.6
list_storage_class = ImmutableList
#: the type to be used for dict values from the incoming WSGI environment.
#: By default an
#: :class:`~werkzeug.datastructures.ImmutableTypeConversionDict` is used
#: (for example for :attr:`cookies`).
#:
#: .. versionadded:: 0.6
dict_storage_class = ImmutableTypeConversionDict
#: The form data parser that shoud be used. Can be replaced to customize
#: the form date parsing.
form_data_parser_class = FormDataParser
#: Optionally a list of hosts that is trusted by this request. By default
#: all hosts are trusted which means that whatever the client sends the
#: host is will be accepted. This is the recommended setup as a webserver
#: should manually be set up to not route invalid hosts to the application.
#:
#: .. versionadded:: 0.9
trusted_hosts = None
#: Indicates weather the data descriptor should be allowed to read and
#: buffer up the input stream. By default it's enabled.
#:
#: .. versionadded:: 0.9
disable_data_descriptor = False
    def __init__(self, environ, populate_request=True, shallow=False):
        """Initialize the request with a WSGI environment.

        :param environ: the WSGI environment to wrap.
        :param populate_request: if true (and not shallow) a back-reference
                                 to this request is stored in the environ
                                 under ``'werkzeug.request'``.
        :param shallow: if true the request is marked shallow; shallow
                        requests must not consume the input stream.
        """
        self.environ = environ
        if populate_request and not shallow:
            self.environ['werkzeug.request'] = self
        self.shallow = shallow
    def __repr__(self):
        # make sure the __repr__ even works if the request was created
        # from an invalid WSGI environment. If we display the request
        # in a debug session we don't want the repr to blow up.
        args = []
        try:
            args.append("'%s'" % self.url)
            args.append('[%s]' % self.method)
        except Exception:
            # URL reconstruction failed for a bogus environ; degrade
            # gracefully instead of raising inside the repr
            args.append('(invalid WSGI environ)')
        return '<%s %s>' % (
            self.__class__.__name__,
            ' '.join(args)
        )
    @property
    def url_charset(self):
        """The charset that is assumed for URLs. Defaults to the value
        of :attr:`charset`.
        .. versionadded:: 0.6
        """
        # subclasses may override this to decode URLs with a charset
        # different from the body charset
        return self.charset
    @classmethod
    def from_values(cls, *args, **kwargs):
        """Create a new request object based on the values provided. If
        environ is given missing values are filled from there. This method is
        useful for small scripts when you need to simulate a request from a URL.
        Do not use this method for unittesting, there is a full featured client
        object (:class:`Client`) that allows to create multipart requests,
        support for cookies etc.
        This accepts the same options as the
        :class:`~werkzeug.test.EnvironBuilder`.
        .. versionchanged:: 0.5
           This method now accepts the same arguments as
           :class:`~werkzeug.test.EnvironBuilder`. Because of this the
           `environ` parameter is now called `environ_overrides`.
        :return: request object
        """
        from werkzeug.test import EnvironBuilder
        # default the builder's charset to the charset of this request class
        charset = kwargs.pop('charset', cls.charset)
        kwargs['charset'] = charset
        builder = EnvironBuilder(*args, **kwargs)
        try:
            return builder.get_request(cls)
        finally:
            # always release the builder's resources, even if building the
            # request raised
            builder.close()
    @classmethod
    def application(cls, f):
        """Decorate a function as responder that accepts the request as first
        argument. This works like the :func:`responder` decorator but the
        function is passed the request object as first argument and the
        request object will be closed automatically::
            @Request.application
            def my_wsgi_app(request):
                return Response('Hello World!')
        :param f: the WSGI callable to decorate
        :return: a new WSGI callable
        """
        #: return a callable that wraps the -2nd argument with the request
        #: and calls the function with all the arguments up to that one and
        #: the request. The return value is then called with the latest
        #: two arguments. This makes it possible to use this decorator for
        #: both methods and standalone WSGI functions.
        def application(*args):
            # args[-2:] are (environ, start_response); any leading args
            # (e.g. ``self`` for methods) are passed through untouched
            request = cls(args[-2])
            with request:
                return f(*args[:-2] + (request,))(*args[-2:])
        return update_wrapper(application, f)
    def _get_file_stream(self, total_content_length, content_type, filename=None,
                         content_length=None):
        """Called to get a stream for the file upload.
        This must provide a file-like class with `read()`, `readline()`
        and `seek()` methods that is both writeable and readable.
        The default implementation returns a temporary file if the total
        content length is higher than 500KB. Because many browsers do not
        provide a content length for the files only the total content
        length matters.
        :param total_content_length: the total content length of all the
                                     data in the request combined. This value
                                     is guaranteed to be there.
        :param content_type: the mimetype of the uploaded file.
        :param filename: the filename of the uploaded file. May be `None`.
        :param content_length: the length of this file. This value is usually
                               not provided because webbrowsers do not provide
                               this value.
        """
        # delegate the spooling decision (in-memory vs. temp file) to the
        # shared default factory
        return default_stream_factory(total_content_length, content_type,
                                      filename, content_length)
    @property
    def want_form_data_parsed(self):
        """Returns True if the request method carries content. As of
        Werkzeug 0.9 this will be the case if a content type is transmitted.
        .. versionadded:: 0.8
        """
        # presence of a Content-Type header is the indicator that there is
        # a request body worth parsing
        return bool(self.environ.get('CONTENT_TYPE'))
def make_form_data_parser(self):
"""Creates the form data parser. Instanciates the
:attr:`form_data_parser_class` with some parameters.
.. versionadded:: 0.8
"""
return self.form_data_parser_class(self._get_file_stream,
self.charset,
self.encoding_errors,
self.max_form_memory_size,
self.max_content_length,
self.parameter_storage_class)
    def _load_form_data(self):
        """Method used internally to retrieve submitted data. After calling
        this sets `form` and `files` on the request object to multi dicts
        filled with the incoming form data. As a matter of fact the input
        stream will be empty afterwards. You can also call this method to
        force the parsing of the form data.
        .. versionadded:: 0.8
        """
        # abort early if we have already consumed the stream
        if 'form' in self.__dict__:
            return
        _assert_not_shallow(self)
        if self.want_form_data_parsed:
            content_type = self.environ.get('CONTENT_TYPE', '')
            content_length = get_content_length(self.environ)
            mimetype, options = parse_options_header(content_type)
            parser = self.make_form_data_parser()
            data = parser.parse(self._get_stream_for_parsing(),
                                mimetype, content_length, options)
        else:
            # no content type transmitted: leave the stream untouched and
            # expose empty containers for form and files
            data = (self.stream, self.parameter_storage_class(),
                    self.parameter_storage_class())
        # inject the values into the instance dict so that we bypass
        # our cached_property non-data descriptor.
        d = self.__dict__
        d['stream'], d['form'], d['files'] = data
def _get_stream_for_parsing(self):
"""This is the same as accessing :attr:`stream` with the difference
that if it finds cached data from calling :meth:`get_data` first it
will create a new stream out of the cached data.
.. versionadded:: 0.9.3
"""
cached_data = getattr(self, '_cached_data', None)
if cached_data is not None:
return BytesIO(cached_data)
return self.stream
    def close(self):
        """Closes associated resources of this request object. This
        closes all file handles explicitly. You can also use the request
        object in a with statement which will automatically close it.
        .. versionadded:: 0.9
        """
        files = self.__dict__.get('files')
        # only uploaded files hold OS resources that need explicit closing
        for key, value in iter_multi_items(files or ()):
            value.close()
    def __enter__(self):
        # context manager protocol: entering yields the request itself
        return self
    def __exit__(self, exc_type, exc_value, tb):
        # always release uploaded-file handles when leaving the block
        self.close()
    @cached_property
    def stream(self):
        """The stream to read incoming data from. Unlike :attr:`input_stream`
        this stream is properly guarded that you can't accidentally read past
        the length of the input. Werkzeug will internally always refer to
        this stream to read data which makes it possible to wrap this
        object with a stream that does filtering.
        .. versionchanged:: 0.9
           This stream is now always available but might be consumed by the
           form parser later on. Previously the stream was only set if no
           parsing happened.
        """
        # shallow requests must not consume the input stream at all
        _assert_not_shallow(self)
        return get_input_stream(self.environ)
    # unguarded raw WSGI input stream; the string below is its runtime doc
    input_stream = environ_property('wsgi.input', 'The WSGI input stream.\n'
        'In general it\'s a bad idea to use this one because you can easily '
        'read past the boundary. Use the :attr:`stream` instead.')
    @cached_property
    def args(self):
        """The parsed URL parameters. By default an
        :class:`~werkzeug.datastructures.ImmutableMultiDict`
        is returned from this function. This can be changed by setting
        :attr:`parameter_storage_class` to a different type. This might
        be necessary if the order of the form data is important.
        """
        # the query string is taken as native WSGI bytes and decoded with
        # the URL charset before parsing
        return url_decode(wsgi_get_bytes(self.environ.get('QUERY_STRING', '')),
                          self.url_charset, errors=self.encoding_errors,
                          cls=self.parameter_storage_class)
    @cached_property
    def data(self):
        """The raw request body as bytes.  Equivalent to calling
        :meth:`get_data` with form data parsing enabled; new code should
        prefer :meth:`get_data` directly.
        """
        if self.disable_data_descriptor:
            raise AttributeError('data descriptor is disabled')
        # XXX: this should eventually be deprecated.
        # We trigger form data parsing first which means that the descriptor
        # will not cache the data that would otherwise be .form or .files
        # data. This restores the behavior that was there in Werkzeug
        # before 0.9. New code should use :meth:`get_data` explicitly as
        # this will make behavior explicit.
        return self.get_data(parse_form_data=True)
def get_data(self, cache=True, as_text=False, parse_form_data=False):
"""This reads the buffered incoming data from the client into one
bytestring. By default this is cached but that behavior can be
changed by setting `cache` to `False`.
Usually it's a bad idea to call this method without checking the
content length first as a client could send dozens of megabytes or more
to cause memory problems on the server.
Note that if the form data was already parsed this method will not
return anything as form data parsing does not cache the data like
this method does. To implicitly invoke form data parsing function
set `parse_form_data` to `True`. When this is done the return value
of this method will be an empty string if the form parser handles
the data. This generally is not necessary as if the whole data is
cached (which is the default) the form parser will used the cached
data to parse the form data. Please be generally aware of checking
the content length first in any case before calling this method
to avoid exhausting server memory.
If `as_text` is set to `True` the return value will be a decoded
unicode string.
.. versionadded:: 0.9
"""
rv = getattr(self, '_cached_data', None)
if rv is None:
if parse_form_data:
self._load_form_data()
rv = self.stream.read()
if cache:
self._cached_data = rv
if as_text:
rv = rv.decode(self.charset, self.encoding_errors)
return rv
    @cached_property
    def form(self):
        """The form parameters. By default an
        :class:`~werkzeug.datastructures.ImmutableMultiDict`
        is returned from this function. This can be changed by setting
        :attr:`parameter_storage_class` to a different type. This might
        be necessary if the order of the form data is important.
        """
        self._load_form_data()
        # _load_form_data stored the parsed form in the instance dict,
        # shadowing this cached_property, so this re-access hits the cache
        return self.form
    @cached_property
    def values(self):
        """Combined multi dict for :attr:`args` and :attr:`form`."""
        args = []
        for d in self.args, self.form:
            if not isinstance(d, MultiDict):
                # CombinedMultiDict expects MultiDict instances
                d = MultiDict(d)
            args.append(d)
        return CombinedMultiDict(args)
    @cached_property
    def files(self):
        """:class:`~werkzeug.datastructures.MultiDict` object containing
        all uploaded files. Each key in :attr:`files` is the name from the
        ``<input type="file" name="">``. Each value in :attr:`files` is a
        Werkzeug :class:`~werkzeug.datastructures.FileStorage` object.
        Note that :attr:`files` will only contain data if the request method was
        POST, PUT or PATCH and the ``<form>`` that posted to the request had
        ``enctype="multipart/form-data"``. It will be empty otherwise.
        See the :class:`~werkzeug.datastructures.MultiDict` /
        :class:`~werkzeug.datastructures.FileStorage` documentation for
        more details about the used data structure.
        """
        self._load_form_data()
        # the parser stored the real value in the instance dict, so this
        # second attribute access does not recurse
        return self.files
    @cached_property
    def cookies(self):
        """Read only access to the retrieved cookie values as dictionary."""
        return parse_cookie(self.environ, self.charset,
                            self.encoding_errors,
                            cls=self.dict_storage_class)
    @cached_property
    def headers(self):
        """The headers from the WSGI environ as immutable
        :class:`~werkzeug.datastructures.EnvironHeaders`.
        """
        return EnvironHeaders(self.environ)
    @cached_property
    def path(self):
        """Requested path as unicode. This works a bit like the regular path
        info in the WSGI environment but will always include a leading slash,
        even if the URL root is accessed.
        """
        raw_path = wsgi_decoding_dance(self.environ.get('PATH_INFO') or '',
                                       self.charset, self.encoding_errors)
        # normalize so the path carries exactly one leading slash
        return '/' + raw_path.lstrip('/')
    @cached_property
    def full_path(self):
        """Requested path as unicode, including the query string."""
        return self.path + u'?' + to_unicode(self.query_string, self.url_charset)
    @cached_property
    def script_root(self):
        """The root path of the script without the trailing slash."""
        raw_path = wsgi_decoding_dance(self.environ.get('SCRIPT_NAME') or '',
                                       self.charset, self.encoding_errors)
        return raw_path.rstrip('/')
    # the properties below all reconstruct (parts of) the current URL from
    # the WSGI environ and honor the ``trusted_hosts`` setting
    @cached_property
    def url(self):
        """The reconstructed current URL as IRI."""
        return get_current_url(self.environ,
                               trusted_hosts=self.trusted_hosts)
    @cached_property
    def base_url(self):
        """Like :attr:`url` but without the querystring"""
        return get_current_url(self.environ, strip_querystring=True,
                               trusted_hosts=self.trusted_hosts)
    @cached_property
    def url_root(self):
        """The full URL root (with hostname), this is the application
        root as IRI.
        """
        return get_current_url(self.environ, True,
                               trusted_hosts=self.trusted_hosts)
    @cached_property
    def host_url(self):
        """Just the host with scheme as IRI."""
        return get_current_url(self.environ, host_only=True,
                               trusted_hosts=self.trusted_hosts)
    @cached_property
    def host(self):
        """Just the host including the port if available."""
        return get_host(self.environ, trusted_hosts=self.trusted_hosts)
    # raw query string and request method, exposed straight from the WSGI
    # environ (the ``doc=`` strings below are their runtime documentation)
    query_string = environ_property('QUERY_STRING', '', read_only=True,
        load_func=wsgi_get_bytes, doc=
        '''The URL parameters as raw bytestring.''')
    method = environ_property('REQUEST_METHOD', 'GET', read_only=True, doc=
        '''The transmission method. (For example ``'GET'`` or ``'POST'``).''')
    @cached_property
    def access_route(self):
        """If a forwarded header exists this is a list of all ip addresses
        from the client ip to the last proxy server.
        """
        if 'HTTP_X_FORWARDED_FOR' in self.environ:
            addr = self.environ['HTTP_X_FORWARDED_FOR'].split(',')
            return self.list_storage_class([x.strip() for x in addr])
        elif 'REMOTE_ADDR' in self.environ:
            return self.list_storage_class([self.environ['REMOTE_ADDR']])
        # no address information available at all
        return self.list_storage_class()
    @property
    def remote_addr(self):
        """The remote address of the client."""
        return self.environ.get('REMOTE_ADDR')
remote_user = environ_property('REMOTE_USER', doc='''
If the server supports user authentication, and the script is
protected, this attribute contains the username the user has
authenticated as.''')
scheme = environ_property('wsgi.url_scheme', doc='''
URL scheme (http or https).
.. versionadded:: 0.7''')
is_xhr = property(lambda x: x.environ.get('HTTP_X_REQUESTED_WITH', '')
.lower() == 'xmlhttprequest', doc='''
True if the request was triggered via a JavaScript XMLHttpRequest.
This only works with libraries that support the `X-Requested-With`
header and set it to "XMLHttpRequest". Libraries that do that are
prototype, jQuery and Mochikit and probably some more.''')
is_secure = property(lambda x: x.environ['wsgi.url_scheme'] == 'https',
doc='`True` if the request is secure.')
is_multithread = environ_property('wsgi.multithread', doc='''
boolean that is `True` if the application is served by
a multithreaded WSGI server.''')
is_multiprocess = environ_property('wsgi.multiprocess', doc='''
boolean that is `True` if the application is served by
a WSGI server that spawns multiple processes.''')
is_run_once = environ_property('wsgi.run_once', doc='''
boolean that is `True` if the application will be executed only
once in a process lifetime. This is the case for CGI for example,
but it's not guaranteed that the exeuction only happens one time.''')
class BaseResponse(object):
    """Base response class. The most important fact about a response object
    is that it's a regular WSGI application. It's initialized with a couple
    of response parameters (headers, body, status code etc.) and will start a
    valid WSGI response when called with the environ and start response
    callable.
    Because it's a WSGI application itself processing usually ends before the
    actual response is sent to the server. This helps debugging systems
    because they can catch all the exceptions before responses are started.
    Here a small example WSGI application that takes advantage of the
    response objects::
        from werkzeug.wrappers import BaseResponse as Response
        def index():
            return Response('Index page')
        def application(environ, start_response):
            path = environ.get('PATH_INFO') or '/'
            if path == '/':
                response = index()
            else:
                response = Response('Not Found', status=404)
            return response(environ, start_response)
    Like :class:`BaseRequest`, this object is lacking a lot of functionality
    implemented in mixins. This gives you a better control about the actual
    API of your response objects, so you can create subclasses and add custom
    functionality. A full featured response object is available as
    :class:`Response` which implements a couple of useful mixins.
    To enforce a new type of already existing responses you can use the
    :meth:`force_type` method. This is useful if you're working with different
    subclasses of response objects and you want to post process them with a
    known interface.
    Per default the request object will assume all the text data is `utf-8`
    encoded. Please refer to `the unicode chapter <unicode.txt>`_ for more
    details about customizing the behavior.
    Response can be any kind of iterable or string. If it's a string it's
    considered being an iterable with one item which is the string passed.
    Headers can be a list of tuples or a
    :class:`~werkzeug.datastructures.Headers` object.
    Special note for `mimetype` and `content_type`: For most mime types
    `mimetype` and `content_type` work the same, the difference affects
    only 'text' mimetypes. If the mimetype passed with `mimetype` is a
    mimetype starting with `text/`, the charset parameter of the response
    object is appended to it. In contrast the `content_type` parameter is
    always added as header unmodified.
    .. versionchanged:: 0.5
       the `direct_passthrough` parameter was added.
    :param response: a string or response iterable.
    :param status: a string with a status or an integer with the status code.
    :param headers: a list of headers or a
                    :class:`~werkzeug.datastructures.Headers` object.
    :param mimetype: the mimetype for the request. See notice above.
    :param content_type: the content type for the request. See notice above.
    :param direct_passthrough: if set to `True` :meth:`iter_encoded` is not
                               called before iteration which makes it
                               possible to pass special iterators though
                               unchanged (see :func:`wrap_file` for more
                               details.)
    """
    #: the charset of the response.
    charset = 'utf-8'
    #: the default status if none is provided.
    default_status = 200
    #: the default mimetype if none is provided.
    default_mimetype = 'text/plain'
    #: if set to `False` accessing properties on the response object will
    #: not try to consume the response iterator and convert it into a list.
    #:
    #: .. versionadded:: 0.6.2
    #:
    #: That attribute was previously called `implicit_seqence_conversion`.
    #: (Notice the typo). If you did use this feature, you have to adapt
    #: your code to the name change.
    implicit_sequence_conversion = True
    #: Should this response object correct the location header to be RFC
    #: conformant? This is true by default.
    #:
    #: .. versionadded:: 0.8
    autocorrect_location_header = True
    #: Should this response object automatically set the content-length
    #: header if possible? This is true by default.
    #:
    #: .. versionadded:: 0.8
    automatically_set_content_length = True
    def __init__(self, response=None, status=None, headers=None,
                 mimetype=None, content_type=None, direct_passthrough=False):
        """Initialize the response; see the class docstring for the meaning
        of the parameters.
        """
        if isinstance(headers, Headers):
            self.headers = headers
        elif not headers:
            self.headers = Headers()
        else:
            self.headers = Headers(headers)
        if content_type is None:
            # derive the content type from the mimetype (or the default
            # mimetype) unless the headers already carry one
            if mimetype is None and 'content-type' not in self.headers:
                mimetype = self.default_mimetype
            if mimetype is not None:
                # appends the charset parameter for text/* mimetypes
                mimetype = get_content_type(mimetype, self.charset)
            content_type = mimetype
        if content_type is not None:
            self.headers['Content-Type'] = content_type
        if status is None:
            status = self.default_status
        if isinstance(status, integer_types):
            self.status_code = status
        else:
            self.status = status
        self.direct_passthrough = direct_passthrough
        self._on_close = []
        # we set the response after the headers so that if a class changes
        # the charset attribute, the data is set in the correct charset.
        if response is None:
            self.response = []
        elif isinstance(response, (text_type, bytes, bytearray)):
            self.set_data(response)
        else:
            self.response = response
def call_on_close(self, func):
"""Adds a function to the internal list of functions that should
be called as part of closing down the response. Since 0.7 this
function also returns the function that was passed so that this
can be used as a decorator.
.. versionadded:: 0.6
"""
self._on_close.append(func)
return func
    def __repr__(self):
        """Debug representation including a hint about the body size."""
        # buffered responses can report an exact byte count; for others we
        # only know (or guess) that they stream
        if self.is_sequence:
            body_info = '%d bytes' % sum(map(len, self.iter_encoded()))
        else:
            body_info = self.is_streamed and 'streamed' or 'likely-streamed'
        return '<%s %s [%s]>' % (
            self.__class__.__name__,
            body_info,
            self.status
        )
    @classmethod
    def force_type(cls, response, environ=None):
        """Enforce that the WSGI response is a response object of the current
        type. Werkzeug will use the :class:`BaseResponse` internally in many
        situations like the exceptions. If you call :meth:`get_response` on an
        exception you will get back a regular :class:`BaseResponse` object, even
        if you are using a custom subclass.
        This method can enforce a given response type, and it will also
        convert arbitrary WSGI callables into response objects if an environ
        is provided::
            # convert a Werkzeug response object into an instance of the
            # MyResponseClass subclass.
            response = MyResponseClass.force_type(response)
            # convert any WSGI application into a response object
            response = MyResponseClass.force_type(response, environ)
        This is especially useful if you want to post-process responses in
        the main dispatcher and use functionality provided by your subclass.
        Keep in mind that this will modify response objects in place if
        possible!
        :param response: a response object or wsgi application.
        :param environ: a WSGI environment object.
        :return: a response object.
        """
        if not isinstance(response, BaseResponse):
            if environ is None:
                raise TypeError('cannot convert WSGI application into '
                                'response objects without an environ')
            # run the WSGI app and build a base response from its output
            response = BaseResponse(*_run_wsgi_app(response, environ))
        # in-place class switch: cheap, but mutates the passed object
        response.__class__ = cls
        return response
    @classmethod
    def from_app(cls, app, environ, buffered=False):
        """Create a new response object from an application output. This
        works best if you pass it an application that returns a generator all
        the time. Sometimes applications may use the `write()` callable
        returned by the `start_response` function. This tries to resolve such
        edge cases automatically. But if you don't get the expected output
        you should set `buffered` to `True` which enforces buffering.
        :param app: the WSGI application to execute.
        :param environ: the WSGI environment to execute against.
        :param buffered: set to `True` to enforce buffering.
        :return: a response object.
        """
        # _run_wsgi_app returns (app_iter, status, headers), matching the
        # constructor's positional signature
        return cls(*_run_wsgi_app(app, environ, buffered))
    def _get_status_code(self):
        return self._status_code
    def _set_status_code(self, code):
        # keep the numeric code and the status string in sync
        self._status_code = code
        try:
            self._status = '%d %s' % (code, HTTP_STATUS_CODES[code].upper())
        except KeyError:
            # unknown status code: still produce a valid status line
            self._status = '%d UNKNOWN' % code
    status_code = property(_get_status_code, _set_status_code,
                           doc='The HTTP Status code as number')
    del _get_status_code, _set_status_code
    def _get_status(self):
        return self._status
    def _set_status(self, value):
        self._status = to_native(value)
        try:
            self._status_code = int(self._status.split(None, 1)[0])
        except ValueError:
            # non-numeric status line: fall back to code 0 and prefix it
            self._status_code = 0
            self._status = '0 %s' % self._status
    status = property(_get_status, _set_status, doc='The HTTP Status code')
    del _get_status, _set_status
    def get_data(self, as_text=False):
        """The string representation of the response body. Whenever you call
        this method the response iterable is encoded and flattened. This
        can lead to unwanted behavior if you stream big data.
        This behavior can be disabled by setting
        :attr:`implicit_sequence_conversion` to `False`.
        If `as_text` is set to `True` the return value will be a decoded
        unicode string.
        .. versionadded:: 0.9
        """
        self._ensure_sequence()
        rv = b''.join(self.iter_encoded())
        if as_text:
            rv = rv.decode(self.charset)
        return rv
    def set_data(self, value):
        """Sets a new string as response. The value set must either be a
        unicode or bytestring. If a unicode string is set it's encoded
        automatically to the charset of the response (utf-8 by default).
        .. versionadded:: 0.9
        """
        # if a unicode string is set, it's encoded directly so that we
        # can set the content length
        if isinstance(value, text_type):
            value = value.encode(self.charset)
        else:
            value = bytes(value)
        self.response = [value]
        if self.automatically_set_content_length:
            self.headers['Content-Length'] = str(len(value))
    # read/write descriptor that forwards to get_data/set_data; the literal
    # below is the property's runtime docstring
    data = property(get_data, set_data, doc='''
        A descriptor that calls :meth:`get_data` and :meth:`set_data`. This
        should not be used and will eventually get deprecated.
        ''')
def calculate_content_length(self):
"""Returns the content length if available or `None` otherwise."""
try:
self._ensure_sequence()
except RuntimeError:
return None
return sum(len(x) for x in self.response)
    def _ensure_sequence(self, mutable=False):
        """This method can be called by methods that need a sequence. If
        `mutable` is true, it will also ensure that the response sequence
        is a standard Python list.
        .. versionadded:: 0.6
        """
        if self.is_sequence:
            # if we need a mutable object, we ensure it's a list.
            if mutable and not isinstance(self.response, list):
                self.response = list(self.response)
            return
        if self.direct_passthrough:
            raise RuntimeError('Attempted implicit sequence conversion '
                               'but the response object is in direct '
                               'passthrough mode.')
        if not self.implicit_sequence_conversion:
            raise RuntimeError('The response object required the iterable '
                               'to be a sequence, but the implicit '
                               'conversion was disabled. Call '
                               'make_sequence() yourself.')
        self.make_sequence()
    def make_sequence(self):
        """Converts the response iterator in a list. By default this happens
        automatically if required. If `implicit_sequence_conversion` is
        disabled, this method is not automatically called and some properties
        might raise exceptions. This also encodes all the items.
        .. versionadded:: 0.6
        """
        if not self.is_sequence:
            # if we consume an iterable we have to ensure that the close
            # method of the iterable is called if available when we tear
            # down the response
            close = getattr(self.response, 'close', None)
            self.response = list(self.iter_encoded())
            if close is not None:
                self.call_on_close(close)
    def iter_encoded(self):
        """Iter the response encoded with the encoding of the response.
        If the response object is invoked as WSGI application the return
        value of this method is used as application iterator unless
        :attr:`direct_passthrough` was activated.
        """
        if __debug__:
            # development aid: warn if a bare string was used as response
            _warn_if_string(self.response)
        # Encode in a separate function so that self.response is fetched
        # early. This allows us to wrap the response with the return
        # value from get_app_iter or iter_encoded.
        return _iter_encoded(self.response, self.charset)
    def set_cookie(self, key, value='', max_age=None, expires=None,
                   path='/', domain=None, secure=None, httponly=False):
        """Sets a cookie. The parameters are the same as in the cookie `Morsel`
        object in the Python standard library but it accepts unicode data, too.
        :param key: the key (name) of the cookie to be set.
        :param value: the value of the cookie.
        :param max_age: should be a number of seconds, or `None` (default) if
                        the cookie should last only as long as the client's
                        browser session.
        :param expires: should be a `datetime` object or UNIX timestamp.
        :param domain: if you want to set a cross-domain cookie. For example,
                       ``domain=".example.com"`` will set a cookie that is
                       readable by the domain ``www.example.com``,
                       ``foo.example.com`` etc. Otherwise, a cookie will only
                       be readable by the domain that set it.
        :param path: limits the cookie to a given path, per default it will
                     span the whole domain.
        :param secure: passed through to `dump_cookie` to mark the cookie
                       as secure.
        :param httponly: passed through to `dump_cookie` to set the
                         `HttpOnly` flag.
        """
        self.headers.add('Set-Cookie', dump_cookie(key, value, max_age,
                         expires, path, domain, secure, httponly,
                         self.charset))
def delete_cookie(self, key, path='/', domain=None):
"""Delete a cookie. Fails silently if key doesn't exist.
:param key: the key (name) of the cookie to be deleted.
:param path: if the cookie that should be deleted was limited to a
path, the path has to be defined here.
:param domain: if the cookie that should be deleted was limited to a
domain, that domain has to be defined here.
"""
self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain)
    @property
    def is_streamed(self):
        """If the response is streamed (the response is not an iterable with
        a length information) this property is `True`. In this case streamed
        means that there is no information about the number of iterations.
        This is usually `True` if a generator is passed to the response object.
        This is useful for checking before applying some sort of post
        filtering that should not take place for streamed responses.
        """
        try:
            len(self.response)
        except (TypeError, AttributeError):
            # no usable __len__: treat the response as streamed
            return True
        return False
    @property
    def is_sequence(self):
        """If the iterator is buffered, this property will be `True`. A
        response object will consider an iterator to be buffered if the
        response attribute is a list or tuple.
        .. versionadded:: 0.6
        """
        return isinstance(self.response, (tuple, list))
    def close(self):
        """Close the wrapped response if possible. You can also use the object
        in a with statement which will automatically close it.
        .. versionadded:: 0.9
           Can now be used in a with statement.
        """
        if hasattr(self.response, 'close'):
            self.response.close()
        # run the callbacks registered through call_on_close
        for func in self._on_close:
            func()
    def __enter__(self):
        # context manager protocol: entering yields the response itself
        return self
    def __exit__(self, exc_type, exc_value, tb):
        self.close()
    def freeze(self):
        """Call this method if you want to make your response object ready for
        being pickled. This buffers the generator if there is one. It will
        also set the `Content-Length` header to the length of the body.
        .. versionchanged:: 0.6
           The `Content-Length` header is now set.
        """
        # we explicitly set the length to a list of the *encoded* response
        # iterator. Even if the implicit sequence conversion is disabled.
        self.response = list(self.iter_encoded())
        self.headers['Content-Length'] = str(sum(map(len, self.response)))
    def get_wsgi_headers(self, environ):
        """This is automatically called right before the response is started
        and returns headers modified for the given environment. It returns a
        copy of the headers from the response with some modifications applied
        if necessary.
        For example the location header (if present) is joined with the root
        URL of the environment. Also the content length is automatically set
        to zero here for certain status codes.
        .. versionchanged:: 0.6
           Previously that function was called `fix_headers` and modified
           the response object in place. Also since 0.6, IRIs in location
           and content-location headers are handled properly.
           Also starting with 0.6, Werkzeug will attempt to set the content
           length if it is able to figure it out on its own. This is the
           case if all the strings in the response iterable are already
           encoded and the iterable is buffered.
        :param environ: the WSGI environment of the request.
        :return: returns a new :class:`~werkzeug.datastructures.Headers`
                 object.
        """
        headers = Headers(self.headers)
        location = None
        content_location = None
        content_length = None
        status = self.status_code
        # iterate over the headers to find all values in one go. Because
        # get_wsgi_headers is called for each response, the single pass
        # gives us a tiny speedup.
        for key, value in headers:
            ikey = key.lower()
            if ikey == u'location':
                location = value
            elif ikey == u'content-location':
                content_location = value
            elif ikey == u'content-length':
                content_length = value
        # make sure the location header is an absolute URL
        if location is not None:
            old_location = location
            if isinstance(location, text_type):
                # Safe conversion is necessary here as we might redirect
                # to a broken URI scheme (for instance itms-services).
                location = iri_to_uri(location, safe_conversion=True)
            if self.autocorrect_location_header:
                # resolve relative redirects against the application root
                current_url = get_current_url(environ, root_only=True)
                if isinstance(current_url, text_type):
                    current_url = iri_to_uri(current_url)
                location = url_join(current_url, location)
            if location != old_location:
                headers['Location'] = location
        # make sure the content location is a URL
        if content_location is not None and \
           isinstance(content_location, text_type):
            headers['Content-Location'] = iri_to_uri(content_location)
        # remove entity headers and set content length to zero if needed.
        # Also update content_length accordingly so that the automatic
        # content length detection does not trigger in the following
        # code.
        if 100 <= status < 200 or status == 204:
            headers['Content-Length'] = content_length = u'0'
        elif status == 304:
            remove_entity_headers(headers)
        # if we can determine the content length automatically, we
        # should try to do that. But only if this does not involve
        # flattening the iterator or encoding of unicode strings in
        # the response. We however should not do that if we have a 304
        # response.
        if self.automatically_set_content_length and \
           self.is_sequence and content_length is None and status != 304:
            try:
                content_length = sum(len(to_bytes(x, 'ascii'))
                                     for x in self.response)
            except UnicodeError:
                # aha, something non-bytestringy in there, too bad, we
                # can't safely figure out the length of the response.
                pass
            else:
                headers['Content-Length'] = str(content_length)
        return headers
def get_app_iter(self, environ):
"""Returns the application iterator for the given environ. Depending
on the request method and the current status code the return value
might be an empty response rather than the one from the response.
If the request method is `HEAD` or the status code is in a range
where the HTTP specification requires an empty response, an empty
iterable is returned.
.. versionadded:: 0.6
:param environ: the WSGI environment of the request.
:return: a response iterable.
"""
status = self.status_code
if environ['REQUEST_METHOD'] == 'HEAD' or \
100 <= status < 200 or status in (204, 304):
iterable = ()
elif self.direct_passthrough:
if __debug__:
_warn_if_string(self.response)
return self.response
else:
iterable = self.iter_encoded()
return ClosingIterator(iterable, self.close)
def get_wsgi_response(self, environ):
"""Returns the final WSGI response as tuple. The first item in
the tuple is the application iterator, the second the status and
the third the list of headers. The response returned is created
specially for the given environment. For example if the request
method in the WSGI environment is ``'HEAD'`` the response will
be empty and only the headers and status code will be present.
.. versionadded:: 0.6
:param environ: the WSGI environment of the request.
:return: an ``(app_iter, status, headers)`` tuple.
"""
headers = self.get_wsgi_headers(environ)
app_iter = self.get_app_iter(environ)
return app_iter, self.status, headers.to_wsgi_list()
def __call__(self, environ, start_response):
"""Process this response as WSGI application.
:param environ: the WSGI environment.
:param start_response: the response callable provided by the WSGI
server.
:return: an application iterator
"""
app_iter, status, headers = self.get_wsgi_response(environ)
start_response(status, headers)
return app_iter
class AcceptMixin(object):
    """A mixin for classes with an :attr:`~BaseResponse.environ` attribute
    to get all the HTTP accept headers as
    :class:`~werkzeug.datastructures.Accept` objects (or subclasses
    thereof).
    """
    # Each property parses the raw header out of the WSGI environ once and
    # caches the parsed object on the instance (cached_property).
    @cached_property
    def accept_mimetypes(self):
        """List of mimetypes this client supports as
        :class:`~werkzeug.datastructures.MIMEAccept` object.
        """
        return parse_accept_header(self.environ.get('HTTP_ACCEPT'), MIMEAccept)
    @cached_property
    def accept_charsets(self):
        """List of charsets this client supports as
        :class:`~werkzeug.datastructures.CharsetAccept` object.
        """
        return parse_accept_header(self.environ.get('HTTP_ACCEPT_CHARSET'),
                                   CharsetAccept)
    @cached_property
    def accept_encodings(self):
        """List of encodings this client accepts. Encodings in a HTTP term
        are compression encodings such as gzip. For charsets have a look at
        :attr:`accept_charset`.
        """
        # No Accept subclass passed here: a plain Accept object comes back.
        return parse_accept_header(self.environ.get('HTTP_ACCEPT_ENCODING'))
    @cached_property
    def accept_languages(self):
        """List of languages this client accepts as
        :class:`~werkzeug.datastructures.LanguageAccept` object.
        .. versionchanged 0.5
           In previous versions this was a regular
           :class:`~werkzeug.datastructures.Accept` object.
        """
        return parse_accept_header(self.environ.get('HTTP_ACCEPT_LANGUAGE'),
                                   LanguageAccept)
class ETagRequestMixin(object):
    """Add entity tag and cache descriptors to a request object or object with
    a WSGI environment available as :attr:`~BaseRequest.environ`. This not
    only provides access to etags but also to the cache control header.
    """
    @cached_property
    def cache_control(self):
        """A :class:`~werkzeug.datastructures.RequestCacheControl` object
        for the incoming cache control headers.
        """
        cache_control = self.environ.get('HTTP_CACHE_CONTROL')
        # No on_update callback (None): the incoming cache control header
        # is treated as read-only.
        return parse_cache_control_header(cache_control, None,
                                          RequestCacheControl)
    @cached_property
    def if_match(self):
        """An object containing all the etags in the `If-Match` header.
        :rtype: :class:`~werkzeug.datastructures.ETags`
        """
        return parse_etags(self.environ.get('HTTP_IF_MATCH'))
    @cached_property
    def if_none_match(self):
        """An object containing all the etags in the `If-None-Match` header.
        :rtype: :class:`~werkzeug.datastructures.ETags`
        """
        return parse_etags(self.environ.get('HTTP_IF_NONE_MATCH'))
    @cached_property
    def if_modified_since(self):
        """The parsed `If-Modified-Since` header as datetime object."""
        return parse_date(self.environ.get('HTTP_IF_MODIFIED_SINCE'))
    @cached_property
    def if_unmodified_since(self):
        """The parsed `If-Unmodified-Since` header as datetime object."""
        return parse_date(self.environ.get('HTTP_IF_UNMODIFIED_SINCE'))
    @cached_property
    def if_range(self):
        """The parsed `If-Range` header.
        .. versionadded:: 0.7
        :rtype: :class:`~werkzeug.datastructures.IfRange`
        """
        return parse_if_range_header(self.environ.get('HTTP_IF_RANGE'))
    @cached_property
    def range(self):
        """The parsed `Range` header.
        .. versionadded:: 0.7
        :rtype: :class:`~werkzeug.datastructures.Range`
        """
        return parse_range_header(self.environ.get('HTTP_RANGE'))
class UserAgentMixin(object):
    """Adds a `user_agent` attribute to the request object which contains the
    parsed user agent of the browser that triggered the request as a
    :class:`~werkzeug.useragents.UserAgent` object.
    """
    @cached_property
    def user_agent(self):
        """The current user agent."""
        # Imported lazily inside the property rather than at module level;
        # presumably to avoid an import cycle -- TODO confirm.
        from werkzeug.useragents import UserAgent
        return UserAgent(self.environ)
class AuthorizationMixin(object):
    """Adds an :attr:`authorization` property that represents the parsed
    value of the `Authorization` header as
    :class:`~werkzeug.datastructures.Authorization` object.
    """
    @cached_property
    def authorization(self):
        """The `Authorization` object in parsed form."""
        header = self.environ.get('HTTP_AUTHORIZATION')
        return parse_authorization_header(header)
class StreamOnlyMixin(object):
    """If mixed in before the request object this will change the behavior
    of it to disable handling of form parsing. This disables the
    :attr:`files`, :attr:`form` attributes and will just provide a
    :attr:`stream` attribute that however is always available.
    .. versionadded:: 0.9
    """
    # Keep the body available as a raw stream only.
    disable_data_descriptor = True
    # Never run the form data parser on the request body.
    want_form_data_parsed = False
class ETagResponseMixin(object):
    """Adds extra functionality to a response object for etag and cache
    handling. This mixin requires an object with at least a `headers`
    object that implements a dict like interface similar to
    :class:`~werkzeug.datastructures.Headers`.
    If you want the :meth:`freeze` method to automatically add an etag, you
    have to mixin this method before the response base class. The default
    response class does not do that.
    """
    @property
    def cache_control(self):
        """The Cache-Control general-header field is used to specify
        directives that MUST be obeyed by all caching mechanisms along the
        request/response chain.
        """
        def on_update(cache_control):
            # Mutations of the parsed object are written straight back to
            # the header: empty -> drop it, otherwise -> re-serialize.
            if not cache_control and 'cache-control' in self.headers:
                del self.headers['cache-control']
            elif cache_control:
                self.headers['Cache-Control'] = cache_control.to_header()
        return parse_cache_control_header(self.headers.get('cache-control'),
                                          on_update,
                                          ResponseCacheControl)
    def make_conditional(self, request_or_environ):
        """Make the response conditional to the request. This method works
        best if an etag was defined for the response already. The `add_etag`
        method can be used to do that. If called without etag just the date
        header is set.
        This does nothing if the request method in the request or environ is
        anything but GET or HEAD.
        It does not remove the body of the response because that's something
        the :meth:`__call__` function does for us automatically.
        Returns self so that you can do ``return resp.make_conditional(req)``
        but modifies the object in-place.
        :param request_or_environ: a request object or WSGI environment to be
                                   used to make the response conditional
                                   against.
        """
        environ = _get_environ(request_or_environ)
        if environ['REQUEST_METHOD'] in ('GET', 'HEAD'):
            # if the date is not in the headers, add it now. We however
            # will not override an already existing header. Unfortunately
            # this header will be overridden by many WSGI servers including
            # wsgiref.
            if 'date' not in self.headers:
                self.headers['Date'] = http_date()
            if 'content-length' not in self.headers:
                length = self.calculate_content_length()
                if length is not None:
                    self.headers['Content-Length'] = length
            # Reply 304 Not Modified when the client's cached representation
            # (etag / last-modified) is still current.
            if not is_resource_modified(environ, self.headers.get('etag'), None,
                                        self.headers.get('last-modified')):
                self.status_code = 304
        return self
    def add_etag(self, overwrite=False, weak=False):
        """Add an etag for the current response if there is none yet."""
        if overwrite or 'etag' not in self.headers:
            self.set_etag(generate_etag(self.get_data()), weak)
    def set_etag(self, etag, weak=False):
        """Set the etag, and override the old one if there was one."""
        self.headers['ETag'] = quote_etag(etag, weak)
    def get_etag(self):
        """Return a tuple in the form ``(etag, is_weak)``. If there is no
        ETag the return value is ``(None, None)``.
        """
        return unquote_etag(self.headers.get('ETag'))
    def freeze(self, no_etag=False):
        """Call this method if you want to make your response object ready for
        pickling. This buffers the generator if there is one. This also
        sets the etag unless `no_etag` is set to `True`.
        """
        if not no_etag:
            self.add_etag()
        super(ETagResponseMixin, self).freeze()
    accept_ranges = header_property('Accept-Ranges', doc='''
        The `Accept-Ranges` header. Even though the name would indicate
        that multiple values are supported, it must be one string token only.
        The values ``'bytes'`` and ``'none'`` are common.
        .. versionadded:: 0.7''')
    def _get_content_range(self):
        def on_update(rng):
            if not rng:
                del self.headers['content-range']
            else:
                self.headers['Content-Range'] = rng.to_header()
        rv = parse_content_range_header(self.headers.get('content-range'),
                                        on_update)
        # always provide a content range object to make the descriptor
        # more user friendly. It provides an unset() method that can be
        # used to remove the header quickly.
        if rv is None:
            rv = ContentRange(None, None, None, on_update=on_update)
        return rv
    def _set_content_range(self, value):
        if not value:
            del self.headers['content-range']
        elif isinstance(value, string_types):
            self.headers['Content-Range'] = value
        else:
            self.headers['Content-Range'] = value.to_header()
    content_range = property(_get_content_range, _set_content_range, doc='''
        The `Content-Range` header as
        :class:`~werkzeug.datastructures.ContentRange` object. Even if the
        header is not set it will provide such an object for easier
        manipulation.
        .. versionadded:: 0.7''')
    # The helpers above only exist to build the property; remove them so
    # they do not leak as public methods of the mixin.
    del _get_content_range, _set_content_range
class ResponseStream(object):
    """A write-only, file-descriptor-like object used by
    :class:`ResponseStreamMixin` to represent the body of the stream.
    Written chunks are pushed directly onto the response iterable of the
    wrapped response object.
    """
    mode = 'wb+'
    def __init__(self, response):
        self.response = response
        self.closed = False
    def _fail_if_closed(self):
        # Every file operation on a closed stream raises the same error.
        if self.closed:
            raise ValueError('I/O operation on closed file')
    def write(self, value):
        self._fail_if_closed()
        # Force the response body into a mutable sequence, then append.
        self.response._ensure_sequence(mutable=True)
        self.response.response.append(value)
    def writelines(self, seq):
        for chunk in seq:
            self.write(chunk)
    def close(self):
        self.closed = True
    def flush(self):
        self._fail_if_closed()
    def isatty(self):
        self._fail_if_closed()
        return False
    @property
    def encoding(self):
        # Mirror the charset of the wrapped response.
        return self.response.charset
class ResponseStreamMixin(object):
    """Mixin for :class:`BaseResponse` subclasses. Classes that inherit from
    this mixin will automatically get a :attr:`stream` property that provides
    a write-only interface to the response iterable.
    """
    @cached_property
    def stream(self):
        """The response iterable as write-only stream."""
        # Cached so repeated access returns the same stream object.
        return ResponseStream(self)
class CommonRequestDescriptorsMixin(object):
    """A mixin for :class:`BaseRequest` subclasses. Request objects that
    mix this class in will automatically get descriptors for a couple of
    HTTP headers with automatic type conversion.
    .. versionadded:: 0.5
    """
    content_type = environ_property('CONTENT_TYPE', doc='''
        The Content-Type entity-header field indicates the media type of
        the entity-body sent to the recipient or, in the case of the HEAD
        method, the media type that would have been sent had the request
        been a GET.''')
    @cached_property
    def content_length(self):
        """The Content-Length entity-header field indicates the size of the
        entity-body in bytes or, in the case of the HEAD method, the size of
        the entity-body that would have been sent had the request been a
        GET.
        """
        return get_content_length(self.environ)
    content_encoding = environ_property('HTTP_CONTENT_ENCODING', doc='''
        The Content-Encoding entity-header field is used as a modifier to the
        media-type. When present, its value indicates what additional content
        codings have been applied to the entity-body, and thus what decoding
        mechanisms must be applied in order to obtain the media-type
        referenced by the Content-Type header field.
        .. versionadded:: 0.9''')
    content_md5 = environ_property('HTTP_CONTENT_MD5', doc='''
         The Content-MD5 entity-header field, as defined in RFC 1864, is an
         MD5 digest of the entity-body for the purpose of providing an
         end-to-end message integrity check (MIC) of the entity-body. (Note:
         a MIC is good for detecting accidental modification of the
         entity-body in transit, but is not proof against malicious attacks.)
        .. versionadded:: 0.9''')
    referrer = environ_property('HTTP_REFERER', doc='''
        The Referer[sic] request-header field allows the client to specify,
        for the server's benefit, the address (URI) of the resource from which
        the Request-URI was obtained (the "referrer", although the header
        field is misspelled).''')
    date = environ_property('HTTP_DATE', None, parse_date, doc='''
        The Date general-header field represents the date and time at which
        the message was originated, having the same semantics as orig-date
        in RFC 822.''')
    max_forwards = environ_property('HTTP_MAX_FORWARDS', None, int, doc='''
        The Max-Forwards request-header field provides a mechanism with the
        TRACE and OPTIONS methods to limit the number of proxies or gateways
        that can forward the request to the next inbound server.''')
    def _parse_content_type(self):
        # Parse CONTENT_TYPE once and memoize the (mimetype, params) pair
        # on the instance for the two properties below.
        if not hasattr(self, '_parsed_content_type'):
            self._parsed_content_type = \
                parse_options_header(self.environ.get('CONTENT_TYPE', ''))
    @property
    def mimetype(self):
        """Like :attr:`content_type` but without parameters (eg, without
        charset, type etc.). For example if the content
        type is ``text/html; charset=utf-8`` the mimetype would be
        ``'text/html'``.
        """
        self._parse_content_type()
        return self._parsed_content_type[0]
    @property
    def mimetype_params(self):
        """The mimetype parameters as dict. For example if the content
        type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.
        """
        self._parse_content_type()
        return self._parsed_content_type[1]
    @cached_property
    def pragma(self):
        """The Pragma general-header field is used to include
        implementation-specific directives that might apply to any recipient
        along the request/response chain. All pragma directives specify
        optional behavior from the viewpoint of the protocol; however, some
        systems MAY require that behavior be consistent with the directives.
        """
        return parse_set_header(self.environ.get('HTTP_PRAGMA', ''))
class CommonResponseDescriptorsMixin(object):
    """A mixin for :class:`BaseResponse` subclasses. Response objects that
    mix this class in will automatically get descriptors for a couple of
    HTTP headers with automatic type conversion.
    """
    def _get_mimetype(self):
        # The mimetype is everything before the first ';' of Content-Type.
        ct = self.headers.get('content-type')
        if ct:
            return ct.split(';')[0].strip()
    def _set_mimetype(self, value):
        self.headers['Content-Type'] = get_content_type(value, self.charset)
    def _get_mimetype_params(self):
        def on_update(d):
            # Writing to the returned dict re-serializes Content-Type.
            self.headers['Content-Type'] = \
                dump_options_header(self.mimetype, d)
        d = parse_options_header(self.headers.get('content-type', ''))[1]
        return CallbackDict(d, on_update)
    mimetype = property(_get_mimetype, _set_mimetype, doc='''
        The mimetype (content type without charset etc.)''')
    mimetype_params = property(_get_mimetype_params, doc='''
        The mimetype parameters as dict. For example if the content
        type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.
        .. versionadded:: 0.5
        ''')
    location = header_property('Location', doc='''
        The Location response-header field is used to redirect the recipient
        to a location other than the Request-URI for completion of the request
        or identification of a new resource.''')
    # NOTE(review): the Age header carries delta-seconds, yet this
    # descriptor converts it with parse_date/http_date as if it were an
    # HTTP date -- looks wrong; later Werkzeug versions use dedicated
    # age parsing helpers. Confirm before relying on this attribute.
    age = header_property('Age', None, parse_date, http_date, doc='''
        The Age response-header field conveys the sender's estimate of the
        amount of time since the response (or its revalidation) was
        generated at the origin server.
        Age values are non-negative decimal integers, representing time in
        seconds.''')
    content_type = header_property('Content-Type', doc='''
        The Content-Type entity-header field indicates the media type of the
        entity-body sent to the recipient or, in the case of the HEAD method,
        the media type that would have been sent had the request been a GET.
    ''')
    content_length = header_property('Content-Length', None, int, str, doc='''
        The Content-Length entity-header field indicates the size of the
        entity-body, in decimal number of OCTETs, sent to the recipient or,
        in the case of the HEAD method, the size of the entity-body that would
        have been sent had the request been a GET.''')
    content_location = header_property('Content-Location', doc='''
        The Content-Location entity-header field MAY be used to supply the
        resource location for the entity enclosed in the message when that
        entity is accessible from a location separate from the requested
        resource's URI.''')
    content_encoding = header_property('Content-Encoding', doc='''
        The Content-Encoding entity-header field is used as a modifier to the
        media-type. When present, its value indicates what additional content
        codings have been applied to the entity-body, and thus what decoding
        mechanisms must be applied in order to obtain the media-type
        referenced by the Content-Type header field.''')
    content_md5 = header_property('Content-MD5', doc='''
         The Content-MD5 entity-header field, as defined in RFC 1864, is an
         MD5 digest of the entity-body for the purpose of providing an
         end-to-end message integrity check (MIC) of the entity-body. (Note:
         a MIC is good for detecting accidental modification of the
         entity-body in transit, but is not proof against malicious attacks.)
        ''')
    date = header_property('Date', None, parse_date, http_date, doc='''
        The Date general-header field represents the date and time at which
        the message was originated, having the same semantics as orig-date
        in RFC 822.''')
    expires = header_property('Expires', None, parse_date, http_date, doc='''
        The Expires entity-header field gives the date/time after which the
        response is considered stale. A stale cache entry may not normally be
        returned by a cache.''')
    last_modified = header_property('Last-Modified', None, parse_date,
                                    http_date, doc='''
        The Last-Modified entity-header field indicates the date and time at
        which the origin server believes the variant was last modified.''')
    def _get_retry_after(self):
        # Retry-After may be either delta-seconds or an HTTP date; a pure
        # digit string is interpreted as seconds from now.
        value = self.headers.get('retry-after')
        if value is None:
            return
        elif value.isdigit():
            return datetime.utcnow() + timedelta(seconds=int(value))
        return parse_date(value)
    def _set_retry_after(self, value):
        if value is None:
            if 'retry-after' in self.headers:
                del self.headers['retry-after']
            return
        elif isinstance(value, datetime):
            value = http_date(value)
        else:
            value = str(value)
        self.headers['Retry-After'] = value
    retry_after = property(_get_retry_after, _set_retry_after, doc='''
        The Retry-After response-header field can be used with a 503 (Service
        Unavailable) response to indicate how long the service is expected
        to be unavailable to the requesting client.
        Time in seconds until expiration or date.''')
    def _set_property(name, doc=None):
        # Factory for set-valued header descriptors (Vary, Allow, ...);
        # the getter returns a parsed HeaderSet that writes back on update.
        def fget(self):
            def on_update(header_set):
                if not header_set and name in self.headers:
                    del self.headers[name]
                elif header_set:
                    self.headers[name] = header_set.to_header()
            return parse_set_header(self.headers.get(name), on_update)
        def fset(self, value):
            if not value:
                del self.headers[name]
            elif isinstance(value, string_types):
                self.headers[name] = value
            else:
                self.headers[name] = dump_header(value)
        return property(fget, fset, doc=doc)
    vary = _set_property('Vary', doc='''
        The Vary field value indicates the set of request-header fields that
        fully determines, while the response is fresh, whether a cache is
        permitted to use the response to reply to a subsequent request
        without revalidation.''')
    content_language = _set_property('Content-Language', doc='''
        The Content-Language entity-header field describes the natural
        language(s) of the intended audience for the enclosed entity. Note
        that this might not be equivalent to all the languages used within
        the entity-body.''')
    allow = _set_property('Allow', doc='''
        The Allow entity-header field lists the set of methods supported
        by the resource identified by the Request-URI. The purpose of this
        field is strictly to inform the recipient of valid methods
        associated with the resource. An Allow header field MUST be
        present in a 405 (Method Not Allowed) response.''')
    # Remove the construction helpers so they do not become public
    # methods of the mixin.
    del _set_property, _get_mimetype, _set_mimetype, _get_retry_after, \
        _set_retry_after
class WWWAuthenticateMixin(object):
    """Adds a :attr:`www_authenticate` property to a response object."""
    @property
    def www_authenticate(self):
        """The `WWW-Authenticate` header in a parsed form."""
        def on_update(www_auth):
            # Mutations of the parsed object are synced back to the header
            # (the header is removed once the object becomes empty).
            if not www_auth and 'www-authenticate' in self.headers:
                del self.headers['www-authenticate']
            elif www_auth:
                self.headers['WWW-Authenticate'] = www_auth.to_header()
        header = self.headers.get('www-authenticate')
        return parse_www_authenticate_header(header, on_update)
class Request(BaseRequest, AcceptMixin, ETagRequestMixin,
              UserAgentMixin, AuthorizationMixin,
              CommonRequestDescriptorsMixin):
    """Full featured request object implementing the following mixins:
    - :class:`AcceptMixin` for accept header parsing
    - :class:`ETagRequestMixin` for etag and cache control handling
    - :class:`UserAgentMixin` for user agent introspection
    - :class:`AuthorizationMixin` for http auth handling
    - :class:`CommonRequestDescriptorsMixin` for common headers
    """
    # Intentionally empty: all behavior comes from the base class and mixins.
class PlainRequest(StreamOnlyMixin, Request):
    """A request object without special form parsing capabilities.
    .. versionadded:: 0.9
    """
    # StreamOnlyMixin is listed first so its class attributes take
    # precedence over the form-parsing defaults inherited via Request.
class Response(BaseResponse, ETagResponseMixin, ResponseStreamMixin,
               CommonResponseDescriptorsMixin,
               WWWAuthenticateMixin):
    """Full featured response object implementing the following mixins:
    - :class:`ETagResponseMixin` for etag and cache control handling
    - :class:`ResponseStreamMixin` to add support for the `stream` property
    - :class:`CommonResponseDescriptorsMixin` for various HTTP descriptors
    - :class:`WWWAuthenticateMixin` for HTTP authentication support
    """
    # Intentionally empty: all behavior comes from the base class and mixins.
| agpl-3.0 |
Nico60/external_chromium | chrome/common/extensions/docs/examples/apps/hello-python/httplib2/iri2uri.py | 885 | 3850 | """
iri2uri
Converts an IRI to a URI.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
__history__ = """
"""
import urlparse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to encode and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
# Sorted, non-overlapping ranges of Unicode code points ('ucschar' and
# 'iprivate' in RFC 3987) whose characters must be percent-encoded when
# converting an IRI to a URI.  Sorted order lets encode() stop early.
escape_range = [
    (0xA0, 0xD7FF),
    (0xE000, 0xF8FF),
    (0xF900, 0xFDCF),
    (0xFDF0, 0xFFEF),
    (0x10000, 0x1FFFD),
    (0x20000, 0x2FFFD),
    (0x30000, 0x3FFFD),
    (0x40000, 0x4FFFD),
    (0x50000, 0x5FFFD),
    (0x60000, 0x6FFFD),
    (0x70000, 0x7FFFD),
    (0x80000, 0x8FFFD),
    (0x90000, 0x9FFFD),
    (0xA0000, 0xAFFFD),
    (0xB0000, 0xBFFFD),
    (0xC0000, 0xCFFFD),
    (0xD0000, 0xDFFFD),
    (0xE1000, 0xEFFFD),
    (0xF0000, 0xFFFFD),
    (0x100000, 0x10FFFD),
]
def encode(c):
    """Return the single character *c* unchanged, or percent-encoded
    (as UTF-8 octets) if it falls in one of the RFC 3987 ranges that
    are not allowed to appear literally in a URI.
    """
    i = ord(c)
    for low, high in escape_range:
        if i < low:
            # Ranges are sorted ascending: nothing further can match.
            break
        if low <= i <= high:
            # Percent-encode every octet of the UTF-8 encoding.  Iterating
            # a byte string yields ints on Python 3 and 1-char strings on
            # Python 2, so normalize to the integer octet value.  %02X
            # (zero-padded, was space-padded %2X) guarantees exactly two
            # hex digits per octet.
            return "".join(
                "%%%02X" % (o if isinstance(o, int) else ord(o))
                for o in c.encode('utf-8'))
    return c
def iri2uri(uri):
    """Convert an IRI to a URI.

    IRIs must be passed in as unicode strings; do not utf-8 encode the
    IRI before calling this function.  Non-unicode input is returned
    unchanged.
    """
    if not isinstance(uri, unicode):
        return uri
    scheme, authority, path, query, fragment = urlparse.urlsplit(uri)
    # The host part is IDNA-encoded; the reassembled URI is then walked
    # character by character so that every 'ucschar'/'iprivate' character
    # is UTF-8 encoded and %-escaped octet by octet.
    rebuilt = urlparse.urlunsplit(
        (scheme, authority.encode('idna'), path, query, fragment))
    return "".join(encode(ch) for ch in rebuilt)
if __name__ == "__main__":
    # Self-test: running this module directly executes the tests below.
    import unittest
    class Test(unittest.TestCase):
        def test_uris(self):
            """Test that URIs are invariant under the transformation."""
            # Plain ASCII URIs contain nothing to escape and must come
            # back from iri2uri() completely unchanged.
            invariant = [
                u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
                u"http://www.ietf.org/rfc/rfc2396.txt",
                u"ldap://[2001:db8::7]/c=GB?objectClass?one",
                u"mailto:John.Doe@example.com",
                u"news:comp.infosystems.www.servers.unix",
                u"tel:+1-816-555-1212",
                u"telnet://192.0.2.16:80/",
                u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
            for uri in invariant:
                self.assertEqual(uri, iri2uri(uri))
        def test_iri(self):
            """ Test that the right type of escaping is done for each part of the URI."""
            # Non-ASCII characters must be escaped in host (IDNA), path,
            # query and fragment; applying iri2uri() twice is idempotent,
            # and an already utf-8 encoded byte string is deliberately
            # left alone (hence the final assertNotEqual).
            self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
            self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
            self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
            self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
            self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))
    unittest.main()
| bsd-3-clause |
tedi3231/openerp | build/lib/openerp/addons/base/ir/workflow/print_instance.py | 27 | 8703 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from operator import itemgetter
import os
from openerp import report, tools
_logger = logging.getLogger(__name__)
def graph_get(cr, graph, wkf_ids, nested, workitem, processed_subflows):
    """Draw the activities and transitions of the given workflows into
    *graph* (a pydot graph or cluster).

    :param cr: database cursor
    :param wkf_ids: ids of the workflows to render
    :param nested: when True, subflows are expanded as nested clusters
    :param workitem: dict mapping activity id -> pending workitem count
    :param processed_subflows: set of subflow ids already expanded; used
                               to avoid drawing the same cluster twice
    :return: a ``(start, stop)`` pair of descriptors used by the caller
             to wire nested subflows to their surrounding activities
    """
    import pydot
    # The id list is expanded into %s placeholders; the values themselves
    # are passed separately, so the query stays parameterized.
    cr.execute('select * from wkf_activity where wkf_id in ('+','.join(['%s']*len(wkf_ids))+')', wkf_ids)
    nodes = cr.dictfetchall()
    activities = {}
    actfrom = {}  # activity id -> edge-source descriptor
    actto = {}    # activity id -> edge-target descriptor
    for n in nodes:
        activities[n['id']] = n
        if n['subflow_id'] and nested and n['subflow_id'] not in processed_subflows:
            # Expand the subflow recursively as a nested cluster.
            processed_subflows.add(n['subflow_id']) # don't create multiple times the same cluster.
            cr.execute('select * from wkf where id=%s', (n['subflow_id'],))
            wkfinfo = cr.dictfetchone()
            graph2 = pydot.Cluster('subflow'+str(n['subflow_id']), fontsize='12', label = "\"Subflow: %s\\nOSV: %s\"" % ( n['name'], wkfinfo['osv']) )
            (s1,s2) = graph_get(cr, graph2, [n['subflow_id']], True, workitem, processed_subflows)
            graph.add_subgraph(graph2)
            # Edges leaving this activity come from the subflow's stop
            # descriptor; edges entering it go to the subflow's start.
            actfrom[n['id']] = s2
            actto[n['id']] = s1
        else:
            args = {}
            if n['flow_start'] or n['flow_stop']:
                args['style']='filled'
                args['color']='lightgrey'
            args['label']=n['name']
            workitems = ''
            if n['id'] in workitem:
                # Highlight activities that currently hold workitems in
                # red, annotated with the workitem count.
                workitems = '\\nx ' + str(workitem[n['id']])
                args['label'] += workitems
                args['color'] = "red"
                args['style']='filled'
            if n['subflow_id']:
                args['shape'] = 'box'
                if nested and n['subflow_id'] in processed_subflows:
                    # Subflow already expanded elsewhere: draw a green
                    # placeholder box instead of a second cluster.
                    cr.execute('select * from wkf where id=%s', (n['subflow_id'],))
                    wkfinfo = cr.dictfetchone()
                    args['label'] = \
                        '\"Subflow: %s\\nOSV: %s\\n(already expanded)%s\"' % \
                        (n['name'], wkfinfo['osv'], workitems)
                    args['color'] = 'green'
                    args['style'] ='filled'
            graph.add_node(pydot.Node(n['id'], **args))
            actfrom[n['id']] = (n['id'],{})
            actto[n['id']] = (n['id'],{})
    node_ids = tuple(map(itemgetter('id'), nodes))
    cr.execute('select * from wkf_transition where act_from IN %s', (node_ids,))
    transitions = cr.dictfetchall()
    for t in transitions:
        if not t['act_to'] in activities:
            # Target activity belongs to a workflow we are not drawing.
            continue
        args = {
            'label': str(t['condition']).replace(' or ', '\\nor ')
                                        .replace(' and ','\\nand ')
        }
        if t['signal']:
            args['label'] += '\\n'+str(t['signal'])
            args['style'] = 'bold'
        # Split/join modes are rendered through the arrow tail/head shapes.
        if activities[t['act_from']]['split_mode']=='AND':
            args['arrowtail']='box'
        elif str(activities[t['act_from']]['split_mode'])=='OR ':
            args['arrowtail']='inv'
        if activities[t['act_to']]['join_mode']=='AND':
            args['arrowhead']='crow'
        activity_from = actfrom[t['act_from']][1].get(t['signal'], actfrom[t['act_from']][0])
        activity_to = actto[t['act_to']][1].get(t['signal'], actto[t['act_to']][0])
        graph.add_edge(pydot.Edge( str(activity_from) ,str(activity_to), fontsize='10', **args))
    cr.execute('select * from wkf_activity where flow_start=True and wkf_id in ('+','.join(['%s']*len(wkf_ids))+')', wkf_ids)
    start = cr.fetchone()[0]
    cr.execute("select 'subflow.'||name,id from wkf_activity where flow_stop=True and wkf_id in ("+','.join(['%s']*len(wkf_ids))+')', wkf_ids)
    stop = cr.fetchall()
    if stop:
        stop = (stop[0][1], dict(stop))
    else:
        stop = ("stop",{})
    return (start, {}), stop
def graph_instance_get(cr, graph, inst_id, nested=False):
    """Populate *graph* with the workflow(s) of instance *inst_id*.

    Looks up every workflow attached to the instance and delegates the
    drawing to :func:`graph_get`, annotating activities with the number
    of pending workitems (including those of nested subflows).
    """
    def _collect_workitems(instance):
        # Map act_id -> workitem count, recursing into subflow instances.
        cr.execute('select act_id,count(*) from wkf_workitem where inst_id=%s group by act_id', (instance,))
        counts = dict(cr.fetchall())
        cr.execute('select subflow_id from wkf_workitem where inst_id=%s', (instance,))
        for (subflow_id,) in cr.fetchall():
            counts.update(_collect_workitems(subflow_id))
        return counts
    cr.execute('select wkf_id from wkf_instance where id=%s', (inst_id,))
    wkf_ids = [row[0] for row in cr.fetchall()]
    graph_get(cr, graph, wkf_ids, nested, _collect_workitems(inst_id), set())
#
# TODO: not clean: this report instance handling is not concurrency-safe!
#
class report_graph_instance(object):
    """Eagerly renders the workflow instance(s) of a record as a PDF.

    The graph is built with pydot, emitted as PostScript and converted
    to PDF through the external ``ps2pdf`` tool.  All rendering happens
    in the constructor; :meth:`is_done`/:meth:`get` expose the result.
    """
    def __init__(self, cr, uid, ids, data):
        try:
            import pydot
        except Exception:
            _logger.warning(
                'Import Error for pydot, you will not be able to render workflows.\n'
                'Consider Installing PyDot or dependencies: http://dkbza.org/pydot.html.')
            # Bare raise keeps the original traceback and is valid syntax
            # on Python 2.6+ and 3 (unlike the old ``except Exception,e``).
            raise
        self.done = False
        try:
            cr.execute('select * from wkf where osv=%s limit 1',
                       (data['model'],))
            wkfinfo = cr.dictfetchone()
            if not wkfinfo:
                # No workflow on this model: emit a PostScript document
                # carrying an explanatory message instead of a graph.
                ps_string = '''%PS-Adobe-3.0
/inch {72 mul} def
/Times-Roman findfont 50 scalefont setfont
1.5 inch 15 inch moveto
(No workflow defined) show
showpage'''
            else:
                cr.execute('select i.id from wkf_instance i left join wkf w on (i.wkf_id=w.id) where res_id=%s and osv=%s',(data['id'],data['model']))
                inst_ids = cr.fetchall()
                if not inst_ids:
                    ps_string = '''%PS-Adobe-3.0
/inch {72 mul} def
/Times-Roman findfont 50 scalefont setfont
1.5 inch 15 inch moveto
(No workflow instance defined) show
showpage'''
                else:
                    graph = pydot.Dot(graph_name=data['model'].replace('.','_'),
                                      fontsize='16',
                                      label="""\\\n\\nWorkflow: %s\\n OSV: %s""" % (wkfinfo['name'],wkfinfo['osv']),
                                      size='7.3, 10.1', center='1', ratio='auto', rotate='0', rankdir='TB',
                                      )
                    for inst_id in inst_ids:
                        inst_id = inst_id[0]
                        graph_instance_get(cr, graph, inst_id, data.get('nested', False))
                    ps_string = graph.create(prog='dot', format='ps')
        except Exception:
            # Deliberate best-effort: any rendering failure is logged and
            # replaced by an error document so the report still returns.
            _logger.exception('Exception in call:')
            # string is in PS, like the success message would have been
            ps_string = '''%PS-Adobe-3.0
/inch {72 mul} def
/Times-Roman findfont 50 scalefont setfont
1.5 inch 15 inch moveto
(No workflow available) show
showpage'''
        # Convert the PostScript output to PDF with the external ps2pdf
        # tool (batch wrapper on Windows), piping through stdin/stdout.
        if os.name == "nt":
            prog = 'ps2pdf.bat'
        else:
            prog = 'ps2pdf'
        args = (prog, '-', '-')
        # Named to avoid shadowing the ``input`` builtin.
        pipe_in, pipe_out = tools.exec_command_pipe(*args)
        pipe_in.write(ps_string)
        pipe_in.close()
        self.result = pipe_out.read()
        pipe_out.close()
        self.done = True
    def is_done(self):
        """Whether rendering finished (True once __init__ has returned)."""
        return self.done
    def get(self):
        """Return the rendered PDF bytes, or None when not done yet."""
        return self.result if self.done else None
class report_graph(report.interface.report_int):
    """Report service that renders a record's workflow instance graph."""
    def __init__(self, name, table):
        report.interface.report_int.__init__(self, name)
        self.table = table
    def result(self):
        # Poll-style API: returns (ready, data, format).
        if self.obj.is_done():
            return True, self.obj.get(), 'pdf'
        else:
            return False, False, False
    def create(self, cr, uid, ids, data, context=None):
        # NOTE(review): storing the renderer on self makes this service
        # stateful and unsafe for concurrent requests (see TODO above the
        # class definitions in this module).
        self.obj = report_graph_instance(cr, uid, ids, data)
        return self.obj.get(), 'pdf'
# NOTE(review): instantiating the service presumably registers it under this
# report name via report_int.__init__ -- verify in report.interface.
report_graph('report.workflow.instance.graph', 'ir.workflow')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Kongsea/tensorflow | tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py | 13 | 31886 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A decoder that performs beam search."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.contrib.seq2seq.python.ops import beam_search_ops
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.layers import base as layers_base
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
# Public API of this module.
__all__ = [
    "BeamSearchDecoderOutput",
    "BeamSearchDecoderState",
    "BeamSearchDecoder",
    "FinalBeamSearchDecoderOutput",
    "tile_batch",
]
class BeamSearchDecoderState(
    collections.namedtuple("BeamSearchDecoderState", ("cell_state", "log_probs",
                                                      "finished", "lengths"))):
  """State carried between beam search steps.

  Fields: `cell_state` (possibly nested RNN cell state), `log_probs`
  (cumulative log probabilities per beam), `finished` (bool per beam),
  and `lengths` (int64 prediction lengths per beam).
  """
  pass
class BeamSearchDecoderOutput(
    collections.namedtuple("BeamSearchDecoderOutput",
                           ("scores", "predicted_ids", "parent_ids"))):
  """Per-step beam search output: beam scores, chosen word ids, and the
  parent beam index each chosen word continues from."""
  pass
class FinalBeamSearchDecoderOutput(
    collections.namedtuple("FinalBeamDecoderOutput",
                           ["predicted_ids", "beam_search_decoder_output"])):
  """Final outputs returned by the beam search after all decoding is finished.
  Args:
    predicted_ids: The final prediction. A tensor of shape
      `[T, batch_size, beam_width]`.
    beam_search_decoder_output: An instance of `BeamSearchDecoderOutput` that
      describes the state of the beam search.
  """
  pass
def _tile_batch(t, multiplier):
  """Core single-tensor implementation of tile_batch."""
  t = ops.convert_to_tensor(t, name="t")
  shape_t = array_ops.shape(t)
  if t.shape.ndims is None or t.shape.ndims < 1:
    raise ValueError("t must have statically known rank")
  # Insert a new axis 1 and tile along it, then fold it back into the batch
  # axis; this yields [t[0], t[0], ..., t[1], t[1], ...] ordering.
  tiling = [1] * (t.shape.ndims + 1)
  tiling[1] = multiplier
  tiled_static_batch_size = (
      t.shape[0].value * multiplier if t.shape[0].value is not None else None)
  tiled = array_ops.tile(array_ops.expand_dims(t, 1), tiling)
  tiled = array_ops.reshape(
      tiled, array_ops.concat(([shape_t[0] * multiplier], shape_t[1:]), 0))
  # Preserve whatever static shape information is available.
  tiled.set_shape(
      tensor_shape.TensorShape(
          [tiled_static_batch_size]).concatenate(t.shape[1:]))
  return tiled
def tile_batch(t, multiplier, name=None):
  """Tile the batch dimension of a (possibly nested structure of) tensor(s) t.

  Every tensor in the (possibly nested) structure, shaped
  `[batch_size, s0, s1, ...]`, is expanded to
  `[batch_size * multiplier, s0, s1, ...]` where each minibatch entry is
  repeated `multiplier` times in sequence:
  `t[0], t[0], ..., t[1], t[1], ...`.

  Args:
    t: `Tensor` shaped `[batch_size, ...]`.
    multiplier: Python int.
    name: Name scope for any created operations.

  Returns:
    A (possibly nested structure of) `Tensor` shaped
    `[batch_size * multiplier, ...]`.

  Raises:
    ValueError: if tensor(s) `t` do not have a statically known rank or
      the rank is < 1.
  """
  flat_tensors = nest.flatten(t)
  with ops.name_scope(name, "tile_batch", flat_tensors + [multiplier]):
    def _tile_one(tensor):
      return _tile_batch(tensor, multiplier)
    return nest.map_structure(_tile_one, t)
def _check_maybe(value):
  """Validate that `value` can be reshaped between batch/beam layouts.

  Raises:
    TypeError: if `value` is a `TensorArray`.
    ValueError: if the rank of `value` is not statically known.
  """
  if isinstance(value, tensor_array_ops.TensorArray):
    raise TypeError(
        "TensorArray state is not supported by BeamSearchDecoder: %s"
        % value.name)
  if value.shape.ndims is None:
    raise ValueError(
        "Expected tensor (%s) to have known rank, but ndims == None." % value)
class BeamSearchDecoder(decoder.Decoder):
  """BeamSearch sampling decoder.
  **NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in
  `AttentionWrapper`, then you must ensure that:
  - The encoder output has been tiled to `beam_width` via
    @{tf.contrib.seq2seq.tile_batch} (NOT `tf.tile`).
  - The `batch_size` argument passed to the `zero_state` method of this
    wrapper is equal to `true_batch_size * beam_width`.
  - The initial state created with `zero_state` above contains a
    `cell_state` value containing properly tiled final state from the
    encoder.
  An example:
  ```
  tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(
      encoder_outputs, multiplier=beam_width)
  tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(
      encoder_final_state, multiplier=beam_width)
  tiled_sequence_length = tf.contrib.seq2seq.tile_batch(
      sequence_length, multiplier=beam_width)
  attention_mechanism = MyFavoriteAttentionMechanism(
      num_units=attention_depth,
      memory=tiled_encoder_outputs,
      memory_sequence_length=tiled_sequence_length)
  attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
  decoder_initial_state = attention_cell.zero_state(
      dtype, batch_size=true_batch_size * beam_width)
  decoder_initial_state = decoder_initial_state.clone(
      cell_state=tiled_encoder_final_state)
  ```
  """
  def __init__(self,
               cell,
               embedding,
               start_tokens,
               end_token,
               initial_state,
               beam_width,
               output_layer=None,
               length_penalty_weight=0.0):
    """Initialize the BeamSearchDecoder.
    Args:
      cell: An `RNNCell` instance.
      embedding: A callable that takes a vector tensor of `ids` (argmax ids),
        or the `params` argument for `embedding_lookup`.
      start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
      end_token: `int32` scalar, the token that marks end of decoding.
      initial_state: A (possibly nested tuple of...) tensors and TensorArrays.
      beam_width:  Python integer, the number of beams.
      output_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
        `tf.layers.Dense`.  Optional layer to apply to the RNN output prior
        to storing the result or sampling.
      length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
    Raises:
      TypeError: if `cell` is not an instance of `RNNCell`,
        or `output_layer` is not an instance of `tf.layers.Layer`.
      ValueError: If `start_tokens` is not a vector or
        `end_token` is not a scalar.
    """
    if not rnn_cell_impl._like_rnncell(cell):  # pylint: disable=protected-access
      raise TypeError("cell must be an RNNCell, received: %s" % type(cell))
    if (output_layer is not None
        and not isinstance(output_layer, layers_base.Layer)):
      raise TypeError(
          "output_layer must be a Layer, received: %s" % type(output_layer))
    self._cell = cell
    self._output_layer = output_layer
    # Accept either a callable embedding_fn or an embedding matrix.
    if callable(embedding):
      self._embedding_fn = embedding
    else:
      self._embedding_fn = (
          lambda ids: embedding_ops.embedding_lookup(embedding, ids))
    self._start_tokens = ops.convert_to_tensor(
        start_tokens, dtype=dtypes.int32, name="start_tokens")
    if self._start_tokens.get_shape().ndims != 1:
      raise ValueError("start_tokens must be a vector")
    self._end_token = ops.convert_to_tensor(
        end_token, dtype=dtypes.int32, name="end_token")
    if self._end_token.get_shape().ndims != 0:
      raise ValueError("end_token must be a scalar")
    # True batch size (before beam tiling), taken from start_tokens.
    self._batch_size = array_ops.size(start_tokens)
    self._beam_width = beam_width
    self._length_penalty_weight = length_penalty_weight
    # Split the (already beam-tiled) initial state into [batch, beam, ...].
    self._initial_cell_state = nest.map_structure(
        self._maybe_split_batch_beams,
        initial_state, self._cell.state_size)
    # Replicate the start token for every beam: [batch_size, beam_width].
    self._start_tokens = array_ops.tile(
        array_ops.expand_dims(self._start_tokens, 1), [1, self._beam_width])
    self._start_inputs = self._embedding_fn(self._start_tokens)
    self._finished = array_ops.zeros(
        [self._batch_size, self._beam_width], dtype=dtypes.bool)
  @property
  def batch_size(self):
    return self._batch_size
  def _rnn_output_size(self):
    # Output size of the wrapped cell, after the optional output_layer.
    size = self._cell.output_size
    if self._output_layer is None:
      return size
    else:
      # To use layer's compute_output_shape, we need to convert the
      # RNNCell's output_size entries into shapes with an unknown
      # batch size.  We then pass this through the layer's
      # compute_output_shape and read off all but the first (batch)
      # dimensions to get the output size of the rnn with the layer
      # applied to the top.
      output_shape_with_unknown_batch = nest.map_structure(
          lambda s: tensor_shape.TensorShape([None]).concatenate(s),
          size)
      layer_output_shape = self._output_layer._compute_output_shape(  # pylint: disable=protected-access
          output_shape_with_unknown_batch)
      return nest.map_structure(lambda s: s[1:], layer_output_shape)
  @property
  def tracks_own_finished(self):
    """The BeamSearchDecoder shuffles its beams and their finished state.
    For this reason, it conflicts with the `dynamic_decode` function's
    tracking of finished states.  Setting this property to true avoids
    early stopping of decoding due to mismanagement of the finished state
    in `dynamic_decode`.
    Returns:
      `True`.
    """
    return True
  @property
  def output_size(self):
    # Return the cell output and the id
    return BeamSearchDecoderOutput(
        scores=tensor_shape.TensorShape([self._beam_width]),
        predicted_ids=tensor_shape.TensorShape([self._beam_width]),
        parent_ids=tensor_shape.TensorShape([self._beam_width]))
  @property
  def output_dtype(self):
    # Assume the dtype of the cell is the output_size structure
    # containing the input_state's first component's dtype.
    # Return that structure and int32 (the id)
    dtype = nest.flatten(self._initial_cell_state)[0].dtype
    return BeamSearchDecoderOutput(
        scores=nest.map_structure(lambda _: dtype, self._rnn_output_size()),
        predicted_ids=dtypes.int32,
        parent_ids=dtypes.int32)
  def initialize(self, name=None):
    """Initialize the decoder.
    Args:
      name: Name scope for any created operations.
    Returns:
      `(finished, start_inputs, initial_state)`.
    """
    finished, start_inputs = self._finished, self._start_inputs
    # All beams start unfinished, with zero log-prob and zero length.
    initial_state = BeamSearchDecoderState(
        cell_state=self._initial_cell_state,
        log_probs=array_ops.zeros(
            [self._batch_size, self._beam_width],
            dtype=nest.flatten(self._initial_cell_state)[0].dtype),
        finished=finished,
        lengths=array_ops.zeros(
            [self._batch_size, self._beam_width], dtype=dtypes.int64))
    return (finished, start_inputs, initial_state)
  def finalize(self, outputs, final_state, sequence_lengths):
    """Finalize and return the predicted_ids.
    Args:
      outputs: An instance of BeamSearchDecoderOutput.
      final_state: An instance of BeamSearchDecoderState. Passed through to the
        output.
      sequence_lengths: An `int64` tensor shaped `[batch_size, beam_width]`.
        The sequence lengths determined for each beam during decode.
        **NOTE** These are ignored; the updated sequence lengths are stored in
        `final_state.lengths`.
    Returns:
      outputs: An instance of `FinalBeamSearchDecoderOutput` where the
        predicted_ids are the result of calling _gather_tree.
      final_state: The same input instance of `BeamSearchDecoderState`.
    """
    del sequence_lengths
    # Get max_sequence_length across all beams for each batch.
    max_sequence_lengths = math_ops.to_int32(
        math_ops.reduce_max(final_state.lengths, axis=1))
    # Follow parent_ids backwards through time to reassemble each beam's
    # full token sequence.
    predicted_ids = beam_search_ops.gather_tree(
        outputs.predicted_ids,
        outputs.parent_ids,
        max_sequence_lengths=max_sequence_lengths,
        end_token=self._end_token)
    outputs = FinalBeamSearchDecoderOutput(
        beam_search_decoder_output=outputs, predicted_ids=predicted_ids)
    return outputs, final_state
  def _merge_batch_beams(self, t, s=None):
    """Merges the tensor from a batch of beams into a batch by beams.
    More exactly, t is a tensor of dimension [batch_size, beam_width, s]. We
    reshape this into [batch_size*beam_width, s]
    Args:
      t: Tensor of dimension [batch_size, beam_width, s]
      s: (Possibly known) depth shape.
    Returns:
      A reshaped version of t with dimension [batch_size * beam_width, s].
    """
    if isinstance(s, ops.Tensor):
      s = tensor_shape.as_shape(tensor_util.constant_value(s))
    else:
      s = tensor_shape.TensorShape(s)
    t_shape = array_ops.shape(t)
    static_batch_size = tensor_util.constant_value(self._batch_size)
    batch_size_beam_width = (
        None if static_batch_size is None
        else static_batch_size * self._beam_width)
    reshaped_t = array_ops.reshape(
        t, array_ops.concat(
            ([self._batch_size * self._beam_width], t_shape[2:]), 0))
    reshaped_t.set_shape(
        (tensor_shape.TensorShape([batch_size_beam_width]).concatenate(s)))
    return reshaped_t
  def _split_batch_beams(self, t, s=None):
    """Splits the tensor from a batch by beams into a batch of beams.
    More exactly, t is a tensor of dimension [batch_size*beam_width, s]. We
    reshape this into [batch_size, beam_width, s]
    Args:
      t: Tensor of dimension [batch_size*beam_width, s].
      s: (Possibly known) depth shape.
    Returns:
      A reshaped version of t with dimension [batch_size, beam_width, s].
    Raises:
      ValueError: If, after reshaping, the new tensor is not shaped
        `[batch_size, beam_width, s]` (assuming batch_size and beam_width
        are known statically).
    """
    if isinstance(s, ops.Tensor):
      s = tensor_shape.TensorShape(tensor_util.constant_value(s))
    else:
      s = tensor_shape.TensorShape(s)
    t_shape = array_ops.shape(t)
    reshaped_t = array_ops.reshape(
        t, array_ops.concat(
            ([self._batch_size, self._beam_width], t_shape[1:]), 0))
    static_batch_size = tensor_util.constant_value(self._batch_size)
    expected_reshaped_shape = tensor_shape.TensorShape(
        [static_batch_size, self._beam_width]).concatenate(s)
    # A static-shape mismatch here almost always means the caller forgot to
    # tile the encoder state to beam_width (see the class docstring).
    if not reshaped_t.shape.is_compatible_with(expected_reshaped_shape):
      raise ValueError("Unexpected behavior when reshaping between beam width "
                       "and batch size.  The reshaped tensor has shape: %s.  "
                       "We expected it to have shape "
                       "(batch_size, beam_width, depth) == %s.  Perhaps you "
                       "forgot to create a zero_state with "
                       "batch_size=encoder_batch_size * beam_width?"
                       % (reshaped_t.shape, expected_reshaped_shape))
    reshaped_t.set_shape(expected_reshaped_shape)
    return reshaped_t
  def _maybe_split_batch_beams(self, t, s):
    """Maybe splits the tensor from a batch by beams into a batch of beams.
    We do this so that we can use nest and not run into problems with shapes.
    Args:
      t: `Tensor`, either scalar or shaped `[batch_size * beam_width] + s`.
      s: `Tensor`, Python int, or `TensorShape`.
    Returns:
      If `t` is a matrix or higher order tensor, then the return value is
      `t` reshaped to `[batch_size, beam_width] + s`.  Otherwise `t` is
      returned unchanged.
    Raises:
      TypeError: If `t` is an instance of `TensorArray`.
      ValueError: If the rank of `t` is not statically known.
    """
    _check_maybe(t)
    if t.shape.ndims >= 1:
      return self._split_batch_beams(t, s)
    else:
      return t
  def _maybe_merge_batch_beams(self, t, s):
    """Splits the tensor from a batch by beams into a batch of beams.
    More exactly, `t` is a tensor of dimension `[batch_size * beam_width] + s`,
    then we reshape it to `[batch_size, beam_width] + s`.
    Args:
      t: `Tensor` of dimension `[batch_size * beam_width] + s`.
      s: `Tensor`, Python int, or `TensorShape`.
    Returns:
      A reshaped version of t with shape `[batch_size, beam_width] + s`.
    Raises:
      TypeError: If `t` is an instance of `TensorArray`.
      ValueError:  If the rank of `t` is not statically known.
    """
    _check_maybe(t)
    if t.shape.ndims >= 2:
      return self._merge_batch_beams(t, s)
    else:
      return t
  def step(self, time, inputs, state, name=None):
    """Perform a decoding step.
    Args:
      time: scalar `int32` tensor.
      inputs: A (structure of) input tensors.
      state: A (structure of) state tensors and TensorArrays.
      name: Name scope for any created operations.
    Returns:
      `(outputs, next_state, next_inputs, finished)`.
    """
    batch_size = self._batch_size
    beam_width = self._beam_width
    end_token = self._end_token
    length_penalty_weight = self._length_penalty_weight
    with ops.name_scope(name, "BeamSearchDecoderStep", (time, inputs, state)):
      cell_state = state.cell_state
      # Collapse [batch, beam, ...] -> [batch * beam, ...] so the wrapped
      # cell sees an ordinary batch.
      inputs = nest.map_structure(
          lambda inp: self._merge_batch_beams(inp, s=inp.shape[2:]), inputs)
      cell_state = nest.map_structure(
          self._maybe_merge_batch_beams,
          cell_state, self._cell.state_size)
      cell_outputs, next_cell_state = self._cell(inputs, cell_state)
      # Restore the separate beam axis on the cell outputs and state.
      cell_outputs = nest.map_structure(
          lambda out: self._split_batch_beams(out, out.shape[1:]), cell_outputs)
      next_cell_state = nest.map_structure(
          self._maybe_split_batch_beams,
          next_cell_state, self._cell.state_size)
      if self._output_layer is not None:
        cell_outputs = self._output_layer(cell_outputs)
      beam_search_output, beam_search_state = _beam_search_step(
          time=time,
          logits=cell_outputs,
          next_cell_state=next_cell_state,
          beam_state=state,
          batch_size=batch_size,
          beam_width=beam_width,
          end_token=end_token,
          length_penalty_weight=length_penalty_weight)
      finished = beam_search_state.finished
      sample_ids = beam_search_output.predicted_ids
      # If every beam is finished the inputs no longer matter; feed the
      # start inputs to avoid embedding out-of-range ids.
      next_inputs = control_flow_ops.cond(
          math_ops.reduce_all(finished), lambda: self._start_inputs,
          lambda: self._embedding_fn(sample_ids))
    return (beam_search_output, beam_search_state, next_inputs, finished)
def _beam_search_step(time, logits, next_cell_state, beam_state, batch_size,
                      beam_width, end_token, length_penalty_weight):
  """Performs a single step of Beam Search Decoding.
  Args:
    time: Beam search time step, should start at 0. At time 0 we assume
      that all beams are equal and consider only the first beam for
      continuations.
    logits: Logits at the current time step. A tensor of shape
      `[batch_size, beam_width, vocab_size]`
    next_cell_state: The next state from the cell, e.g. an instance of
      AttentionWrapperState if the cell is attentional.
    beam_state: Current state of the beam search.
      An instance of `BeamSearchDecoderState`.
    batch_size: The batch size for this input.
    beam_width: Python int.  The size of the beams.
    end_token: The int32 end token.
    length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
  Returns:
    A new beam state.
  """
  static_batch_size = tensor_util.constant_value(batch_size)
  # Calculate the current lengths of the predictions
  prediction_lengths = beam_state.lengths
  previously_finished = beam_state.finished
  # Calculate the total log probs for the new hypotheses
  # Final Shape: [batch_size, beam_width, vocab_size]
  step_log_probs = nn_ops.log_softmax(logits)
  # Finished beams may only "emit" EOS (all other tokens get dtype.min).
  step_log_probs = _mask_probs(step_log_probs, end_token, previously_finished)
  total_probs = array_ops.expand_dims(beam_state.log_probs, 2) + step_log_probs
  # Calculate the continuation lengths by adding to all continuing beams.
  vocab_size = logits.shape[-1].value or array_ops.shape(logits)[-1]
  # one_hot yields 0 in the EOS column and 1 everywhere else, so choosing
  # EOS does not extend a beam's length.
  lengths_to_add = array_ops.one_hot(
      indices=array_ops.fill([batch_size, beam_width], end_token),
      depth=vocab_size,
      on_value=np.int64(0), off_value=np.int64(1),
      dtype=dtypes.int64)
  add_mask = math_ops.to_int64(math_ops.logical_not(previously_finished))
  lengths_to_add *= array_ops.expand_dims(add_mask, 2)
  new_prediction_lengths = (
      lengths_to_add + array_ops.expand_dims(prediction_lengths, 2))
  # Calculate the scores for each beam
  scores = _get_scores(
      log_probs=total_probs,
      sequence_lengths=new_prediction_lengths,
      length_penalty_weight=length_penalty_weight)
  time = ops.convert_to_tensor(time, name="time")
  # During the first time step we only consider the initial beam
  scores_shape = array_ops.shape(scores)
  scores_flat = control_flow_ops.cond(
      time > 0,
      lambda: array_ops.reshape(scores, [batch_size, -1]),
      lambda: scores[:, 0])
  num_available_beam = control_flow_ops.cond(
      time > 0, lambda: math_ops.reduce_prod(scores_shape[1:]),
      lambda: math_ops.reduce_prod(scores_shape[2:]))
  # Pick the next beams according to the specified successors function
  next_beam_size = math_ops.minimum(
      ops.convert_to_tensor(beam_width, dtype=dtypes.int32, name="beam_width"),
      num_available_beam)
  next_beam_scores, word_indices = nn_ops.top_k(scores_flat, k=next_beam_size)
  next_beam_scores.set_shape([static_batch_size, beam_width])
  word_indices.set_shape([static_batch_size, beam_width])
  # Pick out the probs, beam_ids, and states according to the chosen predictions
  next_beam_probs = _tensor_gather_helper(
      gather_indices=word_indices,
      gather_from=total_probs,
      batch_size=batch_size,
      range_size=beam_width * vocab_size,
      gather_shape=[-1],
      name="next_beam_probs")
  # word_indices index the flattened [beam, vocab] axis; mod recovers the
  # word id and integer-division the parent beam id.
  # Note: just doing the following
  #   math_ops.to_int32(word_indices % vocab_size,
  #       name="next_beam_word_ids")
  # would be a lot cleaner but for reasons unclear, that hides the results of
  # the op which prevents capturing it with tfdbg debug ops.
  raw_next_word_ids = math_ops.mod(word_indices, vocab_size,
                                   name="next_beam_word_ids")
  next_word_ids = math_ops.to_int32(raw_next_word_ids)
  next_beam_ids = math_ops.to_int32(word_indices / vocab_size,
                                    name="next_beam_parent_ids")
  # Append new ids to current predictions
  previously_finished = _tensor_gather_helper(
      gather_indices=next_beam_ids,
      gather_from=previously_finished,
      batch_size=batch_size,
      range_size=beam_width,
      gather_shape=[-1])
  next_finished = math_ops.logical_or(previously_finished,
                                      math_ops.equal(next_word_ids, end_token),
                                      name="next_beam_finished")
  # Calculate the length of the next predictions.
  # 1. Finished beams remain unchanged.
  # 2. Beams that are now finished (EOS predicted) have their length
  #    increased by 1.
  # 3. Beams that are not yet finished have their length increased by 1.
  lengths_to_add = math_ops.to_int64(math_ops.logical_not(previously_finished))
  next_prediction_len = _tensor_gather_helper(
      gather_indices=next_beam_ids,
      gather_from=beam_state.lengths,
      batch_size=batch_size,
      range_size=beam_width,
      gather_shape=[-1])
  next_prediction_len += lengths_to_add
  # Pick out the cell_states according to the next_beam_ids. We use a
  # different gather_shape here because the cell_state tensors, i.e.
  # the tensors that would be gathered from, all have dimension
  # greater than two and we need to preserve those dimensions.
  # pylint: disable=g-long-lambda
  next_cell_state = nest.map_structure(
      lambda gather_from: _maybe_tensor_gather_helper(
          gather_indices=next_beam_ids,
          gather_from=gather_from,
          batch_size=batch_size,
          range_size=beam_width,
          gather_shape=[batch_size * beam_width, -1]),
      next_cell_state)
  # pylint: enable=g-long-lambda
  next_state = BeamSearchDecoderState(
      cell_state=next_cell_state,
      log_probs=next_beam_probs,
      lengths=next_prediction_len,
      finished=next_finished)
  output = BeamSearchDecoderOutput(
      scores=next_beam_scores,
      predicted_ids=next_word_ids,
      parent_ids=next_beam_ids)
  return output, next_state
def _get_scores(log_probs, sequence_lengths, length_penalty_weight):
  """Calculates scores for beam search hypotheses.

  Args:
    log_probs: The log probabilities with shape
      `[batch_size, beam_width, vocab_size]`.
    sequence_lengths: The array of sequence lengths.
    length_penalty_weight: Float weight to penalize length. Disabled with 0.0.

  Returns:
    The scores normalized by the length_penalty.
  """
  # Fix: local variable was previously misspelled "length_penality_".
  # _length_penalty returns the scalar 1.0 when the weight is 0, so the
  # division is then a no-op.
  length_penalty = _length_penalty(
      sequence_lengths=sequence_lengths, penalty_factor=length_penalty_weight)
  return log_probs / length_penalty
def _length_penalty(sequence_lengths, penalty_factor):
  """Calculates the length penalty of https://arxiv.org/abs/1609.08144.

  The penalty is computed element-wise as:
  ```
  [(5+sequence_lengths)/6]**penalty_factor
  ```

  Args:
    sequence_lengths: `Tensor`, the sequence lengths of each hypotheses.
    penalty_factor: A scalar that weights the length penalty.

  Returns:
    The scalar `1.0` when the penalty factor is statically known to be `0`;
    otherwise a tensor with the same shape as `sequence_lengths` holding the
    length penalty factor.
  """
  penalty_factor = ops.convert_to_tensor(penalty_factor, name="penalty_factor")
  penalty_factor.set_shape(())  # penalty should be a scalar.
  static_value = tensor_util.constant_value(penalty_factor)
  if static_value is not None and static_value == 0:
    # Disabled: skip building the penalty graph entirely.
    return 1.0
  numerator = (5. + math_ops.to_float(sequence_lengths)) ** penalty_factor
  denominator = (5. + 1.) ** penalty_factor
  return math_ops.div(numerator, denominator)
def _mask_probs(probs, eos_token, finished):
  """Masks log probabilities.
  The result is that finished beams allocate all probability mass to eos and
  unfinished beams remain unchanged.
  Args:
    probs: Log probabilities of shape `[batch_size, beam_width, vocab_size]`
    eos_token: An int32 id corresponding to the EOS token to allocate
      probability to.
    finished: A boolean tensor of shape `[batch_size, beam_width]` that
      specifies which elements in the beam are finished already.
  Returns:
    A tensor of shape `[batch_size, beam_width, vocab_size]`, where unfinished
    beams stay unchanged and finished beams are replaced with a tensor with all
    probability on the EOS token.
  """
  vocab_size = array_ops.shape(probs)[2]
  # All finished examples are replaced with a vector that has all
  # probability on EOS.  EOS gets log-prob 0 (probability 1) and every
  # other token gets dtype.min, the most negative representable value.
  finished_row = array_ops.one_hot(
      eos_token,
      vocab_size,
      dtype=probs.dtype,
      on_value=0.,
      off_value=probs.dtype.min)
  # Broadcast the EOS row out to [batch_size, beam_width, vocab_size].
  finished_probs = array_ops.tile(
      array_ops.reshape(finished_row, [1, 1, -1]),
      array_ops.concat([array_ops.shape(finished), [1]], 0))
  # Expand the per-beam finished flags over the vocab axis and select.
  finished_mask = array_ops.tile(
      array_ops.expand_dims(finished, 2), [1, 1, vocab_size])
  return array_ops.where(finished_mask, finished_probs, probs)
def _maybe_tensor_gather_helper(gather_indices, gather_from, batch_size,
                                range_size, gather_shape):
  """Maybe applies _tensor_gather_helper.

  _tensor_gather_helper is applied only when the rank of `gather_from` is at
  least `len(gather_shape)`.  This is used in conjunction with nest so that
  we don't apply _tensor_gather_helper to inapplicable values like scalars.

  Args:
    gather_indices: The tensor indices that we use to gather.
    gather_from: The tensor that we are gathering from.
    batch_size: The batch size.
    range_size: The number of values in each range. Likely equal to beam_width.
    gather_shape: What we should reshape gather_from to in order to preserve
      the correct values.  An example is when gather_from is the attention
      from an AttentionWrapperState with shape
      [batch_size, beam_width, attention_size].  There, we want to preserve
      the attention_size elements, so gather_shape is
      [batch_size * beam_width, -1].  Then, upon reshape, we still have the
      attention_size as desired.

  Returns:
    output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
      or the original tensor if its dimensions are too small.
  """
  _check_maybe(gather_from)
  if gather_from.shape.ndims < len(gather_shape):
    # Too low-rank to reshape/gather (e.g. a scalar piece of state).
    return gather_from
  return _tensor_gather_helper(
      gather_indices=gather_indices,
      gather_from=gather_from,
      batch_size=batch_size,
      range_size=range_size,
      gather_shape=gather_shape)
def _tensor_gather_helper(gather_indices, gather_from, batch_size,
                          range_size, gather_shape, name=None):
  """Helper for gathering the right indices from the tensor.
  This works by reshaping gather_from to gather_shape (e.g. [-1]) and then
  gathering from that according to the gather_indices, which are offset by
  the right amounts in order to preserve the batch order.
  Args:
    gather_indices: The tensor indices that we use to gather.
    gather_from: The tensor that we are gathering from.
    batch_size: The input batch size.
    range_size: The number of values in each range. Likely equal to beam_width.
    gather_shape: What we should reshape gather_from to in order to preserve the
      correct values. An example is when gather_from is the attention from an
      AttentionWrapperState with shape [batch_size, beam_width, attention_size].
      There, we want to preserve the attention_size elements, so gather_shape is
      [batch_size * beam_width, -1]. Then, upon reshape, we still have the
      attention_size as desired.
    name: The tensor name for set of operations. By default this is
      'tensor_gather_helper'. The final output is named 'output'.
  Returns:
    output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
  """
  with ops.name_scope(name, "tensor_gather_helper"):
    # Offset each batch's indices by batch_index * range_size so that
    # gathering from the flattened tensor keeps batches separate.
    range_ = array_ops.expand_dims(math_ops.range(batch_size) * range_size, 1)
    gather_indices = array_ops.reshape(gather_indices + range_, [-1])
    output = array_ops.gather(
        array_ops.reshape(gather_from, gather_shape), gather_indices)
    # Restore the leading [batch_size, range_size, ...] layout.
    final_shape = array_ops.shape(gather_from)[:1 + len(gather_shape)]
    static_batch_size = tensor_util.constant_value(batch_size)
    final_static_shape = (tensor_shape.TensorShape([static_batch_size])
                          .concatenate(
                              gather_from.shape[1:1 + len(gather_shape)]))
    output = array_ops.reshape(output, final_shape, name="output")
    output.set_shape(final_static_shape)
    return output
| apache-2.0 |
alisonjo2786/old-python-lessons-fork | section_02_(strings)/string_replace.py | 7 | 2273 | # String methods: string.replace()
# Lesson script demonstrating str.replace().  NOTE: uses Python 2 print
# statements; run under Python 2.
# string.replace() is similar to the find -> replace feature in Word, Excel, or other office-y type programs
song = "eat, eat, eat, apples and bananas"
# Let's start here:
print "I like to ... {0}".format(song)
# string.replace() lets us replace all instances of one string with another.
print "I like to ... {0}".format(song.replace("a","o")) # We're replacing all of the lowercase *a*s in song with *o*s
# Let's take a look at the syntax.
# We've seen the {0} syntax; that's the placeholder that string.format() uses to insert a variable into the string that comes before the dot in .format()
# The 0 corresponds to the first variable in the list inside the parentheses (remember that Python starts counting at zero)
# What's the variable we're going to insert at {0}? It's song.replace("a", "o")
# Python will evaluate song.replace("a", "o") and place the result inside of the {0}
# How song.replace("a", "o") works is: .replace() will replace every "a" it finds in song with an "o"
# The way I remember it is .replace() will perform its action on what comes before the dot (which in song.replace("a", "o"), is song)
print "But note that the original song itself is unchanged: {0}".format(song)
print "string.replace() is case-sensitive."
print song.replace("Eat", "chop") # This won't replace anything!
print song
print song.replace("eat", "chop")
print song # the original is unchanged
# If you want your changes to stick, you'll need to assign your variable song a new value
song = song.replace("eat", "chop")
# What we're saying here is essentially:
# song is now equal to the new value of song.replace("eat", "chop")
# If you have lots of replaces to do on a string, you *could* do it like this:
song = song.replace("apples", "mangos")
song = song.replace(" and", ", pears, and")
song = song.replace("bananas", "kiwis")
print song
# Or, you could chain lots of replaces together -- remember that what gets replaced is what comes before the dot!
# In other words, replaces will occur in left-to-right order
song = "eat, eat, eat, apples and bananas" # setting it back to the original
song = song.replace("eat", "chop").replace("apples", "mangos").replace(" and", ", pears, and").replace("bananas", "kiwis")
print song
naresh21/synergetics-edx-platform | lms/djangoapps/ccx/tests/test_tasks.py | 59 | 4579 | """
Tests for celery tasks defined in tasks module
"""
from mock_django import mock_signal_receiver
from lms.djangoapps.ccx.tests.factories import CcxFactory
from student.roles import CourseCcxCoachRole
from student.tests.factories import (
AdminFactory,
)
from xmodule.modulestore.django import SignalHandler
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase,
TEST_DATA_SPLIT_MODULESTORE)
from openedx.core.djangoapps.content.course_structures.models import CourseStructure
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from ccx_keys.locator import CCXLocator
from lms.djangoapps.ccx.tasks import send_ccx_course_published
class TestSendCCXCoursePublished(ModuleStoreTestCase):
    """Unit tests for the send_ccx_course_published task."""

    MODULESTORE = TEST_DATA_SPLIT_MODULESTORE

    def setUp(self):
        """Create two master courses, one coach, and four CCXs.

        Three CCXs derive from the first course and one from the second,
        so the tests can verify that the published signal fans out only
        to the CCXs of the course that was actually published.
        """
        super(TestSendCCXCoursePublished, self).setUp()
        self.course = CourseFactory.create(org="edX", course="999", display_name="Run 666")
        self.course2 = CourseFactory.create(org="edX", course="999a", display_name="Run 667")
        coach = AdminFactory.create()
        CourseCcxCoachRole(self.course.id).add_users(coach)
        # Three CCXs hang off the first course; a fourth off the second.
        self.ccx = CcxFactory(course_id=self.course.id, coach=coach)
        self.ccx2 = CcxFactory(course_id=self.course.id, coach=coach)
        self.ccx3 = CcxFactory(course_id=self.course.id, coach=coach)
        self.ccx4 = CcxFactory(course_id=self.course2.id, coach=coach)

    def call_fut(self, course_key):
        """Invoke the function under test with a stringified course key."""
        send_ccx_course_published(unicode(course_key))

    def test_signal_not_sent_for_ccx(self):
        """No course_published signal is emitted for a CCX course key."""
        ccx_key = CCXLocator.from_course_locator(self.course.id, self.ccx.id)
        with mock_signal_receiver(SignalHandler.course_published) as handler:
            self.call_fut(ccx_key)
            self.assertEqual(handler.call_count, 0)

    def test_signal_sent_for_ccx(self):
        """The course_published signal fires once per derived CCX.

        Four CCXs exist, but only three derive from the course id used
        here, so exactly three signals must be observed to confirm that
        all derived courses -- and no others -- got the signal.
        """
        with mock_signal_receiver(SignalHandler.course_published) as handler:
            self.call_fut(self.course.id)
            self.assertEqual(handler.call_count, 3)

    def test_course_structure_generated(self):
        """A course structure is generated after the publish signal runs."""
        ccx_structure = {
            u"blocks": {
                u"ccx-block-v1:edX+999+Run_666+ccx@1+type@course+block@course": {
                    u"block_type": u"course",
                    u"graded": False,
                    u"format": None,
                    u"usage_key": u"ccx-block-v1:edX+999+Run_666+ccx@1+type@course+block@course",
                    u"children": [
                    ],
                    u"display_name": u"Run 666"
                }
            },
            u"root": u"ccx-block-v1:edX+999+Run_666+ccx@1+type@course+block@course"
        }
        ccx_key = CCXLocator.from_course_locator(self.course.id, self.ccx.id)
        # Before the signal fires there must be no structure at all.
        self.assertEqual(len(CourseStructure.objects.filter(course_id=ccx_key)), 0)
        with mock_signal_receiver(SignalHandler.course_published) as handler:
            self.call_fut(self.course.id)
            self.assertEqual(handler.call_count, 3)
            structure = CourseStructure.objects.get(course_id=ccx_key)
            self.assertEqual(structure.structure, ccx_structure)

    def test_course_overview_cached(self):
        """A course overview is cached after the publish signal runs."""
        ccx_key = CCXLocator.from_course_locator(self.course.id, self.ccx.id)
        # No overview exists before the signal is called.
        self.assertEqual(len(CourseOverview.objects.filter(id=ccx_key)), 0)
        with mock_signal_receiver(SignalHandler.course_published) as handler:
            self.call_fut(self.course.id)
            self.assertEqual(handler.call_count, 3)
            overview = CourseOverview.objects.filter(id=ccx_key)
            self.assertEqual(len(overview), 1)
| agpl-3.0 |
#!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)
# Allow the readelf binary to be overridden (e.g. a cross-toolchain one).
readelf = os.getenv("READELF", "readelf")
# Matches the function header emitted by "readelf -u":
#   <name>: [0xstart-0xend]   -> groups: name, start addr, end addr (hex)
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Matches an unwind-region line and captures its length ("rlen=N").
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func(func, slots, rlen_sum):
    """Report a mismatch between a function's slot count and the sum of
    its unwind-region lengths, bumping the global error counter.

    A nameless function is labelled by its [start-end] address range
    (taken from the module-level start/end globals).
    """
    global num_errors
    if slots == rlen_sum:
        return
    num_errors += 1
    label = func or "[%#x-%#x]" % (start, end)
    print("ERROR: %s: %lu slots, total region length = %lu" % (label, slots, rlen_sum))
# Running totals for the whole file.
num_funcs = 0
num_errors = 0
# Per-function state; flushed into check_func() when the next function
# header (or end of input) is reached.
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # New function header: first validate the previous function.
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # Each 16-byte IA-64 bundle holds 3 instruction slots.
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        # Region line within the current function: accumulate its length.
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Flush the final function (the loop only checks on the *next* header).
check_func(func, slots, rlen_sum)
if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    # Non-zero exit status so callers (e.g. build scripts) see the failure.
    sys.exit(1)
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.