repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
zaxtax/scikit-learn | sklearn/decomposition/tests/test_online_lda.py | 5 | 13165 | import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.exceptions import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
def test_lda_default_prior_params():
    """Default doc-topic / topic-word priors should equal ``1 / n_topics``.

    Fitting with explicit uniform priors must give the same result as
    fitting with the defaults.
    """
    n_topics, X = _build_sparse_mtx()
    uniform_prior = 1. / n_topics
    explicit = LatentDirichletAllocation(n_topics=n_topics,
                                         doc_topic_prior=uniform_prior,
                                         topic_word_prior=uniform_prior,
                                         random_state=0)
    implicit = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
    assert_almost_equal(explicit.fit_transform(X), implicit.fit_transform(X))
def test_lda_fit_batch():
    """Batch ``fit`` should recover the three planted topics."""
    random_state = np.random.RandomState(0)
    n_topics, X = _build_sparse_mtx()
    lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
                                    learning_method='batch',
                                    random_state=random_state)
    lda.fit(X)

    expected_groups = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
    for comp in lda.components_:
        # The three heaviest words of each topic must form one planted group.
        top_three = tuple(sorted(set(comp.argsort()[-3:][::-1])))
        assert_true(top_three in expected_groups)
def test_lda_fit_online():
    """Online ``fit`` should recover the three planted topics."""
    random_state = np.random.RandomState(0)
    n_topics, X = _build_sparse_mtx()
    lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
                                    evaluate_every=1,
                                    learning_method='online',
                                    random_state=random_state)
    lda.fit(X)

    expected_groups = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
    for comp in lda.components_:
        # The three heaviest words of each topic must form one planted group.
        top_three = tuple(sorted(set(comp.argsort()[-3:][::-1])))
        assert_true(top_three in expected_groups)
def test_lda_partial_fit():
    """Repeated ``partial_fit`` should recover the same planted topics
    as batch training (see ``test_lda_fit_batch``)."""
    random_state = np.random.RandomState(0)
    n_topics, X = _build_sparse_mtx()
    lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
                                    total_samples=100,
                                    random_state=random_state)
    for _ in xrange(3):
        lda.partial_fit(X)

    expected_groups = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
    for comp in lda.components_:
        top_three = tuple(sorted(set(comp.argsort()[-3:][::-1])))
        assert_true(top_three in expected_groups)
def test_lda_dense_input():
    """Fitting on a dense array must work just like sparse input."""
    random_state = np.random.RandomState(0)
    n_topics, X = _build_sparse_mtx()
    lda = LatentDirichletAllocation(n_topics=n_topics,
                                    learning_method='batch',
                                    random_state=random_state)
    lda.fit(X.toarray())

    expected_groups = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
    for comp in lda.components_:
        # The three heaviest words of each topic must form one planted group.
        top_three = tuple(sorted(set(comp.argsort()[-3:][::-1])))
        assert_true(top_three in expected_groups)
def test_lda_transform():
    """``transform`` returns a doc-topic distribution: every entry positive.

    The original assertion used ``.any()``, which passes as soon as a single
    entry is positive and therefore never exercised the stated contract
    ("result cannot be negative"). Assert over all entries instead.
    """
    rng = np.random.RandomState(0)
    X = rng.randint(5, size=(20, 10))
    n_topics = 3
    lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
    X_trans = lda.fit_transform(X)
    # Dirichlet expectations are strictly positive, so all entries must be > 0.
    assert_true((X_trans > 0.0).all())
def test_lda_fit_transform():
    """``fit_transform`` must agree with ``fit`` followed by ``transform``."""
    for method in ('online', 'batch'):
        random_state = np.random.RandomState(0)
        X = random_state.randint(10, size=(50, 20))
        lda = LatentDirichletAllocation(n_topics=5, learning_method=method,
                                        random_state=random_state)
        transformed_at_fit = lda.fit_transform(X)
        transformed_after = lda.transform(X)
        assert_array_almost_equal(transformed_at_fit, transformed_after, 4)
def test_lda_partial_fit_dim_mismatch():
    """A second ``partial_fit`` with a different ``n_features`` must raise.

    Bug fix: the data matrices were drawn from the *global* ``np.random``
    even though a seeded ``RandomState`` was created, making the test data
    irreproducible. Draw from the seeded ``rng`` instead.
    """
    rng = np.random.RandomState(0)
    n_topics = rng.randint(3, 6)
    n_col = rng.randint(6, 10)
    # Use the seeded rng (was np.random) so the test is deterministic.
    X_1 = rng.randint(4, size=(10, n_col))
    X_2 = rng.randint(4, size=(10, n_col + 1))
    lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
                                    total_samples=20, random_state=rng)
    lda.partial_fit(X_1)
    assert_raises_regexp(ValueError, r"^The provided data has",
                         lda.partial_fit, X_2)
def test_invalid_params():
    """``fit`` must raise ValueError for out-of-range constructor params
    (exercises the estimator's ``_check_params``)."""
    X = np.ones((5, 10))

    invalid_models = (
        ('n_topics', LatentDirichletAllocation(n_topics=0)),
        ('learning_method',
         LatentDirichletAllocation(learning_method='unknown')),
        ('total_samples', LatentDirichletAllocation(total_samples=0)),
        ('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
    )
    for param, model in invalid_models:
        assert_raises_regexp(ValueError, r"^Invalid %r parameter" % param,
                             model.fit, X)
def test_lda_negative_input():
    """Fitting on a dense matrix containing negative values must raise."""
    X = -np.ones((5, 10))
    lda = LatentDirichletAllocation()
    assert_raises_regexp(ValueError, r"^Negative values in data passed",
                         lda.fit, X)
def test_lda_no_component_error():
    """``transform`` and ``perplexity`` before ``fit`` raise NotFittedError."""
    random_state = np.random.RandomState(0)
    X = random_state.randint(4, size=(20, 10))
    lda = LatentDirichletAllocation()
    pattern = r"^no 'components_' attribute"
    for unfitted_call in (lda.transform, lda.perplexity):
        assert_raises_regexp(NotFittedError, pattern, unfitted_call, X)
def test_lda_transform_mismatch():
    """A ``partial_fit`` after fitting on wider data must raise on the
    feature-dimension mismatch."""
    random_state = np.random.RandomState(0)
    X = random_state.randint(4, size=(20, 10))
    X_2 = random_state.randint(4, size=(10, 8))

    n_topics = random_state.randint(3, 6)
    lda = LatentDirichletAllocation(n_topics=n_topics,
                                    random_state=random_state)
    lda.partial_fit(X)
    assert_raises_regexp(ValueError, r"^The provided data has",
                         lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
    """Training with ``n_jobs=2`` still recovers the planted topics."""
    n_topics, X = _build_sparse_mtx()
    expected_groups = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
    for method in ('online', 'batch'):
        random_state = np.random.RandomState(0)
        lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
                                        learning_method=method,
                                        random_state=random_state)
        lda.fit(X)
        for comp in lda.components_:
            top_three = tuple(sorted(set(comp.argsort()[-3:][::-1])))
            assert_true(top_three in expected_groups)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
    """Online training via ``partial_fit`` with ``n_jobs=2`` still recovers
    the planted topics."""
    random_state = np.random.RandomState(0)
    n_topics, X = _build_sparse_mtx()
    lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
                                    learning_offset=5., total_samples=30,
                                    random_state=random_state)
    for _ in range(2):
        lda.partial_fit(X)

    expected_groups = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
    for comp in lda.components_:
        top_three = tuple(sorted(set(comp.argsort()[-3:][::-1])))
        assert_true(top_three in expected_groups)
def test_lda_preplexity_mismatch():
    """``perplexity`` must validate the supplied doc-topic distribution shape.

    Bug fix: ``X`` was drawn from the *global* ``np.random`` even though a
    seeded ``RandomState`` was created, making the test data irreproducible.
    Draw from the seeded ``rng`` instead.

    NOTE(review): the function name misspells "perplexity"; kept unchanged so
    test discovery/history is unaffected.
    """
    rng = np.random.RandomState(0)
    n_topics = rng.randint(3, 6)
    n_samples = rng.randint(6, 10)
    # Use the seeded rng (was np.random) so the test is deterministic.
    X = rng.randint(4, size=(n_samples, 10))
    lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
                                    total_samples=20, random_state=rng)
    lda.fit(X)
    # Distribution with the wrong number of rows (samples).
    invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
    assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X,
                         invalid_n_samples)
    # Distribution with the wrong number of topic columns.
    invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
    assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X,
                         invalid_n_topics)
def test_lda_perplexity():
    """More training iterations must not increase perplexity, with or
    without sub-sampling, for both learning methods."""
    n_topics, X = _build_sparse_mtx()
    for method in ('online', 'batch'):
        lda_few = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
                                            learning_method=method,
                                            total_samples=100, random_state=0)
        lda_many = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
                                             learning_method=method,
                                             total_samples=100,
                                             random_state=0)
        distr_few = lda_few.fit_transform(X)
        perp_few = lda_few.perplexity(X, distr_few, sub_sampling=False)
        distr_many = lda_many.fit_transform(X)
        perp_many = lda_many.perplexity(X, distr_many, sub_sampling=False)
        assert_greater_equal(perp_few, perp_many)

        perp_few_sub = lda_few.perplexity(X, distr_few, sub_sampling=True)
        perp_many_sub = lda_many.perplexity(X, distr_many, sub_sampling=True)
        assert_greater_equal(perp_few_sub, perp_many_sub)
def test_lda_score():
    """More training iterations must not decrease the log-likelihood score,
    for both learning methods."""
    n_topics, X = _build_sparse_mtx()
    for method in ('online', 'batch'):
        lda_few = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
                                            learning_method=method,
                                            total_samples=100, random_state=0)
        lda_many = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
                                             learning_method=method,
                                             total_samples=100,
                                             random_state=0)
        lda_few.fit_transform(X)
        score_few = lda_few.score(X)
        lda_many.fit_transform(X)
        score_many = lda_many.score(X)
        assert_greater_equal(score_many, score_few)
def test_perplexity_input_format():
    """Perplexity must be identical for sparse vs. dense input and with or
    without an explicitly supplied doc-topic distribution."""
    n_topics, X = _build_sparse_mtx()
    lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
                                    learning_method='batch',
                                    total_samples=100, random_state=0)
    distr = lda.fit_transform(X)
    baseline = lda.perplexity(X)
    assert_almost_equal(baseline, lda.perplexity(X, distr))
    assert_almost_equal(baseline, lda.perplexity(X.toarray(), distr))
def test_lda_score_perplexity():
    """``perplexity(X)`` must equal ``exp(-score(X) / total_word_count)``."""
    n_topics, X = _build_sparse_mtx()
    lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
                                    random_state=0)
    distr = lda.fit_transform(X)
    reported = lda.perplexity(X, distr, sub_sampling=False)
    # Derive perplexity from the log-likelihood score by hand.
    derived = np.exp(-1. * (lda.score(X) / np.sum(X.data)))
    assert_almost_equal(reported, derived)
def test_lda_empty_docs():
    """All-zero documents must not break fitting; per-word topic weights
    stay normalized across topics."""
    Z = np.zeros((5, 4))
    for X in (Z, csr_matrix(Z)):
        lda = LatentDirichletAllocation(max_iter=750).fit(X)
        n_words = lda.components_.shape[1]
        assert_almost_equal(lda.components_.sum(axis=0), np.ones(n_words))
def test_dirichlet_expectation():
    """The Cython Dirichlet-expectation helpers must match the reference
    ``psi``-based formula from scipy."""
    x = np.logspace(-100, 10, 10000)
    out = np.empty_like(x)
    _dirichlet_expectation_1d(x, 0, out)
    assert_allclose(out, np.exp(psi(x) - psi(np.sum(x))), atol=1e-19)

    x = x.reshape(100, 100)
    expected = psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis])
    assert_allclose(_dirichlet_expectation_2d(x), expected,
                    rtol=1e-11, atol=3e-9)
| bsd-3-clause |
Karel-van-de-Plassche/bokeh | examples/plotting/file/graphs.py | 9 | 2346 | from bokeh.models import StaticLayoutProvider, ColumnDataSource, HoverTool, TapTool
from bokeh.models.graphs import NodesAndLinkedEdges
from bokeh.palettes import Set3_12
from bokeh.plotting import figure, show, output_file
from bokeh.sampledata.us_states import data as us_states
from bokeh.sampledata.airport_routes import airports, routes
import numpy as np
output_file("graphs.html")

# Index airports by their numeric ID and rename the route endpoint columns
# to the "start"/"end" names the graph renderer expects.
airports.set_index("AirportID", inplace=True)
airports.index.rename("index", inplace=True)
routes.rename(columns={"SourceID": "start", "DestinationID": "end"}, inplace=True)

# US state outlines for the background map.
state_lats = [np.array(state['lats']) for state in us_states.values()]
state_lons = [np.array(state['lons']) for state in us_states.values()]
source = ColumnDataSource(data=dict(lats=state_lats, lons=state_lons))

# Fixed node positions: each airport is placed at its (lon, lat).
graph_layout = dict(zip(airports.index.astype(str),
                        zip(airports.Longitude, airports.Latitude)))
layout_provider = StaticLayoutProvider(graph_layout=graph_layout)

fig = figure(x_range=(-180, -60), y_range=(15,75),
             x_axis_label="Longitude", y_axis_label="Latitude",
             plot_width=800, plot_height=600, background_fill_color=Set3_12[4],
             background_fill_alpha=0.2, tools='box_zoom,reset')

fig.patches(xs="lons", ys="lats", line_color='grey', line_width=1.0,
            fill_color=Set3_12[10], source=source)

graph_renderer = fig.graph(
    airports, routes, layout_provider,
    ## node style props
    node_fill_color=Set3_12[3], node_fill_alpha=0.4, node_line_color="black", node_line_alpha=0.3,
    node_nonselection_fill_color=Set3_12[3], node_nonselection_fill_alpha=0.2, node_nonselection_line_alpha=0.1,
    node_selection_fill_color=Set3_12[3], node_selection_fill_alpha=0.8, node_selection_line_alpha=0.3,
    ## edge style props
    edge_line_color="black", edge_line_alpha=0.04,
    edge_hover_line_alpha=0.6, edge_hover_line_color=Set3_12[1],
    edge_nonselection_line_color="black", edge_nonselection_line_alpha=0.01,
    edge_selection_line_alpha=0.6, edge_selection_line_color=Set3_12[1],
    ## graph policies
    inspection_policy=NodesAndLinkedEdges(), selection_policy=NodesAndLinkedEdges())

# Hover/tap act only on the graph renderer, not on the state patches.
hover = HoverTool(tooltips=[("Airport", "@Name (@IATA), @City ")],
                  renderers=[graph_renderer])
tap = TapTool(renderers=[graph_renderer])
fig.add_tools(hover, tap)

show(fig)
| bsd-3-clause |
brianshumate/uniweb | surfbot/validator/admin.py | 1 | 1649 | """
Validator project
admin.py
Copyright (c) 2009 Brian Shumate
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from surfbot.validator.models import Website
from django.contrib import admin
class WebsiteAdmin(admin.ModelAdmin):
    """Admin form layout for Website records: identity fields, validation
    result flags, and check-scheduling metrics."""
    fieldsets = [
        (None, {'fields': ['org']}),
        (None, {'fields': ['rooturl']}),
        # Earlier collapsed variant of the Validation section, kept for reference:
        # ('Validation', {'fields': ['checkok', 'htmlval', 'cssval', 'accessval', 'linksval'], 'classes': ['collapse']}),
        ('Validation', {'fields': ['checkok', 'htmlval', 'cssval', 'accessval', 'linksval']}),
        ('Metrics', {'fields': ['lastcheck', 'nextcheck','checktotal']}),
    ]


admin.site.register(Website, WebsiteAdmin)
credativ/pulp | bindings/test/unit/test_upload.py | 17 | 1554 | import unittest
import mock
from pulp.bindings.upload import UploadAPI
class TestUploadAPI(unittest.TestCase):
    """Unit tests for the request body built by UploadAPI.import_upload."""

    def setUp(self):
        self.api = UploadAPI(mock.MagicMock())

    def _run_and_verify(self, **kwargs):
        """Call import_upload with the shared fixture arguments plus
        *kwargs* and verify the POST path and body."""
        ret = self.api.import_upload('upload_id', 'repo_id', 'unit_type_id',
                                     unit_key={}, unit_metadata={}, **kwargs)
        expected_body = {
            'upload_id': 'upload_id',
            'unit_type_id': 'unit_type_id',
            'unit_key': {},
            'unit_metadata': {},
            # Mirrors the call: explicit override_config or the default None.
            'override_config': kwargs.get('override_config'),
        }
        self.api.server.POST.assert_called_once_with(
            '/v2/repositories/%s/actions/import_upload/' % 'repo_id',
            expected_body)
        self.assertEqual(ret, self.api.server.POST.return_value)

    def test_import_upload_with_override_config(self):
        self._run_and_verify(override_config={'mask-id': 'test-mask-id'})

    def test_import_upload(self):
        self._run_and_verify()
| gpl-2.0 |
denys-duchier/django | tests/auth_tests/models/custom_permissions.py | 21 | 1253 | """
The CustomPermissionsUser users email as the identifier, but uses the normal
Django permissions model. This allows us to check that the PermissionsMixin
includes everything that is needed to interact with the ModelBackend.
"""
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.db import models
from .custom_user import CustomUserManager, RemoveGroupsAndPermissions
class CustomPermissionsUserManager(CustomUserManager):
    """Manager adding superuser creation for the email-identified test user."""

    def create_superuser(self, email, password, date_of_birth):
        """Create a regular user, then promote it to superuser and save."""
        user = self.create_user(email, password=password,
                                date_of_birth=date_of_birth)
        user.is_superuser = True
        user.save(using=self._db)
        return user
with RemoveGroupsAndPermissions():
    # NOTE(review): RemoveGroupsAndPermissions is imported from .custom_user;
    # by its name it presumably strips the groups/user_permissions M2M fields
    # while this test model is declared — confirm against that module.
    class CustomPermissionsUser(AbstractBaseUser, PermissionsMixin):
        # Email doubles as the login identifier (see USERNAME_FIELD below).
        email = models.EmailField(verbose_name='email address', max_length=255, unique=True)
        date_of_birth = models.DateField()

        custom_objects = CustomPermissionsUserManager()

        USERNAME_FIELD = 'email'
        REQUIRED_FIELDS = ['date_of_birth']

        def get_full_name(self):
            return self.email

        def get_short_name(self):
            return self.email

        def __str__(self):
            return self.email
| bsd-3-clause |
sgml/popcorn_maker | popcorn_gallery/popcorn/migrations/0025_auto__del_field_template_template.py | 3 | 11522 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration 0025: drop the obsolete ``Template.template``
    FileField from the ``popcorn_template`` table."""

    def forwards(self, orm):
        # Deleting field 'Template.template'
        db.delete_column('popcorn_template', 'template')

    def backwards(self, orm):
        # Adding field 'Template.template' back, empty default, not kept.
        db.add_column('popcorn_template', 'template',
                      self.gf('django.db.models.fields.files.FileField')(default='', max_length=100),
                      keep_default=False)

    # Frozen ORM snapshot used by South to reconstruct the model state at
    # this point in migration history. Auto-generated — do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'popcorn.project': {
            'Meta': {'ordering': "('is_featured', '-modified')", 'object_name': 'Project'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['popcorn.ProjectCategory']", 'symmetrical': 'False', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_forkable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'metadata': ('django.db.models.fields.TextField', [], {}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['popcorn.Project']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['popcorn.Template']", 'null': 'True', 'blank': 'True'}),
            'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'blank': 'True'}),
            'views_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'votes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'popcorn.projectcategory': {
            'Meta': {'object_name': 'ProjectCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
        },
        'popcorn.projectcategorymembership': {
            'Meta': {'unique_together': "(('user', 'project_category'),)", 'object_name': 'ProjectCategoryMembership'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'project_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['popcorn.ProjectCategory']"}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.Profile']"})
        },
        'popcorn.template': {
            'Meta': {'ordering': "('-is_featured', 'name')", 'object_name': 'Template'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['popcorn.TemplateCategory']", 'symmetrical': 'False', 'blank': 'True'}),
            'config': ('django.db.models.fields.TextField', [], {'default': '{}', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'metadata': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'template_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'views_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'votes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'popcorn.templatecategory': {
            'Meta': {'object_name': 'TemplateCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
        },
        'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
        },
        'users.profile': {
            'Meta': {'object_name': 'Profile'},
            'bio': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['popcorn.ProjectCategory']", 'through': "orm['popcorn.ProjectCategoryMembership']", 'symmetrical': 'False'}),
            'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'})
        }
    }

    complete_apps = ['popcorn']
MingyuanXie/CopyNet | emolga/models/pointers.py | 1 | 36210 | __author__ = 'jiataogu'
import theano
import logging
import copy
from emolga.layers.recurrent import *
from emolga.layers.ntm_minibatch import Controller
from emolga.layers.embeddings import *
from emolga.layers.attention import *
from emolga.layers.highwayNet import *
from emolga.models.encdec import *
from core import Model
# theano.config.exception_verbosity = 'high'
# NOTE(review): the module object is aliased as `logger`, so calls like
# `logger.info(...)` use the root logger and bypass per-module configuration.
logger = logging  # .getLogger(__name__)
RNN = GRU  # change it here for other RNN models.
class PtrDecoder(Model):
"""
RNN-Decoder for Pointer Networks
"""
    def __init__(self,
                 config, rng, prefix='ptrdec'):
        """Create all layers of the pointer-network decoder.

        :param config: hyper-parameter dict; reads the ``ptr_*`` dimension keys.
        :param rng: random-streams object, used later for stochastic sampling.
        :param prefix: string prepended to every created layer's name.
        """
        super(PtrDecoder, self).__init__()

        self.config = config
        self.rng = rng
        self.prefix = prefix
        """
        Create all elements of the Decoder's computational graph.
        """
        # create Initialization Layers
        # Maps the context vector to the RNN's initial hidden state (tanh).
        logger.info("{}_create initialization layers.".format(self.prefix))
        self.Initializer = Dense(
            config['ptr_contxt_dim'],
            config['ptr_hidden_dim'],
            activation='tanh',
            name="{}_init".format(self.prefix)
        )

        # create RNN cells
        logger.info("{}_create RNN cells.".format(self.prefix))
        self.RNN = RNN(
            self.config['ptr_embedd_dim'],
            self.config['ptr_hidden_dim'],
            self.config['ptr_contxt_dim'],
            name="{}_cell".format(self.prefix)
        )

        # NOTE(review): _add comes from the Model base class; presumably it
        # registers sub-layers so their parameters are trained — confirm.
        self._add(self.Initializer)
        self._add(self.RNN)

        # create readout layers
        # Attention over source positions acts as the pointer distribution.
        logger.info("_create Attention-Readout layers")
        self.attender = Attention(
            self.config['ptr_hidden_dim'],
            self.config['ptr_source_dim'],
            self.config['ptr_middle_dim'],
            name='{}_attender'.format(self.prefix)
        )
        self._add(self.attender)
@staticmethod
def grab_prob(probs, X):
assert probs.ndim == 3
batch_size = probs.shape[0]
max_len = probs.shape[1]
vocab_size = probs.shape[2]
probs = probs.reshape((batch_size * max_len, vocab_size))
return probs[T.arange(batch_size * max_len), X.flatten(1)].reshape(X.shape) # advanced indexing
@staticmethod
def grab_source(source, target):
# source : (nb_samples, source_num, source_dim)
# target : (nb_samples, target_num)
assert source.ndim == 3
batch_size = source.shape[0]
source_num = source.shape[1]
source_dim = source.shape[2]
target_num = target.shape[1]
source_flt = source.reshape((batch_size * source_num, source_dim))
target_idx = (target + (T.arange(batch_size) * source_num)[:, None]).reshape((batch_size * target_num,))
value = source_flt[target_idx].reshape((batch_size, target_num, source_dim))
return value
    def build_decoder(self,
                      inputs,
                      source, target,
                      smask=None, tmask=None, context=None):
        """
        Build the Pointer Network Decoder Computational Graph

        Returns the per-sample log-probability of the target pointer
        sequence under teacher forcing.
        """
        # inputs : (nb_samples, source_num, ptr_embedd_dim)
        # source : (nb_samples, source_num, source_dim)
        # smask  : (nb_samples, source_num)
        # target : (nb_samples, target_num)
        # tmask  : (nb_samples, target_num)
        # context: (nb_sample, context_dim)

        # initialized hidden state from the context vector.
        assert context is not None
        Init_h = self.Initializer(context)

        # Teacher forcing: feed the embedding of the *previous* target
        # position; the first step gets an all-zero <bos> vector (the
        # concatenate shifts X right by one step and drops the last).
        X = self.grab_source(inputs, target)   # (nb_samples, target_num, source_dim)
        X = T.concatenate([alloc_zeros_matrix(X.shape[0], 1, X.shape[2]),
                           X[:, :-1, :]], axis=1)
        X = X.dimshuffle((1, 0, 2))            # time-major for theano.scan
        # tmask  = tmask.dimshuffle((1, 0))

        # eat by recurrent net
        def _recurrence(x, prev_h, c, s, s_mask):
            # RNN read-out, then attention over source as log-probabilities.
            x_out = self.RNN(x, mask=None, C=c, init_h=prev_h, one_step=True)
            s_out = self.attender(x_out, s, s_mask, return_log=True)
            return x_out, s_out

        outputs, _ = theano.scan(
            _recurrence,
            sequences=[X],
            outputs_info=[Init_h, None],
            non_sequences=[context, source, smask]
        )

        # back to batch-major: (nb_samples, target_num, source_num)
        log_prob_dist = outputs[-1].dimshuffle((1, 0, 2))
        # tmask  = tmask.dimshuffle((1, 0))
        # Sum masked per-step log-probs of the gold pointer targets.
        log_prob = T.sum(self.grab_prob(log_prob_dist, target) * tmask, axis=1)
        return log_prob
    """
    Sample one step
    """
    def _step_sample(self, prev_idx, prev_stat,
                     context, inputs, source, smask):
        """Advance sampling by one step.

        :param prev_idx: previously sampled source positions; a negative
            value marks the <bos> step (zero input vector is fed instead).
        :param prev_stat: previous RNN hidden state.
        Returns (next_prob, next_sample, next_stat).
        """
        # Feed zeros on the first step, otherwise the embedding of the
        # previously pointed-to source position.
        X = T.switch(
            prev_idx[:, None] < 0,
            alloc_zeros_matrix(prev_idx.shape[0], self.config['ptr_embedd_dim']),
            self.grab_source(inputs, prev_idx[:, None])
        )

        # one step RNN
        X_out = self.RNN(X, C=context, init_h=prev_stat, one_step=True)
        next_stat = X_out

        # Attention read-out gives the pointer distribution; draw one sample.
        next_prob = self.attender(X_out, source, smask)
        next_sample = self.rng.multinomial(pvals=next_prob).argmax(1)
        return next_prob, next_sample, next_stat
def build_sampler(self):
    """
    Build a sampler which only steps once.

    Compiles two Theano functions onto self:
      * get_init_state(context) -> initial decoder hidden state
      * sample_next(prev_idx, prev_state, context, inputs, source, smask)
        -> (next_prob, next_sample, next_state)
    """
    logger.info("build sampler ...")
    if self.config['sample_stoch'] and self.config['sample_argmax']:
        logger.info("use argmax search!")
    elif self.config['sample_stoch'] and (not self.config['sample_argmax']):
        logger.info("use stochastic sampling!")
    elif self.config['sample_beam'] > 1:
        logger.info("use beam search! (beam_size={})".format(self.config['sample_beam']))
    # initial state of our Decoder.
    context = T.matrix()  # theano variable.
    init_h = self.Initializer(context)
    logger.info('compile the function: get_init_state')
    self.get_init_state \
        = theano.function([context], init_h, name='get_init_state')
    logger.info('done.')
    # sampler: 1 x 1
    prev_idx = T.vector('prev_idx', dtype='int64')
    prev_stat = T.matrix('prev_state', dtype='float32')
    inputs = T.tensor3()
    source = T.tensor3()
    smask = T.imatrix()
    next_prob, next_sample, next_stat \
        = self._step_sample(prev_idx, prev_stat, context,
                            inputs, source, smask)
    # next word probability
    logger.info('compile the function: sample_next')
    # note: `inputs` is rebound here from a tensor to the argument list
    inputs = [prev_idx, prev_stat, context, inputs, source, smask]
    outputs = [next_prob, next_sample, next_stat]
    self.sample_next = theano.function(inputs, outputs, name='sample_next')
    logger.info('done')
    pass
"""
Generate samples, either with stochastic sampling or beam-search!
"""
def get_sample(self, context, inputs, source, smask,
               k=1, maxlen=30, stochastic=True, argmax=False, fixlen=False):
    """
    Generate one pointer sequence for a single example, either with
    stochastic/greedy sampling (k == 1) or beam search (k > 1).

    context : (1, context_dim) initial context vector
    inputs  : (1, source_num, ptr_embedd_dim) pointer embeddings
    source  : (1, source_num, source_dim) attention memory
    smask   : (1, source_num) source mask
    k       : beam width (1 => sampling path)
    maxlen  : maximum number of decoding steps
    stochastic : sample from the distribution instead of beam search
    argmax  : greedy (argmax) choice when sampling stochastically
    fixlen  : ignore the end token (index 0) and always decode maxlen steps

    Returns (sample, score): with sampling, a flat list of indices and a
    float score; with beam search, a list of hypotheses and their costs.
    """
    # beam size
    if k > 1:
        assert not stochastic, 'Beam search does not support stochastic sampling!!'
    # prepare for searching
    sample = []
    score = []
    if stochastic:
        score = 0
    live_k = 1
    dead_k = 0
    hyp_samples = [[]] * live_k
    hyp_scores = np.zeros(live_k).astype(theano.config.floatX)
    hyp_states = []
    # get initial state of decoder RNN with context
    next_state = self.get_init_state(context)
    next_word = -1 * np.ones((1,)).astype('int64')  # indicator for the first target word (bos target)
    # Start searching!
    for ii in xrange(maxlen):
        # tile the single example so every live hypothesis gets a copy
        ctx = np.tile(context, [live_k, 1])
        ipt = np.tile(inputs, [live_k, 1, 1])
        sor = np.tile(source, [live_k, 1, 1])
        smk = np.tile(smask, [live_k, 1])
        next_prob, next_word, next_state \
            = self.sample_next(next_word, next_state,
                               ctx, ipt, sor, smk)
        if stochastic:
            # using stochastic sampling (or greedy sampling.)
            if argmax:
                nw = next_prob[0].argmax()
                next_word[0] = nw
            else:
                nw = next_word[0]
            sample.append(nw)
            score += next_prob[0, nw]
            if (not fixlen) and (nw == 0):  # sample reached the end
                break
        else:
            # using beam-search; scores are accumulated negative log-probs,
            # computed over the flattened (hypothesis x vocab) candidates.
            cand_scores = hyp_scores[:, None] - np.log(next_prob)
            cand_flat = cand_scores.flatten()
            ranks_flat = cand_flat.argsort()[:(k - dead_k)]
            # fetch the best results.
            voc_size = next_prob.shape[1]
            # BUGFIX: floor division keeps the hypothesis indices integral;
            # true division yields floats (Python 3 / future division) and
            # breaks list indexing below.
            trans_index = ranks_flat // voc_size
            word_index = ranks_flat % voc_size
            costs = cand_flat[ranks_flat]
            # get the new hyp samples
            new_hyp_samples = []
            new_hyp_scores = np.zeros(k - dead_k).astype(theano.config.floatX)
            new_hyp_states = []
            for idx, [ti, wi] in enumerate(zip(trans_index, word_index)):
                new_hyp_samples.append(hyp_samples[ti] + [wi])
                new_hyp_scores[idx] = copy.copy(costs[idx])
                new_hyp_states.append(copy.copy(next_state[ti]))
            # check the finished samples
            new_live_k = 0
            hyp_samples = []
            hyp_scores = []
            hyp_states = []
            for idx in xrange(len(new_hyp_samples)):
                # BUGFIX: a hypothesis is finished when its *last sampled
                # pointer* is the end token (0) — the original compared the
                # last element of the hidden state vector instead.
                if (new_hyp_samples[idx][-1] == 0) and (not fixlen):
                    sample.append(new_hyp_samples[idx])
                    score.append(new_hyp_scores[idx])
                    dead_k += 1
                else:
                    new_live_k += 1
                    hyp_samples.append(new_hyp_samples[idx])
                    hyp_scores.append(new_hyp_scores[idx])
                    hyp_states.append(new_hyp_states[idx])
            hyp_scores = np.array(hyp_scores)
            live_k = new_live_k
            if new_live_k < 1:
                break
            if dead_k >= k:
                break
            next_word = np.array([w[-1] for w in hyp_samples])
            next_state = np.array(hyp_states)
    # end: flush hypotheses still alive when maxlen is reached
    if not stochastic:
        if live_k > 0:
            for idx in xrange(live_k):
                sample.append(hyp_samples[idx])
                score.append(hyp_scores[idx])
    return sample, score
class PointerDecoder(Model):
    """
    RNN-Decoder for Pointer Networks [version 2]
    Pointer to 2 place once a time.
    """
    def __init__(self,
                 config, rng, prefix='ptrdec'):
        # config : dict of hyper-parameters (the ptr_* keys are read below)
        # rng    : Theano random stream used for stochastic sampling
        # prefix : name prefix applied to all sub-layer parameters
        super(PointerDecoder, self).__init__()
        self.config = config
        self.rng = rng
        self.prefix = prefix
        """
        Create all elements of the Decoder's computational graph.
        """
        # create Initialization Layers
        logger.info("{}_create initialization layers.".format(self.prefix))
        # maps the encoder context to the decoder's initial hidden state
        self.Initializer = Dense(
            config['ptr_contxt_dim'],
            config['ptr_hidden_dim'],
            activation='tanh',
            name="{}_init".format(self.prefix)
        )
        # create RNN cells
        logger.info("{}_create RNN cells.".format(self.prefix))
        self.RNN = RNN(
            self.config['ptr_embedd_dim'],
            self.config['ptr_hidden_dim'],
            self.config['ptr_contxt_dim'],
            name="{}_cell".format(self.prefix)
        )
        self._add(self.Initializer)
        self._add(self.RNN)
        # create 2 attention heads: one per pointer emitted each step
        logger.info("_create Attention-Readout layers")
        self.att_head = Attention(
            self.config['ptr_hidden_dim'],
            self.config['ptr_source_dim'],
            self.config['ptr_middle_dim'],
            name='{}_head_attender'.format(self.prefix)
        )
        self.att_tail = Attention(
            self.config['ptr_hidden_dim'],
            self.config['ptr_source_dim'],
            self.config['ptr_middle_dim'],
            name='{}_tail_attender'.format(self.prefix)
        )
        self._add(self.att_head)
        self._add(self.att_tail)
@staticmethod
def grab_prob(probs, X):
    # Gather, for every (sample, step), the probability assigned to the
    # index chosen in X.
    # probs : (batch_size, max_len, vocab_size) per-step distributions
    # X     : (batch_size, max_len) integer indices
    # returns a (batch_size, max_len) tensor of selected probabilities
    assert probs.ndim == 3
    batch_size = probs.shape[0]
    max_len = probs.shape[1]
    vocab_size = probs.shape[2]
    # flatten to 2-D and pick one entry per row via advanced indexing
    probs = probs.reshape((batch_size * max_len, vocab_size))
    return probs[T.arange(batch_size * max_len), X.flatten(1)].reshape(X.shape)  # advanced indexing
@staticmethod
def grab_source(source, target):
    # Fetch, for every target pointer, the corresponding source vector.
    # source : (nb_samples, source_num, source_dim)
    # target : (nb_samples, target_num) integer positions into the source
    # returns (nb_samples, target_num, source_dim)
    assert source.ndim == 3
    batch_size = source.shape[0]
    source_num = source.shape[1]
    source_dim = source.shape[2]
    target_num = target.shape[1]
    # flatten the batch so a single advanced-indexing gather suffices;
    # offsets shift each row's indices into its own slice of the flat array
    source_flt = source.reshape((batch_size * source_num, source_dim))
    target_idx = (target + (T.arange(batch_size) * source_num)[:, None]).reshape((batch_size * target_num,))
    value = source_flt[target_idx].reshape((batch_size, target_num, source_dim))
    return value
def build_decoder(self,
                  inputs,
                  source, target,
                  smask=None, tmask=None, context=None):
    """
    Build the Pointer Network Decoder Computational Graph.

    Two pointers (a head and a tail) are emitted per RNN step, so the
    target sequence is de-interleaved into even (head) and odd (tail)
    positions. Returns the per-sample masked log-likelihood of `target`.
    """
    # inputs : (nb_samples, source_num, ptr_embedd_dim)
    # source : (nb_samples, source_num, source_dim)
    # smask  : (nb_samples, source_num)
    # target : (nb_samples, target_num)
    # tmask  : (nb_samples, target_num)
    # context: (nb_samples, context_dim)
    # initialized hidden state.
    assert context is not None
    Init_h = self.Initializer(context)
    # target points into the source; fetch the pointed-at embeddings.
    X = self.grab_source(inputs, target)  # (nb_samples, target_num, source_dim)
    nb_dim = X.shape[0]
    sc_dim = X.shape[2]

    # Split a sequence into its even/odd positions (head/tail streams),
    # padding with one extra column (zeroed when mask=True) so the length
    # is even before de-interleaving.
    def _get_ht(A, mask=False):
        if A.ndim == 2:
            B = A[:, -1:]
            if mask:
                B *= 0.
            A = T.concatenate([A, B], axis=1)
            return A[:, ::2], A[:, 1::2]
        else:
            # BUGFIX: removed a leftover Python-2 debug statement
            # (`print B.ndim`) that broke the module under Python 3.
            B = A[:, -1:, :]
            if mask:
                B *= 0.
            A = T.concatenate([A, B], axis=1)
            return A[:, ::2, :], A[:, 1::2, :]

    Xh, Xt = _get_ht(X)
    Th, Tt = _get_ht(target)
    Mh, Mt = _get_ht(tmask, mask=True)
    # RNN input at step t is the sum of the previous head+tail embeddings,
    # shifted right by one step (the first step sees a zero vector).
    Xa = Xh + Xt
    # BUGFIX: Xa is 3-D; the original sliced it with four indices
    # (Xa[:, :-1, :, :]) which is an indexing error.
    Xa = T.concatenate([alloc_zeros_matrix(nb_dim, 1, sc_dim),
                        Xa[:, :-1, :]], axis=1)
    # theano.scan iterates over the leading axis -> time-major
    Xa = Xa.dimshuffle((1, 0, 2))

    # eat by recurrent net
    def _recurrence(x, prev_h, c, s, s_mask):
        # RNN read-out followed by the two attention read-outs (head/tail)
        x_out = self.RNN(x, mask=None, C=c, init_h=prev_h, one_step=True)
        h_out = self.att_head(x_out, s, s_mask, return_log=True)
        t_out = self.att_tail(x_out, s, s_mask, return_log=True)
        return x_out, h_out, t_out

    outputs, _ = theano.scan(
        _recurrence,
        sequences=[Xa],
        outputs_info=[Init_h, None, None],
        non_sequences=[context, source, smask]
    )
    # scan returns time-major tensors; move back to batch-major.
    log_prob_head = outputs[1].dimshuffle((1, 0, 2))
    log_prob_tail = outputs[2].dimshuffle((1, 0, 2))
    # masked log-likelihood summed over steps, head and tail jointly
    log_prob = T.sum(self.grab_prob(log_prob_head, Th) * Mh, axis=1) \
        + T.sum(self.grab_prob(log_prob_tail, Tt) * Mt, axis=1)
    return log_prob
"""
Sample one step
"""
def _step_sample(self,
                 prev_idx_h, prev_idx_t,
                 prev_stat,
                 context, inputs, source, smask):
    # Single two-pointer decoding step used at sampling time.
    # prev_idx_h / prev_idx_t : (nb_samples,) previous head/tail pointers;
    #   a negative head index marks the first step, where a zero embedding
    #   is fed instead of the summed head+tail embeddings.
    # NOTE(review): only prev_idx_h is tested for < 0; this assumes head
    # and tail indices are always initialized together — confirm.
    X = T.switch(
        prev_idx_h[:, None] < 0,
        alloc_zeros_matrix(prev_idx_h.shape[0], self.config['ptr_embedd_dim']),
        self.grab_source(inputs, prev_idx_h[:, None]) + self.grab_source(inputs, prev_idx_t[:, None])
    )
    # one step RNN
    X_out = self.RNN(X, C=context, init_h=prev_stat, one_step=True)
    next_stat = X_out
    # compute the attention read-out for each of the two pointer heads,
    # and draw one sample per head from its distribution
    next_prob_h = self.att_head(X_out, source, smask)
    next_sample_h = self.rng.multinomial(pvals=next_prob_h).argmax(1)
    next_prob_t = self.att_tail(X_out, source, smask)
    next_sample_t = self.rng.multinomial(pvals=next_prob_t).argmax(1)
    return next_prob_h, next_sample_h, next_prob_t, next_sample_t, next_stat
def build_sampler(self):
    """
    Build a sampler which only steps once.

    Compiles two Theano functions onto self:
      * get_init_state(context) -> initial decoder hidden state
      * sample_next(prev_idxh, prev_idxt, prev_state, context, inputs,
        source, smask) -> (prob_h, sample_h, prob_t, sample_t, next_state)
    """
    logger.info("build sampler ...")
    if self.config['sample_stoch'] and self.config['sample_argmax']:
        logger.info("use argmax search!")
    elif self.config['sample_stoch'] and (not self.config['sample_argmax']):
        logger.info("use stochastic sampling!")
    elif self.config['sample_beam'] > 1:
        logger.info("use beam search! (beam_size={})".format(self.config['sample_beam']))
    # initial state of our Decoder.
    context = T.matrix()  # theano variable.
    init_h = self.Initializer(context)
    logger.info('compile the function: get_init_state')
    self.get_init_state \
        = theano.function([context], init_h, name='get_init_state')
    logger.info('done.')
    # sampler: 1 x 1
    prev_idxh = T.vector('prev_idxh', dtype='int64')
    prev_idxt = T.vector('prev_idxt', dtype='int64')
    prev_stat = T.matrix('prev_state', dtype='float32')
    inputs = T.tensor3()
    source = T.tensor3()
    smask = T.imatrix()
    next_prob_h, next_sample_h, next_prob_t, next_sample_t, next_stat \
        = self._step_sample(prev_idxh, prev_idxt, prev_stat, context,
                            inputs, source, smask)
    # next word probability
    logger.info('compile the function: sample_next')
    # note: `inputs` is rebound here from a tensor to the argument list
    inputs = [prev_idxh, prev_idxt, prev_stat, context, inputs, source, smask]
    outputs = [next_prob_h, next_sample_h, next_prob_t, next_sample_t, next_stat]
    self.sample_next = theano.function(inputs, outputs, name='sample_next')
    logger.info('done')
    pass
"""
Generate samples, either with stochastic sampling or beam-search!
"""
def get_sample(self, context, inputs, source, smask,
               k=1, maxlen=30, stochastic=True, argmax=False, fixlen=False):
    """
    Generate a head/tail pointer sequence with stochastic (or greedy)
    sampling. Pointers are appended to the flat `sample` list in
    head, tail, head, tail, ... order.

    Beam search (k > 1) is not implemented for the two-pointer decoder;
    requesting it now raises instead of silently returning nothing.
    """
    # beam size
    if k > 1:
        assert not stochastic, 'Beam search does not support stochastic sampling!!'
    # prepare for searching
    sample = []
    score = []
    if stochastic:
        score = 0
    live_k = 1
    dead_k = 0
    hyp_samples = [[]] * live_k
    hyp_scores = np.zeros(live_k).astype(theano.config.floatX)
    hyp_states = []
    # get initial state of decoder RNN with context
    next_state = self.get_init_state(context)
    # -1 marks the first step (bos); see _step_sample
    next_wordh = -1 * np.ones((1,)).astype('int64')
    next_wordt = -1 * np.ones((1,)).astype('int64')
    # Start searching!
    for ii in xrange(maxlen):
        ctx = np.tile(context, [live_k, 1])
        ipt = np.tile(inputs, [live_k, 1, 1])
        sor = np.tile(source, [live_k, 1, 1])
        smk = np.tile(smask, [live_k, 1])
        next_probh, next_wordh, next_probt, next_wordt, next_state \
            = self.sample_next(next_wordh, next_wordt, next_state,
                               ctx, ipt, sor, smk)
        if stochastic:
            # head pointer first ...
            if argmax:
                nw = next_probh[0].argmax()
                next_wordh[0] = nw
            else:
                nw = next_wordh[0]
            sample.append(nw)
            score += next_probh[0, nw]
            if (not fixlen) and (nw == 0):  # reached the end token
                break
            # ... then the tail pointer for the same step.
            if argmax:
                nw = next_probt[0].argmax()
                next_wordt[0] = nw
            else:
                nw = next_wordt[0]
            sample.append(nw)
            score += next_probt[0, nw]
            if (not fixlen) and (nw == 0):  # reached the end token
                break
        else:
            # BUGFIX: the original `assert True, ...` was a no-op, so beam
            # search silently looped and returned empty results; fail loudly.
            raise NotImplementedError(
                'In this stage, I do not know how to use Beam-search for this problem.')
    return sample, score
class MemNet(Model):
    """
    Memory Networks:
    ==> Assign a Matrix to store rules

    Holds an (optionally learnable) memory matrix plus an attention read
    head; calling the net attends over the memory with a query key and
    returns the attention-weighted read-out.
    """
    def __init__(self,
                 config, rng, learn_memory=False,
                 prefix='mem'):
        # config       : hyper-parameter dict (mem_* keys are read below)
        # rng          : Theano random stream
        # learn_memory : if True, create an internal trainable memory matrix
        super(MemNet, self).__init__()
        self.config = config
        self.rng = rng  # Theano random stream
        self.prefix = prefix
        self.init = initializations.get('glorot_uniform')
        if learn_memory:
            # internal learnable memory: (mem_size, mem_source_dim)
            self.memory = self.init((self.config['mem_size'], self.config['mem_source_dim']))
            self.memory.name = '{}_inner_memory'.format(self.prefix)
            self.params += [self.memory]
        """
        Create the read-head of the MemoryNets
        """
        if self.config['mem_type'] == 'dnn':
            self.attender = Attention(
                config['mem_hidden_dim'],
                config['mem_source_dim'],
                config['mem_middle_dim'],
                name='{}_attender'.format(self.prefix)
            )
        else:
            self.attender = CosineAttention(
                config['mem_hidden_dim'],
                config['mem_source_dim'],
                use_pipe=config['mem_use_pipe'],
                name='{}_attender'.format(self.prefix)
            )
        self._add(self.attender)

    def __call__(self, key, memory=None, mem_mask=None, out_memory=None):
        # key        : (nb_samples, mem_hidden_dim) attention query
        # memory     : (nb_samples, mem_size, mem_source_dim) or a shared
        #              (mem_size, mem_source_dim) matrix; defaults to the
        #              internal memory (requires learn_memory=True).
        # mem_mask   : optional (nb_samples, mem_size) mask
        # out_memory : optional separate matrix to read values from while
        #              attending over `memory`.
        # NOTE(review): some call sites pass an output memory as the third
        # positional argument, which binds to mem_mask here — verify intent.
        # returns (readout, probout): weighted read-out and attention weights.
        nb_samples = key.shape[0]
        # BUGFIX: the original `if not memory:` evaluated the truth value of
        # a symbolic tensor whenever a memory was supplied; compare with None.
        if memory is None:
            memory = T.repeat(self.memory[None, :, :], nb_samples, axis=0)
            mem_mask = None
        if memory.ndim == 2:
            # broadcast a shared memory matrix across the batch
            memory = T.repeat(memory[None, :, :], nb_samples, axis=0)
        probout = self.attender(key, memory, mem_mask)  # (nb_samples, mem_size)
        if self.config['mem_att_drop'] > 0:
            # hard-threshold small attention weights
            probout = T.clip(probout - self.config['mem_att_drop'], 0, 1)
        if out_memory is None:
            readout = T.sum(memory * probout[:, :, None], axis=1)
        else:
            readout = T.sum(out_memory * probout[:, :, None], axis=1)
        return readout, probout
class PtrNet(Model):
    """
    Pointer Networks [with/without] External Rule Memory
    """
    def __init__(self, config, n_rng, rng,
                 name='PtrNet', w_mem=True):
        # config : hyper-parameter dict shared by all sub-modules
        # n_rng  : numpy random stream
        # rng    : Theano random stream
        # w_mem  : whether the network is built with an external memory
        super(PtrNet, self).__init__()
        self.config = config
        self.n_rng = n_rng  # numpy random stream
        self.rng = rng  # Theano random stream
        self.name = name
        self.w_mem = w_mem
def build_(self, encoder=None):
    """
    Instantiate all sub-modules of the Pointer Network: the encoder(s),
    the pointer decoder, the memory grabber, an optional predictor and
    the optimizer. An existing encoder can be shared by passing it in.
    """
    logger.info("build the Pointer Networks")
    # encoder
    if not encoder:
        self.encoder = Encoder(self.config, self.rng, prefix='enc1')
        self._add(self.encoder)
    else:
        self.encoder = encoder
    if self.config['mem_output_mem']:
        # separate encoder for the output-side memory
        self.encoder_out = Encoder(self.config, self.rng, prefix='enc_out')
        self._add(self.encoder_out)
    # twice encoding
    if self.config['ptr_twice_enc']:
        self.encoder2 = Encoder(self.config, self.rng, prefix='enc2', use_context=True)
        self._add(self.encoder2)
    # pointer decoder
    self.ptrdec = PtrDecoder(self.config, self.rng)
    self._add(self.ptrdec)
    # memory grabber
    self.grabber = MemNet(self.config, self.rng)
    self._add(self.grabber)
    # memory predictor :: alternative ::
    if self.config['use_predict']:
        logger.info('create a predictor AS Long Term Memory.s')
        if self.config['pred_type'] == 'highway':
            self.predictor = HighwayNet(self.config['mem_hidden_dim'],
                                        self.config['pred_depth'],
                                        activation='relu',
                                        name='phw')
        elif self.config['pred_type'] == 'dense':
            self.predictor = Dense(self.config['mem_hidden_dim'],
                                   self.config['mem_hidden_dim'],
                                   name='pdnn')
        elif self.config['pred_type'] == 'encoder':
            config = self.config
            self.predictor = Encoder(config, self.rng, prefix='enc3', use_context=False)
        else:
            # BUGFIX: the original evaluated the bare expression
            # `NotImplementedError` (a no-op) and fell through with
            # self.predictor unset; raise the exception instead.
            raise NotImplementedError(
                'unknown pred_type: {}'.format(self.config['pred_type']))
        self._add(self.predictor)
    # objectives and optimizers
    assert self.config['optimizer'] == 'adam'
    self.optimizer = optimizers.get(self.config['optimizer'],
                                    kwargs=dict(rng=self.rng,
                                                save=self.config['save_updates']))
def build_train(self, memory=None, out_memory=None, compile_train=False, guide=None):
    """
    Build the training computational graph (and optionally compile the
    training function onto self.train_). Returns the symbolic pieces
    needed by the caller: losses, monitors and the input variables.
    """
    # training function for Pointer Networks
    indices = T.imatrix()  # padded word indices (for training)
    target = T.imatrix()  # target indices (leading to relative locations)
    tmask = T.imatrix()  # target masks
    # per-sample validity flag: 1 unless the first target index is 0 (empty)
    pmask = T.cast(1 - T.eq(target[:, 0], 0), dtype='float32')
    assert memory is not None, 'we must have an input memory'
    if self.config['mem_output_mem']:
        assert out_memory is not None, 'we must have an output memory'
    # L1 of memory
    loss_mem = T.sum(abs(T.mean(memory, axis=0)))
    # encoding
    if not self.config['ptr_twice_enc']:
        # single-pass encoding: sequence states, embeddings, mask, last state
        source, inputs, smask, tail = self.encoder.build_encoder(indices, None, return_embed=True, return_sequence=True)
        # grab memory
        readout, probout = self.grabber(tail, memory)
        if not self.config['use_tail']:
            tailx = tail * 0.0
        else:
            tailx = tail
        if not self.config['use_memory']:
            readout *= 0.0
        # concatenate
        context = T.concatenate([tailx, readout], axis=1)
        # if predict ?
        # predictor: minimize || readout - predict ||^2
        if self.config['use_predict']:
            if self.config['pred_type'] == 'encoder':
                predict = self.predictor.build_encoder(indices, None, return_sequence=False)
            else:
                predict = self.predictor(tail)
            # reconstruction loss [note that we only compute loss for correct memory read.]
            loss_r = 0.5 * T.sum(pmask * T.sum(T.sqr(predict - readout), axis=-1).reshape(pmask.shape)) / T.sum(pmask)
            # use predicted readout to compute loss
            contextz = T.concatenate([tailx, predict], axis=1)
            sourcez, inputsz, smaskz = source, inputs, smask
    else:
        # twice encoding: first pass only produces the last state
        tail = self.encoder.build_encoder(indices, None, return_sequence=False)
        # grab memory
        # NOTE(review): out_memory is passed as the third positional
        # argument, which binds to MemNet's mem_mask parameter — verify.
        readout, probout = self.grabber(tail, memory, out_memory)
        # get PrtNet input
        if not self.config['use_tail']:
            tailx = tail * 0.0
        else:
            tailx = tail
        if not self.config['use_memory']:
            readout *= 0.0
        # concatenate
        context0 = T.concatenate([tailx, readout], axis=1)
        # twice encoding ?
        source, inputs, smask, context = self.encoder2.build_encoder(
            indices, context=context0, return_embed=True, return_sequence=True)
        # if predict ?
        # predictor: minimize | readout - predict ||^2
        if self.config['use_predict']:
            if self.config['pred_type'] == 'encoder':
                predict = self.predictor.build_encoder(indices, None, return_sequence=False)
            else:
                predict = self.predictor(tail)
            # reconstruction loss [note that we only compute loss for correct memory read.]
            loss_r = 0.5 * T.sum(pmask * T.sum(T.sqr(predict - readout), axis=-1).reshape(pmask.shape)) / T.sum(pmask)
            # monitored distance between encoder tail and the memory read-out
            dist = T.sum(T.sum(T.sqr(tail - readout), axis=-1).reshape(pmask.shape) * pmask) / T.sum(pmask)
            # use predicted readout to compute loss
            context1 = T.concatenate([tailx, predict], axis=1)
            # twice encoding..
            sourcez, inputsz, smaskz, contextz = self.encoder2.build_encoder(
                indices, context=context1, return_embed=True, return_sequence=True)
    # pointer decoder & loss
    logProb = self.ptrdec.build_decoder(inputs, source, target,
                                        smask, tmask, context)
    loss = T.mean(-logProb)
    # if predict?
    if self.config['use_predict']:
        # same decoder loss, but driven by the predicted read-out
        logProbz = self.ptrdec.build_decoder(
            inputsz, sourcez, target, smaskz, tmask, contextz)
        loss_z = -T.sum(pmask * logProbz.reshape(pmask.shape)) / T.sum(pmask)
    # if guidance ?
    if guide:
        # attention loss
        # >>>>>>> BE CAUTION !!! <<<<<<
        # guide vector may contains '-1' which needs a mask for that.
        mask = T.ones_like(guide) * (1 - T.eq(guide, -1))
        loss_g = T.mean(
            -T.sum(
                T.log(PtrDecoder.grab_prob(probout[:, None, :], guide)),
                axis=1).reshape(mask.shape) * mask
        )
        # attention accuracy
        attend = probout.argmax(axis=1, keepdims=True)
        maxp = T.sum(probout.max(axis=1).reshape(mask.shape) * mask) / T.cast(T.sum(mask), 'float32')
        error = T.sum((abs(attend - guide) * mask) > 0) / T.cast(T.sum(mask), 'float32')
        if self.config['mem_learn_guide']:
            loss += loss_g
    # loss += 0.1 * loss_mem
    if compile_train:
        train_inputs = [indices, target, tmask, memory]
        if guide:
            train_inputs += [guide]
        logger.info("compiling the compuational graph ::training function::")
        updates = self.optimizer.get_updates(self.params, loss)
        self.train_ = theano.function(train_inputs, loss, updates=updates, name='train_sub')
        logger.info("training functions compile done.")
    # output the building results for Training
    outputs = [loss]
    if guide:
        outputs += [maxp, error]
    outputs += [indices, target, tmask]
    if self.config['use_predict']:
        # NOTE(review): `dist` is assigned only in the twice-encoding
        # branch; use_predict without ptr_twice_enc raises NameError here.
        outputs += [loss_r, loss_z, dist, readout]
    return outputs
def build_sampler(self, memory=None, out_mem=None):
    # Build the encoding-side graph used at sampling time and return the
    # symbolic pieces the decoder sampler needs:
    # (context, source, smask, inputs, indices).
    # NOTE(review): `out_mem` is passed as the third positional argument of
    # self.grabber, which binds to its mem_mask parameter — verify intent.
    indices = T.imatrix()  # padded word indices (for training)
    # encoding
    if not self.config['ptr_twice_enc']:
        # encoding
        source, inputs, smask, tail = self.encoder.build_encoder(indices, None, return_embed=True, return_sequence=True)
        # grab memory
        readout, probout = self.grabber(tail, memory, out_mem)
        if not self.config['use_tail']:
            tail *= 0.0
        if not self.config['use_memory']:
            readout *= 0.0
        # concatenate
        context = T.concatenate([tail, readout], axis=1)
    else:
        tail = self.encoder.build_encoder(indices, None, return_sequence=False)
        # grab memory
        readout, probout = self.grabber(tail, memory, out_mem)
        if not self.config['use_tail']:
            tail *= 0.0
        if not self.config['use_memory']:
            readout *= 0.0
        # concatenate
        context0 = T.concatenate([tail, readout], axis=1)
        # twice encoding ?
        source, inputs, smask, context = self.encoder2.build_encoder(
            indices, context=context0, return_embed=True, return_sequence=True)
    # monitoring: expose the memory attention weights
    self.monitor['attention_prob'] = probout
    self._monitoring()
    return context, source, smask, inputs, indices
def build_predict_sampler(self):
    """
    Build the encoding-side sampling graph that replaces the memory
    read-out with the predictor's output (no external memory needed).
    Returns (context, source, smask, inputs, indices).
    """
    # training function for Pointer Networks
    indices = T.imatrix()  # padded word indices (for training)
    # (removed dead local `flag = True` that was never read)
    # encoding
    if not self.config['ptr_twice_enc']:
        # encoding
        source, inputs, smask, tail = self.encoder.build_encoder(indices, None, return_embed=True, return_sequence=True)
        # predict memory
        if self.config['pred_type'] == 'encoder':
            readout = self.predictor.build_encoder(indices, None, return_sequence=False)
        else:
            readout = self.predictor(tail)
        if not self.config['use_tail']:
            tail *= 0.0
        if not self.config['use_memory']:
            readout *= 0.0
        # concatenate
        context = T.concatenate([tail, readout], axis=1)
    else:
        tail = self.encoder.build_encoder(indices, None, return_sequence=False)
        # predict memory
        if self.config['pred_type'] == 'encoder':
            readout = self.predictor.build_encoder(indices, None, return_sequence=False)
        else:
            readout = self.predictor(tail)
        if not self.config['use_tail']:
            tail *= 0.0
        if not self.config['use_memory']:
            readout *= 0.0
        # concatenate
        context0 = T.concatenate([tail, readout], axis=1)
        # twice encoding ?
        source, inputs, smask, context = self.encoder2.build_encoder(
            indices, context=context0, return_embed=True, return_sequence=True)
    return context, source, smask, inputs, indices
def generate_(self, inputs, context, source, smask):
    # Decode one example with beam search (k=4, up to 5 steps) and return
    # the best hypothesis together with its probability.
    args = dict(k=4, maxlen=5, stochastic=False, argmax=False)
    sample, score = self.ptrdec.get_sample(context, inputs, source, smask,
                                           **args)
    if not args['stochastic']:
        # length-normalize the negative-log costs and keep the cheapest one
        score = score / np.array([len(s) for s in sample])
        sample = sample[score.argmin()]
        score = score.min()
    else:
        score /= float(len(sample))
    return sample, np.exp(score)
brunogamacatao/portalsaladeaula | django/utils/unittest/util.py | 751 | 2821 | """Various utility functions."""
__unittest = True

# Maximum length of a repr before it gets truncated in failure messages.
_MAX_LENGTH = 80


def safe_repr(obj, short=False):
    """Return repr(obj), never raising.

    Falls back to the default object repr when obj's own __repr__ fails.
    When *short* is true, results of _MAX_LENGTH characters or more are
    truncated with a ' [truncated]...' marker.
    """
    try:
        rendered = repr(obj)
    except Exception:
        rendered = object.__repr__(obj)
    if short and len(rendered) >= _MAX_LENGTH:
        return rendered[:_MAX_LENGTH] + ' [truncated]...'
    return rendered
def safe_str(obj):
    """Return str(obj), falling back to the default object str on failure."""
    try:
        rendered = str(obj)
    except Exception:
        rendered = object.__str__(obj)
    return rendered
def strclass(cls):
    """Return the fully qualified 'module.ClassName' string for *cls*."""
    return '{0}.{1}'.format(cls.__module__, cls.__name__)
def sorted_list_difference(expected, actual):
    """Finds elements in only one or the other of two, sorted input lists.

    Returns a two-element tuple of lists. The first list contains those
    elements in the "expected" list but not in the "actual" list, and the
    second contains those elements in the "actual" list but not in the
    "expected" list. Duplicate elements in either input list are ignored.
    """
    exp_i = act_j = 0
    missing = []
    unexpected = []
    while True:
        try:
            exp_item = expected[exp_i]
            act_item = actual[act_j]
            if exp_item < act_item:
                # exp_item is absent from actual; skip its duplicates too
                missing.append(exp_item)
                exp_i += 1
                while expected[exp_i] == exp_item:
                    exp_i += 1
            elif exp_item > act_item:
                # act_item is absent from expected; skip its duplicates too
                unexpected.append(act_item)
                act_j += 1
                while actual[act_j] == act_item:
                    act_j += 1
            else:
                # items match; advance both sides past any duplicates
                exp_i += 1
                try:
                    while expected[exp_i] == exp_item:
                        exp_i += 1
                finally:
                    act_j += 1
                    while actual[act_j] == act_item:
                        act_j += 1
        except IndexError:
            # one side is exhausted; the remainders are one-sided by definition
            missing.extend(expected[exp_i:])
            unexpected.extend(actual[act_j:])
            return missing, unexpected
def unorderable_list_difference(expected, actual, ignore_duplicate=False):
    """Same behavior as sorted_list_difference but
    for lists of unorderable items (like dicts).

    As it does a linear search per item (remove) it
    has O(n*n) performance. Both input lists are consumed in place.
    """
    def _drop_all(seq, value):
        # remove every occurrence of value from seq, in place
        try:
            while True:
                seq.remove(value)
        except ValueError:
            pass

    missing = []
    unexpected = []
    while expected:
        candidate = expected.pop()
        try:
            actual.remove(candidate)
        except ValueError:
            missing.append(candidate)
        if ignore_duplicate:
            _drop_all(expected, candidate)
            _drop_all(actual, candidate)
    if ignore_duplicate:
        # dedupe what remains on the actual side as well
        while actual:
            leftover = actual.pop()
            unexpected.append(leftover)
            _drop_all(actual, leftover)
        return missing, unexpected
    # anything left in actual is unexpected
    return missing, actual
| bsd-3-clause |
annatisch/autorest | src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/Paging/autorestpagingtestservice/models/__init__.py | 16 | 1275 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .product_properties import ProductProperties
from .product import Product
from .operation_result import OperationResult
from .paging_get_multiple_pages_options import PagingGetMultiplePagesOptions
from .paging_get_odata_multiple_pages_options import PagingGetOdataMultiplePagesOptions
from .paging_get_multiple_pages_with_offset_options import PagingGetMultiplePagesWithOffsetOptions
from .custom_parameter_group import CustomParameterGroup
from .product_paged import ProductPaged
from .product_paged1 import ProductPaged1
__all__ = [
'ProductProperties',
'Product',
'OperationResult',
'PagingGetMultiplePagesOptions',
'PagingGetOdataMultiplePagesOptions',
'PagingGetMultiplePagesWithOffsetOptions',
'CustomParameterGroup',
'ProductPaged',
'ProductPaged1',
]
| mit |
mwx1993/TACTIC | 3rd_party/CherryPy/cherrypy/_cpnative_server.py | 80 | 6042 | """Native adapter for serving CherryPy via its builtin server."""
import logging
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import cherrypy
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil
from cherrypy import wsgiserver
class NativeGateway(wsgiserver.Gateway):
    """Gateway that serves CherryPy applications directly (no WSGI layer).

    Note: this module is Python 2 code (py2 ``except X, e`` syntax below).
    """

    # whether an InternalRedirect may revisit a URL it has already served
    recursive = False

    def respond(self):
        """Handle one HTTP request end-to-end and send the response."""
        req = self.req
        try:
            # Obtain a Request object from CherryPy
            local = req.server.bind_addr
            local = httputil.Host(local[0], local[1], "")
            remote = req.conn.remote_addr, req.conn.remote_port
            remote = httputil.Host(remote[0], remote[1], "")
            scheme = req.scheme
            sn = cherrypy.tree.script_name(req.uri or "/")
            if sn is None:
                # no mounted application matches this path
                self.send_response('404 Not Found', [], [''])
            else:
                app = cherrypy.tree.apps[sn]
                method = req.method
                path = req.path
                qs = req.qs or ""
                headers = req.inheaders.items()
                rfile = req.rfile
                prev = None
                try:
                    redirections = []
                    while True:
                        request, response = app.get_serving(
                            local, remote, scheme, "HTTP/1.1")
                        request.multithread = True
                        request.multiprocess = False
                        request.app = app
                        request.prev = prev
                        # Run the CherryPy Request object and obtain the response
                        try:
                            request.run(method, path, qs, req.request_protocol, headers, rfile)
                            break
                        except cherrypy.InternalRedirect, ir:
                            app.release_serving()
                            prev = request
                            if not self.recursive:
                                # guard against redirect loops
                                if ir.path in redirections:
                                    raise RuntimeError("InternalRedirector visited the "
                                                       "same URL twice: %r" % ir.path)
                                else:
                                    # Add the *previous* path_info + qs to redirections.
                                    if qs:
                                        qs = "?" + qs
                                    redirections.append(sn + path + qs)
                            # Munge environment and try again.
                            method = "GET"
                            path = ir.path
                            qs = ir.query_string
                            rfile = StringIO()
                    self.send_response(
                        response.output_status, response.header_list,
                        response.body)
                finally:
                    app.release_serving()
        except:
            # last-resort handler: log the traceback and emit a bare 500
            tb = format_exc()
            #print tb
            cherrypy.log(tb, 'NATIVE_ADAPTER', severity=logging.ERROR)
            s, h, b = bare_error()
            self.send_response(s, h, b)

    def send_response(self, status, headers, body):
        """Write status, headers and body chunks to the native request."""
        req = self.req
        # Set response status
        req.status = str(status or "500 Server Error")
        # Set response headers
        for header, value in headers:
            req.outheaders.append((header, value))
        if (req.ready and not req.sent_headers):
            req.sent_headers = True
            req.send_headers()
        # Set response body
        for seg in body:
            req.write(seg)
class CPHTTPServer(wsgiserver.HTTPServer):
    """Wrapper for wsgiserver.HTTPServer.

    wsgiserver has been designed to not reference CherryPy in any way,
    so that it can be used in other frameworks and applications.
    Therefore, we wrap it here, so we can apply some attributes
    from config -> cherrypy.server -> HTTPServer.
    """

    def __init__(self, server_adapter=cherrypy.server):
        # Copy tuning knobs from the CherryPy server adapter onto the
        # underlying native HTTP server.
        self.server_adapter = server_adapter
        server_name = (self.server_adapter.socket_host or
                       self.server_adapter.socket_file or
                       None)
        wsgiserver.HTTPServer.__init__(
            self, server_adapter.bind_addr, NativeGateway,
            minthreads=server_adapter.thread_pool,
            maxthreads=server_adapter.thread_pool_max,
            server_name=server_name)
        self.max_request_header_size = self.server_adapter.max_request_header_size or 0
        self.max_request_body_size = self.server_adapter.max_request_body_size or 0
        self.request_queue_size = self.server_adapter.socket_queue_size
        self.timeout = self.server_adapter.socket_timeout
        self.shutdown_timeout = self.server_adapter.shutdown_timeout
        self.protocol = self.server_adapter.protocol_version
        self.nodelay = self.server_adapter.nodelay
        ssl_module = self.server_adapter.ssl_module or 'pyopenssl'
        if self.server_adapter.ssl_context:
            # an explicit SSL context takes precedence; the adapter is still
            # built from the certificate settings and the context attached
            adapter_class = wsgiserver.get_ssl_adapter_class(ssl_module)
            self.ssl_adapter = adapter_class(
                self.server_adapter.ssl_certificate,
                self.server_adapter.ssl_private_key,
                self.server_adapter.ssl_certificate_chain)
            self.ssl_adapter.context = self.server_adapter.ssl_context
        elif self.server_adapter.ssl_certificate:
            adapter_class = wsgiserver.get_ssl_adapter_class(ssl_module)
            self.ssl_adapter = adapter_class(
                self.server_adapter.ssl_certificate,
                self.server_adapter.ssl_private_key,
                self.server_adapter.ssl_certificate_chain)
| epl-1.0 |
gsnbng/erpnext | erpnext/hr/doctype/vehicle_log/vehicle_log.py | 2 | 1904 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt, cstr
from frappe.model.mapper import get_mapped_doc
from frappe.model.document import Document
class VehicleLog(Document):
    """Doctype controller for a single vehicle trip / odometer log entry."""

    def validate(self):
        # The odometer can only move forward: the new reading must be at
        # least the reading recorded by the previous submitted log.
        if flt(self.odometer) < flt(self.last_odometer):
            frappe.throw(_("Current Odometer Value should be greater than Last Odometer Value {0}").format(self.last_odometer))

    def on_submit(self):
        # Persist the latest reading on the Vehicle master record.
        frappe.db.set_value("Vehicle", self.license_plate, "last_odometer", self.odometer)

    def on_cancel(self):
        # Roll the Vehicle's stored odometer back by the distance this
        # (now cancelled) log had contributed.
        distance_travelled = self.odometer - self.last_odometer
        if(distance_travelled > 0):
            updated_odometer_value = int(frappe.db.get_value("Vehicle", self.license_plate, "last_odometer")) - distance_travelled
            frappe.db.set_value("Vehicle", self.license_plate, "last_odometer", updated_odometer_value)
@frappe.whitelist()
def make_expense_claim(docname):
    # Create (but do not save) an Expense Claim for a Vehicle Log,
    # claiming the fuel price plus the sum of all service expenses.
    # Raises if a claim already exists or there is nothing to claim.
    expense_claim = frappe.db.exists("Expense Claim", {"vehicle_log": docname})
    if expense_claim:
        frappe.throw(_("Expense Claim {0} already exists for the Vehicle Log").format(expense_claim))
    vehicle_log = frappe.get_doc("Vehicle Log", docname)
    # total of all service line items on the log
    service_expense = sum([flt(d.expense_amount) for d in vehicle_log.service_detail])
    claim_amount = service_expense + flt(vehicle_log.price)
    if not claim_amount:
        frappe.throw(_("No additional expenses has been added"))
    exp_claim = frappe.new_doc("Expense Claim")
    exp_claim.employee = vehicle_log.employee
    exp_claim.vehicle_log = vehicle_log.name
    exp_claim.remark = _("Expense Claim for Vehicle Log {0}").format(vehicle_log.name)
    exp_claim.append("expenses", {
        "expense_date": vehicle_log.date,
        "description": _("Vehicle Expenses"),
        "amount": claim_amount
    })
    # returned as a dict so the client script can open it as a new document
    return exp_claim.as_dict()
| agpl-3.0 |
benjaminbrinkman/open-ad-platform | .venv/lib/python3.4/site-packages/werkzeug/_compat.py | 148 | 6190 | import sys
import operator
import functools
try:
import builtins
except ImportError:
import __builtin__ as builtins
PY2 = sys.version_info[0] == 2
_identity = lambda x: x
if PY2:
    # Python 2: text is `unicode`, bytes is `str`.  Re-export the builtins
    # under version-neutral names used throughout werkzeug.
    unichr = unichr
    text_type = unicode
    string_types = (str, unicode)
    integer_types = (int, long)
    int_to_byte = chr
    # Dict iteration helpers delegate to the Python 2 iter* methods.
    iterkeys = lambda d, *args, **kwargs: d.iterkeys(*args, **kwargs)
    itervalues = lambda d, *args, **kwargs: d.itervalues(*args, **kwargs)
    iteritems = lambda d, *args, **kwargs: d.iteritems(*args, **kwargs)
    iterlists = lambda d, *args, **kwargs: d.iterlists(*args, **kwargs)
    iterlistvalues = lambda d, *args, **kwargs: d.iterlistvalues(*args, **kwargs)
    iter_bytes = lambda x: iter(x)
    # `raise tp, value, tb` is a syntax error on Python 3, so it must be
    # hidden inside exec() to keep this module importable there.
    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
    def fix_tuple_repr(obj):
        # Class decorator: give a namedtuple-like class a field=value repr.
        def __repr__(self):
            cls = self.__class__
            return '%s(%s)' % (cls.__name__, ', '.join(
                '%s=%r' % (field, self[index])
                for index, field in enumerate(cls._fields)
            ))
        obj.__repr__ = __repr__
        return obj
    def implements_iterator(cls):
        # Class decorator: expose Python 3's __next__ as Python 2's next().
        cls.next = cls.__next__
        del cls.__next__
        return cls
    def implements_to_string(cls):
        # Class decorator: derive __unicode__/__str__ from a __str__ that
        # returns unicode text.
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
        return cls
    def native_string_result(func):
        # Decorator: encode a unicode return value to the native str type.
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs).encode('utf-8')
        return functools.update_wrapper(wrapper, func)
    def implements_bool(cls):
        # Class decorator: expose Python 3's __bool__ as Python 2's
        # __nonzero__.
        cls.__nonzero__ = cls.__bool__
        del cls.__bool__
        return cls
    from itertools import imap, izip, ifilter
    range_type = xrange
    from StringIO import StringIO
    from cStringIO import StringIO as BytesIO
    NativeStringIO = BytesIO
    def make_literal_wrapper(reference):
        # Native string literals already match both types on Python 2.
        return lambda x: x
    def normalize_string_tuple(tup):
        """Normalizes a string tuple to a common type. Following Python 2
        rules, upgrades to unicode are implicit.
        """
        if any(isinstance(x, text_type) for x in tup):
            return tuple(to_unicode(x) for x in tup)
        return tup
    def try_coerce_native(s):
        """Try to coerce a unicode string to native if possible. Otherwise,
        leave it as unicode.
        """
        try:
            return to_native(s)
        except UnicodeError:
            return s
    # WSGI strings are already byte strings on Python 2.
    wsgi_get_bytes = _identity
    def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
        # Decode the WSGI byte string to unicode text.
        return s.decode(charset, errors)
    def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
        # Encode unicode text back to the byte strings WSGI expects.
        if isinstance(s, bytes):
            return s
        return s.encode(charset, errors)
    def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
        # Coerce bytes-likes/unicode to bytes; None passes through.
        if x is None:
            return None
        if isinstance(x, (bytes, bytearray, buffer)):
            return bytes(x)
        if isinstance(x, unicode):
            return x.encode(charset, errors)
        raise TypeError('Expected bytes')
    def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
        # Native string type on Python 2 is bytes, so encode unicode.
        if x is None or isinstance(x, str):
            return x
        return x.encode(charset, errors)
else:
    # Python 3: text is `str`; the builtins already behave the modern way.
    unichr = chr
    text_type = str
    string_types = (str, )
    integer_types = (int, )
    # Dict iteration helpers wrap the Python 3 view objects in iterators
    # for parity with the Python 2 definitions above.
    iterkeys = lambda d, *args, **kwargs: iter(d.keys(*args, **kwargs))
    itervalues = lambda d, *args, **kwargs: iter(d.values(*args, **kwargs))
    iteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs))
    iterlists = lambda d, *args, **kwargs: iter(d.lists(*args, **kwargs))
    iterlistvalues = lambda d, *args, **kwargs: iter(d.listvalues(*args, **kwargs))
    int_to_byte = operator.methodcaller('to_bytes', 1, 'big')
    def iter_bytes(b):
        # Iterating bytes yields ints on Python 3; re-wrap each as bytes.
        return map(int_to_byte, b)
    def reraise(tp, value, tb=None):
        # Re-raise an exception with an explicit traceback (PY3 syntax).
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    # The Python 2 class-decorator shims are all no-ops on Python 3.
    fix_tuple_repr = _identity
    implements_iterator = _identity
    implements_to_string = _identity
    implements_bool = _identity
    native_string_result = _identity
    imap = map
    izip = zip
    ifilter = filter
    range_type = range
    from io import StringIO, BytesIO
    NativeStringIO = StringIO
    def make_literal_wrapper(reference):
        # Return a wrapper coercing native string literals to the type of
        # *reference*: str passes through, bytes are latin1-encoded.
        if isinstance(reference, text_type):
            return lambda x: x
        return lambda x: x.encode('latin1')
    def normalize_string_tuple(tup):
        """Ensures that all types in the tuple are either strings
        or bytes.
        """
        tupiter = iter(tup)
        is_text = isinstance(next(tupiter, None), text_type)
        for arg in tupiter:
            if isinstance(arg, text_type) != is_text:
                raise TypeError('Cannot mix str and bytes arguments (got %s)'
                                % repr(tup))
        return tup
    # Strings are already native on Python 3.
    try_coerce_native = _identity
    def wsgi_get_bytes(s):
        # PEP 3333 strings are latin1-masked bytes; recover the raw bytes.
        return s.encode('latin1')
    def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
        # Round-trip a WSGI string through latin1 to the real charset.
        return s.encode('latin1').decode(charset, errors)
    def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
        # Produce the latin1-masked str that PEP 3333 requires.
        if isinstance(s, bytes):
            return s.decode('latin1', errors)
        return s.encode(charset).decode('latin1', errors)
    def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
        # Coerce bytes-likes/str to bytes; None passes through.
        if x is None:
            return None
        if isinstance(x, (bytes, bytearray, memoryview)):
            return bytes(x)
        if isinstance(x, str):
            return x.encode(charset, errors)
        raise TypeError('Expected bytes')
    def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
        # Native string type on Python 3 is str, so decode bytes.
        if x is None or isinstance(x, str):
            return x
        return x.decode(charset, errors)
def to_unicode(x, charset=sys.getdefaultencoding(), errors='strict',
               allow_none_charset=False):
    """Coerce *x* to text.

    ``None`` passes through unchanged.  Bytes are decoded with *charset*,
    except that when *charset* is ``None`` and *allow_none_charset* is set
    the raw bytes are returned untouched.  Anything else is converted with
    the text constructor.
    """
    if x is None:
        return None
    if isinstance(x, bytes):
        if charset is None and allow_none_charset:
            return x
        return x.decode(charset, errors)
    return text_type(x)
| mit |
saukrIppl/seahub | seahub/auth/tokens.py | 2 | 2939 | from datetime import date
from django.conf import settings
from django.utils.http import int_to_base36, base36_to_int
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils import six
from seahub.base.models import UserLastLogin
class PasswordResetTokenGenerator(object):
    """
    Strategy object used to generate and check tokens for the password
    reset mechanism.
    """
    # Token format: "<days-since-2001-in-base36>-<truncated HMAC>".
    def make_token(self, user):
        """
        Returns a token that can be used once to do a password reset
        for the given user.
        """
        return self._make_token_with_timestamp(user, self._num_days(self._today()))
    def check_token(self, user, token):
        """
        Check that a password reset token is correct for a given user.
        """
        # Parse the token
        try:
            ts_b36, hash = token.split("-")
        except ValueError:
            return False
        try:
            ts = base36_to_int(ts_b36)
        except ValueError:
            return False
        # Check that the timestamp/uid has not been tampered with
        # (constant-time comparison defends against timing attacks)
        if not constant_time_compare(self._make_token_with_timestamp(user, ts), token):
            return False
        # Check the timestamp is within limit
        if (self._num_days(self._today()) - ts) > settings.PASSWORD_RESET_TIMEOUT_DAYS:
            return False
        return True
    def _make_token_with_timestamp(self, user, timestamp):
        # timestamp is number of days since 2001-1-1. Converted to
        # base 36, this gives us a 3 digit string until about 2121
        ts_b36 = int_to_base36(timestamp)
        # By hashing on the internal state of the user and using state
        # that is sure to change (the password salt will change as soon as
        # the password is set, at least for current Django auth, and
        # last_login will also change), we produce a hash that will be
        # invalid as soon as it is used.
        # We limit the hash to 20 chars to keep URL short
        key_salt = "django.contrib.auth.tokens.PasswordResetTokenGenerator"
        # Ensure results are consistent across DB backends
        # seahub deviation from stock Django: last-login time comes from
        # the UserLastLogin model, falling back to account creation time.
        user_last_login = UserLastLogin.objects.get_by_username(user.username)
        if user_last_login is None:
            from seahub.utils.timeutils import dt
            login_dt = dt(user.ctime)
        else:
            login_dt = user_last_login.last_login
        # Strip microseconds/tzinfo so the value is backend-independent.
        login_timestamp = login_dt.replace(microsecond=0, tzinfo=None)
        value = (six.text_type(user.id) + user.enc_password +
                 six.text_type(login_timestamp) + six.text_type(timestamp))
        hash = salted_hmac(key_salt, value).hexdigest()[::2]
        return "%s-%s" % (ts_b36, hash)
    def _num_days(self, dt):
        # Days elapsed since the 2001-01-01 epoch used by the token format.
        return (dt - date(2001,1,1)).days
    def _today(self):
        # Used for mocking in tests
        return date.today()
default_token_generator = PasswordResetTokenGenerator()
| apache-2.0 |
GreenRecycleBin/servo | python/mach/mach/dispatcher.py | 22 | 14395 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
import argparse
import difflib
import sys
from operator import itemgetter
from .base import (
NoCommandError,
UnknownCommandError,
UnrecognizedArgumentError,
)
class CommandFormatter(argparse.HelpFormatter):
    """Custom formatter to format just a subcommand."""

    def add_usage(self, *args):
        # Swallow the usage section entirely; the dispatcher composes and
        # prints its own usage line instead.
        return None
class CommandAction(argparse.Action):
    """An argparse action that handles mach commands.
    This class is essentially a reimplementation of argparse's sub-parsers
    feature. We first tried to use sub-parsers. However, they were missing
    features like grouping of commands (http://bugs.python.org/issue14037).
    The way this works involves light magic and a partial understanding of how
    argparse works.
    Arguments registered with an argparse.ArgumentParser have an action
    associated with them. An action is essentially a class that when called
    does something with the encountered argument(s). This class is one of those
    action classes.
    An instance of this class is created doing something like:
        parser.add_argument('command', action=CommandAction, registrar=r)
    Note that a mach.registrar.Registrar instance is passed in. The Registrar
    holds information on all the mach commands that have been registered.
    When this argument is registered with the ArgumentParser, an instance of
    this class is instantiated. One of the subtle but important things it does
    is tell the argument parser that it's interested in *all* of the remaining
    program arguments. So, when the ArgumentParser calls this action, we will
    receive the command name plus all of its arguments.
    For more, read the docs in __call__.
    """
    def __init__(self, option_strings, dest, required=True, default=None,
                 registrar=None, context=None):
        # A proper API would have **kwargs here. However, since we are a little
        # hacky, we intentionally omit it as a way of detecting potentially
        # breaking changes with argparse's implementation.
        #
        # In a similar vein, default is passed in but is not needed, so we drop
        # it.
        # nargs=REMAINDER makes argparse hand us the command name and every
        # argument after it in one list.
        argparse.Action.__init__(self, option_strings, dest, required=required,
                                 help=argparse.SUPPRESS, nargs=argparse.REMAINDER)
        self._mach_registrar = registrar
        self._context = context
    def __call__(self, parser, namespace, values, option_string=None):
        """This is called when the ArgumentParser has reached our arguments.
        Since we always register ourselves with nargs=argparse.REMAINDER,
        values should be a list of remaining arguments to parse. The first
        argument should be the name of the command to invoke and all remaining
        arguments are arguments for that command.
        The gist of the flow is that we look at the command being invoked. If
        it's *help*, we handle that specially (because argparse's default help
        handler isn't satisfactory). Else, we create a new, independent
        ArgumentParser instance for just the invoked command (based on the
        information contained in the command registrar) and feed the arguments
        into that parser. We then merge the results with the main
        ArgumentParser.
        """
        if namespace.help:
            # -h or --help is in the global arguments.
            self._handle_main_help(parser, namespace.verbose)
            sys.exit(0)
        elif values:
            command = values[0].lower()
            args = values[1:]
            if command == 'help':
                if args and args[0] not in ['-h', '--help']:
                    # Make sure args[0] is indeed a command.
                    self._handle_subcommand_help(parser, args[0])
                else:
                    self._handle_main_help(parser, namespace.verbose)
                sys.exit(0)
            elif '-h' in args or '--help' in args:
                # -h or --help is in the command arguments.
                self._handle_subcommand_help(parser, command)
                sys.exit(0)
        else:
            raise NoCommandError()
        # Command suggestion
        if command not in self._mach_registrar.command_handlers:
            # We first try to look for a valid command that is very similar to the given command.
            suggested_commands = difflib.get_close_matches(command, self._mach_registrar.command_handlers.keys(), cutoff=0.8)
            # If we find more than one matching command, or no command at all, we give command suggestions instead
            # (with a lower matching threshold). All commands that start with the given command (for instance: 'mochitest-plain',
            # 'mochitest-chrome', etc. for 'mochitest-') are also included.
            if len(suggested_commands) != 1:
                suggested_commands = set(difflib.get_close_matches(command, self._mach_registrar.command_handlers.keys(), cutoff=0.5))
                suggested_commands |= {cmd for cmd in self._mach_registrar.command_handlers if cmd.startswith(command)}
                raise UnknownCommandError(command, 'run', suggested_commands)
            # Exactly one close match: silently auto-correct and proceed.
            sys.stderr.write("We're assuming the '%s' command is '%s' and we're executing it for you.\n\n" % (command, suggested_commands[0]))
            command = suggested_commands[0]
        handler = self._mach_registrar.command_handlers.get(command)
        # FUTURE
        # If we wanted to conditionally enable commands based on whether
        # it's possible to run them given the current state of system, here
        # would be a good place to hook that up.
        # We create a new parser, populate it with the command's arguments,
        # then feed all remaining arguments to it, merging the results
        # with ourselves. This is essentially what argparse subparsers
        # do.
        parser_args = {
            'add_help': False,
            'usage': '%(prog)s [global arguments] ' + command +
                     ' [command arguments]',
        }
        if handler.parser:
            subparser = handler.parser
        else:
            subparser = argparse.ArgumentParser(**parser_args)
        remainder = None
        for arg in handler.arguments:
            # Remove our group keyword; it's not needed here.
            group_name = arg[1].get('group')
            if group_name:
                del arg[1]['group']
            if arg[1].get('nargs') == argparse.REMAINDER:
                # parse_known_args expects all argparse.REMAINDER ('...')
                # arguments to be all stuck together. Instead, we want them to
                # pick any extra argument, wherever they are.
                # Assume a limited CommandArgument for those arguments.
                assert len(arg[0]) == 1
                assert all(k in ('default', 'nargs', 'help') for k in arg[1])
                remainder = arg
            else:
                subparser.add_argument(*arg[0], **arg[1])
        # We define the command information on the main parser result so as to
        # not interfere with arguments passed to the command.
        setattr(namespace, 'mach_handler', handler)
        setattr(namespace, 'command', command)
        command_namespace, extra = subparser.parse_known_args(args)
        setattr(namespace, 'command_args', command_namespace)
        if remainder:
            (name,), options = remainder
            # parse_known_args usefully puts all arguments after '--' in
            # extra, but also puts '--' there. We don't want to pass it down
            # to the command handler. Note that if multiple '--' are on the
            # command line, only the first one is removed, so that subsequent
            # ones are passed down.
            if '--' in extra:
                extra.remove('--')
            # Commands with argparse.REMAINDER arguments used to force the
            # other arguments to be '+' prefixed. If a user now passes such
            # an argument, if will silently end up in extra. So, check if any
            # of the allowed arguments appear in a '+' prefixed form, and error
            # out if that's the case.
            for args, _ in handler.arguments:
                for arg in args:
                    arg = arg.replace('-', '+', 1)
                    if arg in extra:
                        raise UnrecognizedArgumentError(command, [arg])
            if extra:
                setattr(command_namespace, name, extra)
            else:
                setattr(command_namespace, name, options.get('default', []))
        elif extra:
            raise UnrecognizedArgumentError(command, extra)
    def _handle_main_help(self, parser, verbose):
        # Since we don't need full sub-parser support for the main help output,
        # we create groups in the ArgumentParser and populate each group with
        # arguments corresponding to command names. This has the side-effect
        # that argparse renders it nicely.
        r = self._mach_registrar
        disabled_commands = []
        # Sort categories by priority, highest first.
        cats = [(k, v[2]) for k, v in r.categories.items()]
        sorted_cats = sorted(cats, key=itemgetter(1), reverse=True)
        for category, priority in sorted_cats:
            group = None
            for command in sorted(r.commands_by_category[category]):
                handler = r.command_handlers[command]
                # Instantiate a handler class to see if it should be filtered
                # out for the current context or not. Condition functions can be
                # applied to the command's decorator.
                if handler.conditions:
                    if handler.pass_context:
                        instance = handler.cls(self._context)
                    else:
                        instance = handler.cls()
                    is_filtered = False
                    for c in handler.conditions:
                        if not c(instance):
                            is_filtered = True
                            break
                    if is_filtered:
                        description = handler.description
                        disabled_command = {'command': command, 'description': description}
                        disabled_commands.append(disabled_command)
                        continue
                # Lazily create the group so empty categories print nothing.
                if group is None:
                    title, description, _priority = r.categories[category]
                    group = parser.add_argument_group(title, description)
                description = handler.description
                group.add_argument(command, help=description,
                                   action='store_true')
        if disabled_commands and 'disabled' in r.categories:
            title, description, _priority = r.categories['disabled']
            group = parser.add_argument_group(title, description)
            if verbose == True:
                for c in disabled_commands:
                    group.add_argument(c['command'], help=c['description'],
                                       action='store_true')
        parser.print_help()
    def _handle_subcommand_help(self, parser, command):
        handler = self._mach_registrar.command_handlers.get(command)
        if not handler:
            raise UnknownCommandError(command, 'query')
        # This code is worth explaining. Because we are doing funky things with
        # argument registration to allow the same option in both global and
        # command arguments, we can't simply put all arguments on the same
        # parser instance because argparse would complain. We can't register an
        # argparse subparser here because it won't properly show help for
        # global arguments. So, we employ a strategy similar to command
        # execution where we construct a 2nd, independent ArgumentParser for
        # just the command data then supplement the main help's output with
        # this 2nd parser's. We use a custom formatter class to ignore some of
        # the help output.
        parser_args = {
            'formatter_class': CommandFormatter,
            'add_help': False,
        }
        if handler.parser:
            c_parser = handler.parser
            c_parser.formatter_class = NoUsageFormatter
            # Accessing _action_groups is a bit shady. We are highly dependent
            # on the argparse implementation not changing. We fail fast to
            # detect upstream changes so we can intelligently react to them.
            group = c_parser._action_groups[1]
            # By default argparse adds two groups called "positional arguments"
            # and "optional arguments". We want to rename these to reflect standard
            # mach terminology.
            c_parser._action_groups[0].title = 'Command Parameters'
            c_parser._action_groups[1].title = 'Command Arguments'
            if not handler.description:
                handler.description = c_parser.description
                c_parser.description = None
        else:
            c_parser = argparse.ArgumentParser(**parser_args)
            group = c_parser.add_argument_group('Command Arguments')
        extra_groups = {}
        for group_name in handler.argument_group_names:
            group_full_name = 'Command Arguments for ' + group_name
            extra_groups[group_name] = \
                c_parser.add_argument_group(group_full_name)
        for arg in handler.arguments:
            # Apply our group keyword.
            group_name = arg[1].get('group')
            if group_name:
                del arg[1]['group']
                group = extra_groups[group_name]
            group.add_argument(*arg[0], **arg[1])
        # This will print the description of the command below the usage.
        description = handler.description
        if description:
            parser.description = description
        parser.usage = '%(prog)s [global arguments] ' + command + \
                       ' [command arguments]'
        parser.print_help()
        print('')
        c_parser.print_help()
class NoUsageFormatter(argparse.HelpFormatter):
    """Help formatter whose usage section is always empty."""

    def _format_usage(self, *args, **kwargs):
        # An empty string makes print_help() emit no usage block at all.
        return ""
| mpl-2.0 |
atiqueahmedziad/addons-server | src/olympia/tags/models.py | 1 | 2686 | from django.urls import NoReverseMatch
from django.db import models
from olympia import activity, amo
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import ManagerBase, ModelBase
from olympia.amo.urlresolvers import reverse
class TagManager(ManagerBase):
    def not_denied(self):
        """Get allowed tags only"""
        allowed = self.filter(denied=False)
        return allowed
class Tag(ModelBase):
    """A user-visible tag that can be applied to add-ons."""
    id = PositiveAutoField(primary_key=True)
    # The tag's display text; uniqueness is enforced at save_tag time via
    # get_or_create, not at the schema level.
    tag_text = models.CharField(max_length=128)
    # Denied tags are hidden and excluded from stat updates.
    denied = models.BooleanField(default=False)
    restricted = models.BooleanField(default=False)
    addons = models.ManyToManyField('addons.Addon', through='AddonTag',
                                    related_name='tags')
    # Denormalized count of addons carrying this tag; kept fresh by
    # update_stat() via the update_tag_stat task.
    num_addons = models.IntegerField(default=0)
    objects = TagManager()
    class Meta:
        db_table = 'tags'
        ordering = ('tag_text',)
    def __unicode__(self):
        # Python 2 text representation (this codebase predates py3-only).
        return self.tag_text
    @property
    def popularity(self):
        # Alias for the denormalized addon count.
        return self.num_addons
    def can_reverse(self):
        # True when a detail URL can be built for this tag.
        try:
            self.get_url_path()
            return True
        except NoReverseMatch:
            return False
    def get_url_path(self):
        return reverse('tags.detail', args=[self.tag_text])
    def save_tag(self, addon):
        # Attach this tag's text to *addon*, creating the Tag row and the
        # m2m row if needed, and log the action.
        tag, created = Tag.objects.get_or_create(tag_text=self.tag_text)
        AddonTag.objects.get_or_create(addon=addon, tag=tag)
        activity.log_create(amo.LOG.ADD_TAG, tag, addon)
        return tag
    def remove_tag(self, addon):
        # Detach this tag's text from *addon* and log the action.
        tag, created = Tag.objects.get_or_create(tag_text=self.tag_text)
        for addon_tag in AddonTag.objects.filter(addon=addon, tag=tag):
            addon_tag.delete()
        activity.log_create(amo.LOG.REMOVE_TAG, tag, addon)
    def update_stat(self):
        # Refresh the denormalized num_addons count (no-op for denied tags).
        if self.denied:
            return
        self.num_addons = self.addons.count()
        self.save()
class AddonTag(ModelBase):
    """Through-model for the Addon <-> Tag many-to-many relation."""
    id = PositiveAutoField(primary_key=True)
    addon = models.ForeignKey('addons.Addon', related_name='addon_tags')
    tag = models.ForeignKey(Tag, related_name='addon_tags')
    class Meta:
        # Legacy table name kept for schema compatibility.
        db_table = 'users_tags_addons'
def update_tag_stat_signal(sender, instance, **kw):
    """post_save/post_delete handler: refresh the tag's addon count."""
    from .tasks import update_tag_stat
    if kw.get('raw'):
        # Raw fixture loading; skip stat maintenance.
        return
    try:
        update_tag_stat.delay(instance.tag.pk)
    except Tag.DoesNotExist:
        pass
models.signals.post_save.connect(update_tag_stat_signal, sender=AddonTag,
dispatch_uid='update_tag_stat')
models.signals.post_delete.connect(update_tag_stat_signal, sender=AddonTag,
dispatch_uid='delete_tag_stat')
| bsd-3-clause |
jswope00/GAI | lms/djangoapps/licenses/migrations/0001_initial.py | 188 | 8260 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration creating the CourseSoftware and UserLicense tables.

    The ``models`` dict below is South's frozen-ORM snapshot; it is
    generated data and must not be edited by hand.
    """
    def forwards(self, orm):
        # Adding model 'CourseSoftware'
        db.create_table('licenses_coursesoftware', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('full_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('url', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('course_id', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal('licenses', ['CourseSoftware'])
        # Adding model 'UserLicense'
        db.create_table('licenses_userlicense', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('software', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['licenses.CourseSoftware'])),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
            ('serial', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal('licenses', ['UserLicense'])
    def backwards(self, orm):
        # Deleting model 'CourseSoftware'
        db.delete_table('licenses_coursesoftware')
        # Deleting model 'UserLicense'
        db.delete_table('licenses_userlicense')
    # Frozen ORM state (auto-generated by South; do not edit).
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
            'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
            'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
            'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'licenses.coursesoftware': {
            'Meta': {'object_name': 'CourseSoftware'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'licenses.userlicense': {
            'Meta': {'object_name': 'UserLicense'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'serial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'software': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['licenses.CourseSoftware']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
        }
    }
    complete_apps = ['licenses']
| agpl-3.0 |
Frodox/buildbot | master/buildbot/test/util/validation.py | 1 | 20786 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
# See "Type Validation" in master/docs/developer/tests.rst
from future.utils import integer_types
from future.utils import iteritems
from future.utils import text_type
import datetime
import json
import re
from buildbot.util import UTC
from buildbot.util import bytes2NativeString
# Base class
validatorsByName = {}
class Validator(object):
    # Registry key in validatorsByName; subclasses that set this are
    # auto-registered by the metaclass below.
    name = None
    # True when the validator's constructor takes arguments.
    hasArgs = False
    def validate(self, name, object):
        # Subclasses implement this as a generator yielding human-readable
        # problem strings (yielding nothing means "valid").
        raise NotImplementedError
    class __metaclass__(type):
        # Python 2 metaclass hook: register every named subclass in
        # validatorsByName as it is defined, refusing duplicates.
        def __new__(mcs, name, bases, attrs):
            cls = type.__new__(mcs, name, bases, attrs)
            if 'name' in attrs and attrs['name']:
                assert attrs['name'] not in validatorsByName
                validatorsByName[attrs['name']] = cls
            return cls
# Basic types
class InstanceValidator(Validator):
    """Base for validators that accept instances of fixed types."""
    # Tuple of acceptable types; subclasses override.
    types = ()

    def validate(self, name, object):
        if isinstance(object, self.types):
            return
        expected = self.name or repr(self.types)
        yield "%s (%r) is not a %s" % (name, object, expected)
class IntValidator(InstanceValidator):
    # accepts int and (on Python 2) long
    types = integer_types
    name = 'integer'
class BooleanValidator(InstanceValidator):
    types = (bool,)
    name = 'boolean'
class StringValidator(InstanceValidator):
    # strings must be unicode
    types = (text_type,)
    name = 'string'
class BinaryValidator(InstanceValidator):
    types = (bytes,)
    name = 'bytestring'
class StrValidator(InstanceValidator):
    # native str type (bytes on py2, text on py3)
    types = (str,)
    name = 'str'
class DateTimeValidator(Validator):
    types = (datetime.datetime,)
    name = 'datetime'
    def validate(self, name, object):
        # Accept only timezone-aware datetimes carrying buildbot's UTC
        # tzinfo; naive datetimes or other zones are rejected.
        if not isinstance(object, datetime.datetime):
            yield "%s - %r - is not a datetime" % (name, object)
        elif object.tzinfo != UTC:
            yield "%s is not a UTC datetime" % (name,)
class IdentifierValidator(Validator):
    """Validates identifier strings: non-empty unicode matching
    ``[a-zA-Z_-][a-zA-Z0-9_-]*`` and at most ``len`` characters long."""
    types = (text_type,)
    name = 'identifier'
    hasArgs = True
    ident_re = re.compile('^[a-zA-Z_-][a-zA-Z0-9_-]*$')
    def __init__(self, len):
        # maximum permitted identifier length
        self.len = len
    def validate(self, name, object):
        if not isinstance(object, text_type):
            yield "%s - %r - is not a unicode string" % (name, object)
        elif len(object) < 1:
            # Checked before the regexp so empty strings get their specific
            # message; previously the regexp branch ran first and always
            # rejected "" with the generic message, leaving this dead code.
            yield "%s - identifiers cannot be an empty string" % (name,)
        elif not self.ident_re.match(object):
            yield "%s - %r - is not an identifier" % (name, object)
        elif len(object) > self.len:
            yield "%s - %r - is longer than %d characters" % (name, object,
                                                              self.len)
# Miscellaneous
class NoneOk(object):
    """Wrapper that additionally accepts ``None`` on top of whatever the
    wrapped validator accepts."""
    def __init__(self, original):
        # the validator to delegate to for non-None values
        self.original = original

    def validate(self, name, object):
        if object is not None:
            for problem in self.original.validate(name, object):
                yield problem
class Any(object):
    """Validator that accepts any value at all.

    ``validate`` must be a generator like every other validator's so callers
    (e.g. ``DictValidator.validate``) can iterate its result.  The original
    plain ``return`` made it an ordinary function returning ``None``, which
    raises TypeError as soon as the result is iterated.
    """
    def validate(self, name, object):
        return
        yield  # unreachable; its presence makes this function a generator
# Compound Types
class DictValidator(Validator):
    """Validate a dict: key set plus a per-key validator for each value."""
    name = 'dict'

    def __init__(self, optionalNames=None, **keys):
        # keys listed in optionalNames may be absent without error
        self.optionalNames = set(optionalNames or [])
        self.keys = keys
        self.expectedNames = set(keys)

    def validate(self, name, object):
        # isinstance (not type equality) so dict subclasses, as used by the
        # DB API, are accepted
        if not isinstance(object, dict):
            yield "%s (%r) is not a dictionary (got type %s)" \
                % (name, object, type(object))
            return

        gotNames = set(object)

        unexpected = gotNames - self.expectedNames
        if unexpected:
            yield "%s has unexpected keys %s" % (
                name, ", ".join(repr(n) for n in unexpected))

        missing = self.expectedNames - self.optionalNames - gotNames
        if missing:
            yield "%s is missing keys %s" % (
                name, ", ".join(repr(n) for n in missing))

        # recurse into every key that is both present and expected
        for k in gotNames & self.expectedNames:
            for msg in self.keys[k].validate("%s[%r]" % (name, k), object[k]):
                yield msg
class SequenceValidator(Validator):
    """Validate a homogeneous sequence; subclasses fix `type` and `name`."""
    type = None

    def __init__(self, elementValidator):
        # validator applied to every element of the sequence
        self.elementValidator = elementValidator

    def validate(self, name, object):
        if isinstance(object, self.type):
            for idx, elt in enumerate(object):
                label = "%s[%d]" % (name, idx)
                for msg in self.elementValidator.validate(label, elt):
                    yield msg
        else:
            yield "%s (%r) is not a %s" % (name, object, self.name)
class ListValidator(SequenceValidator):
    # sequence validator specialized to lists
    type = list
    name = 'list'
class TupleValidator(SequenceValidator):
    # sequence validator specialized to tuples
    type = tuple
    name = 'tuple'
class StringListValidator(ListValidator):
    # convenience: a list whose elements are all unicode strings
    name = 'string-list'
    def __init__(self):
        ListValidator.__init__(self, StringValidator())
class SourcedPropertiesValidator(Validator):
    # Validates a properties dict of the form {name: (value, source)}:
    # unicode keys, 2-tuple values, unicode source, JSON-able value.
    name = 'sourced-properties'
    def validate(self, name, object):
        if not isinstance(object, dict):
            yield "%s is not sourced properties (not a dict)" % (name,)
            return
        for k, v in iteritems(object):
            if not isinstance(k, text_type):
                yield "%s property name %r is not unicode" % (name, k)
            if not isinstance(v, tuple) or len(v) != 2:
                yield "%s property value for '%s' is not a 2-tuple" % (name, k)
                # NOTE(review): this `return` stops validation at the first
                # malformed tuple, skipping the remaining keys — looks like it
                # may have been intended as `continue`; confirm before changing
                return
            propval, propsrc = v
            if not isinstance(propsrc, text_type):
                yield "%s[%s] source %r is not unicode" % (name, k, propsrc)
            try:
                json.dumps(propval)
            except (TypeError, ValueError):
                yield "%s[%r] value is not JSON-able" % (name, k)
class JsonValidator(Validator):
    # accepts any value that json.dumps can serialize
    name = 'json'
    def validate(self, name, object):
        try:
            json.dumps(object)
        except (TypeError, ValueError):
            yield "%s[%r] value is not JSON-able" % (name, object)
class PatchValidator(Validator):
    # Validates a patch dict; every field is optional (NoneOk). This mirrors
    # the inline `patch=` sub-validator used in the _sourcestamp definition
    # further down in this module.
    name = 'patch'
    validator = DictValidator(
        body=NoneOk(BinaryValidator()),
        level=NoneOk(IntValidator()),
        subdir=NoneOk(StringValidator()),
        author=NoneOk(StringValidator()),
        comment=NoneOk(StringValidator()),
    )
    def validate(self, name, object):
        # delegate entirely to the class-level DictValidator
        for msg in self.validator.validate(name, object):
            yield msg
class MessageValidator(Validator):
    """Validate an MQ (routingKey, message) pair.

    Checks the routing key's element types, that its last element is one of
    the known event names, and the message body via `messageValidator`.
    """
    routingKeyValidator = TupleValidator(StrValidator())

    def __init__(self, events, messageValidator):
        # normalize event names to native strings for membership tests
        self.events = [bytes2NativeString(e) for e in set(events)]
        self.messageValidator = messageValidator

    def validate(self, name, routingKey_message):
        try:
            routingKey, message = routingKey_message
        except (TypeError, ValueError) as e:
            yield "%r: not a routing key and message: %s" % (routingKey_message, e)
            # Bug fix: without this return, the code below referenced the
            # unbound names routingKey/message and raised UnboundLocalError.
            return
        routingKeyBad = False
        for msg in self.routingKeyValidator.validate("routingKey", routingKey):
            yield msg
            routingKeyBad = True

        if not routingKeyBad:
            # the last routing-key element names the event
            event = routingKey[-1]
            if event not in self.events:
                yield "routing key event %r is not valid" % (event,)

        for msg in self.messageValidator.validate("%s message" % routingKey[0],
                                                  message):
            yield msg
class Selector(Validator):
    """Dispatch to one of several validators based on a selector argument.

    Each registered (selector, validator) pair is tried in order; a selector
    of None matches anything. Only the first matching validator runs.
    """

    def __init__(self):
        self.selectors = []

    def add(self, selector, validator):
        # selector is a callable taking the arg, or None to match always
        self.selectors.append((selector, validator))

    def validate(self, name, arg_object):
        try:
            arg, object = arg_object
        except (TypeError, ValueError) as e:
            # fixed garbled message ("not a not data options ...")
            yield "%r: not a data options and data dict: %s" % (arg_object, e)
            # Bug fix: without this return, the loop below referenced the
            # unbound names arg/object and raised UnboundLocalError.
            return
        for selector, validator in self.selectors:
            if selector is None or selector(arg):
                for msg in validator.validate(name, object):
                    yield msg
                return
        yield "no match for selector argument %r" % (arg,)
# Type definitions
# `message` maps a data-API type name (the plural, e.g. 'buildsets') to a
# Selector of MessageValidators for MQ messages; `dbdict` maps a row-dict
# type name (e.g. 'bsdict') to a DictValidator for DB-layer dictionaries.
message = {}
dbdict = {}
# parse and use a ResourceType class's dataFields into a validator
# masters
message['masters'] = Selector()
message['masters'].add(None,
    MessageValidator(
        events=[b'started', b'stopped'],
        messageValidator=DictValidator(
            masterid=IntValidator(),
            name=StringValidator(),
            active=BooleanValidator(),
            # last_active is not included
        )))
dbdict['masterdict'] = DictValidator(
    id=IntValidator(),
    name=StringValidator(),
    active=BooleanValidator(),
    last_active=DateTimeValidator(),
)
# sourcestamp
# shared field set, reused both for the top-level sourcestamps message and
# for nested sourcestamp dicts inside buildsets/changes
_sourcestamp = dict(
    ssid=IntValidator(),
    branch=NoneOk(StringValidator()),
    revision=NoneOk(StringValidator()),
    repository=StringValidator(),
    project=StringValidator(),
    codebase=StringValidator(),
    created_at=DateTimeValidator(),
    patch=NoneOk(DictValidator(
        body=NoneOk(BinaryValidator()),
        level=NoneOk(IntValidator()),
        subdir=NoneOk(StringValidator()),
        author=NoneOk(StringValidator()),
        comment=NoneOk(StringValidator()))),
)
message['sourcestamps'] = Selector()
message['sourcestamps'].add(None,
    DictValidator(
        **_sourcestamp
    ))
dbdict['ssdict'] = DictValidator(
    ssid=IntValidator(),
    branch=NoneOk(StringValidator()),
    revision=NoneOk(StringValidator()),
    patchid=NoneOk(IntValidator()),
    patch_body=NoneOk(BinaryValidator()),
    patch_level=NoneOk(IntValidator()),
    patch_subdir=NoneOk(StringValidator()),
    patch_author=NoneOk(StringValidator()),
    patch_comment=NoneOk(StringValidator()),
    codebase=StringValidator(),
    repository=StringValidator(),
    project=StringValidator(),
    created_at=DateTimeValidator(),
)
# builder
message['builders'] = Selector()
message['builders'].add(None,
    MessageValidator(
        events=[b'started', b'stopped'],
        messageValidator=DictValidator(
            builderid=IntValidator(),
            masterid=IntValidator(),
            name=StringValidator(),
        )))
dbdict['builderdict'] = DictValidator(
    id=IntValidator(),
    masterids=ListValidator(IntValidator()),
    name=StringValidator(),
    description=NoneOk(StringValidator()),
    tags=ListValidator(StringValidator()),
)
# worker
dbdict['workerdict'] = DictValidator(
    id=IntValidator(),
    name=StringValidator(),
    configured_on=ListValidator(
        DictValidator(
            masterid=IntValidator(),
            builderid=IntValidator(),
        )
    ),
    connected_to=ListValidator(IntValidator()),
    workerinfo=JsonValidator(),
)
# buildset
_buildset = dict(
    bsid=IntValidator(),
    external_idstring=NoneOk(StringValidator()),
    reason=StringValidator(),
    submitted_at=IntValidator(),
    complete=BooleanValidator(),
    complete_at=NoneOk(IntValidator()),
    results=NoneOk(IntValidator()),
    parent_buildid=NoneOk(IntValidator()),
    parent_relationship=NoneOk(StringValidator()),
)
_buildsetEvents = [b'new', b'complete']
message['buildsets'] = Selector()
# 'new' messages additionally carry the scheduler name
message['buildsets'].add(lambda k: k[-1] == 'new',
    MessageValidator(
        events=_buildsetEvents,
        messageValidator=DictValidator(
            scheduler=StringValidator(),  # only for 'new'
            sourcestamps=ListValidator(
                DictValidator(
                    **_sourcestamp
                )),
            **_buildset
        )))
message['buildsets'].add(None,
    MessageValidator(
        events=_buildsetEvents,
        messageValidator=DictValidator(
            sourcestamps=ListValidator(
                DictValidator(
                    **_sourcestamp
                )),
            **_buildset
        )))
dbdict['bsdict'] = DictValidator(
    bsid=IntValidator(),
    external_idstring=NoneOk(StringValidator()),
    reason=StringValidator(),
    sourcestamps=ListValidator(IntValidator()),
    submitted_at=DateTimeValidator(),
    complete=BooleanValidator(),
    complete_at=NoneOk(DateTimeValidator()),
    results=NoneOk(IntValidator()),
    parent_buildid=NoneOk(IntValidator()),
    parent_relationship=NoneOk(StringValidator()),
)
# buildrequest
message['buildrequests'] = Selector()
message['buildrequests'].add(None,
    MessageValidator(
        events=[b'new', b'claimed', b'unclaimed'],
        messageValidator=DictValidator(
            # TODO: probably wrong!
            brid=IntValidator(),
            builderid=IntValidator(),
            bsid=IntValidator(),
            buildername=StringValidator(),
        )))
# change
message['changes'] = Selector()
message['changes'].add(None,
    MessageValidator(
        events=[b'new'],
        messageValidator=DictValidator(
            changeid=IntValidator(),
            parent_changeids=ListValidator(IntValidator()),
            author=StringValidator(),
            files=ListValidator(StringValidator()),
            comments=StringValidator(),
            revision=NoneOk(StringValidator()),
            when_timestamp=IntValidator(),
            branch=NoneOk(StringValidator()),
            category=NoneOk(StringValidator()),
            revlink=NoneOk(StringValidator()),
            properties=SourcedPropertiesValidator(),
            repository=StringValidator(),
            project=StringValidator(),
            codebase=StringValidator(),
            sourcestamp=DictValidator(
                **_sourcestamp
            ),
        )))
dbdict['chdict'] = DictValidator(
    changeid=IntValidator(),
    author=StringValidator(),
    files=ListValidator(StringValidator()),
    comments=StringValidator(),
    revision=NoneOk(StringValidator()),
    when_timestamp=DateTimeValidator(),
    branch=NoneOk(StringValidator()),
    category=NoneOk(StringValidator()),
    revlink=NoneOk(StringValidator()),
    properties=SourcedPropertiesValidator(),
    repository=StringValidator(),
    project=StringValidator(),
    codebase=StringValidator(),
    sourcestampid=IntValidator(),
    parent_changeids=ListValidator(IntValidator()),
)
# changesources
dbdict['changesourcedict'] = DictValidator(
    id=IntValidator(),
    name=StringValidator(),
    masterid=NoneOk(IntValidator()),
)
# schedulers
dbdict['schedulerdict'] = DictValidator(
    id=IntValidator(),
    name=StringValidator(),
    masterid=NoneOk(IntValidator()),
    enabled=BooleanValidator(),
)
# builds
_build = dict(
    buildid=IntValidator(),
    number=IntValidator(),
    builderid=IntValidator(),
    buildrequestid=IntValidator(),
    workerid=IntValidator(),
    masterid=IntValidator(),
    started_at=IntValidator(),
    complete=BooleanValidator(),
    complete_at=NoneOk(IntValidator()),
    state_string=StringValidator(),
    results=NoneOk(IntValidator()),
)
_buildEvents = [b'new', b'complete']
message['builds'] = Selector()
message['builds'].add(None,
    MessageValidator(
        events=_buildEvents,
        messageValidator=DictValidator(
            **_build
        )))
# As build's properties are fetched at DATA API level,
# a distinction shall be made as both are not equal.
# Validates DB layer
dbdict['dbbuilddict'] = buildbase = DictValidator(
    id=IntValidator(),
    number=IntValidator(),
    builderid=IntValidator(),
    buildrequestid=IntValidator(),
    workerid=IntValidator(),
    masterid=IntValidator(),
    started_at=DateTimeValidator(),
    complete_at=NoneOk(DateTimeValidator()),
    state_string=StringValidator(),
    results=NoneOk(IntValidator()),
)
# Validates DATA API layer
# reuses every DB-layer field (buildbase.keys) plus optional properties
dbdict['builddict'] = DictValidator(
    properties=NoneOk(SourcedPropertiesValidator()), **buildbase.keys)
# steps
_step = dict(
    stepid=IntValidator(),
    number=IntValidator(),
    name=IdentifierValidator(50),
    buildid=IntValidator(),
    started_at=IntValidator(),
    complete=BooleanValidator(),
    complete_at=NoneOk(IntValidator()),
    state_string=StringValidator(),
    results=NoneOk(IntValidator()),
    urls=ListValidator(StringValidator()),
    hidden=BooleanValidator(),
)
_stepEvents = [b'new', b'complete']
message['steps'] = Selector()
message['steps'].add(None,
    MessageValidator(
        events=_stepEvents,
        messageValidator=DictValidator(
            **_step
        )))
dbdict['stepdict'] = DictValidator(
    id=IntValidator(),
    number=IntValidator(),
    name=IdentifierValidator(50),
    buildid=IntValidator(),
    started_at=DateTimeValidator(),
    complete_at=NoneOk(DateTimeValidator()),
    state_string=StringValidator(),
    results=NoneOk(IntValidator()),
    urls=ListValidator(StringValidator()),
    hidden=BooleanValidator(),
)
# logs
_log = dict(
    logid=IntValidator(),
    name=IdentifierValidator(50),
    stepid=IntValidator(),
    complete=BooleanValidator(),
    num_lines=IntValidator(),
    type=IdentifierValidator(1))
_logEvents = ['new', 'complete', 'appended']
# message['log']
dbdict['logdict'] = DictValidator(
    id=IntValidator(),
    stepid=IntValidator(),
    name=StringValidator(),
    slug=IdentifierValidator(50),
    complete=BooleanValidator(),
    num_lines=IntValidator(),
    type=IdentifierValidator(1))
# external functions
def _verify(testcase, validator, name, object):
msgs = list(validator.validate(name, object))
if msgs:
msg = "; ".join(msgs)
if testcase:
testcase.fail(msg)
else:
raise AssertionError(msg)
def verifyMessage(testcase, routingKey, message_):
    """Verify an MQ message against the validator registered for its type."""
    # the validator is a Selector wrapping a MessageValidator, so we need to
    # pass (arg, (routingKey, message)), where the routing key is the arg
    # the "type" of the message is identified by last path name
    # -1 being the event, and -2 the id.
    # routingKey[-3] must therefore be one of the keys registered in the
    # module-level `message` dict ('masters', 'buildsets', ...)
    validator = message[bytes2NativeString(routingKey[-3])]
    _verify(testcase, validator, '',
            (routingKey, (routingKey, message_)))
def verifyDbDict(testcase, type, value):
    # verify a DB-layer row dict against the registered dbdict validator
    _verify(testcase, dbdict[type], type, value)
def verifyData(testcase, entityType, options, value):
    # verify a data-API value; the entityType object is itself the validator
    # NOTE(review): `options` is accepted but unused here — presumably kept
    # for API symmetry with callers; confirm before removing
    _verify(testcase, entityType, entityType.name, value)
def verifyType(testcase, name, value, validator):
    # verify an arbitrary value against an explicitly supplied validator
    _verify(testcase, validator, name, value)
| gpl-2.0 |
repotvsupertuga/repo | plugin.video.pancas/resources/lib/resolvers/filehoot.py | 23 | 1140 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from resources.lib.libraries import client
def resolve(url):
    """Resolve a filehoot.com page/embed URL to the direct media URL.

    Returns the direct file URL, or None when the page cannot be fetched or
    parsed (best-effort, matching the other resolver plugins).
    """
    try:
        # normalize embed URLs, pull out the media id, then rebuild the
        # canonical embed URL before fetching
        url = url.replace('/embed-', '/')
        url = re.compile('//.+?/([\w]+)').findall(url)[0]
        url = 'http://filehoot.com/embed-%s.html' % url

        result = client.request(url, mobile=True)

        url = re.compile('file *: *"(http.+?)"').findall(result)[0]
        return url
    except Exception:
        # was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; keep the best-effort "None on failure" contract
        return
| gpl-2.0 |
leayousoufov/laravel-a-love-story | node_modules/node-gyp/gyp/pylib/gyp/MSVSUserFile.py | 2710 | 5094 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
  """Visual Studio XML user file writer (.vcproj.user)."""
  def __init__(self, user_file_path, version, name):
    """Initializes the user file.

    Args:
      user_file_path: Path to the user file.
      version: Version info.
      name: Name of the user file.
    """
    self.user_file_path = user_file_path
    self.version = version
    self.name = name
    # maps config name -> easy_xml spec list for that Configuration node
    self.configurations = {}
  def AddConfig(self, name):
    """Adds a configuration to the project.

    Args:
      name: Configuration name.
    """
    self.configurations[name] = ['Configuration', {'Name': name}]
  def AddDebugSettings(self, config_name, command, environment = {},
                       working_directory=""):
    """Adds a DebugSettings node to the user file for a particular config.

    Args:
      command: command line to run.  First element in the list is the
        executable.  All elements of the command will be quoted if
        necessary.
      working_directory: other files which may trigger the rule. (optional)
    """
    # NOTE(review): `environment = {}` is a mutable default argument; it is
    # only read here, never mutated, so this is harmless in practice.
    command = _QuoteWin32CommandLineArgs(command)
    abs_command = _FindCommandInPath(command[0])
    if environment and isinstance(environment, dict):
      # flatten {k: v} into the 'k="v" k2="v2"' form MSVS expects
      env_list = ['%s="%s"' % (key, val)
                  for (key,val) in environment.iteritems()]
      environment = ' '.join(env_list)
    else:
      environment = ''
    n_cmd = ['DebugSettings',
             {'Command': abs_command,
              'WorkingDirectory': working_directory,
              'CommandArguments': " ".join(command[1:]),
              'RemoteMachine': socket.gethostname(),
              'Environment': environment,
              'EnvironmentMerge': 'true',
              # Currently these are all "dummy" values that we're just setting
              # in the default manner that MSVS does it.  We could use some of
              # these to add additional capabilities, I suppose, but they might
              # not have parity with other platforms then.
              'Attach': 'false',
              'DebuggerType': '3',  # 'auto' debugger
              'Remote': '1',
              'RemoteCommand': '',
              'HttpUrl': '',
              'PDBPath': '',
              'SQLDebugging': '',
              'DebuggerFlavor': '0',
              'MPIRunCommand': '',
              'MPIRunArguments': '',
              'MPIRunWorkingDirectory': '',
              'ApplicationCommand': '',
              'ApplicationArguments': '',
              'ShimCommand': '',
              'MPIAcceptMode': '',
              'MPIAcceptFilter': ''
             }]
    # Find the config, and add it if it doesn't exist.
    if config_name not in self.configurations:
      self.AddConfig(config_name)
    # Add the DebugSettings onto the appropriate config.
    self.configurations[config_name].append(n_cmd)
  def WriteIfChanged(self):
    """Writes the user file."""
    configs = ['Configurations']
    for config, spec in sorted(self.configurations.iteritems()):
      configs.append(spec)
    content = ['VisualStudioUserFile',
               {'Version': self.version.ProjectVersion(),
                'Name': self.name
               },
               configs]
    # easy_xml only rewrites the file when the content actually changed
    easy_xml.WriteXmlIfChanged(content, self.user_file_path,
                               encoding="Windows-1252")
| mit |
samvarankashyap/googlecloudutility2 | lib/pyasn1/codec/cer/encoder.py | 226 | 3161 | # CER encoder
from pyasn1.type import univ
from pyasn1.codec.ber import encoder
from pyasn1.compat.octets import int2oct, null
class BooleanEncoder(encoder.IntegerEncoder):
    def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
        # canonical single-octet boolean: 0x00 for FALSE, 0xFF for TRUE
        if client == 0:
            return int2oct(0), 0
        return int2oct(255), 0
class BitStringEncoder(encoder.BitStringEncoder):
    def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
        # delegate to the BER encoder but force the chunk size to 1000
        # octets, the segment size CER prescribes for long strings
        return encoder.BitStringEncoder.encodeValue(
            self, encodeFun, client, defMode, 1000
        )
class OctetStringEncoder(encoder.OctetStringEncoder):
    def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
        # same as BitStringEncoder: forced 1000-octet chunking per CER
        return encoder.OctetStringEncoder.encodeValue(
            self, encodeFun, client, defMode, 1000
        )
# specialized RealEncoder here
# specialized GeneralStringEncoder here
# specialized GeneralizedTimeEncoder here
# specialized UTCTimeEncoder here
class SetOfEncoder(encoder.SequenceOfEncoder):
    # Encodes both Set and SetOf with the canonical component ordering:
    # Set components are sorted by tag, SetOf components by their encoding.
    def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
        if isinstance(client, univ.SequenceAndSetBase):
            client.setDefaultComponents()
        client.verifySizeSpec()
        substrate = null; idx = len(client)
        # This is certainly a hack but how else do I distinguish SetOf
        # from Set if they have the same tags&constraints?
        if isinstance(client, univ.SequenceAndSetBase):
            # Set
            comps = []
            # walk components in reverse, skipping absent optionals and
            # components equal to their declared defaults
            while idx > 0:
                idx = idx - 1
                if client[idx] is None:  # Optional component
                    continue
                if client.getDefaultComponentByPosition(idx) == client[idx]:
                    continue
                comps.append(client[idx])
            # canonical ordering: sort by (minimal) tag set
            comps.sort(key=lambda x: isinstance(x, univ.Choice) and \
                       x.getMinTagSet() or x.getTagSet())
            for c in comps:
                substrate += encodeFun(c, defMode, maxChunkSize)
        else:
            # SetOf
            compSubs = []
            while idx > 0:
                idx = idx - 1
                compSubs.append(
                    encodeFun(client[idx], defMode, maxChunkSize)
                )
            # canonical ordering: sort the encoded octets themselves
            compSubs.sort()  # perhaps padding's not needed
            substrate = null
            for compSub in compSubs:
                substrate += compSub
        return substrate, 1
# Extend the BER tag/type maps with the CER-specific encoders above.
tagMap = encoder.tagMap.copy()
tagMap.update({
    univ.Boolean.tagSet: BooleanEncoder(),
    univ.BitString.tagSet: BitStringEncoder(),
    univ.OctetString.tagSet: OctetStringEncoder(),
    univ.SetOf().tagSet: SetOfEncoder()  # conflicts with Set
})
typeMap = encoder.typeMap.copy()
typeMap.update({
    # SetOfEncoder handles both (it distinguishes them at runtime)
    univ.Set.typeId: SetOfEncoder(),
    univ.SetOf.typeId: SetOfEncoder()
})
class Encoder(encoder.Encoder):
    # same as the BER encoder, but defMode defaults to 0 (non-definite
    # length form) rather than the BER encoder's default
    def __call__(self, client, defMode=0, maxChunkSize=0):
        return encoder.Encoder.__call__(self, client, defMode, maxChunkSize)
# module-level singleton: the public CER encode() entry point
encode = Encoder(tagMap, typeMap)
# EncoderFactory queries class instance and builds a map of tags -> encoders
| apache-2.0 |
Arno-Nymous/pyload | module/plugins/accounts/UptoboxCom.py | 5 | 1247 | # -*- coding: utf-8 -*-
import time
import re
import urlparse
from ..internal.misc import json
from ..internal.XFSAccount import XFSAccount
class UptoboxCom(XFSAccount):
    """Uptobox.com account plugin (XFileSharing-based)."""
    __name__ = "UptoboxCom"
    __type__ = "account"
    __version__ = "0.23"
    __status__ = "testing"
    __description__ = """Uptobox.com account plugin"""
    __license__ = "GPLv3"
    __authors__ = [("benbox69", "dev@tollet.me"),
                   ("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
    PLUGIN_DOMAIN = "uptobox.com"
    PLUGIN_URL = "https://uptobox.com/"
    # regexes matched against the account pages by the XFSAccount base class
    PREMIUM_PATTERN = r'Premium member'
    VALID_UNTIL_PATTERN = r"class='expiration-date .+?'>(\d{1,2} [\w^_]+ \d{4})"
    def signin(self, user, password, data):
        # already logged in (valid session cookie)? then skip the login POST
        html = self.load(self.LOGIN_URL, cookies=self.COOKIES)
        if re.search(self.LOGIN_SKIP_PATTERN, html):
            self.skip_login()
        html = self.load(self.PLUGIN_URL,
                         get={'op': "login",
                              'referer': "homepage"},
                         post={'login': user,
                               'password': password},
                         cookies=self.COOKIES)
        # after a successful login the skip pattern must appear in the page
        if re.search(self.LOGIN_SKIP_PATTERN, html) is None:
            self.fail_login()
| gpl-3.0 |
stephane-martin/salt-debian-packaging | salt-2016.3.3/salt/modules/cmdmod.py | 2 | 98226 | # -*- coding: utf-8 -*-
'''
A module for shelling out.
Keep in mind that this module is insecure, in that it can give whomever has
access to the master root execution access to all salt minions.
'''
from __future__ import absolute_import
# Import python libs
import functools
import glob
import json
import logging
import os
import shutil
import subprocess
import sys
import time
import traceback
from salt.utils import vt
# Import salt libs
import salt.utils
import salt.utils.timed_subprocess
import salt.grains.extra
import salt.ext.six as six
from salt.exceptions import CommandExecutionError, TimedProcTimeoutError
from salt.log import LOG_LEVELS
from salt.ext.six.moves import range
from salt.ext.six.moves import shlex_quote as _cmd_quote
# Only available on POSIX systems, nonfatal on windows
try:
    import pwd
except ImportError:
    pass
if salt.utils.is_windows():
    from salt.utils.win_runas import runas as win_runas
    # checked in _run() before attempting a runas on Windows
    HAS_WIN_RUNAS = True
else:
    HAS_WIN_RUNAS = False
# this module is usable from proxy minions of any type
__proxyenabled__ = ['*']
# Define the module's virtual name
__virtualname__ = 'cmd'
# Set up logging
log = logging.getLogger(__name__)
# shell of the user running the minion, used as the default for cmd.run
DEFAULT_SHELL = salt.grains.extra.shell()['shell']
def __virtual__():
    '''
    Overwriting the cmd python module makes debugging modules
    with pdb a bit harder so lets do it this way instead.
    '''
    # expose this module under the 'cmd' virtual name (see __virtualname__)
    return __virtualname__
def _check_cb(cb_):
'''
If the callback is None or is not callable, return a lambda that returns
the value passed.
'''
if cb_ is not None:
if hasattr(cb_, '__call__'):
return cb_
else:
log.error('log_callback is not callable, ignoring')
return lambda x: x
def _python_shell_default(python_shell, __pub_jid):
'''
Set python_shell default based on remote execution and __opts__['cmd_safe']
'''
try:
# Default to python_shell=True when run directly from remote execution
# system. Cross-module calls won't have a jid.
if __pub_jid and python_shell is None:
return True
elif __opts__.get('cmd_safe', True) is False and python_shell is None:
# Override-switch for python_shell
return True
except NameError:
pass
return python_shell
def _chroot_pids(chroot):
pids = []
for root in glob.glob('/proc/[0-9]*/root'):
try:
link = os.path.realpath(root)
if link.startswith(chroot):
pids.append(int(os.path.basename(
os.path.dirname(root)
)))
except OSError:
pass
return pids
def _render_cmd(cmd, cwd, template, saltenv='base', pillarenv=None, pillar_override=None):
    '''
    If template is a valid template engine, process the cmd and cwd through
    that engine.  Returns the (cmd, cwd) pair, rendered when a template
    engine was requested, unchanged otherwise.
    '''
    if not template:
        return (cmd, cwd)
    # render the path as a template using path_template_engine as the engine
    if template not in salt.utils.templates.TEMPLATE_REGISTRY:
        raise CommandExecutionError(
            'Attempted to render file paths with unavailable engine '
            '{0}'.format(template)
        )
    # context handed to the template engine: salt modules, pillar, grains...
    kwargs = {}
    kwargs['salt'] = __salt__
    if pillarenv is not None or pillar_override is not None:
        # a custom pillarenv/override requires re-compiling pillar data
        pillarenv = pillarenv or __opts__['pillarenv']
        kwargs['pillar'] = _gather_pillar(pillarenv, pillar_override)
    else:
        kwargs['pillar'] = __pillar__
    kwargs['grains'] = __grains__
    kwargs['opts'] = __opts__
    kwargs['saltenv'] = saltenv
    def _render(contents):
        # The template engines operate on files, so spool the string to a
        # temp file, render it, and clean up.
        # write out path to temp file
        tmp_path_fn = salt.utils.mkstemp()
        with salt.utils.fopen(tmp_path_fn, 'w+') as fp_:
            fp_.write(contents)
        data = salt.utils.templates.TEMPLATE_REGISTRY[template](
            tmp_path_fn,
            to_str=True,
            **kwargs
        )
        salt.utils.safe_rm(tmp_path_fn)
        if not data['result']:
            # Failed to render the template
            raise CommandExecutionError(
                'Failed to execute cmd with error: {0}'.format(
                    data['data']
                )
            )
        else:
            return data['data']
    cmd = _render(cmd)
    cwd = _render(cwd)
    return (cmd, cwd)
def _check_loglevel(level='info', quiet=False):
    '''
    Retrieve the level code for use in logging.Logger.log().
    Returns None when quiet logging was requested.
    '''
    def _bad_level(level):
        # unknown level: complain once and fall back to 'info'
        log.error(
            'Invalid output_loglevel \'{0}\'. Valid levels are: {1}. Falling '
            'back to \'info\'.'
            .format(
                level,
                ', '.join(
                    sorted(LOG_LEVELS, key=LOG_LEVELS.get, reverse=True)
                )
            )
        )
        return LOG_LEVELS['info']

    # quiet=True or level='quiet' both mean "no logging at all"
    if salt.utils.is_true(quiet) or str(level).lower() == 'quiet':
        return None

    try:
        level = level.lower()
    except AttributeError:
        # non-string level (e.g. an int or None)
        return _bad_level(level)
    if level not in LOG_LEVELS:
        return _bad_level(level)
    return LOG_LEVELS[level]
def _parse_env(env):
if not env:
env = {}
if isinstance(env, list):
env = salt.utils.repack_dictlist(env)
if not isinstance(env, dict):
env = {}
return env
def _gather_pillar(pillarenv, pillar_override):
    '''
    Whenever a state run starts, gather the pillar data fresh.
    Returns the compiled pillar dict, with ``pillar_override`` merged on
    top when it is a dict.
    '''
    pillar = salt.pillar.get_pillar(
        __opts__,
        __grains__,
        __opts__['id'],
        __opts__['environment'],
        pillar=pillar_override,
        pillarenv=pillarenv
    )
    ret = pillar.compile_pillar()
    # overrides win over compiled pillar values
    if pillar_override and isinstance(pillar_override, dict):
        ret.update(pillar_override)
    return ret
def _run(cmd,
cwd=None,
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
output_loglevel='debug',
log_callback=None,
runas=None,
shell=DEFAULT_SHELL,
python_shell=False,
env=None,
clean_env=False,
rstrip=True,
template=None,
umask=None,
timeout=None,
with_communicate=True,
reset_system_locale=True,
ignore_retcode=False,
saltenv='base',
pillarenv=None,
pillar_override=None,
use_vt=False,
password=None,
bg=False,
**kwargs):
'''
Do the DRY thing and only call subprocess.Popen() once
'''
if _is_valid_shell(shell) is False:
log.warning(
'Attempt to run a shell command with what may be an invalid shell! '
'Check to ensure that the shell <{0}> is valid for this user.'
.format(shell))
log_callback = _check_cb(log_callback)
# Set the default working directory to the home directory of the user
# salt-minion is running as. Defaults to home directory of user under which
# the minion is running.
if not cwd:
cwd = os.path.expanduser('~{0}'.format('' if not runas else runas))
# make sure we can access the cwd
# when run from sudo or another environment where the euid is
# changed ~ will expand to the home of the original uid and
# the euid might not have access to it. See issue #1844
if not os.access(cwd, os.R_OK):
cwd = '/'
if salt.utils.is_windows():
cwd = os.tempnam()[:3]
else:
# Handle edge cases where numeric/other input is entered, and would be
# yaml-ified into non-string types
cwd = str(cwd)
if not salt.utils.is_windows():
if not os.path.isfile(shell) or not os.access(shell, os.X_OK):
msg = 'The shell {0} is not available'.format(shell)
raise CommandExecutionError(msg)
if salt.utils.is_windows() and use_vt: # Memozation so not much overhead
raise CommandExecutionError('VT not available on windows')
if shell.lower().strip() == 'powershell':
# If we were called by script(), then fakeout the Windows
# shell to run a Powershell script.
# Else just run a Powershell command.
stack = traceback.extract_stack(limit=2)
# extract_stack() returns a list of tuples.
# The last item in the list [-1] is the current method.
# The third item[2] in each tuple is the name of that method.
if stack[-2][2] == 'script':
cmd = 'Powershell -NonInteractive -ExecutionPolicy Bypass -File ' + cmd
else:
cmd = 'Powershell -NonInteractive "{0}"'.format(cmd.replace('"', '\\"'))
# munge the cmd and cwd through the template
(cmd, cwd) = _render_cmd(cmd, cwd, template, saltenv, pillarenv, pillar_override)
ret = {}
env = _parse_env(env)
for bad_env_key in (x for x, y in six.iteritems(env) if y is None):
log.error('Environment variable \'{0}\' passed without a value. '
'Setting value to an empty string'.format(bad_env_key))
env[bad_env_key] = ''
if runas and salt.utils.is_windows():
if not password:
msg = 'password is a required argument for runas on Windows'
raise CommandExecutionError(msg)
if not HAS_WIN_RUNAS:
msg = 'missing salt/utils/win_runas.py'
raise CommandExecutionError(msg)
if not isinstance(cmd, list):
cmd = salt.utils.shlex_split(cmd, posix=False)
cmd = ' '.join(cmd)
return win_runas(cmd, runas, password, cwd)
if runas:
# Save the original command before munging it
try:
pwd.getpwnam(runas)
except KeyError:
raise CommandExecutionError(
'User \'{0}\' is not available'.format(runas)
)
try:
# Getting the environment for the runas user
# There must be a better way to do this.
py_code = (
'import sys, os, itertools; '
'sys.stdout.write(\"\\0\".join(itertools.chain(*os.environ.items())))'
)
if __grains__['os'] in ['MacOS', 'Darwin']:
env_cmd = ('sudo', '-i', '-u', runas, '--',
sys.executable)
elif __grains__['os'] in ['FreeBSD']:
env_cmd = ('su', '-', runas, '-c',
"{0} -c {1}".format(shell, sys.executable))
elif __grains__['os_family'] in ['Solaris']:
env_cmd = ('su', '-', runas, '-c', sys.executable)
elif __grains__['os_family'] in ['AIX']:
env_cmd = ('su', runas, '-c', sys.executable)
else:
env_cmd = ('su', '-s', shell, '-', runas, '-c', sys.executable)
env_encoded = subprocess.Popen(
env_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE
).communicate(py_code)[0]
import itertools
env_runas = dict(itertools.izip(*[iter(env_encoded.split(b'\0'))]*2))
env_runas.update(env)
env = env_runas
# Encode unicode kwargs to filesystem encoding to avoid a
# UnicodeEncodeError when the subprocess is invoked.
fse = sys.getfilesystemencoding()
for key, val in six.iteritems(env):
if isinstance(val, six.text_type):
env[key] = val.encode(fse)
except ValueError:
raise CommandExecutionError(
'Environment could not be retrieved for User \'{0}\''.format(
runas
)
)
if _check_loglevel(output_loglevel) is not None:
# Always log the shell commands at INFO unless quiet logging is
# requested. The command output is what will be controlled by the
# 'loglevel' parameter.
msg = (
'Executing command {0}{1}{0} {2}in directory \'{3}\'{4}'.format(
'\'' if not isinstance(cmd, list) else '',
cmd,
'as user \'{0}\' '.format(runas) if runas else '',
cwd,
' in the background, no output will be logged' if bg else ''
)
)
log.info(log_callback(msg))
if reset_system_locale is True:
if not salt.utils.is_windows():
# Default to C!
# Salt only knows how to parse English words
# Don't override if the user has passed LC_ALL
env.setdefault('LC_CTYPE', 'C')
env.setdefault('LC_NUMERIC', 'C')
env.setdefault('LC_TIME', 'C')
env.setdefault('LC_COLLATE', 'C')
env.setdefault('LC_MONETARY', 'C')
env.setdefault('LC_MESSAGES', 'C')
env.setdefault('LC_PAPER', 'C')
env.setdefault('LC_NAME', 'C')
env.setdefault('LC_ADDRESS', 'C')
env.setdefault('LC_TELEPHONE', 'C')
env.setdefault('LC_MEASUREMENT', 'C')
env.setdefault('LC_IDENTIFICATION', 'C')
else:
# On Windows set the codepage to US English.
if python_shell:
cmd = 'chcp 437 > nul & ' + cmd
if clean_env:
run_env = env
else:
run_env = os.environ.copy()
run_env.update(env)
if python_shell is None:
python_shell = False
kwargs = {'cwd': cwd,
'shell': python_shell,
'env': run_env,
'stdin': str(stdin) if stdin is not None else stdin,
'stdout': stdout,
'stderr': stderr,
'with_communicate': with_communicate,
'timeout': timeout,
'bg': bg,
}
if umask is not None:
_umask = str(umask).lstrip('0')
if _umask == '':
msg = 'Zero umask is not allowed.'
raise CommandExecutionError(msg)
try:
_umask = int(_umask, 8)
except ValueError:
msg = 'Invalid umask: \'{0}\''.format(umask)
raise CommandExecutionError(msg)
else:
_umask = None
if runas or umask:
kwargs['preexec_fn'] = functools.partial(
salt.utils.chugid_and_umask,
runas,
_umask)
if not salt.utils.is_windows():
# close_fds is not supported on Windows platforms if you redirect
# stdin/stdout/stderr
if kwargs['shell'] is True:
kwargs['executable'] = shell
kwargs['close_fds'] = True
if not os.path.isabs(cwd) or not os.path.isdir(cwd):
raise CommandExecutionError(
'Specified cwd \'{0}\' either not absolute or does not exist'
.format(cwd)
)
if python_shell is not True and not isinstance(cmd, list):
posix = True
if salt.utils.is_windows():
posix = False
cmd = salt.utils.shlex_split(cmd, posix=posix)
if not use_vt:
# This is where the magic happens
try:
proc = salt.utils.timed_subprocess.TimedProc(cmd, **kwargs)
except (OSError, IOError) as exc:
raise CommandExecutionError(
'Unable to run command \'{0}\' with the context \'{1}\', '
'reason: {2}'.format(cmd, kwargs, exc)
)
try:
proc.run()
except TimedProcTimeoutError as exc:
ret['stdout'] = str(exc)
ret['stderr'] = ''
ret['retcode'] = None
ret['pid'] = proc.process.pid
# ok return code for timeouts?
ret['retcode'] = 1
return ret
out, err = proc.stdout, proc.stderr
if err is None:
# Will happen if redirect_stderr is True, since stderr was sent to
# stdout.
err = ''
if rstrip:
if out is not None:
out = salt.utils.to_str(out).rstrip()
if err is not None:
err = salt.utils.to_str(err).rstrip()
ret['pid'] = proc.process.pid
ret['retcode'] = proc.process.returncode
ret['stdout'] = out
ret['stderr'] = err
else:
to = ''
if timeout:
to = ' (timeout: {0}s)'.format(timeout)
if _check_loglevel(output_loglevel) is not None:
msg = 'Running {0} in VT{1}'.format(cmd, to)
log.debug(log_callback(msg))
stdout, stderr = '', ''
now = time.time()
if timeout:
will_timeout = now + timeout
else:
will_timeout = -1
try:
proc = vt.Terminal(cmd,
shell=True,
log_stdout=True,
log_stderr=True,
cwd=cwd,
preexec_fn=kwargs.get('preexec_fn', None),
env=run_env,
log_stdin_level=output_loglevel,
log_stdout_level=output_loglevel,
log_stderr_level=output_loglevel,
stream_stdout=True,
stream_stderr=True)
ret['pid'] = proc.pid
while proc.has_unread_data:
try:
try:
time.sleep(0.5)
try:
cstdout, cstderr = proc.recv()
except IOError:
cstdout, cstderr = '', ''
if cstdout:
stdout += cstdout
else:
cstdout = ''
if cstderr:
stderr += cstderr
else:
cstderr = ''
if timeout and (time.time() > will_timeout):
ret['stderr'] = (
'SALT: Timeout after {0}s\n{1}').format(
timeout, stderr)
ret['retcode'] = None
break
except KeyboardInterrupt:
ret['stderr'] = 'SALT: User break\n{0}'.format(stderr)
ret['retcode'] = 1
break
except vt.TerminalException as exc:
log.error(
'VT: {0}'.format(exc),
exc_info_on_loglevel=logging.DEBUG)
ret = {'retcode': 1, 'pid': '2'}
break
# only set stdout on success as we already mangled in other
# cases
ret['stdout'] = stdout
if not proc.isalive():
# Process terminated, i.e., not canceled by the user or by
# the timeout
ret['stderr'] = stderr
ret['retcode'] = proc.exitstatus
ret['pid'] = proc.pid
finally:
proc.close(terminate=True, kill=True)
try:
if ignore_retcode:
__context__['retcode'] = 0
else:
__context__['retcode'] = ret['retcode']
except NameError:
# Ignore the context error during grain generation
pass
return ret
def _run_quiet(cmd,
               cwd=None,
               stdin=None,
               runas=None,
               shell=DEFAULT_SHELL,
               python_shell=False,
               env=None,
               template=None,
               umask=None,
               timeout=None,
               reset_system_locale=True,
               saltenv='base',
               pillarenv=None,
               pillar_override=None):
    '''
    Run ``cmd`` with all logging suppressed and return only its stdout.

    Used for running commands quietly during minion startup.
    '''
    # Delegate to _run with quiet logging and stderr folded into stdout,
    # then unwrap just the 'stdout' entry of the result dict.
    run_opts = {
        'runas': runas,
        'cwd': cwd,
        'stdin': stdin,
        'stderr': subprocess.STDOUT,
        'output_loglevel': 'quiet',
        'log_callback': None,
        'shell': shell,
        'python_shell': python_shell,
        'env': env,
        'template': template,
        'umask': umask,
        'timeout': timeout,
        'reset_system_locale': reset_system_locale,
        'saltenv': saltenv,
        'pillarenv': pillarenv,
        'pillar_override': pillar_override,
    }
    return _run(cmd, **run_opts)['stdout']
def _run_all_quiet(cmd,
                   cwd=None,
                   stdin=None,
                   runas=None,
                   shell=DEFAULT_SHELL,
                   python_shell=False,
                   env=None,
                   template=None,
                   umask=None,
                   timeout=None,
                   reset_system_locale=True,
                   saltenv='base',
                   pillarenv=None,
                   pillar_override=None,
                   output_loglevel=None):
    '''
    Quietly run ``cmd`` and return the full result dict from ``_run``.

    Used for running commands quietly during minion startup.

    The ``output_loglevel`` argument is accepted but ignored ('quiet' is
    always used). It exists so that cmd.run_all can be aliased directly to
    _run_all_quiet in certain chicken-and-egg situations where modules need
    to work both before and after the __salt__ dictionary is populated
    (cf dracr.py).
    '''
    # Same delegation pattern as _run_quiet, but return the whole dict
    # (stdout, stderr, retcode, pid) instead of just stdout.
    run_opts = {
        'runas': runas,
        'cwd': cwd,
        'stdin': stdin,
        'shell': shell,
        'python_shell': python_shell,
        'env': env,
        'output_loglevel': 'quiet',
        'log_callback': None,
        'template': template,
        'umask': umask,
        'timeout': timeout,
        'reset_system_locale': reset_system_locale,
        'saltenv': saltenv,
        'pillarenv': pillarenv,
        'pillar_override': pillar_override,
    }
    return _run(cmd, **run_opts)
def run(cmd,
        cwd=None,
        stdin=None,
        runas=None,
        shell=DEFAULT_SHELL,
        python_shell=None,
        env=None,
        clean_env=False,
        template=None,
        rstrip=True,
        umask=None,
        output_loglevel='debug',
        log_callback=None,
        timeout=None,
        reset_system_locale=True,
        ignore_retcode=False,
        saltenv='base',
        use_vt=False,
        bg=False,
        **kwargs):
    r'''
    Execute the passed command and return the output as a string
    Note that ``env`` represents the environment variables for the command, and
    should be formatted as a dict, or a YAML string which resolves to a dict.
    :param str cmd: The command to run. ex: ``ls -lart /home``
    :param str cwd: The current working directory to execute the command in,
        defaults to ``/root`` (``C:\`` in windows)
    :param str stdin: A string of standard input can be specified for the
        command to be run using the ``stdin`` parameter. This can be useful in cases
        where sensitive information must be read from standard input.:
    :param str runas: User to run script as. If running on a Windows minion you
        must also pass a password
    :param str password: Windows only. Pass a password if you specify runas.
        This parameter will be ignored for other OS's
    .. versionadded:: 2016.3.0
    :param str shell: Shell to execute under. Defaults to the system default
        shell.
    :param bool python_shell: If False, let python handle the positional
        arguments. Set to True to use shell features, such as pipes or redirection
    :param bool bg: If True, run command in background and do not await or deliver it's results
    :param list env: A list of environment variables to be set prior to
        execution.
        Example:
        .. code-block:: yaml
            salt://scripts/foo.sh:
              cmd.script:
                - env:
                  - BATCH: 'yes'
        .. warning::
            The above illustrates a common PyYAML pitfall, that **yes**,
            **no**, **on**, **off**, **true**, and **false** are all loaded as
            boolean ``True`` and ``False`` values, and must be enclosed in
            quotes to be used as strings. More info on this (and other) PyYAML
            idiosyncrasies can be found :doc:`here
            </topics/troubleshooting/yaml_idiosyncrasies>`.
        Variables as values are not evaluated. So $PATH in the following
        example is a literal '$PATH':
        .. code-block:: yaml
            salt://scripts/bar.sh:
              cmd.script:
                - env: "PATH=/some/path:$PATH"
        One can still use the existing $PATH by using a bit of Jinja:
        .. code-block:: yaml
            {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
            mycommand:
              cmd.run:
                - name: ls -l /
                - env:
                  - PATH: {{ [current_path, '/my/special/bin']|join(':') }}
    :param bool clean_env: Attempt to clean out all other shell environment
        variables and set only those provided in the 'env' argument to this
        function.
    :param str template: If this setting is applied then the named templating
        engine will be used to render the downloaded file. Currently jinja, mako,
        and wempy are supported
    :param bool rstrip: Strip all whitespace off the end of output before it is
        returned.
    :param str umask: The umask (in octal) to use when running the command.
    :param str output_loglevel: Control the loglevel at which the output from
        the command is logged. Note that the command being run will still be logged
        (loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
    :param int timeout: A timeout in seconds for the executed process to return.
    :param bool use_vt: Use VT utils (saltstack) to stream the command output
        more interactively to the console and the logs. This is experimental.
    .. warning::
        This function does not process commands through a shell
        unless the python_shell flag is set to True. This means that any
        shell-specific functionality such as 'echo' or the use of pipes,
        redirection or &&, should either be migrated to cmd.shell or
        have the python_shell=True flag set here.
        The use of python_shell=True means that the shell will accept _any_ input
        including potentially malicious commands such as 'good_command;rm -rf /'.
        Be absolutely certain that you have sanitized your input prior to using
        python_shell=True
    CLI Example:
    .. code-block:: bash
        salt '*' cmd.run "ls -l | awk '/foo/{print \\$2}'"
    The template arg can be set to 'jinja' or another supported template
    engine to render the command arguments before execution.
    For example:
    .. code-block:: bash
        salt '*' cmd.run template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
    Specify an alternate shell with the shell parameter:
    .. code-block:: bash
        salt '*' cmd.run "Get-ChildItem C:\\ " shell='powershell'
    A string of standard input can be specified for the command to be run using
    the ``stdin`` parameter. This can be useful in cases where sensitive
    information must be read from standard input.:
    .. code-block:: bash
        salt '*' cmd.run "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
    If an equal sign (``=``) appears in an argument to a Salt command it is
    interpreted as a keyword argument in the format ``key=val``. That
    processing can be bypassed in order to pass an equal sign through to the
    remote shell command by manually specifying the kwarg:
    .. code-block:: bash
        salt '*' cmd.run cmd='sed -e s/=/:/g'
    '''
    # Resolve the effective python_shell value; the helper also considers
    # whether this call arrived via a remote publication (__pub_jid).
    python_shell = _python_shell_default(python_shell,
                                         kwargs.get('__pub_jid', ''))
    # Run with stderr merged into stdout (subprocess.STDOUT), since this
    # function returns a single output string.
    ret = _run(cmd,
               runas=runas,
               shell=shell,
               python_shell=python_shell,
               cwd=cwd,
               stdin=stdin,
               stderr=subprocess.STDOUT,
               env=env,
               clean_env=clean_env,
               template=template,
               rstrip=rstrip,
               umask=umask,
               output_loglevel=output_loglevel,
               log_callback=log_callback,
               timeout=timeout,
               reset_system_locale=reset_system_locale,
               ignore_retcode=ignore_retcode,
               saltenv=saltenv,
               pillarenv=kwargs.get('pillarenv'),
               pillar_override=kwargs.get('pillar'),
               use_vt=use_vt,
               password=kwargs.get('password', None),
               bg=bg)
    log_callback = _check_cb(log_callback)
    lvl = _check_loglevel(output_loglevel)
    if lvl is not None:
        # Escalate failed commands to at least ERROR so failures remain
        # visible even when a lower output_loglevel was requested.
        if not ignore_retcode and ret['retcode'] != 0:
            if lvl < LOG_LEVELS['error']:
                lvl = LOG_LEVELS['error']
            msg = (
                'Command \'{0}\' failed with return code: {1}'.format(
                    cmd,
                    ret['retcode']
                )
            )
            log.error(log_callback(msg))
        log.log(lvl, 'output: {0}'.format(log_callback(ret['stdout'])))
    return ret['stdout']
def shell(cmd,
          cwd=None,
          stdin=None,
          runas=None,
          shell=DEFAULT_SHELL,
          env=None,
          clean_env=False,
          template=None,
          rstrip=True,
          umask=None,
          output_loglevel='debug',
          log_callback=None,
          quiet=False,
          timeout=None,
          reset_system_locale=True,
          ignore_retcode=False,
          saltenv='base',
          use_vt=False,
          bg=False,
          **kwargs):
    '''
    Execute the passed command and return the output as a string.
    .. versionadded:: 2015.5.0
    :param str cmd: The command to run. ex: 'ls -lart /home'
    :param str cwd: The current working directory to execute the command in,
        defaults to /root
    :param str stdin: A string of standard input can be specified for the
        command to be run using the ``stdin`` parameter. This can be useful in cases
        where sensitive information must be read from standard input.
    :param str runas: User to run script as. If running on a Windows minion you
        must also pass a password
    :param str password: Windows only. Pass a password if you specify runas.
        This parameter will be ignored for other OS's
    .. versionadded:: 2016.3.0
    :param int shell: Shell to execute under. Defaults to the system default
        shell.
    :param bool bg: If True, run command in background and do not await or deliver it's results
    :param list env: A list of environment variables to be set prior to
        execution.
        Example:
        .. code-block:: yaml
            salt://scripts/foo.sh:
              cmd.script:
                - env:
                  - BATCH: 'yes'
        .. warning::
            The above illustrates a common PyYAML pitfall, that **yes**,
            **no**, **on**, **off**, **true**, and **false** are all loaded as
            boolean ``True`` and ``False`` values, and must be enclosed in
            quotes to be used as strings. More info on this (and other) PyYAML
            idiosyncrasies can be found :doc:`here
            </topics/troubleshooting/yaml_idiosyncrasies>`.
        Variables as values are not evaluated. So $PATH in the following
        example is a literal '$PATH':
        .. code-block:: yaml
            salt://scripts/bar.sh:
              cmd.script:
                - env: "PATH=/some/path:$PATH"
        One can still use the existing $PATH by using a bit of Jinja:
        .. code-block:: yaml
            {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
            mycommand:
              cmd.run:
                - name: ls -l /
                - env:
                  - PATH: {{ [current_path, '/my/special/bin']|join(':') }}
    :param bool clean_env: Attempt to clean out all other shell environment
        variables and set only those provided in the 'env' argument to this
        function.
    :param str template: If this setting is applied then the named templating
        engine will be used to render the downloaded file. Currently jinja, mako,
        and wempy are supported
    :param bool rstrip: Strip all whitespace off the end of output before it is
        returned.
    :param str umask: The umask (in octal) to use when running the command.
    :param str output_loglevel: Control the loglevel at which the output from
        the command is logged. Note that the command being run will still be logged
        (loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
    :param int timeout: A timeout in seconds for the executed process to return.
    :param bool use_vt: Use VT utils (saltstack) to stream the command output
        more interactively to the console and the logs. This is experimental.
    .. warning::
        This passes the cmd argument directly to the shell
        without any further processing! Be absolutely sure that you
        have properly sanitized the command passed to this function
        and do not use untrusted inputs.
    .. note::
        ``env`` represents the environment variables for the command, and
        should be formatted as a dict, or a YAML string which resolves to a dict.
    CLI Example:
    .. code-block:: bash
        salt '*' cmd.shell "ls -l | awk '/foo/{print \\$2}'"
    The template arg can be set to 'jinja' or another supported template
    engine to render the command arguments before execution.
    For example:
    .. code-block:: bash
        salt '*' cmd.shell template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
    Specify an alternate shell with the shell parameter:
    .. code-block:: bash
        salt '*' cmd.shell "Get-ChildItem C:\\ " shell='powershell'
    A string of standard input can be specified for the command to be run using
    the ``stdin`` parameter. This can be useful in cases where sensitive
    information must be read from standard input.:
    .. code-block:: bash
        salt '*' cmd.shell "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
    If an equal sign (``=``) appears in an argument to a Salt command it is
    interpreted as a keyword argument in the format ``key=val``. That
    processing can be bypassed in order to pass an equal sign through to the
    remote shell command by manually specifying the kwarg:
    .. code-block:: bash
        salt '*' cmd.shell cmd='sed -e s/=/:/g'
    '''
    # python_shell is taken from **kwargs rather than a named parameter
    # (the name 'shell' is already used for the shell binary); it defaults
    # to True because this function exists to run through a shell.
    if 'python_shell' in kwargs:
        python_shell = kwargs.pop('python_shell')
    else:
        python_shell = True
    # NOTE(review): 'quiet' is forwarded below but run() has no explicit
    # 'quiet' parameter, so it lands in run()'s **kwargs -- confirm it is
    # consumed downstream.
    return run(cmd,
               cwd=cwd,
               stdin=stdin,
               runas=runas,
               shell=shell,
               env=env,
               clean_env=clean_env,
               template=template,
               rstrip=rstrip,
               umask=umask,
               output_loglevel=output_loglevel,
               log_callback=log_callback,
               quiet=quiet,
               timeout=timeout,
               reset_system_locale=reset_system_locale,
               ignore_retcode=ignore_retcode,
               saltenv=saltenv,
               use_vt=use_vt,
               python_shell=python_shell,
               bg=bg,
               **kwargs)
def run_stdout(cmd,
               cwd=None,
               stdin=None,
               runas=None,
               shell=DEFAULT_SHELL,
               python_shell=None,
               env=None,
               clean_env=False,
               template=None,
               rstrip=True,
               umask=None,
               output_loglevel='debug',
               log_callback=None,
               timeout=None,
               reset_system_locale=True,
               ignore_retcode=False,
               saltenv='base',
               use_vt=False,
               **kwargs):
    '''
    Execute a command, and only return the standard out
    :param str cmd: The command to run. ex: 'ls -lart /home'
    :param str cwd: The current working directory to execute the command in,
        defaults to /root
    :param str stdin: A string of standard input can be specified for the
        command to be run using the ``stdin`` parameter. This can be useful in cases
        where sensitive information must be read from standard input.:
    :param str runas: User to run script as. If running on a Windows minion you
        must also pass a password
    :param str password: Windows only. Pass a password if you specify runas.
        This parameter will be ignored for other OS's
    .. versionadded:: 2016.3.0
    :param str shell: Shell to execute under. Defaults to the system default shell.
    :param bool python_shell: If False, let python handle the positional
        arguments. Set to True to use shell features, such as pipes or redirection
    :param list env: A list of environment variables to be set prior to
        execution.
        Example:
        .. code-block:: yaml
            salt://scripts/foo.sh:
              cmd.script:
                - env:
                  - BATCH: 'yes'
        .. warning::
            The above illustrates a common PyYAML pitfall, that **yes**,
            **no**, **on**, **off**, **true**, and **false** are all loaded as
            boolean ``True`` and ``False`` values, and must be enclosed in
            quotes to be used as strings. More info on this (and other) PyYAML
            idiosyncrasies can be found :doc:`here
            </topics/troubleshooting/yaml_idiosyncrasies>`.
        Variables as values are not evaluated. So $PATH in the following
        example is a literal '$PATH':
        .. code-block:: yaml
            salt://scripts/bar.sh:
              cmd.script:
                - env: "PATH=/some/path:$PATH"
        One can still use the existing $PATH by using a bit of Jinja:
        .. code-block:: yaml
            {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
            mycommand:
              cmd.run:
                - name: ls -l /
                - env:
                  - PATH: {{ [current_path, '/my/special/bin']|join(':') }}
    :param bool clean_env: Attempt to clean out all other shell environment
        variables and set only those provided in the 'env' argument to this
        function.
    :param str template: If this setting is applied then the named templating
        engine will be used to render the downloaded file. Currently jinja, mako,
        and wempy are supported
    :param bool rstrip: Strip all whitespace off the end of output before it is
        returned.
    :param str umask: The umask (in octal) to use when running the command.
    :param str output_loglevel: Control the loglevel at which the output from
        the command is logged. Note that the command being run will still be logged
        (loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
    :param int timeout: A timeout in seconds for the executed process to return.
    :param bool use_vt: Use VT utils (saltstack) to stream the command output
        more interactively to the console and the logs. This is experimental.
    .. note::
        ``env`` represents the environment variables for the command, and
        should be formatted as a dict, or a YAML string which resolves to a dict.
    CLI Example:
    .. code-block:: bash
        salt '*' cmd.run_stdout "ls -l | awk '/foo/{print \\$2}'"
    The template arg can be set to 'jinja' or another supported template
    engine to render the command arguments before execution.
    For example:
    .. code-block:: bash
        salt '*' cmd.run_stdout template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
    A string of standard input can be specified for the command to be run using
    the ``stdin`` parameter. This can be useful in cases where sensitive
    information must be read from standard input.:
    .. code-block:: bash
        salt '*' cmd.run_stdout "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
    '''
    # Resolve the effective python_shell value; the helper also considers
    # whether this call arrived via a remote publication (__pub_jid).
    python_shell = _python_shell_default(python_shell,
                                         kwargs.get('__pub_jid', ''))
    # Bug fix: previously this call forwarded **kwargs in addition to the
    # explicit pillarenv=/pillar_override= keywords. Any caller supplying
    # pillarenv then crashed with "TypeError: got multiple values for
    # keyword argument 'pillarenv'", and CLI bookkeeping keys such as
    # __pub_jid leaked into _run(). Pass only the recognized values,
    # matching run_stderr() and run_all().
    ret = _run(cmd,
               runas=runas,
               cwd=cwd,
               stdin=stdin,
               shell=shell,
               python_shell=python_shell,
               env=env,
               clean_env=clean_env,
               template=template,
               rstrip=rstrip,
               umask=umask,
               output_loglevel=output_loglevel,
               log_callback=log_callback,
               timeout=timeout,
               reset_system_locale=reset_system_locale,
               ignore_retcode=ignore_retcode,
               saltenv=saltenv,
               pillarenv=kwargs.get('pillarenv'),
               pillar_override=kwargs.get('pillar'),
               use_vt=use_vt,
               password=kwargs.get('password', None))
    log_callback = _check_cb(log_callback)
    lvl = _check_loglevel(output_loglevel)
    if lvl is not None:
        # Escalate failed commands to at least ERROR so failures remain
        # visible even when a lower output_loglevel was requested.
        if not ignore_retcode and ret['retcode'] != 0:
            if lvl < LOG_LEVELS['error']:
                lvl = LOG_LEVELS['error']
            msg = (
                'Command \'{0}\' failed with return code: {1}'.format(
                    cmd,
                    ret['retcode']
                )
            )
            log.error(log_callback(msg))
        if ret['stdout']:
            log.log(lvl, 'stdout: {0}'.format(log_callback(ret['stdout'])))
        if ret['stderr']:
            log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr'])))
        if ret['retcode']:
            log.log(lvl, 'retcode: {0}'.format(ret['retcode']))
    return ret['stdout']
def run_stderr(cmd,
               cwd=None,
               stdin=None,
               runas=None,
               shell=DEFAULT_SHELL,
               python_shell=None,
               env=None,
               clean_env=False,
               template=None,
               rstrip=True,
               umask=None,
               output_loglevel='debug',
               log_callback=None,
               timeout=None,
               reset_system_locale=True,
               ignore_retcode=False,
               saltenv='base',
               use_vt=False,
               **kwargs):
    '''
    Execute a command and only return the standard error
    :param str cmd: The command to run. ex: 'ls -lart /home'
    :param str cwd: The current working directory to execute the command in,
        defaults to /root
    :param str stdin: A string of standard input can be specified for the
        command to be run using the ``stdin`` parameter. This can be useful in cases
        where sensitive information must be read from standard input.:
    :param str runas: User to run script as. If running on a Windows minion you
        must also pass a password
    :param str password: Windows only. Pass a password if you specify runas.
        This parameter will be ignored for other OS's
    .. versionadded:: 2016.3.0
    :param str shell: Shell to execute under. Defaults to the system default
        shell.
    :param bool python_shell: If False, let python handle the positional
        arguments. Set to True to use shell features, such as pipes or redirection
    :param list env: A list of environment variables to be set prior to
        execution.
        Example:
        .. code-block:: yaml
            salt://scripts/foo.sh:
              cmd.script:
                - env:
                  - BATCH: 'yes'
        .. warning::
            The above illustrates a common PyYAML pitfall, that **yes**,
            **no**, **on**, **off**, **true**, and **false** are all loaded as
            boolean ``True`` and ``False`` values, and must be enclosed in
            quotes to be used as strings. More info on this (and other) PyYAML
            idiosyncrasies can be found :doc:`here
            </topics/troubleshooting/yaml_idiosyncrasies>`.
        Variables as values are not evaluated. So $PATH in the following
        example is a literal '$PATH':
        .. code-block:: yaml
            salt://scripts/bar.sh:
              cmd.script:
                - env: "PATH=/some/path:$PATH"
        One can still use the existing $PATH by using a bit of Jinja:
        .. code-block:: yaml
            {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
            mycommand:
              cmd.run:
                - name: ls -l /
                - env:
                  - PATH: {{ [current_path, '/my/special/bin']|join(':') }}
    :param bool clean_env: Attempt to clean out all other shell environment
        variables and set only those provided in the 'env' argument to this
        function.
    :param str template: If this setting is applied then the named templating
        engine will be used to render the downloaded file. Currently jinja, mako,
        and wempy are supported
    :param bool rstrip: Strip all whitespace off the end of output before it is
        returned.
    :param str umask: The umask (in octal) to use when running the command.
    :param str output_loglevel: Control the loglevel at which the output from
        the command is logged. Note that the command being run will still be logged
        (loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
    :param int timeout: A timeout in seconds for the executed process to return.
    :param bool use_vt: Use VT utils (saltstack) to stream the command output
        more interactively to the console and the logs. This is experimental.
    .. note::
        ``env`` represents the environment variables for the command, and
        should be formatted as a dict, or a YAML string which resolves to a dict.
    CLI Example:
    .. code-block:: bash
        salt '*' cmd.run_stderr "ls -l | awk '/foo/{print \\$2}'"
    The template arg can be set to 'jinja' or another supported template
    engine to render the command arguments before execution.
    For example:
    .. code-block:: bash
        salt '*' cmd.run_stderr template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
    A string of standard input can be specified for the command to be run using
    the ``stdin`` parameter. This can be useful in cases where sensitive
    information must be read from standard input.:
    .. code-block:: bash
        salt '*' cmd.run_stderr "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
    '''
    # Resolve the effective python_shell value; the helper also considers
    # whether this call arrived via a remote publication (__pub_jid).
    python_shell = _python_shell_default(python_shell,
                                         kwargs.get('__pub_jid', ''))
    # Only recognized keys are pulled out of **kwargs (pillarenv, pillar,
    # password); everything else is deliberately not forwarded to _run.
    ret = _run(cmd,
               runas=runas,
               cwd=cwd,
               stdin=stdin,
               shell=shell,
               python_shell=python_shell,
               env=env,
               clean_env=clean_env,
               template=template,
               rstrip=rstrip,
               umask=umask,
               output_loglevel=output_loglevel,
               log_callback=log_callback,
               timeout=timeout,
               reset_system_locale=reset_system_locale,
               ignore_retcode=ignore_retcode,
               use_vt=use_vt,
               saltenv=saltenv,
               pillarenv=kwargs.get('pillarenv'),
               pillar_override=kwargs.get('pillar'),
               password=kwargs.get('password', None))
    log_callback = _check_cb(log_callback)
    lvl = _check_loglevel(output_loglevel)
    if lvl is not None:
        # Escalate failed commands to at least ERROR so failures remain
        # visible even when a lower output_loglevel was requested.
        if not ignore_retcode and ret['retcode'] != 0:
            if lvl < LOG_LEVELS['error']:
                lvl = LOG_LEVELS['error']
            msg = (
                'Command \'{0}\' failed with return code: {1}'.format(
                    cmd,
                    ret['retcode']
                )
            )
            log.error(log_callback(msg))
        if ret['stdout']:
            log.log(lvl, 'stdout: {0}'.format(log_callback(ret['stdout'])))
        if ret['stderr']:
            log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr'])))
        if ret['retcode']:
            log.log(lvl, 'retcode: {0}'.format(ret['retcode']))
    return ret['stderr']
def run_all(cmd,
            cwd=None,
            stdin=None,
            runas=None,
            shell=DEFAULT_SHELL,
            python_shell=None,
            env=None,
            clean_env=False,
            template=None,
            rstrip=True,
            umask=None,
            output_loglevel='debug',
            log_callback=None,
            timeout=None,
            reset_system_locale=True,
            ignore_retcode=False,
            saltenv='base',
            use_vt=False,
            redirect_stderr=False,
            **kwargs):
    '''
    Execute the passed command and return a dict of return data
    :param str cmd: The command to run. ex: 'ls -lart /home'
    :param str cwd: The current working directory to execute the command in,
        defaults to /root
    :param str stdin: A string of standard input can be specified for the
        command to be run using the ``stdin`` parameter. This can be useful in cases
        where sensitive information must be read from standard input.:
    :param str runas: User to run script as. If running on a Windows minion you
        must also pass a password
    :param str password: Windows only. Pass a password if you specify runas.
        This parameter will be ignored for other OS's
    .. versionadded:: 2016.3.0
    :param str shell: Shell to execute under. Defaults to the system default
        shell.
    :param bool python_shell: If False, let python handle the positional
        arguments. Set to True to use shell features, such as pipes or redirection
    :param list env: A list of environment variables to be set prior to
        execution.
        Example:
        .. code-block:: yaml
            salt://scripts/foo.sh:
              cmd.script:
                - env:
                  - BATCH: 'yes'
        .. warning::
            The above illustrates a common PyYAML pitfall, that **yes**,
            **no**, **on**, **off**, **true**, and **false** are all loaded as
            boolean ``True`` and ``False`` values, and must be enclosed in
            quotes to be used as strings. More info on this (and other) PyYAML
            idiosyncrasies can be found :doc:`here
            </topics/troubleshooting/yaml_idiosyncrasies>`.
        Variables as values are not evaluated. So $PATH in the following
        example is a literal '$PATH':
        .. code-block:: yaml
            salt://scripts/bar.sh:
              cmd.script:
                - env: "PATH=/some/path:$PATH"
        One can still use the existing $PATH by using a bit of Jinja:
        .. code-block:: yaml
            {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
            mycommand:
              cmd.run:
                - name: ls -l /
                - env:
                  - PATH: {{ [current_path, '/my/special/bin']|join(':') }}
    :param bool clean_env: Attempt to clean out all other shell environment
        variables and set only those provided in the 'env' argument to this
        function.
    :param str template: If this setting is applied then the named templating
        engine will be used to render the downloaded file. Currently jinja, mako,
        and wempy are supported
    :param bool rstrip: Strip all whitespace off the end of output before it is
        returned.
    :param str umask: The umask (in octal) to use when running the command.
    :param str output_loglevel: Control the loglevel at which the output from
        the command is logged. Note that the command being run will still be logged
        (loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
    :param int timeout: A timeout in seconds for the executed process to return.
    :param bool use_vt: Use VT utils (saltstack) to stream the command output
        more interactively to the console and the logs. This is experimental.
    .. note::
        ``env`` represents the environment variables for the command, and
        should be formatted as a dict, or a YAML string which resolves to a dict.
    redirect_stderr : False
        If set to ``True``, then stderr will be redirected to stdout. This is
        helpful for cases where obtaining both the retcode and output is
        desired, but it is not desired to have the output separated into both
        stdout and stderr.
        .. versionadded:: 2015.8.2
    CLI Example:
    .. code-block:: bash
        salt '*' cmd.run_all "ls -l | awk '/foo/{print \\$2}'"
    The template arg can be set to 'jinja' or another supported template
    engine to render the command arguments before execution.
    For example:
    .. code-block:: bash
        salt '*' cmd.run_all template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
    A string of standard input can be specified for the command to be run using
    the ``stdin`` parameter. This can be useful in cases where sensitive
    information must be read from standard input.:
    .. code-block:: bash
        salt '*' cmd.run_all "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
    '''
    # Resolve the effective python_shell value; the helper also considers
    # whether this call arrived via a remote publication (__pub_jid).
    python_shell = _python_shell_default(python_shell,
                                         kwargs.get('__pub_jid', ''))
    # With redirect_stderr, merge stderr into the stdout stream so all
    # output lands (in order) under the 'stdout' key of the result dict.
    stderr = subprocess.STDOUT if redirect_stderr else subprocess.PIPE
    ret = _run(cmd,
               runas=runas,
               cwd=cwd,
               stdin=stdin,
               stderr=stderr,
               shell=shell,
               python_shell=python_shell,
               env=env,
               clean_env=clean_env,
               template=template,
               rstrip=rstrip,
               umask=umask,
               output_loglevel=output_loglevel,
               log_callback=log_callback,
               timeout=timeout,
               reset_system_locale=reset_system_locale,
               ignore_retcode=ignore_retcode,
               saltenv=saltenv,
               pillarenv=kwargs.get('pillarenv'),
               pillar_override=kwargs.get('pillar'),
               use_vt=use_vt,
               password=kwargs.get('password', None))
    log_callback = _check_cb(log_callback)
    lvl = _check_loglevel(output_loglevel)
    if lvl is not None:
        # Escalate failed commands to at least ERROR so failures remain
        # visible even when a lower output_loglevel was requested.
        if not ignore_retcode and ret['retcode'] != 0:
            if lvl < LOG_LEVELS['error']:
                lvl = LOG_LEVELS['error']
            msg = (
                'Command \'{0}\' failed with return code: {1}'.format(
                    cmd,
                    ret['retcode']
                )
            )
            log.error(log_callback(msg))
        if ret['stdout']:
            log.log(lvl, 'stdout: {0}'.format(log_callback(ret['stdout'])))
        if ret['stderr']:
            log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr'])))
        if ret['retcode']:
            log.log(lvl, 'retcode: {0}'.format(ret['retcode']))
    return ret
def retcode(cmd,
            cwd=None,
            stdin=None,
            runas=None,
            shell=DEFAULT_SHELL,
            python_shell=None,
            env=None,
            clean_env=False,
            template=None,
            umask=None,
            output_loglevel='debug',
            log_callback=None,
            timeout=None,
            reset_system_locale=True,
            ignore_retcode=False,
            saltenv='base',
            use_vt=False,
            **kwargs):
    '''
    Execute a shell command and return the command's return code.

    :param str cmd: The command to run. ex: 'ls -lart /home'
    :param str cwd: The current working directory to execute the command in,
        defaults to /root
    :param str stdin: A string of standard input can be specified for the
        command to be run using the ``stdin`` parameter. This can be useful
        in cases where sensitive information must be read from standard
        input.
    :param str runas: User to run script as. If running on a Windows minion
        you must also pass a password.
    :param str password: Windows only. Pass a password if you specify runas.
        This parameter will be ignored for other OS's

        .. versionadded:: 2016.3.0

    :param str shell: Shell to execute under. Defaults to the system default
        shell.
    :param bool python_shell: If False, let python handle the positional
        arguments. Set to True to use shell features, such as pipes or
        redirection.
    :param list env: A list of environment variables to be set prior to
        execution. ``env`` may also be formatted as a dict, or a YAML string
        which resolves to a dict.
    :param bool clean_env: Attempt to clean out all other shell environment
        variables and set only those provided in the 'env' argument to this
        function.
    :param str template: If this setting is applied then the named templating
        engine will be used to render the downloaded file. Currently jinja,
        mako, and wempy are supported.
    :param str umask: The umask (in octal) to use when running the command.
    :param str output_loglevel: Control the loglevel at which the output from
        the command is logged. Note that the command being run will still be
        logged (loglevel: DEBUG) regardless, unless ``quiet`` is used for
        this value.
    :param int timeout: A timeout in seconds for the executed process to
        return.
    :param bool use_vt: Use VT utils (saltstack) to stream the command output
        more interactively to the console and the logs. This is experimental.

    :rtype: int
    :returns: Return Code as an int or None if there was an exception.

    CLI Example:

    .. code-block:: bash

        salt '*' cmd.retcode "file /bin/bash"

    The template arg can be set to 'jinja' or another supported template
    engine to render the command arguments before execution. For example:

    .. code-block:: bash

        salt '*' cmd.retcode template=jinja "file {{grains.pythonpath[0]}}/python"

    A string of standard input can be specified for the command to be run
    using the ``stdin`` parameter:

    .. code-block:: bash

        salt '*' cmd.retcode "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
    '''
    # Merge stderr into stdout so there is a single output stream to log.
    ret = _run(cmd,
               runas=runas,
               cwd=cwd,
               stdin=stdin,
               stderr=subprocess.STDOUT,
               shell=shell,
               python_shell=python_shell,
               env=env,
               clean_env=clean_env,
               template=template,
               umask=umask,
               output_loglevel=output_loglevel,
               log_callback=log_callback,
               timeout=timeout,
               reset_system_locale=reset_system_locale,
               ignore_retcode=ignore_retcode,
               saltenv=saltenv,
               pillarenv=kwargs.get('pillarenv'),
               pillar_override=kwargs.get('pillar'),
               use_vt=use_vt,
               password=kwargs.get('password', None))

    log_callback = _check_cb(log_callback)

    lvl = _check_loglevel(output_loglevel)
    if lvl is not None:
        if not ignore_retcode and ret['retcode'] != 0:
            # Failures are always surfaced at error level or above.
            if lvl < LOG_LEVELS['error']:
                lvl = LOG_LEVELS['error']
            msg = (
                'Command \'{0}\' failed with return code: {1}'.format(
                    cmd,
                    ret['retcode']
                )
            )
            log.error(log_callback(msg))
        if ret['stdout']:
            # stdout holds the merged output (stderr was redirected above).
            log.log(lvl, 'output: {0}'.format(log_callback(ret['stdout'])))
    return ret['retcode']
def _retcode_quiet(cmd,
                   cwd=None,
                   stdin=None,
                   runas=None,
                   shell=DEFAULT_SHELL,
                   python_shell=False,
                   env=None,
                   clean_env=False,
                   template=None,
                   umask=None,
                   output_loglevel='quiet',
                   log_callback=None,
                   timeout=None,
                   reset_system_locale=True,
                   ignore_retcode=False,
                   saltenv='base',
                   use_vt=False,
                   **kwargs):
    '''
    Helper for running commands quietly for minion startup.
    Returns same as retcode
    '''
    # The only difference from retcode() is the quiet defaults carried by
    # this function's signature; everything is forwarded verbatim.
    quiet_kwargs = dict(kwargs)
    quiet_kwargs.update(cwd=cwd,
                        stdin=stdin,
                        runas=runas,
                        shell=shell,
                        python_shell=python_shell,
                        env=env,
                        clean_env=clean_env,
                        template=template,
                        umask=umask,
                        output_loglevel=output_loglevel,
                        log_callback=log_callback,
                        timeout=timeout,
                        reset_system_locale=reset_system_locale,
                        ignore_retcode=ignore_retcode,
                        saltenv=saltenv,
                        use_vt=use_vt)
    return retcode(cmd, **quiet_kwargs)
def script(source,
           args=None,
           cwd=None,
           stdin=None,
           runas=None,
           shell=DEFAULT_SHELL,
           python_shell=None,
           env=None,
           template=None,
           umask=None,
           output_loglevel='debug',
           log_callback=None,
           quiet=False,
           timeout=None,
           reset_system_locale=True,
           __env__=None,
           saltenv='base',
           use_vt=False,
           bg=False,
           **kwargs):
    '''
    Download a script from a remote location and execute the script locally.
    The script can be located on the salt master file server or on an
    HTTP/FTP server.

    The script will be executed directly, so it can be written in any
    available programming language.

    :param str source: The location of the script to download. If the file
        is located on the master in the directory named spam, and is called
        eggs, the source string is salt://spam/eggs
    :param str args: String of command line args to pass to the script. Only
        used if no args are specified as part of the `name` argument. To
        pass a string containing spaces in YAML, you will need to
        doubly-quote it: "arg1 'arg two' arg3"
    :param str cwd: The current working directory to execute the command in,
        defaults to /root
    :param str stdin: A string of standard input can be specified for the
        command to be run using the ``stdin`` parameter. This can be useful
        in cases where sensitive information must be read from standard
        input.
    :param str runas: User to run script as. If running on a Windows minion
        you must also pass a password.
    :param str password: Windows only. Pass a password if you specify runas.
        This parameter will be ignored for other OS's

        .. versionadded:: 2016.3.0

    :param str shell: Shell to execute under. Defaults to the system default
        shell.
    :param bool python_shell: If False, let python handle the positional
        arguments. Set to True to use shell features, such as pipes or
        redirection.
    :param bool bg: If True, run script in background and do not await or
        deliver its results.
    :param list env: A list of environment variables to be set prior to
        execution. ``env`` may also be formatted as a dict, or a YAML string
        which resolves to a dict.
    :param str template: If this setting is applied then the named templating
        engine will be used to render the downloaded file. Currently jinja,
        mako, and wempy are supported.
    :param str umask: The umask (in octal) to use when running the command.
    :param str output_loglevel: Control the loglevel at which the output from
        the command is logged. Note that the command being run will still be
        logged (loglevel: DEBUG) regardless, unless ``quiet`` is used for
        this value.
    :param bool quiet: The command will be executed quietly, meaning no log
        entries of the actual command or its return data. This is deprecated
        as of the **2014.1.0** release, and is being replaced with
        ``output_loglevel: quiet``.
    :param int timeout: If the command has not terminated after timeout
        seconds, send the subprocess sigterm, and if sigterm is ignored,
        follow up with sigkill.
    :param bool use_vt: Use VT utils (saltstack) to stream the command output
        more interactively to the console and the logs. This is experimental.

    CLI Example:

    .. code-block:: bash

        salt '*' cmd.script salt://scripts/runme.sh
        salt '*' cmd.script salt://scripts/runme.sh 'arg1 arg2 "arg 3"'
        salt '*' cmd.script salt://scripts/windows_task.ps1 args=' -Input c:\\tmp\\infile.txt' shell='powershell'

    .. code-block:: bash

        salt '*' cmd.script salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
    '''
    python_shell = _python_shell_default(python_shell,
                                         kwargs.get('__pub_jid', ''))

    def _cleanup_tempfile(path):
        # Best-effort removal of the locally cached copy of the script.
        try:
            os.remove(path)
        except (IOError, OSError) as exc:
            log.error(
                'cmd.script: Unable to clean tempfile \'{0}\': {1}'.format(
                    path,
                    exc
                )
            )

    if isinstance(__env__, six.string_types):
        salt.utils.warn_until(
            'Carbon',
            'Passing a salt environment should be done using \'saltenv\' not '
            '\'__env__\'. This functionality will be removed in Salt Carbon.'
        )
        # Backwards compatibility
        saltenv = __env__

    # Cache the script under a tempfile that keeps the source's extension so
    # interpreters that dispatch on extension (e.g. powershell) still work.
    path = salt.utils.mkstemp(dir=cwd, suffix=os.path.splitext(source)[1])

    if template:
        if 'pillarenv' in kwargs or 'pillar' in kwargs:
            pillarenv = kwargs.get('pillarenv', __opts__.get('pillarenv'))
            kwargs['pillar'] = _gather_pillar(pillarenv, kwargs.get('pillar'))
        fn_ = __salt__['cp.get_template'](source,
                                          path,
                                          template,
                                          saltenv,
                                          **kwargs)
        if not fn_:
            _cleanup_tempfile(path)
            return {'pid': 0,
                    'retcode': 1,
                    'stdout': '',
                    'stderr': '',
                    'cache_error': True}
    else:
        fn_ = __salt__['cp.cache_file'](source, saltenv)
        if not fn_:
            _cleanup_tempfile(path)
            return {'pid': 0,
                    'retcode': 1,
                    'stdout': '',
                    'stderr': '',
                    'cache_error': True}
        shutil.copyfile(fn_, path)

    if not salt.utils.is_windows():
        # 0o500 (== 320 decimal, as the old code spelled it): owner may read
        # and execute the script; nobody else may touch it.
        os.chmod(path, 0o500)
        os.chown(path, __salt__['file.user_to_uid'](runas), -1)

    try:
        ret = _run(path + ' ' + str(args) if args else path,
                   cwd=cwd,
                   stdin=stdin,
                   output_loglevel=output_loglevel,
                   log_callback=log_callback,
                   runas=runas,
                   shell=shell,
                   python_shell=python_shell,
                   env=env,
                   umask=umask,
                   timeout=timeout,
                   reset_system_locale=reset_system_locale,
                   saltenv=saltenv,
                   pillarenv=kwargs.get('pillarenv'),
                   pillar_override=kwargs.get('pillar'),
                   use_vt=use_vt,
                   password=kwargs.get('password', None),
                   bg=bg)
    finally:
        # Remove the cached copy even when _run raises (e.g. timeout errors)
        # so failed runs do not leak executable tempfiles.
        _cleanup_tempfile(path)
    return ret
def script_retcode(source,
                   args=None,
                   cwd=None,
                   stdin=None,
                   runas=None,
                   shell=DEFAULT_SHELL,
                   python_shell=None,
                   env=None,
                   template='jinja',
                   umask=None,
                   timeout=None,
                   reset_system_locale=True,
                   __env__=None,
                   saltenv='base',
                   output_loglevel='debug',
                   log_callback=None,
                   use_vt=False,
                   **kwargs):
    '''
    Download a script from a remote location and execute the script locally.
    The script can be located on the salt master file server or on an
    HTTP/FTP server.

    The script will be executed directly, so it can be written in any
    available programming language.

    The script can also be formatted as a template, the default is jinja.

    Only evaluate the script return code and do not block for terminal
    output.

    Accepts the same parameters as :func:`script`; see that function for
    the full parameter documentation.

    CLI Example:

    .. code-block:: bash

        salt '*' cmd.script_retcode salt://scripts/runme.sh
        salt '*' cmd.script_retcode salt://scripts/runme.sh 'arg1 arg2 "arg 3"'
        salt '*' cmd.script_retcode salt://scripts/windows_task.ps1 args=' -Input c:\\tmp\\infile.txt' shell='powershell'

    A string of standard input can be specified for the command to be run
    using the ``stdin`` parameter:

    .. code-block:: bash

        salt '*' cmd.script_retcode salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
    '''
    # Collect every argument and hand the whole call off to script(),
    # keeping only the return code from its result dict.
    script_kwargs = dict(kwargs)
    script_kwargs.update(source=source,
                         args=args,
                         cwd=cwd,
                         stdin=stdin,
                         runas=runas,
                         shell=shell,
                         python_shell=python_shell,
                         env=env,
                         template=template,
                         umask=umask,
                         timeout=timeout,
                         reset_system_locale=reset_system_locale,
                         __env__=__env__,
                         saltenv=saltenv,
                         output_loglevel=output_loglevel,
                         log_callback=log_callback,
                         use_vt=use_vt)
    return script(**script_kwargs)['retcode']
def which(cmd):
    '''
    Returns the path of an executable available on the minion, None otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' cmd.which cat
    '''
    # Thin wrapper; the PATH search itself lives in salt.utils.which().
    return salt.utils.which(cmd)
def which_bin(cmds):
    '''
    Returns the first command found in a list of commands

    CLI Example:

    .. code-block:: bash

        salt '*' cmd.which_bin '[pip2, pip, pip-python]'
    '''
    # Thin wrapper; the first-match search lives in salt.utils.which_bin().
    return salt.utils.which_bin(cmds)
def has_exec(cmd):
    '''
    Returns true if the executable is available on the minion, false otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' cmd.has_exec cat
    '''
    # which() returns the resolved path, or None when nothing was found.
    found = which(cmd)
    return found is not None
def exec_code(lang, code, cwd=None):
    '''
    Pass in two strings, the first naming the executable language, aka -
    python2, python3, ruby, perl, lua, etc. the second string containing
    the code you wish to execute. The stdout will be returned.

    CLI Example:

    .. code-block:: bash

        salt '*' cmd.exec_code ruby 'puts "cheese"'
    '''
    # Delegate to exec_code_all() and keep just the stdout portion.
    all_artifacts = exec_code_all(lang, code, cwd)
    return all_artifacts['stdout']
def exec_code_all(lang, code, cwd=None):
    '''
    Pass in two strings, the first naming the executable language, aka -
    python2, python3, ruby, perl, lua, etc. the second string containing
    the code you wish to execute. All cmd artifacts (stdout, stderr,
    retcode, pid) will be returned.

    CLI Example:

    .. code-block:: bash

        salt '*' cmd.exec_code_all ruby 'puts "cheese"'
    '''
    powershell = lang.lower().startswith("powershell")

    # PowerShell will only execute scripts carrying a .ps1 extension.
    if powershell:
        codefile = salt.utils.mkstemp(suffix=".ps1")
    else:
        codefile = salt.utils.mkstemp()

    try:
        with salt.utils.fopen(codefile, 'w+t', binary=False) as fp_:
            fp_.write(code)

        if powershell:
            # PowerShell requires -File; it does not accept a bare path.
            cmd = [lang, "-File", codefile]
        else:
            cmd = [lang, codefile]

        ret = run_all(cmd, cwd=cwd, python_shell=False)
    finally:
        # Always remove the temp file, even when execution raises, so
        # failed runs do not leak code files into the temp directory.
        os.remove(codefile)
    return ret
def tty(device, echo=None):
    '''
    Echo a string to a specific tty

    :param str device: The target terminal: either ``ttyX`` or ``ptsX``.
    :param str echo: The string to write; defaults to an empty string.

    :returns: A dict with a ``Success`` or ``Error`` message.

    CLI Example:

    .. code-block:: bash

        salt '*' cmd.tty tty0 'This is a test'
        salt '*' cmd.tty pts3 'This is a test'
    '''
    if device.startswith('tty'):
        teletype = '/dev/{0}'.format(device)
    elif device.startswith('pts'):
        # 'pts3' -> '/dev/pts/3'
        teletype = '/dev/{0}'.format(device.replace('pts', 'pts/'))
    else:
        return {'Error': 'The specified device is not a valid TTY'}
    if echo is None:
        # Writing None would raise an uncaught TypeError (only IOError is
        # handled below), so substitute an empty message.
        echo = ''
    try:
        with salt.utils.fopen(teletype, 'wb') as tty_device:
            tty_device.write(echo)
        return {
            'Success': 'Message was successfully echoed to {0}'.format(teletype)
        }
    except IOError:
        return {
            'Error': 'Echoing to {0} returned error'.format(teletype)
        }
def run_chroot(root,
               cmd,
               cwd=None,
               stdin=None,
               runas=None,
               shell=DEFAULT_SHELL,
               python_shell=True,
               env=None,
               clean_env=False,
               template=None,
               rstrip=True,
               umask=None,
               output_loglevel='quiet',
               log_callback=None,
               quiet=False,
               timeout=None,
               reset_system_locale=True,
               ignore_retcode=False,
               saltenv='base',
               use_vt=False,
               bg=False,
               **kwargs):
    '''
    .. versionadded:: 2014.7.0

    This function runs :mod:`cmd.run_all <salt.modules.cmdmod.run_all>` wrapped
    within a chroot, with dev and proc mounted in the chroot

    root
        Path to the root of the jail to use.

    cmd
        The command to run. ex: 'ls -lart /home'

    cwd
        The current working directory to execute the command in, defaults to
        /root

    stdin
        A string of standard input for the command. This can be useful in
        cases where sensitive information must be read from standard input.

    runas
        User to run script as.

    shell
        Shell to execute under. Defaults to the system default shell.

    python_shell
        If False, let python handle the positional arguments. Set to True
        to use shell features, such as pipes or redirection

    env
        A list of environment variables to be set prior to execution. May
        also be formatted as a dict, or a YAML string which resolves to a
        dict.

    clean_env:
        Attempt to clean out all other shell environment variables and set
        only those provided in the 'env' argument to this function.

    template
        If this setting is applied then the named templating engine will be
        used to render the downloaded file. Currently jinja, mako, and wempy
        are supported

    rstrip
        Strip all whitespace off the end of output before it is returned.

    umask
        The umask (in octal) to use when running the command.

    output_loglevel
        Control the loglevel at which the output from the command is logged.
        Note that the command being run will still be logged (loglevel:
        DEBUG) regardless, unless ``quiet`` is used for this value.

    timeout
        A timeout in seconds for the executed process to return.

    use_vt
        Use VT utils (saltstack) to stream the command output more
        interactively to the console and the logs. This is experimental.

    CLI Example:

    .. code-block:: bash

        salt '*' cmd.run_chroot /var/lib/lxc/container_name/rootfs 'sh /tmp/bootstrap.sh'
    '''
    # Make /dev and /proc available inside the jail so most commands work.
    __salt__['mount.mount'](
        os.path.join(root, 'dev'),
        'udev',
        fstype='devtmpfs')
    __salt__['mount.mount'](
        os.path.join(root, 'proc'),
        'proc',
        fstype='proc')

    # Execute chroot routine
    sh_ = '/bin/sh'
    if os.path.isfile(os.path.join(root, 'bin/bash')):
        sh_ = '/bin/bash'

    if isinstance(cmd, (list, tuple)):
        cmd = ' '.join([str(i) for i in cmd])
    cmd = 'chroot {0} {1} -c {2}'.format(root, sh_, _cmd_quote(cmd))

    # Tests may inject an alternative runner through __context__.
    run_func = __context__.pop('cmd.run_chroot.func', run_all)

    ret = run_func(cmd,
                   runas=runas,
                   cwd=cwd,
                   stdin=stdin,
                   shell=shell,
                   python_shell=python_shell,
                   env=env,
                   clean_env=clean_env,
                   template=template,
                   rstrip=rstrip,
                   umask=umask,
                   output_loglevel=output_loglevel,
                   log_callback=log_callback,
                   quiet=quiet,
                   timeout=timeout,
                   reset_system_locale=reset_system_locale,
                   ignore_retcode=ignore_retcode,
                   saltenv=saltenv,
                   pillarenv=kwargs.get('pillarenv'),
                   pillar=kwargs.get('pillar'),
                   use_vt=use_vt,
                   bg=bg)

    # Kill processes running in the chroot
    for i in range(6):
        pids = _chroot_pids(root)
        if not pids:
            break
        for pid in pids:
            # use sig 15 (TERM) for first 3 attempts, then 9 (KILL)
            sig = 15 if i < 3 else 9
            try:
                os.kill(pid, sig)
            except OSError:
                # The process may have exited between the pid scan and the
                # kill; that must not abort the cleanup and umounts below.
                pass

    if _chroot_pids(root):
        log.error('Processes running in chroot could not be killed, '
                  'filesystem will remain mounted')

    __salt__['mount.umount'](os.path.join(root, 'proc'))
    __salt__['mount.umount'](os.path.join(root, 'dev'))
    return ret
def _is_valid_shell(shell):
    '''
    Attempts to search for valid shells on a system and
    see if a given shell is in the list

    :param str shell: Absolute path of the shell to look up.
    :returns: ``True``/``False`` when /etc/shells could be consulted (always
        ``True`` on Windows), ``None`` when the list of available shells
        cannot be determined.
    '''
    if salt.utils.is_windows():
        return True  # Don't even try this for Windows

    shells = '/etc/shells'
    available_shells = []
    if os.path.exists(shells):
        try:
            with salt.utils.fopen(shells, 'r') as shell_fp:
                lines = shell_fp.read().splitlines()
            for line in lines:
                # Comment lines do not name shells
                if line.startswith('#'):
                    continue
                available_shells.append(line)
        # fopen may raise IOError (not a subclass of OSError on Python 2)
        # e.g. on a permission error; give the benefit of the doubt.
        except (IOError, OSError):
            return True
    else:
        # No known method of determining available shells
        return None
    return shell in available_shells
def shells():
    '''
    Lists the valid shells on this system via the /etc/shells file

    .. versionadded:: 2015.5.0

    CLI Example::

        salt '*' cmd.shells
    '''
    shells_fn = '/etc/shells'
    ret = []
    if os.path.exists(shells_fn):
        try:
            with salt.utils.fopen(shells_fn, 'r') as shell_fp:
                lines = shell_fp.read().splitlines()
            for line in lines:
                line = line.strip()
                # Skip comments and blank lines
                if line.startswith('#') or not line:
                    continue
                ret.append(line)
        # fopen may raise IOError (not a subclass of OSError on Python 2),
        # e.g. on a permission error; log instead of crashing.
        except (IOError, OSError):
            log.error("File '{0}' was not found".format(shells_fn))
    return ret
def powershell(cmd,
               cwd=None,
               stdin=None,
               runas=None,
               shell=DEFAULT_SHELL,
               env=None,
               clean_env=False,
               template=None,
               rstrip=True,
               umask=None,
               output_loglevel='debug',
               quiet=False,
               timeout=None,
               reset_system_locale=True,
               ignore_retcode=False,
               saltenv='base',
               use_vt=False,
               **kwargs):
    '''
    Execute the passed PowerShell command and return the output as a string.

    .. versionadded:: 2016.3.0

    .. warning ::

        This passes the cmd argument directly to PowerShell
        without any further processing! Be absolutely sure that you
        have properly sanitized the command passed to this function
        and do not use untrusted inputs.

    Note that ``env`` represents the environment variables for the command,
    and should be formatted as a dict, or a YAML string which resolves to a
    dict.

    :param str cmd: The powershell command to run.
    :param str cwd: The current working directory to execute the command in
    :param str stdin: A string of standard input for the command. Useful in
        cases where sensitive information must be read from standard input.
    :param str runas: User to run script as. If running on a Windows minion
        you must also pass a password.
    :param str password: Windows only. Pass a password if you specify runas.
        This parameter will be ignored for other OS's

        .. versionadded:: 2016.3.0

    :param str shell: Shell to execute under. Defaults to the system default
        shell.
    :param bool python_shell: If False, let python handle the positional
        arguments. Set to True to use shell features, such as pipes or
        redirection. Defaults to True here since the command is handed to
        PowerShell itself.
    :param list env: A list of environment variables to be set prior to
        execution.
    :param bool clean_env: Attempt to clean out all other shell environment
        variables and set only those provided in the 'env' argument to this
        function.
    :param str template: If this setting is applied then the named templating
        engine will be used to render the downloaded file. Currently jinja,
        mako, and wempy are supported.
    :param bool rstrip: Strip all whitespace off the end of output before it
        is returned.
    :param str umask: The umask (in octal) to use when running the command.
    :param str output_loglevel: Control the loglevel at which the output from
        the command is logged. Note that the command being run will still be
        logged (loglevel: DEBUG) regardless, unless ``quiet`` is used for
        this value.
    :param int timeout: A timeout in seconds for the executed process to
        return.
    :param bool use_vt: Use VT utils (saltstack) to stream the command output
        more interactively to the console and the logs. This is experimental.
    :param bool reset_system_locale: Resets the system locale
    :param bool ignore_retcode: Ignore the return code
    :param str saltenv: The salt environment to use. Default is 'base'

    :returns: The decoded JSON output of the command, or an empty dict when
        the output cannot be decoded.

    CLI Example:

    .. code-block:: powershell

        salt '*' cmd.powershell "$PSVersionTable.CLRVersion"
    '''
    # ``python_shell`` arrives via kwargs for API compatibility and
    # defaults to True because the command string is given to PowerShell.
    python_shell = kwargs.pop('python_shell', True)

    # Append PowerShell Object formatting so output can be decoded as JSON
    cmd = '{0} | ConvertTo-Json -Depth 32'.format(cmd)

    # Retrieve the response, while overriding shell with 'powershell'
    response = run(cmd,
                   cwd=cwd,
                   stdin=stdin,
                   runas=runas,
                   shell='powershell',
                   env=env,
                   clean_env=clean_env,
                   template=template,
                   rstrip=rstrip,
                   umask=umask,
                   output_loglevel=output_loglevel,
                   quiet=quiet,
                   timeout=timeout,
                   reset_system_locale=reset_system_locale,
                   ignore_retcode=ignore_retcode,
                   saltenv=saltenv,
                   use_vt=use_vt,
                   python_shell=python_shell,
                   **kwargs)

    try:
        return json.loads(response)
    except Exception:
        log.error("Error converting PowerShell JSON return", exc_info=True)
        return {}
def run_bg(cmd,
           cwd=None,
           runas=None,
           shell=DEFAULT_SHELL,
           python_shell=None,
           env=None,
           clean_env=False,
           template=None,
           umask=None,
           timeout=None,
           output_loglevel='debug',
           log_callback=None,
           reset_system_locale=True,
           saltenv='base',
           **kwargs):
    r'''
    .. versionadded: 2016.3.0

    Execute the passed command in the background and return it's PID

    Note that ``env`` represents the environment variables for the command,
    and should be formatted as a dict, or a YAML string which resolves to a
    dict.

    :param str cmd: The command to run. ex: 'ls -lart /home'
    :param str cwd: The current working directory to execute the command in,
        defaults to `/root` (`C:\` in windows)
    :param str output_loglevel: Control the loglevel at which the output from
        the command is logged. Note that the command being run will still be
        logged (loglevel: DEBUG) regardless, unless ``quiet`` is used for
        this value.
    :param str runas: User to run script as. If running on a Windows minion
        you must also pass a password.
    :param str shell: Shell to execute under. Defaults to the system default
        shell.
    :param bool python_shell: If False, let python handle the positional
        arguments. Set to True to use shell features, such as pipes or
        redirection.
    :param list env: A list of environment variables to be set prior to
        execution.
    :param bool clean_env: Attempt to clean out all other shell environment
        variables and set only those provided in the 'env' argument to this
        function.
    :param str template: If this setting is applied then the named templating
        engine will be used to render the downloaded file. Currently jinja,
        mako, and wempy are supported.
    :param str umask: The umask (in octal) to use when running the command.
    :param int timeout: A timeout in seconds for the executed process to
        return.

    .. warning::

        This function does not process commands through a shell
        unless the python_shell flag is set to True. This means that any
        shell-specific functionality such as 'echo' or the use of pipes,
        redirection or &&, should either be migrated to cmd.shell or
        have the python_shell=True flag set here.

        The use of python_shell=True means that the shell will accept _any_
        input including potentially malicious commands such as
        'good_command;rm -rf /'. Be absolutely certain that you have
        sanitized your input prior to using python_shell=True

    CLI Example:

    .. code-block:: bash

        salt '*' cmd.run_bg "fstrim-all"

    The template arg can be set to 'jinja' or another supported template
    engine to render the command arguments before execution. For example:

    .. code-block:: bash

        salt '*' cmd.run_bg template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"

    Specify an alternate shell with the shell parameter:

    .. code-block:: bash

        salt '*' cmd.run_bg "Get-ChildItem C:\\ " shell='powershell'

    If an equal sign (``=``) appears in an argument to a Salt command it is
    interpreted as a keyword argument in the format ``key=val``. That
    processing can be bypassed in order to pass an equal sign through to the
    remote shell command by manually specifying the kwarg:

    .. code-block:: bash

        salt '*' cmd.run_bg cmd='ls -lR / | sed -e s/=/:/g > /tmp/dontwait'
    '''
    python_shell = _python_shell_default(python_shell,
                                         kwargs.get('__pub_jid', ''))

    # bg=True with no stdio streams and with_communicate=False detaches the
    # child; nothing is captured, so rstrip would have nothing to act on.
    res = _run(cmd,
               bg=True,
               stdin=None,
               stdout=None,
               stderr=None,
               with_communicate=False,
               rstrip=False,
               use_vt=None,
               runas=runas,
               cwd=cwd,
               shell=shell,
               python_shell=python_shell,
               env=env,
               clean_env=clean_env,
               template=template,
               umask=umask,
               output_loglevel=output_loglevel,
               log_callback=log_callback,
               timeout=timeout,
               reset_system_locale=reset_system_locale,
               saltenv=saltenv,
               pillarenv=kwargs.get('pillarenv'),
               pillar_override=kwargs.get('pillar'))

    # Only the PID is exposed; the caller cannot wait on the result.
    return {'pid': res['pid']}
| apache-2.0 |
Stanford-Online/edx-platform | lms/djangoapps/support/tests/test_views.py | 9 | 17238 | # coding: UTF-8
"""
Tests for support views.
"""
import itertools
import json
import re
from datetime import datetime, timedelta
import ddt
import pytest
from django.contrib.auth.models import User
from django.urls import reverse
from django.db.models import signals
from nose.plugins.attrib import attr
from pytz import UTC
from common.test.utils import disable_signal
from course_modes.models import CourseMode
from course_modes.tests.factories import CourseModeFactory
from lms.djangoapps.verify_student.models import VerificationDeadline
from student.models import ENROLLED_TO_ENROLLED, CourseEnrollment, ManualEnrollmentAudit
from student.roles import GlobalStaff, SupportStaffRole
from student.tests.factories import TEST_PASSWORD, CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class SupportViewTestCase(ModuleStoreTestCase):
    """
    Common fixture for support view tests: creates a user, a course, and an
    authenticated test client.
    """
    USERNAME = "support"
    EMAIL = "support@example.com"
    PASSWORD = "support"
    def setUp(self):
        """Create the test user and course, then log the client in."""
        super(SupportViewTestCase, self).setUp()
        self.user = UserFactory(username=self.USERNAME, email=self.EMAIL, password=self.PASSWORD)
        self.course = CourseFactory.create()
        self.assertTrue(
            self.client.login(username=self.USERNAME, password=self.PASSWORD),
            msg="Could not log in"
        )
class SupportViewManageUserTests(SupportViewTestCase):
    """
    Tests for the manage-user support views.

    (The previous docstring, "Base class for support view tests", was a
    copy-paste leftover from ``SupportViewTestCase``.)
    """
    def setUp(self):
        """Give the logged-in user support-staff access."""
        super(SupportViewManageUserTests, self).setUp()
        SupportStaffRole().add_users(self.user)
    def test_get_support_form(self):
        """
        The manage-user form page renders for support staff.
        """
        url = reverse('support:manage_user')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
    def test_get_form_with_user_info(self):
        """
        The manage-user detail endpoint returns the requested user's info.
        """
        url = reverse('support:manage_user_detail') + self.user.username
        response = self.client.get(url)
        data = json.loads(response.content)
        self.assertEqual(data['username'], self.user.username)
    def test_disable_user_account(self):
        """
        POSTing a username disables that account (password becomes unusable).
        """
        test_user = UserFactory(
            username='foobar', email='foobar@foobar.com', password='foobar'
        )
        url = reverse('support:manage_user_detail') + test_user.username
        response = self.client.post(url, data={
            'username_or_email': test_user.username
        })
        data = json.loads(response.content)
        self.assertEqual(data['success_msg'], 'User Disabled Successfully')
        # Re-fetch from the DB: the view mutated a different User instance.
        test_user = User.objects.get(username=test_user.username, email=test_user.email)
        self.assertFalse(test_user.has_usable_password())
@attr(shard=3)
@ddt.ddt
class SupportViewAccessTests(SupportViewTestCase):
    """
    Tests for access control of support views.
    """
    # Cross every support URL with every (role, expected-access) pair and
    # flatten into (url_name, role, has_access) triples for ddt.  The tuple
    # order here determines the generated test ids.
    @ddt.data(*(
        (url_name, role, has_access)
        for (url_name, (role, has_access))
        in itertools.product((
            'support:index',
            'support:certificates',
            'support:refund',
            'support:enrollment',
            'support:enrollment_list',
            'support:manage_user',
            'support:manage_user_detail',
        ), (
            (GlobalStaff, True),
            (SupportStaffRole, True),
            (None, False)
        ))
    ))
    @ddt.unpack
    def test_access(self, url_name, role, has_access):
        """Staff roles get HTTP 200; unprivileged users get HTTP 403."""
        if role is not None:
            role().add_users(self.user)
        url = reverse(url_name)
        response = self.client.get(url)
        if has_access:
            self.assertEqual(response.status_code, 200)
        else:
            self.assertEqual(response.status_code, 403)
    @ddt.data(
        "support:index",
        "support:certificates",
        "support:refund",
        "support:enrollment",
        "support:enrollment_list",
        "support:manage_user",
        "support:manage_user_detail",
    )
    def test_require_login(self, url_name):
        """Anonymous users are redirected to the login page."""
        url = reverse(url_name)
        # Log out then try to retrieve the page
        self.client.logout()
        response = self.client.get(url)
        # Expect a redirect to the login page
        redirect_url = "{login_url}?next={original_url}".format(
            login_url=reverse("signin_user"),
            original_url=url,
        )
        self.assertRedirects(response, redirect_url)
class SupportViewIndexTests(SupportViewTestCase):
    """
    Tests for the support index (landing) page.
    """
    EXPECTED_URL_NAMES = [
        "support:certificates",
        "support:refund",
    ]
    def setUp(self):
        """Grant support-staff access to the logged-in user."""
        super(SupportViewIndexTests, self).setUp()
        SupportStaffRole().add_users(self.user)
    def test_index(self):
        """The index page renders and links to every expected support tool."""
        response = self.client.get(reverse("support:index"))
        self.assertContains(response, "Support")
        for link_name in self.EXPECTED_URL_NAMES:
            self.assertContains(response, reverse(link_name))
class SupportViewCertificatesTests(SupportViewTestCase):
    """
    Tests for the certificates support view.
    """
    def setUp(self):
        """Grant support-staff access to the logged-in user."""
        super(SupportViewCertificatesTests, self).setUp()
        SupportStaffRole().add_users(self.user)
    def test_certificates_no_filter(self):
        """With no query params, an empty user filter reaches the JS client."""
        response = self.client.get(reverse("support:certificates"))
        self.assertContains(response, "userFilter: ''")
    def test_certificates_with_user_filter(self):
        """A ?user= query parameter is forwarded to the JS client."""
        url = reverse("support:certificates") + "?user=student@example.com"
        response = self.client.get(url)
        self.assertContains(response, "userFilter: 'student@example.com'")
    def test_certificates_along_with_course_filter(self):
        """Both ?user= and ?course_id= filters reach the JS client."""
        course_key = unicode(self.course.id)
        url = reverse("support:certificates") + "?user=student@example.com&course_id=" + course_key
        response = self.client.get(url)
        self.assertContains(response, "userFilter: 'student@example.com'")
        self.assertContains(response, "courseFilter: '" + course_key + "'")
@ddt.ddt
class SupportViewEnrollmentsTests(SharedModuleStoreTestCase, SupportViewTestCase):
    """Tests for the enrollment support view."""
    def setUp(self):
        """Create a support-staff user, a course offering every mode, and an
        audit-enrolled student to operate on."""
        super(SupportViewEnrollmentsTests, self).setUp()
        SupportStaffRole().add_users(self.user)
        # Non-ASCII display name guards against encoding bugs in the views.
        self.course = CourseFactory(display_name=u'teꜱᴛ')
        self.student = UserFactory.create(username='student', email='test@example.com', password='test')
        for mode in (
            CourseMode.AUDIT, CourseMode.PROFESSIONAL, CourseMode.CREDIT_MODE,
            CourseMode.NO_ID_PROFESSIONAL_MODE, CourseMode.VERIFIED, CourseMode.HONOR
        ):
            CourseModeFactory.create(mode_slug=mode, course_id=self.course.id)  # pylint: disable=no-member
        # Verified mode needs a future verification deadline to stay valid.
        self.verification_deadline = VerificationDeadline(
            course_key=self.course.id,  # pylint: disable=no-member
            deadline=datetime.now(UTC) + timedelta(days=365)
        )
        self.verification_deadline.save()
        CourseEnrollmentFactory.create(mode=CourseMode.AUDIT, user=self.student, course_id=self.course.id)  # pylint: disable=no-member
        self.url = reverse('support:enrollment_list', kwargs={'username_or_email': self.student.username})
    def assert_enrollment(self, mode):
        """
        Assert that the student's enrollment has the correct mode.
        """
        enrollment = CourseEnrollment.get_enrollment(self.student, self.course.id)  # pylint: disable=no-member
        self.assertEqual(enrollment.mode, mode)
    @ddt.data('username', 'email')
    def test_get_enrollments(self, search_string_type):
        """The enrollment list can be fetched by username or by email."""
        url = reverse(
            'support:enrollment_list',
            kwargs={'username_or_email': getattr(self.student, search_string_type)}
        )
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEqual(len(data), 1)
        self.assertDictContainsSubset({
            'mode': CourseMode.AUDIT,
            'manual_enrollment': {},
            'user': self.student.username,
            'course_id': unicode(self.course.id),  # pylint: disable=no-member
            'is_active': True,
            'verified_upgrade_deadline': None,
        }, data[0])
        # CREDIT_MODE is deliberately absent from the offered modes.
        self.assertEqual(
            {CourseMode.VERIFIED, CourseMode.AUDIT, CourseMode.HONOR,
             CourseMode.NO_ID_PROFESSIONAL_MODE, CourseMode.PROFESSIONAL},
            {mode['slug'] for mode in data[0]['course_modes']}
        )
    def test_get_manual_enrollment_history(self):
        """Manual-enrollment audit records appear in the enrollment list."""
        ManualEnrollmentAudit.create_manual_enrollment_audit(
            self.user,
            self.student.email,
            ENROLLED_TO_ENROLLED,
            'Financial Assistance',
            CourseEnrollment.objects.get(course_id=self.course.id, user=self.student)  # pylint: disable=no-member
        )
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertDictContainsSubset({
            'enrolled_by': self.user.email,
            'reason': 'Financial Assistance',
        }, json.loads(response.content)[0]['manual_enrollment'])
    @disable_signal(signals, 'post_save')
    @ddt.data('username', 'email')
    def test_change_enrollment(self, search_string_type):
        """Support can change an enrollment mode; the change is audited."""
        self.assertIsNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
        url = reverse(
            'support:enrollment_list',
            kwargs={'username_or_email': getattr(self.student, search_string_type)}
        )
        response = self.client.post(url, data={
            'course_id': unicode(self.course.id),  # pylint: disable=no-member
            'old_mode': CourseMode.AUDIT,
            'new_mode': CourseMode.VERIFIED,
            'reason': 'Financial Assistance'
        })
        self.assertEqual(response.status_code, 200)
        self.assertIsNotNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
        self.assert_enrollment(CourseMode.VERIFIED)
    @ddt.data(
        ({}, r"The field \"'\w+'\" is required."),  # The double quoting goes away in Django 2.0.1
        ({'course_id': 'bad course key'}, 'Could not parse course key.'),
        ({
            'course_id': 'course-v1:TestX+T101+2015',
            'old_mode': CourseMode.AUDIT,
            'new_mode': CourseMode.VERIFIED,
            'reason': ''
        }, 'Could not find enrollment for user'),
        ({
            'course_id': None,
            'old_mode': CourseMode.HONOR,
            'new_mode': CourseMode.VERIFIED,
            'reason': ''
        }, r'User \w+ is not enrolled with mode ' + CourseMode.HONOR),
        ({
            'course_id': 'course-v1:TestX+T101+2015',
            'old_mode': CourseMode.AUDIT,
            'new_mode': CourseMode.CREDIT_MODE,
            'reason': 'Enrollment cannot be changed to credit mode'
        }, '')
    )
    @ddt.unpack
    def test_change_enrollment_bad_data(self, data, error_message):
        """Invalid change requests return 400 and leave the enrollment alone."""
        # `self` isn't available from within the DDT declaration, so
        # assign the course ID here
        if 'course_id' in data and data['course_id'] is None:
            data['course_id'] = unicode(self.course.id)  # pylint: disable=no-member
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, 400)
        self.assertIsNotNone(re.match(error_message, response.content))
        self.assert_enrollment(CourseMode.AUDIT)
        self.assertIsNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
    @disable_signal(signals, 'post_save')
    @ddt.data('honor', 'audit', 'verified', 'professional', 'no-id-professional')
    def test_update_enrollment_for_all_modes(self, new_mode):
        """ Verify support can changed the enrollment to all available modes
        except credit. """
        self.assert_update_enrollment('username', new_mode)
    @disable_signal(signals, 'post_save')
    @ddt.data('honor', 'audit', 'verified', 'professional', 'no-id-professional')
    def test_update_enrollment_for_ended_course(self, new_mode):
        """ Verify support can changed the enrollment of archived course. """
        self.set_course_end_date_and_expiry()
        self.assert_update_enrollment('username', new_mode)
    def test_update_enrollment_with_credit_mode_throws_error(self):
        """ Verify that enrollment cannot be changed to credit mode. """
        self.assert_update_enrollment('username', CourseMode.CREDIT_MODE)
    @ddt.data('username', 'email')
    def test_get_enrollments_with_expired_mode(self, search_string_type):
        """ Verify that page can get the all modes with archived course. """
        self.set_course_end_date_and_expiry()
        url = reverse(
            'support:enrollment_list',
            kwargs={'username_or_email': getattr(self.student, search_string_type)}
        )
        response = self.client.get(url)
        self._assert_generated_modes(response)
    @disable_signal(signals, 'post_save')
    @ddt.data('username', 'email')
    def test_update_enrollments_with_expired_mode(self, search_string_type):
        """ Verify that enrollment can be updated to verified mode. """
        self.set_course_end_date_and_expiry()
        self.assertIsNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
        self.assert_update_enrollment(search_string_type, CourseMode.VERIFIED)
    def _assert_generated_modes(self, response):
        """Dry method to generate course modes dict and test with response data."""
        # include_expired=True so the archived course still reports all modes.
        modes = CourseMode.modes_for_course(self.course.id, include_expired=True)  # pylint: disable=no-member
        modes_data = []
        for mode in modes:
            expiry = mode.expiration_datetime.strftime('%Y-%m-%dT%H:%M:%SZ') if mode.expiration_datetime else None
            modes_data.append({
                'sku': mode.sku,
                'expiration_datetime': expiry,
                'name': mode.name,
                'currency': mode.currency,
                'bulk_sku': mode.bulk_sku,
                'min_price': mode.min_price,
                'suggested_prices': mode.suggested_prices,
                'slug': mode.slug,
                'description': mode.description
            })
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEqual(len(data), 1)
        self.assertEqual(
            modes_data,
            data[0]['course_modes']
        )
        self.assertEqual(
            {CourseMode.VERIFIED, CourseMode.AUDIT, CourseMode.NO_ID_PROFESSIONAL_MODE,
             CourseMode.PROFESSIONAL, CourseMode.HONOR},
            {mode['slug'] for mode in data[0]['course_modes']}
        )
    def assert_update_enrollment(self, search_string_type, new_mode):
        """ Dry method to update the enrollment and assert response."""
        self.assertIsNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
        url = reverse(
            'support:enrollment_list',
            kwargs={'username_or_email': getattr(self.student, search_string_type)}
        )
        response = self.client.post(url, data={
            'course_id': unicode(self.course.id),  # pylint: disable=no-member
            'old_mode': CourseMode.AUDIT,
            'new_mode': new_mode,
            'reason': 'Financial Assistance'
        })
        # Enrollment cannot be changed to credit mode.
        if new_mode == CourseMode.CREDIT_MODE:
            self.assertEqual(response.status_code, 400)
        else:
            self.assertEqual(response.status_code, 200)
            self.assertIsNotNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
            self.assert_enrollment(new_mode)
    def set_course_end_date_and_expiry(self):
        """ Set the course-end date and expire its verified mode."""
        self.course.start = datetime(year=1970, month=1, day=1, tzinfo=UTC)
        self.course.end = datetime(year=1970, month=1, day=10, tzinfo=UTC)
        # change verified mode expiry.
        verified_mode = CourseMode.objects.get(
            course_id=self.course.id,  # pylint: disable=no-member
            mode_slug=CourseMode.VERIFIED
        )
        verified_mode.expiration_datetime = datetime(year=1970, month=1, day=9, tzinfo=UTC)
        verified_mode.save()
| agpl-3.0 |
ardekantur/pyglet | pyglet/graphics/vertexdomain.py | 9 | 30147 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id:$
'''Manage related vertex attributes within a single vertex domain.
A vertex "domain" consists of a set of attribute descriptions that together
describe the layout of one or more vertex buffers which are used together to
specify the vertices in a primitive. Additionally, the domain manages the
buffers used to store the data and will resize them as necessary to accommodate
new vertices.
Domains can optionally be indexed, in which case they also manage a buffer
containing vertex indices. This buffer is grown separately and has no size
relation to the attribute buffers.
Applications can create vertices (and optionally, indices) within a domain
with the `VertexDomain.create` method. This returns a `VertexList`
representing the list of vertices created. The vertex attribute data within
the group can be modified, and the changes will be made to the underlying
buffers automatically.
The entire domain can be efficiently drawn in one step with the
`VertexDomain.draw` method, assuming all the vertices comprise primitives of
the same OpenGL primitive mode.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
import re
from pyglet.gl import *
from pyglet.graphics import allocation, vertexattribute, vertexbuffer
_usage_format_re = re.compile(r'''
(?P<attribute>[^/]*)
(/ (?P<usage> static|dynamic|stream|none))?
''', re.VERBOSE)
_gl_usages = {
'static': GL_STATIC_DRAW,
'dynamic': GL_DYNAMIC_DRAW,
'stream': GL_STREAM_DRAW,
'none': GL_STREAM_DRAW_ARB, # Force no VBO
}
def _nearest_pow2(v):
# From http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
# Credit: Sean Anderson
v -= 1
v |= v >> 1
v |= v >> 2
v |= v >> 4
v |= v >> 8
v |= v >> 16
return v + 1
def create_attribute_usage(format):
    '''Parse a format string into an (attribute, usage, vbo) triple.  The
    format string is as documented in `pyglet.graphics.vertexattribute`, with
    the addition of an optional usage component::
        usage ::= attribute ( '/' ('static' | 'dynamic' | 'stream' | 'none') )?
    If the usage is not given it defaults to 'dynamic'.  The usage corresponds
    to the OpenGL VBO usage hint, and for ``static`` also indicates a
    preference for interleaved arrays.  If ``none`` is specified a buffer
    object is not created, and vertex data is stored in system memory.
    Some examples:
    ``v3f/stream``
        3D vertex position using floats, for stream usage
    ``c4b/static``
        4-byte color attribute, for static usage
    :return: (attribute, usage, vbo)
    '''
    parsed = _usage_format_re.match(format)
    attribute = vertexattribute.create_attribute(parsed.group('attribute'))
    usage_name = parsed.group('usage')
    if not usage_name:
        # No usage suffix: default to dynamic draw with a buffer object.
        return (attribute, GL_DYNAMIC_DRAW, True)
    return (attribute, _gl_usages[usage_name], usage_name != 'none')
def create_domain(*attribute_usage_formats):
    '''Create a vertex domain covering the given attribute usage formats.
    See documentation for `create_attribute_usage` and
    `pyglet.graphics.vertexattribute.create_attribute` for the grammar of
    these format strings.
    :rtype: `VertexDomain`
    '''
    return VertexDomain(
        [create_attribute_usage(fmt) for fmt in attribute_usage_formats])
def create_indexed_domain(*attribute_usage_formats):
    '''Create an indexed vertex domain covering the given attribute usage
    formats.  See documentation for `create_attribute_usage` and
    `pyglet.graphics.vertexattribute.create_attribute` for the grammar of
    these format strings.
    :rtype: `VertexDomain`
    '''
    return IndexedVertexDomain(
        [create_attribute_usage(fmt) for fmt in attribute_usage_formats])
class VertexDomain(object):
    '''Management of a set of vertex lists.
    Construction of a vertex domain is usually done with the `create_domain`
    function.
    '''
    # Bumped whenever buffers are resized; VertexList caches key off this.
    _version = 0
    # Initial vertex capacity of the allocator; grows by powers of two.
    _initial_count = 16
    def __init__(self, attribute_usages):
        self.allocator = allocation.Allocator(self._initial_count)
        # If there are any MultiTexCoord attributes, then a TexCoord attribute
        # must be converted.
        have_multi_texcoord = False
        for attribute, _, _ in attribute_usages:
            if isinstance(attribute, vertexattribute.MultiTexCoordAttribute):
                have_multi_texcoord = True
                break
        static_attributes = []
        attributes = []
        self.buffer_attributes = [] # list of (buffer, attributes)
        for attribute, usage, vbo in attribute_usages:
            if (have_multi_texcoord and
                isinstance(attribute, vertexattribute.TexCoordAttribute)):
                attribute.convert_to_multi_tex_coord_attribute()
            if usage == GL_STATIC_DRAW:
                # Group attributes for interleaved buffer
                static_attributes.append(attribute)
                attributes.append(attribute)
            else:
                # Create non-interleaved buffer
                attributes.append(attribute)
                attribute.buffer = vertexbuffer.create_mappable_buffer(
                    attribute.stride * self.allocator.capacity,
                    usage=usage, vbo=vbo)
                attribute.buffer.element_size = attribute.stride
                attribute.buffer.attributes = (attribute,)
                self.buffer_attributes.append(
                    (attribute.buffer, (attribute,)))
        # Create buffer for interleaved data
        if static_attributes:
            vertexattribute.interleave_attributes(static_attributes)
            # After interleaving all static attributes share one stride.
            stride = static_attributes[0].stride
            buffer = vertexbuffer.create_mappable_buffer(
                stride * self.allocator.capacity, usage=GL_STATIC_DRAW)
            buffer.element_size = stride
            self.buffer_attributes.append(
                (buffer, static_attributes))
            attributes.extend(static_attributes)
            for attribute in static_attributes:
                attribute.buffer = buffer
        # Create named attributes for each attribute
        self.attributes = attributes
        self.attribute_names = {}
        for attribute in attributes:
            if isinstance(attribute, vertexattribute.GenericAttribute):
                index = attribute.index
                # TODO create a name and use it (e.g. 'generic3')
                # XXX this won't migrate; not documented.
                if 'generic' not in self.attribute_names:
                    self.attribute_names['generic'] = {}
                assert index not in self.attribute_names['generic'], \
                    'More than one generic attribute with index %d' % index
                self.attribute_names['generic'][index] = attribute
            elif isinstance(attribute, vertexattribute.MultiTexCoordAttribute):
                # XXX this won't migrate; not documented.
                texture = attribute.texture
                if 'multi_tex_coords' not in self.attribute_names:
                    self.attribute_names['multi_tex_coords'] = {}
                assert texture not in self.attribute_names['multi_tex_coords'],\
                    'More than one multi_tex_coord attribute for texture %d' % \
                        texture
                self.attribute_names['multi_tex_coords'][texture] = attribute
            else:
                name = attribute.plural
                assert name not in self.attributes, \
                    'More than one "%s" attribute given' % name
                self.attribute_names[name] = attribute
    def __del__(self):
        # Break circular refs that Python GC seems to miss even when forced
        # collection.
        for attribute in self.attributes:
            try:
                del attribute.buffer
            except AttributeError:
                pass
    def _safe_alloc(self, count):
        '''Allocate vertices, resizing the buffers if necessary.'''
        try:
            return self.allocator.alloc(count)
        except allocation.AllocatorMemoryException, e:
            # Grow every buffer to the next power of two above the request,
            # then retry the allocation (which must now succeed).
            capacity = _nearest_pow2(e.requested_capacity)
            self._version += 1
            for buffer, _ in self.buffer_attributes:
                buffer.resize(capacity * buffer.element_size)
            self.allocator.set_capacity(capacity)
            return self.allocator.alloc(count)
    def _safe_realloc(self, start, count, new_count):
        '''Reallocate vertices, resizing the buffers if necessary.'''
        try:
            return self.allocator.realloc(start, count, new_count)
        except allocation.AllocatorMemoryException, e:
            capacity = _nearest_pow2(e.requested_capacity)
            self._version += 1
            for buffer, _ in self.buffer_attributes:
                buffer.resize(capacity * buffer.element_size)
            self.allocator.set_capacity(capacity)
            return self.allocator.realloc(start, count, new_count)
    def create(self, count):
        '''Create a `VertexList` in this domain.
        :Parameters:
            `count` : int
                Number of vertices to create.
        :rtype: `VertexList`
        '''
        start = self._safe_alloc(count)
        return VertexList(self, start, count)
    def draw(self, mode, vertex_list=None):
        '''Draw vertices in the domain.
        If `vertex_list` is not specified, all vertices in the domain are
        drawn.  This is the most efficient way to render primitives.
        If `vertex_list` specifies a `VertexList`, only primitives in that
        list will be drawn.
        :Parameters:
            `mode` : int
                OpenGL drawing mode, e.g. ``GL_POINTS``, ``GL_LINES``, etc.
            `vertex_list` : `VertexList`
                Vertex list to draw, or ``None`` for all lists in this domain.
        '''
        glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
        for buffer, attributes in self.buffer_attributes:
            buffer.bind()
            for attribute in attributes:
                attribute.enable()
                attribute.set_pointer(attribute.buffer.ptr)
        if vertexbuffer._workaround_vbo_finish:
            glFinish()
        if vertex_list is not None:
            glDrawArrays(mode, vertex_list.start, vertex_list.count)
        else:
            # Draw every allocated region; coalesce into one call when possible.
            starts, sizes = self.allocator.get_allocated_regions()
            primcount = len(starts)
            if primcount == 0:
                pass
            elif primcount == 1:
                # Common case
                glDrawArrays(mode, starts[0], sizes[0])
            elif gl_info.have_version(1, 4):
                starts = (GLint * primcount)(*starts)
                sizes = (GLsizei * primcount)(*sizes)
                glMultiDrawArrays(mode, starts, sizes, primcount)
            else:
                # Pre-GL-1.4 fallback: one draw call per region.
                for start, size in zip(starts, sizes):
                    glDrawArrays(mode, start, size)
        for buffer, _ in self.buffer_attributes:
            buffer.unbind()
        glPopClientAttrib()
    def _is_empty(self):
        # True when no vertices are currently allocated.
        return not self.allocator.starts
    def __repr__(self):
        return '<%s@%x %s>' % (self.__class__.__name__, id(self),
                               self.allocator)
class VertexList(object):
    '''A list of vertices within a `VertexDomain`.  Use
    `VertexDomain.create` to construct this list.
    '''
    def __init__(self, domain, start, count):
        # TODO make private
        # `start` and `count` locate this list's region in the domain buffers.
        self.domain = domain
        self.start = start
        self.count = count
    def get_size(self):
        '''Get the number of vertices in the list.
        :rtype: int
        '''
        return self.count
    def get_domain(self):
        '''Get the domain this vertex list belongs to.
        :rtype: `VertexDomain`
        '''
        return self.domain
    def draw(self, mode):
        '''Draw this vertex list in the given OpenGL mode.
        :Parameters:
            `mode` : int
                OpenGL drawing mode, e.g. ``GL_POINTS``, ``GL_LINES``, etc.
        '''
        self.domain.draw(mode, self)
    def resize(self, count):
        '''Resize this group.
        :Parameters:
            `count` : int
                New number of vertices in the list.
        '''
        new_start = self.domain._safe_realloc(self.start, self.count, count)
        if new_start != self.start:
            # Copy contents to new location
            for attribute in self.domain.attributes:
                old = attribute.get_region(attribute.buffer,
                                           self.start, self.count)
                new = attribute.get_region(attribute.buffer,
                                           new_start, self.count)
                new.array[:] = old.array[:]
                new.invalidate()
        self.start = new_start
        self.count = count
        # Invalidate every cached accessor region; the data may have moved.
        self._colors_cache_version = None
        self._fog_coords_cache_version = None
        self._edge_flags_cache_version = None
        self._normals_cache_version = None
        self._secondary_colors_cache_version = None
        self._tex_coords_cache_version = None
        self._vertices_cache_version = None
    def delete(self):
        '''Delete this group.'''
        self.domain.allocator.dealloc(self.start, self.count)
    def migrate(self, domain):
        '''Move this group from its current domain and add to the specified
        one.  Attributes on domains must match.  (In practice, used to change
        parent state of some vertices).
        :Parameters:
            `domain` : `VertexDomain`
                Domain to migrate this vertex list to.
        '''
        assert domain.attribute_names.keys() == \
            self.domain.attribute_names.keys(), 'Domain attributes must match.'
        new_start = domain._safe_alloc(self.count)
        for key, old_attribute in self.domain.attribute_names.items():
            old = old_attribute.get_region(old_attribute.buffer,
                                           self.start, self.count)
            new_attribute = domain.attribute_names[key]
            new = new_attribute.get_region(new_attribute.buffer,
                                           new_start, self.count)
            new.array[:] = old.array[:]
            new.invalidate()
        # Release the old region only after the copy has completed.
        self.domain.allocator.dealloc(self.start, self.count)
        self.domain = domain
        self.start = new_start
        # Invalidate every cached accessor region; it points at the old domain.
        self._colors_cache_version = None
        self._fog_coords_cache_version = None
        self._edge_flags_cache_version = None
        self._normals_cache_version = None
        self._secondary_colors_cache_version = None
        self._tex_coords_cache_version = None
        self._vertices_cache_version = None
    def _set_attribute_data(self, i, data):
        # Write `data` over this list's region of attribute `i`.
        attribute = self.domain.attributes[i]
        # TODO without region
        region = attribute.get_region(attribute.buffer, self.start, self.count)
        region.array[:] = data
        region.invalidate()
    # ---
    # Each accessor below caches its buffer region and re-fetches it whenever
    # the domain's _version changes (i.e. after a buffer resize/move).
    def _get_colors(self):
        if (self._colors_cache_version != self.domain._version):
            domain = self.domain
            attribute = domain.attribute_names['colors']
            self._colors_cache = attribute.get_region(
                attribute.buffer, self.start, self.count)
            self._colors_cache_version = domain._version
        region = self._colors_cache
        region.invalidate()
        return region.array
    def _set_colors(self, data):
        self._get_colors()[:] = data
    _colors_cache = None
    _colors_cache_version = None
    colors = property(_get_colors, _set_colors,
                      doc='''Array of color data.''')
    # ---
    def _get_fog_coords(self):
        if (self._fog_coords_cache_version != self.domain._version):
            domain = self.domain
            attribute = domain.attribute_names['fog_coords']
            self._fog_coords_cache = attribute.get_region(
                attribute.buffer, self.start, self.count)
            self._fog_coords_cache_version = domain._version
        region = self._fog_coords_cache
        region.invalidate()
        return region.array
    def _set_fog_coords(self, data):
        self._get_fog_coords()[:] = data
    _fog_coords_cache = None
    _fog_coords_cache_version = None
    fog_coords = property(_get_fog_coords, _set_fog_coords,
                          doc='''Array of fog coordinate data.''')
    # ---
    def _get_edge_flags(self):
        if (self._edge_flags_cache_version != self.domain._version):
            domain = self.domain
            attribute = domain.attribute_names['edge_flags']
            self._edge_flags_cache = attribute.get_region(
                attribute.buffer, self.start, self.count)
            self._edge_flags_cache_version = domain._version
        region = self._edge_flags_cache
        region.invalidate()
        return region.array
    def _set_edge_flags(self, data):
        self._get_edge_flags()[:] = data
    _edge_flags_cache = None
    _edge_flags_cache_version = None
    edge_flags = property(_get_edge_flags, _set_edge_flags,
                          doc='''Array of edge flag data.''')
    # ---
    def _get_normals(self):
        if (self._normals_cache_version != self.domain._version):
            domain = self.domain
            attribute = domain.attribute_names['normals']
            self._normals_cache = attribute.get_region(
                attribute.buffer, self.start, self.count)
            self._normals_cache_version = domain._version
        region = self._normals_cache
        region.invalidate()
        return region.array
    def _set_normals(self, data):
        self._get_normals()[:] = data
    _normals_cache = None
    _normals_cache_version = None
    normals = property(_get_normals, _set_normals,
                       doc='''Array of normal vector data.''')
    # ---
    def _get_secondary_colors(self):
        if (self._secondary_colors_cache_version != self.domain._version):
            domain = self.domain
            attribute = domain.attribute_names['secondary_colors']
            self._secondary_colors_cache = attribute.get_region(
                attribute.buffer, self.start, self.count)
            self._secondary_colors_cache_version = domain._version
        region = self._secondary_colors_cache
        region.invalidate()
        return region.array
    def _set_secondary_colors(self, data):
        self._get_secondary_colors()[:] = data
    _secondary_colors_cache = None
    _secondary_colors_cache_version = None
    secondary_colors = property(_get_secondary_colors, _set_secondary_colors,
                                doc='''Array of secondary color data.''')
    # ---
    _tex_coords_cache = None
    _tex_coords_cache_version = None
    def _get_tex_coords(self):
        if (self._tex_coords_cache_version != self.domain._version):
            domain = self.domain
            attribute = domain.attribute_names['tex_coords']
            self._tex_coords_cache = attribute.get_region(
                attribute.buffer, self.start, self.count)
            self._tex_coords_cache_version = domain._version
        region = self._tex_coords_cache
        region.invalidate()
        return region.array
    def _set_tex_coords(self, data):
        self._get_tex_coords()[:] = data
    tex_coords = property(_get_tex_coords, _set_tex_coords,
                          doc='''Array of texture coordinate data.''')
    # ---
    _vertices_cache = None
    _vertices_cache_version = None
    def _get_vertices(self):
        if (self._vertices_cache_version != self.domain._version):
            domain = self.domain
            attribute = domain.attribute_names['vertices']
            self._vertices_cache = attribute.get_region(
                attribute.buffer, self.start, self.count)
            self._vertices_cache_version = domain._version
        region = self._vertices_cache
        region.invalidate()
        return region.array
    def _set_vertices(self, data):
        self._get_vertices()[:] = data
    vertices = property(_get_vertices, _set_vertices,
                        doc='''Array of vertex coordinate data.''')
class IndexedVertexDomain(VertexDomain):
    '''Management of a set of indexed vertex lists.

    Construction of an indexed vertex domain is usually done with the
    `create_indexed_domain` function.
    '''
    # Initial capacity (in indices) of the element index buffer.
    _initial_index_count = 16

    def __init__(self, attribute_usages, index_gl_type=GL_UNSIGNED_INT):
        super(IndexedVertexDomain, self).__init__(attribute_usages)

        # Allocator tracks index slots; the GL buffer mirrors its capacity.
        self.index_allocator = allocation.Allocator(self._initial_index_count)

        self.index_gl_type = index_gl_type
        self.index_c_type = vertexattribute._c_types[index_gl_type]
        self.index_element_size = ctypes.sizeof(self.index_c_type)
        self.index_buffer = vertexbuffer.create_mappable_buffer(
            self.index_allocator.capacity * self.index_element_size,
            target=GL_ELEMENT_ARRAY_BUFFER)

    def _safe_index_alloc(self, count):
        '''Allocate indices, resizing the buffers if necessary.'''
        try:
            return self.index_allocator.alloc(count)
        except allocation.AllocatorMemoryException, e:
            # Grow to the next power of two and retry once.  Bumping
            # _version invalidates every cached mapped region.
            capacity = _nearest_pow2(e.requested_capacity)
            self._version += 1
            self.index_buffer.resize(capacity * self.index_element_size)
            self.index_allocator.set_capacity(capacity)
            return self.index_allocator.alloc(count)

    def _safe_index_realloc(self, start, count, new_count):
        '''Reallocate indices, resizing the buffers if necessary.'''
        try:
            return self.index_allocator.realloc(start, count, new_count)
        except allocation.AllocatorMemoryException, e:
            # Same grow-and-retry strategy as _safe_index_alloc.
            capacity = _nearest_pow2(e.requested_capacity)
            self._version += 1
            self.index_buffer.resize(capacity * self.index_element_size)
            self.index_allocator.set_capacity(capacity)
            return self.index_allocator.realloc(start, count, new_count)

    def create(self, count, index_count):
        '''Create an `IndexedVertexList` in this domain.

        :Parameters:
            `count` : int
                Number of vertices to create
            `index_count`
                Number of indices to create

        '''
        start = self._safe_alloc(count)
        index_start = self._safe_index_alloc(index_count)
        return IndexedVertexList(self, start, count, index_start, index_count)

    def get_index_region(self, start, count):
        '''Get a region of the index buffer.

        :Parameters:
            `start` : int
                Start of the region to map.
            `count` : int
                Number of indices to map.

        :rtype: Array of int
        '''
        # Convert element offsets to byte offsets for the buffer mapping.
        byte_start = self.index_element_size * start
        byte_count = self.index_element_size * count
        ptr_type = ctypes.POINTER(self.index_c_type * count)
        return self.index_buffer.get_region(byte_start, byte_count, ptr_type)

    def draw(self, mode, vertex_list=None):
        '''Draw vertices in the domain.

        If `vertex_list` is not specified, all vertices in the domain are
        drawn.  This is the most efficient way to render primitives.

        If `vertex_list` specifies a `VertexList`, only primitives in that
        list will be drawn.

        :Parameters:
            `mode` : int
                OpenGL drawing mode, e.g. ``GL_POINTS``, ``GL_LINES``, etc.
            `vertex_list` : `IndexedVertexList`
                Vertex list to draw, or ``None`` for all lists in this domain.

        '''
        # Save client array state, bind all attribute buffers, then the
        # element index buffer; state is restored at the end.
        glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
        for buffer, attributes in self.buffer_attributes:
            buffer.bind()
            for attribute in attributes:
                attribute.enable()
                attribute.set_pointer(attribute.buffer.ptr)
        self.index_buffer.bind()

        if vertexbuffer._workaround_vbo_finish:
            glFinish()

        if vertex_list is not None:
            # Draw a single list: offset is in bytes from the buffer base.
            glDrawElements(mode, vertex_list.index_count, self.index_gl_type,
                           self.index_buffer.ptr +
                           vertex_list.index_start * self.index_element_size)
        else:
            starts, sizes = self.index_allocator.get_allocated_regions()
            primcount = len(starts)
            if primcount == 0:
                pass
            elif primcount == 1:
                # Common case
                # NOTE(review): unlike every other branch, `starts[0]` is
                # not scaled by index_element_size before being added to
                # the buffer pointer -- confirm whether this is a missing
                # byte conversion for non-zero start offsets.
                glDrawElements(mode, sizes[0], self.index_gl_type,
                               self.index_buffer.ptr + starts[0])
            elif gl_info.have_version(1, 4):
                # NOTE(review): the index type passed here is hard-coded
                # to GL_UNSIGNED_INT instead of self.index_gl_type --
                # verify behaviour for domains with byte/short indices.
                starts = [s * self.index_element_size + self.index_buffer.ptr for s in starts]
                starts = ctypes.cast((GLuint * primcount)(*starts), ctypes.POINTER(ctypes.c_void_p))
                sizes = (GLsizei * primcount)(*sizes)
                glMultiDrawElements(mode, sizes, GL_UNSIGNED_INT, starts,
                                    primcount)
            else:
                # Pre-GL1.4 fallback: one draw call per allocated region.
                for start, size in zip(starts, sizes):
                    glDrawElements(mode, size, self.index_gl_type,
                                   self.index_buffer.ptr +
                                   start * self.index_element_size)

        self.index_buffer.unbind()
        for buffer, _ in self.buffer_attributes:
            buffer.unbind()
        glPopClientAttrib()
class IndexedVertexList(VertexList):
    '''A list of vertices within an `IndexedVertexDomain` that are indexed.

    Use `IndexedVertexDomain.create` to construct this list.
    '''
    def __init__(self, domain, start, count, index_start, index_count):
        super(IndexedVertexList, self).__init__(domain, start, count)
        self.index_start = index_start
        self.index_count = index_count

    def draw(self, mode):
        # Delegate to the domain, restricting drawing to this list only.
        self.domain.draw(mode, self)

    def resize(self, count, index_count):
        '''Resize this group.

        :Parameters:
            `count` : int
                New number of vertices in the list.
            `index_count` : int
                New number of indices in the list.

        '''
        old_start = self.start
        super(IndexedVertexList, self).resize(count)

        # Rewrite the indices if the vertex data was relocated.
        if old_start != self.start:
            diff = self.start - old_start
            # A sized list (not a lazy map) is required for the in-place
            # slice assignment into the mapped region.
            self.indices[:] = [i + diff for i in self.indices]

        # Resize the index allocation, then migrate the data if it moved.
        new_start = self.domain._safe_index_realloc(
            self.index_start, self.index_count, index_count)
        if new_start != self.index_start:
            # BUG FIX: the destination region must be mapped at
            # `new_start`; the original mapped the old location twice,
            # making the copy a no-op, so a relocated list drew stale
            # index data.
            old = self.domain.get_index_region(
                self.index_start, self.index_count)
            new = self.domain.get_index_region(
                new_start, self.index_count)
            new.array[:] = old.array[:]
            new.invalidate()

        self.index_start = new_start
        self.index_count = index_count
        # Force the `indices` accessor to re-map on next use.
        self._indices_cache_version = None

    def delete(self):
        '''Delete this group.'''
        super(IndexedVertexList, self).delete()
        self.domain.index_allocator.dealloc(self.index_start, self.index_count)

    def _set_index_data(self, data):
        # TODO without region
        region = self.domain.get_index_region(
            self.index_start, self.index_count)
        region.array[:] = data
        region.invalidate()

    # ---

    def _get_indices(self):
        # Re-map the index region if the domain has been resized.
        if self._indices_cache_version != self.domain._version:
            domain = self.domain
            self._indices_cache = domain.get_index_region(
                self.index_start, self.index_count)
            self._indices_cache_version = domain._version

        region = self._indices_cache
        # Mark dirty so writes through the returned array are uploaded.
        region.invalidate()
        return region.array

    def _set_indices(self, data):
        self._get_indices()[:] = data

    # Cached mapped index region and its corresponding domain version.
    _indices_cache = None
    _indices_cache_version = None

    indices = property(_get_indices, _set_indices,
                       doc='''Array of index data.''')
| bsd-3-clause |
robhudson/kuma | kuma/users/helpers.py | 12 | 4921 | from django.conf import settings
from django.contrib import admin
from jinja2 import escape, Markup, contextfunction
from jingo import register
from allauth.utils import get_request_param
from allauth.account.utils import user_display
from allauth.socialaccount import providers
from allauth.socialaccount.templatetags.socialaccount import get_providers
from honeypot.templatetags.honeypot import render_honeypot_field
from tower import ugettext as _
from kuma.core.urlresolvers import reverse
from kuma.core.helpers import datetimeformat
from .jobs import UserGravatarURLJob
@register.function
def gravatar_url(email, secure=True, size=220, rating='pg',
                 default=settings.DEFAULT_AVATAR):
    """Return the Gravatar URL for *email* via the cached URL job."""
    return UserGravatarURLJob().get(email, secure=secure, size=size,
                                    rating=rating, default=default)
@register.function
@contextfunction
def ban_link(context, ban_user, banner_user):
    """Returns a link to ban a user"""
    # Guard clause: no link when viewing yourself or lacking permission.
    if (ban_user.id == banner_user.id or
            not banner_user.has_perm('users.add_userban')):
        return Markup('')

    active_ban = ban_user.active_ban
    if active_ban:
        # Already banned: link to the existing ban record.
        url = reverse('admin:users_userban_change', args=(active_ban.id,))
        title = _('Banned on {ban_date} by {ban_admin}.').format(
            ban_date=datetimeformat(context, active_ban.date,
                                    format='date', output='json'),
            ban_admin=active_ban.by)
        link = ('<a href="%s" class="button ban-link" title="%s">%s'
                '<i aria-hidden="true" class="icon-ban"></i></a>'
                % (url, title, _('Banned')))
    else:
        # Not banned yet: link to the ban-creation admin form.
        url = '%s?user=%s&by=%s' % (reverse('admin:users_userban_add'),
                                    ban_user.id, banner_user.id)
        link = ('<a href="%s" class="button negative ban-link">%s'
                '<i aria-hidden="true" class="icon-ban"></i></a>'
                % (url, _('Ban User')))
    return Markup(link)
@register.function
def admin_link(user):
    """Returns a link to admin a user"""
    change_url = reverse('admin:users_user_change', args=(user.id,),
                         current_app=admin.site.name)
    markup = ('<a href="%s" class="button neutral">%s'
              '<i aria-hidden="true" class="icon-wrench"></i></a>' %
              (change_url, _('Admin')))
    return Markup(markup)
@register.filter
def public_email(email):
    """Email address -> publicly displayable email."""
    entities = unicode_to_html(email)
    return Markup('<span class="email">%s</span>' % entities)
def unicode_to_html(text):
    """Convert every character into an HTML numeric entity.

    e.g. ``E`` becomes ``&#69;``.  (The original docstring example was
    garbled: the entity had been rendered back into the literal letter.)
    Renders identically in a browser but obfuscates the raw text.
    """
    return ''.join([u'&#%s;' % ord(i) for i in text])
@register.function
def user_list(users):
    """Turn a list of users into a list of links to their profiles."""
    link = u'<a href="%s">%s</a>'
    # Renamed from `list`, which shadowed the builtin of the same name.
    links = u', '.join([link % (escape(u.get_absolute_url()),
                                escape(u.username)) for u in users])
    return Markup(links)
# Expose allauth's user_display() to templates: returns a string
# representation of a user.
register.function(user_display)

# Expose allauth's get_providers(): returns a list of social
# authentication providers.
register.function(get_providers)
@register.function
@contextfunction
def provider_login_url(context, provider_id, **params):
    """Build a login URL for a social auth provider.

    {{ provider_login_url("github", next="/some/url") }}
    {{ provider_login_url("persona", next="/some/other/url") }}
    """
    request = context['request']
    provider = providers.registry.by_id(provider_id)
    auth_params = params.get('auth_params', None)
    scope = params.get('scope', None)
    process = params.get('process', None)
    # BUG FIX: the original compared with `is ''`, which tests object
    # identity rather than equality; whether two empty-string values are
    # the same object is a CPython implementation detail (and raises a
    # SyntaxWarning on Python 3.8+).
    if scope == '':
        del params['scope']
    if auth_params == '':
        del params['auth_params']
    if 'next' not in params:
        # `next_url` avoids shadowing the builtin next().
        next_url = get_request_param(request, 'next')
        if next_url:
            params['next'] = next_url
        elif process == 'redirect':
            params['next'] = request.get_full_path()
    else:
        # 'next' was supplied but empty: drop it entirely.
        if not params['next']:
            del params['next']
    # get the login url and append params as url parameters
    return Markup(provider.get_login_url(request, **params))
@register.function
@contextfunction
def providers_media_js(context):
    """
    {{ providers_media_js() }}
    """
    request = context['request']
    # One <script>/JS snippet per registered provider, newline-joined.
    snippets = [p.media_js(request) for p in providers.registry.get_list()]
    return Markup(u'\n'.join(snippets))
@register.function
def social_accounts(user):
    """Group a user's social accounts by provider name.

    {% set accounts = social_accounts(user) %}
    Then:
    {{ accounts.twitter }} -- a list of connected Twitter accounts
    {{ accounts.twitter.0 }} -- the first Twitter account
    {% if accounts %} -- if there is at least one social account
    """
    accounts = {}
    if not user.is_authenticated():
        return accounts
    for account in user.socialaccount_set.all().iterator():
        # Renamed from `providers`, which shadowed the allauth
        # `providers` module imported at the top of this file.
        provider_accounts = accounts.setdefault(account.provider, [])
        provider_accounts.append(account)
    return accounts
@register.inclusion_tag('honeypot/honeypot_field.html')
def honeypot_field(field_name=None):
    # Thin wrapper exposing django-honeypot's field renderer to the
    # Jinja template environment.
    return render_honeypot_field(field_name)
| mpl-2.0 |
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.2/django/contrib/localflavor/jp/jp_prefectures.py | 543 | 2089 | from django.utils.translation import ugettext_lazy
# All 47 Japanese prefectures as (value, lazily-translated label) pairs,
# ordered geographically from Hokkaido in the north to Okinawa in the
# south.  Suitable for use as Django form/model field choices.
JP_PREFECTURES = (
    ('hokkaido', ugettext_lazy('Hokkaido'),),
    ('aomori', ugettext_lazy('Aomori'),),
    ('iwate', ugettext_lazy('Iwate'),),
    ('miyagi', ugettext_lazy('Miyagi'),),
    ('akita', ugettext_lazy('Akita'),),
    ('yamagata', ugettext_lazy('Yamagata'),),
    ('fukushima', ugettext_lazy('Fukushima'),),
    ('ibaraki', ugettext_lazy('Ibaraki'),),
    ('tochigi', ugettext_lazy('Tochigi'),),
    ('gunma', ugettext_lazy('Gunma'),),
    ('saitama', ugettext_lazy('Saitama'),),
    ('chiba', ugettext_lazy('Chiba'),),
    ('tokyo', ugettext_lazy('Tokyo'),),
    ('kanagawa', ugettext_lazy('Kanagawa'),),
    ('yamanashi', ugettext_lazy('Yamanashi'),),
    ('nagano', ugettext_lazy('Nagano'),),
    ('niigata', ugettext_lazy('Niigata'),),
    ('toyama', ugettext_lazy('Toyama'),),
    ('ishikawa', ugettext_lazy('Ishikawa'),),
    ('fukui', ugettext_lazy('Fukui'),),
    ('gifu', ugettext_lazy('Gifu'),),
    ('shizuoka', ugettext_lazy('Shizuoka'),),
    ('aichi', ugettext_lazy('Aichi'),),
    ('mie', ugettext_lazy('Mie'),),
    ('shiga', ugettext_lazy('Shiga'),),
    ('kyoto', ugettext_lazy('Kyoto'),),
    ('osaka', ugettext_lazy('Osaka'),),
    ('hyogo', ugettext_lazy('Hyogo'),),
    ('nara', ugettext_lazy('Nara'),),
    ('wakayama', ugettext_lazy('Wakayama'),),
    ('tottori', ugettext_lazy('Tottori'),),
    ('shimane', ugettext_lazy('Shimane'),),
    ('okayama', ugettext_lazy('Okayama'),),
    ('hiroshima', ugettext_lazy('Hiroshima'),),
    ('yamaguchi', ugettext_lazy('Yamaguchi'),),
    ('tokushima', ugettext_lazy('Tokushima'),),
    ('kagawa', ugettext_lazy('Kagawa'),),
    ('ehime', ugettext_lazy('Ehime'),),
    ('kochi', ugettext_lazy('Kochi'),),
    ('fukuoka', ugettext_lazy('Fukuoka'),),
    ('saga', ugettext_lazy('Saga'),),
    ('nagasaki', ugettext_lazy('Nagasaki'),),
    ('kumamoto', ugettext_lazy('Kumamoto'),),
    ('oita', ugettext_lazy('Oita'),),
    ('miyazaki', ugettext_lazy('Miyazaki'),),
    ('kagoshima', ugettext_lazy('Kagoshima'),),
    ('okinawa', ugettext_lazy('Okinawa'),),
)
| bsd-3-clause |
abhishekarora12/ansible | lib/ansible/template/safe_eval.py | 14 | 4160 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import sys
from six.moves import builtins
from ansible import constants as C
from ansible.plugins import filter_loader, test_loader
def safe_eval(expr, locals=None, include_exceptions=False):
    '''
    Safely evaluate a restricted Python expression string.

    This is intended for allowing things like:
    with_items: a_list_variable

    Where Jinja2 would return a string but we do not want to allow it to
    call functions (outside of Jinja2, where the env is constrained). If
    the input data to this function came from an untrusted (remote) source,
    it should first be run through _clean_data_struct() to ensure the data
    is further sanitized prior to evaluation.

    :param expr: the expression string; non-strings are returned unchanged
    :param locals: optional mapping of names visible to the expression
        (the default was previously a shared mutable ``{}``)
    :param include_exceptions: when True, return ``(result, exception)``
        instead of the bare result

    Based on:
    http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
    '''

    # this is the whitelist of AST nodes we are going to
    # allow in the evaluation. Any node type other than
    # those listed here will raise an exception in our custom
    # visitor class defined below.
    SAFE_NODES = set(
        (
            ast.Add,
            ast.BinOp,
            ast.Call,
            ast.Compare,
            ast.Dict,
            ast.Div,
            ast.Expression,
            ast.List,
            ast.Load,
            ast.Mult,
            ast.Num,
            ast.Name,
            ast.Str,
            ast.Sub,
            ast.Tuple,
            ast.UnaryOp,
        )
    )

    # AST node types were expanded after 2.6
    if not sys.version.startswith('2.6'):
        # BUG FIX: set.union() returns a *new* set; the original call
        # discarded the result, so ast.Set was never actually added to
        # the whitelist and set literals were always rejected.
        SAFE_NODES = SAFE_NODES.union(set((ast.Set,)))

    # Names of all loaded Jinja2 filter/test plugins are callable too.
    # (Loop variables renamed so they no longer shadow the `filter` and
    # `test` builtins.)
    filter_list = []
    for filter_plugin in filter_loader.all():
        filter_list.extend(filter_plugin.filters().keys())

    test_list = []
    for test_plugin in test_loader.all():
        test_list.extend(test_plugin.tests().keys())

    CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list

    class CleansingNodeVisitor(ast.NodeVisitor):
        def generic_visit(self, node, inside_call=False):
            if type(node) not in SAFE_NODES:
                raise Exception("invalid expression (%s)" % expr)
            elif isinstance(node, ast.Call):
                inside_call = True
            elif isinstance(node, ast.Name) and inside_call:
                # Reject any call to a builtin not explicitly whitelisted.
                if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST:
                    raise Exception("invalid function: %s" % node.id)
            # iterate over all child nodes
            for child_node in ast.iter_child_nodes(node):
                self.generic_visit(child_node, inside_call)

    if not isinstance(expr, basestring):
        # already templated to a datastructure, perhaps?
        if include_exceptions:
            return (expr, None)
        return expr

    cnv = CleansingNodeVisitor()
    try:
        parsed_tree = ast.parse(expr, mode='eval')
        cnv.visit(parsed_tree)
        compiled = compile(parsed_tree, expr, 'eval')
        # Empty globals plus a *copy* of the caller's locals, so the
        # evaluated expression cannot mutate the caller's mapping.
        result = eval(compiled, {}, dict(locals or {}))

        if include_exceptions:
            return (result, None)
        else:
            return result
    except SyntaxError as e:
        # special handling for syntax errors, we just return
        # the expression string back as-is
        if include_exceptions:
            return (expr, None)
        return expr
    except Exception as e:
        if include_exceptions:
            return (expr, e)
        return expr
| gpl-3.0 |
nerginer/GrovePi | Projects/tilt_buzzer/tilt_buzzer.py | 6 | 2405 | # tilt_buzzer.py
#
# This is an project using the Grove Switch, Buzzer and accelerometer from the GrovePi starter kit
#
# In this project, the buzzer starts making a sound when the accelerometer is held perpendicular and the Switch is on
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
'''
import time
from grovepi import *
import math

buzzer_pin = 2   # Port for buzzer
switch_pin = 4   # Port for switch

pinMode(buzzer_pin,"OUTPUT")   # Assign mode for buzzer as output
pinMode(switch_pin,"INPUT")    # Assign mode for switch as input

# Poll switch + accelerometer roughly 10x/second until Ctrl-C.
while True:
    try:
        switch_status= digitalRead(switch_pin)   # Read the switch status
        if switch_status:   # Switch is HIGH: run the tilt check
            accl = acc_xyz()   # Get the value from the accelerometer
            print "\nX:",accl[0],"\tY:",accl[1],"\tZ:",accl[2],
            # X-axis reading above the threshold of 16 means the board
            # is tilted past vertical, so sound the buzzer.
            if accl[0] > 16:
                digitalWrite(buzzer_pin,1)
                print "\tBuzzing",
            else:   # Else stop the buzzer
                digitalWrite(buzzer_pin,0)
        else:   # Switch is off: print "Off" and stay idle
            print "Off"
        time.sleep(.1)
    except KeyboardInterrupt:   # Stop the buzzer before stopping
        digitalWrite(buzzer_pin,0)
        break
    except (IOError,TypeError) as e:
        # Transient sensor read failure: report and keep polling.
        print "Error"
firebitsbr/raspberry_pwn | src/pentest/sqlmap/tamper/randomcase.py | 7 | 1413 | #!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.common import randomRange
from lib.core.data import kb
from lib.core.enums import PRIORITY
# Run order among tamper scripts; NORMAL means no special priority.
__priority__ = PRIORITY.NORMAL

def dependencies():
    # This tamper script needs no extra runtime requirements.
    pass
def tamper(payload, **kwargs):
    """
    Replaces each keyword character with random case value

    Tested against:
        * Microsoft SQL Server 2005
        * MySQL 4, 5.0 and 5.5
        * Oracle 10g
        * PostgreSQL 8.3, 8.4, 9.0

    Notes:
        * Useful to bypass very weak and bespoke web application firewalls
          that has poorly written permissive regular expressions
        * This tamper script should work against all (?) databases

    >>> import random
    >>> random.seed(0)
    >>> tamper('INSERT')
    'INseRt'
    """

    retVal = payload

    if payload:
        # Walk every alphabetic word; only known SQL keywords are mangled.
        for match in re.finditer(r"[A-Za-z_]+", retVal):
            word = match.group()

            if word.upper() in kb.keywords:
                # Re-roll until the result is genuinely mixed-case
                # (neither all-lower nor all-upper).
                while True:
                    candidate = "".join(ch.upper() if randomRange(0, 1)
                                        else ch.lower() for ch in word)
                    if len(candidate) > 1 and candidate not in (
                            candidate.lower(), candidate.upper()):
                        break
                retVal = retVal.replace(word, candidate)

    return retVal
| gpl-3.0 |
maximusmai/sqlalchemy-migrate | migrate/versioning/template.py | 62 | 2874 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
import sys
from pkg_resources import resource_filename
from migrate.versioning.config import *
from migrate.versioning import pathed
class Collection(pathed.Pathed):
    """A collection of templates of a specific type"""

    # Filename mask applied to a theme name; subclasses override it.
    _mask = None

    def get_path(self, file):
        # Join a template filename onto this collection's directory.
        # NOTE: `file` shadows the Python 2 builtin; kept for API compat.
        return os.path.join(self.path, str(file))


class RepositoryCollection(Collection):
    # Repository templates are plain directory names (no suffix).
    _mask = '%s'


class ScriptCollection(Collection):
    # Change-script templates are .py_tmpl files.
    _mask = '%s.py_tmpl'


class ManageCollection(Collection):
    # manage.py templates are .py_tmpl files.
    _mask = '%s.py_tmpl'


class SQLScriptCollection(Collection):
    # SQL change-script templates are .py_tmpl files.
    _mask = '%s.py_tmpl'
class Template(pathed.Pathed):
    """Finds the paths/packages of various Migrate templates.

    :param path: Templates are loaded from migrate package
        if `path` is not provided.
    """
    # Dotted package shipping the default template tree.
    pkg = 'migrate.versioning.templates'

    def __new__(cls, path=None):
        # Pathed caches instances per-path, so the default path must be
        # resolved *before* delegating to the base __new__.
        if path is None:
            path = cls._find_path(cls.pkg)
        return super(Template, cls).__new__(cls, path)

    def __init__(self, path=None):
        if path is None:
            path = Template._find_path(self.pkg)
        super(Template, self).__init__(path)
        # One collection per template category bundled with the package.
        self.repository = RepositoryCollection(os.path.join(path, 'repository'))
        self.script = ScriptCollection(os.path.join(path, 'script'))
        self.manage = ManageCollection(os.path.join(path, 'manage'))
        self.sql_script = SQLScriptCollection(os.path.join(path, 'sql_script'))

    @classmethod
    def _find_path(cls, pkg):
        """Returns absolute path to dotted python package."""
        tmp_pkg = pkg.rsplit('.', 1)

        if len(tmp_pkg) != 1:
            return resource_filename(tmp_pkg[0], tmp_pkg[1])
        else:
            # Top-level package (no dot): resolve the package root itself.
            return resource_filename(tmp_pkg[0], '')

    def _get_item(self, collection, theme=None):
        """Locates and returns collection.

        :param collection: name of collection to locate
        :param type_: type of subfolder in collection (defaults to "_default")
        :returns: (package, source)
        :rtype: str, str
        """
        item = getattr(self, collection)
        theme_mask = getattr(item, '_mask')
        theme = theme_mask % (theme or 'default')
        return item.get_path(theme)

    def get_repository(self, *a, **kw):
        """Calls self._get_item('repository', *a, **kw)"""
        return self._get_item('repository', *a, **kw)

    def get_script(self, *a, **kw):
        """Calls self._get_item('script', *a, **kw)"""
        return self._get_item('script', *a, **kw)

    def get_sql_script(self, *a, **kw):
        """Calls self._get_item('sql_script', *a, **kw)"""
        return self._get_item('sql_script', *a, **kw)

    def get_manage(self, *a, **kw):
        """Calls self._get_item('manage', *a, **kw)"""
        return self._get_item('manage', *a, **kw)
| mit |
eayunstack/python-cinderclient | cinderclient/v1/qos_specs.py | 16 | 4799 | # Copyright (c) 2013 eBay Inc.
# Copyright (c) OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
QoS Specs interface.
"""
from cinderclient import base
class QoSSpecs(base.Resource):
    """QoS specs entity represents quality-of-service parameters/requirements.

    A QoS specs is a set of parameters or requirements for quality-of-service
    purpose, which can be associated with volume types (for now). In future,
    QoS specs may be extended to be associated other entities, such as single
    volume.
    """
    def __repr__(self):
        return "<QoSSpecs: %s>" % self.name

    def delete(self):
        # Delegate deletion to the manager that produced this resource.
        return self.manager.delete(self)
class QoSSpecsManager(base.ManagerWithFind):
    """
    Manage :class:`QoSSpecs` resources.

    Thin wrapper over the Cinder v1 ``/qos-specs`` REST endpoints; each
    method delegates to the base manager's HTTP helpers.
    """
    resource_class = QoSSpecs

    def list(self):
        """Get a list of all qos specs.

        :rtype: list of :class:`QoSSpecs`.
        """
        return self._list("/qos-specs", "qos_specs")

    def get(self, qos_specs):
        """Get a specific qos specs.

        :param qos_specs: The ID of the :class:`QoSSpecs` to get.
        :rtype: :class:`QoSSpecs`
        """
        return self._get("/qos-specs/%s" % base.getid(qos_specs), "qos_specs")

    def delete(self, qos_specs, force=False):
        """Delete a specific qos specs.

        :param qos_specs: The ID of the :class:`QoSSpecs` to be removed.
        :param force: Flag that indicates whether to delete target qos specs
                      if it was in-use.
        """
        self._delete("/qos-specs/%s?force=%s" %
                     (base.getid(qos_specs), force))

    def create(self, name, specs):
        """Create a qos specs.

        :param name: Descriptive name of the qos specs, must be unique
        :param specs: A dict of key/value pairs to be set
        :rtype: :class:`QoSSpecs`
        """
        # The name and the arbitrary spec keys share one flat dict in the
        # request body.
        body = {
            "qos_specs": {
                "name": name,
            }
        }
        body["qos_specs"].update(specs)
        return self._create("/qos-specs", body, "qos_specs")

    def set_keys(self, qos_specs, specs):
        """Update a qos specs with new specifications.

        :param qos_specs: The ID of qos specs
        :param specs: A dict of key/value pairs to be set
        :rtype: :class:`QoSSpecs`
        """
        body = {
            "qos_specs": {}
        }
        body["qos_specs"].update(specs)
        return self._update("/qos-specs/%s" % qos_specs, body)

    def unset_keys(self, qos_specs, specs):
        """Update a qos specs with new specifications.

        :param qos_specs: The ID of qos specs
        :param specs: A list of key to be unset
        :rtype: :class:`QoSSpecs`
        """
        body = {'keys': specs}

        return self._update("/qos-specs/%s/delete_keys" % qos_specs,
                            body)

    def get_associations(self, qos_specs):
        """Get associated entities of a qos specs.

        :param qos_specs: The id of the :class: `QoSSpecs`
        :return: a list of entities that associated with specific qos specs.
        """
        return self._list("/qos-specs/%s/associations" % base.getid(qos_specs),
                          "qos_associations")

    def associate(self, qos_specs, vol_type_id):
        """Associate a volume type with specific qos specs.

        :param qos_specs: The qos specs to be associated with
        :param vol_type_id: The volume type id to be associated with
        """
        # NOTE(review): a state-changing operation issued via HTTP GET --
        # mirrors the v1 API contract, but unusual; confirm server-side.
        self.api.client.get("/qos-specs/%s/associate?vol_type_id=%s" %
                            (base.getid(qos_specs), vol_type_id))

    def disassociate(self, qos_specs, vol_type_id):
        """Disassociate qos specs from volume type.

        :param qos_specs: The qos specs to be associated with
        :param vol_type_id: The volume type id to be associated with
        """
        self.api.client.get("/qos-specs/%s/disassociate?vol_type_id=%s" %
                            (base.getid(qos_specs), vol_type_id))

    def disassociate_all(self, qos_specs):
        """Disassociate all entities from specific qos specs.

        :param qos_specs: The qos specs to be associated with
        """
        self.api.client.get("/qos-specs/%s/disassociate_all" %
                            base.getid(qos_specs))
| apache-2.0 |
40123248/w17b_test | static/Brython3.1.0-20150301-090019/Lib/multiprocessing/dummy/__init__.py | 693 | 4380 | #
# Support for the API of the multiprocessing package using threads
#
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
]
#
# Imports
#
import threading
import sys
import weakref
#brython fix me
#import array
from multiprocessing.dummy.connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event, Condition, Barrier
from queue import Queue
#
#
#
class DummyProcess(threading.Thread):
    """Thread subclass presenting the multiprocessing.Process interface."""

    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
        threading.Thread.__init__(self, group, target, name, args, kwargs)
        self._pid = None
        # Children are tracked weakly so finished processes can be GC'd.
        self._children = weakref.WeakKeyDictionary()
        self._start_called = False
        self._parent = current_process()

    def start(self):
        # Only the creating "process" (thread) may start its child.
        assert self._parent is current_process()
        self._start_called = True
        if hasattr(self._parent, '_children'):
            self._parent._children[self] = None
        threading.Thread.start(self)

    @property
    def exitcode(self):
        # Mimic Process.exitcode: 0 once finished, None before/while running.
        if self._start_called and not self.is_alive():
            return 0
        return None
#
#
#
# Public aliases matching the multiprocessing API surface.
Process = DummyProcess
current_process = threading.current_thread
# The main thread also needs a children registry.
current_process()._children = weakref.WeakKeyDictionary()

def active_children():
    # Prune finished children, then return the live ones as a list.
    children = current_process()._children
    for p in list(children):
        if not p.is_alive():
            children.pop(p, None)
    return list(children)

def freeze_support():
    # No-op: frozen-executable support is only meaningful for real
    # processes, not threads.
    pass
#
#
#
class Namespace(object):
    """Simple attribute container mirroring multiprocessing's Namespace."""

    def __init__(self, **kwds):
        self.__dict__.update(kwds)

    def __repr__(self):
        # Only public attributes appear, formatted and sorted
        # lexicographically for a stable representation.
        shown = sorted('%s=%r' % (name, value)
                       for name, value in self.__dict__.items()
                       if not name.startswith('_'))
        return 'Namespace(%s)' % ', '.join(shown)
# In-process "shared" containers are just the plain builtins.
dict = dict
list = list

#brython fix me
#def Array(typecode, sequence, lock=True):
#    return array.array(typecode, sequence)
class Value(object):
    """Mutable scalar holder; `lock` is accepted only for API parity."""

    def __init__(self, typecode, value, lock=True):
        self._typecode = typecode
        self._value = value

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        self._value = value

    def __repr__(self):
        return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
def Manager():
    # The "manager" is this module itself: attributes are naturally
    # shared since everything lives in a single process.
    return sys.modules[__name__]

def shutdown():
    # Nothing to tear down in the thread-backed implementation.
    pass

def Pool(processes=None, initializer=None, initargs=()):
    # Delegate to the thread-based pool.  Imported locally, presumably
    # to avoid a circular import at module load time -- TODO confirm.
    from multiprocessing.pool import ThreadPool
    return ThreadPool(processes, initializer, initargs)

# Thread-backed queues are always joinable.
JoinableQueue = Queue
| gpl-3.0 |
iuliat/nova | nova/objects/dns_domain.py | 29 | 2606 | # Copyright (C) 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova import objects
from nova.objects import base
from nova.objects import fields
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class DNSDomain(base.NovaPersistentObject, base.NovaObject,
                base.NovaObjectDictCompat):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'domain': fields.StringField(),
        'scope': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
    }

    @staticmethod
    def _from_db_object(context, vif, db_vif):
        # Copy every declared field from the DB row onto the object.
        # NOTE(review): parameters are named `vif`/`db_vif` although this
        # is a DNS domain object -- likely copied from another object's
        # implementation; consider renaming for clarity.
        for field in vif.fields:
            vif[field] = db_vif[field]
        vif._context = context
        vif.obj_reset_changes()
        return vif

    @base.remotable_classmethod
    def get_by_domain(cls, context, domain):
        # Returns None implicitly when no matching row is found.
        db_dnsd = db.dnsdomain_get(context, domain)
        if db_dnsd:
            return cls._from_db_object(context, cls(), db_dnsd)

    @base.remotable_classmethod
    def register_for_zone(cls, context, domain, zone):
        db.dnsdomain_register_for_zone(context, domain, zone)

    @base.remotable_classmethod
    def register_for_project(cls, context, domain, project):
        db.dnsdomain_register_for_project(context, domain, project)

    @base.remotable_classmethod
    def delete_by_domain(cls, context, domain):
        db.dnsdomain_unregister(context, domain)
@base.NovaObjectRegistry.register
class DNSDomainList(base.ObjectListBase, base.NovaObject):
    """A versioned object wrapping a list of DNSDomain objects."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'objects': fields.ListOfObjectsField('DNSDomain'),
    }
    # Maps this list's version to the contained DNSDomain version for
    # backlevel serialization.
    obj_relationships = {
        'objects': [('1.0', '1.0')],
    }
    @base.remotable_classmethod
    def get_all(cls, context):
        # Fetch every DNS domain row and wrap each one in a DNSDomain object.
        db_domains = db.dnsdomain_get_all(context)
        return base.obj_make_list(context, cls(context), objects.DNSDomain,
                                  db_domains)
| apache-2.0 |
myarjunar/inasafe | safe/gui/tools/wizard/step_kw25_classification.py | 3 | 7047 | # coding=utf-8
"""InaSAFE Wizard Step Classifications."""
# noinspection PyPackageRequirements
from PyQt4 import QtCore
from PyQt4.QtGui import QListWidgetItem
from safe.utilities.i18n import tr
from safe import messaging as m
from safe.common.exceptions import InvalidWizardStep
from safe.definitions.layer_purposes import (
layer_purpose_hazard, layer_purpose_exposure)
from safe.definitions.layer_modes import (
layer_mode_classified, layer_mode_continuous)
from safe.gui.tools.wizard.wizard_step import WizardStep
from safe.gui.tools.wizard.wizard_step import get_wizard_step_ui_class
from safe.gui.tools.wizard.wizard_strings import classification_question
from safe.definitions.utilities import definition, get_classifications
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
FORM_CLASS = get_wizard_step_ui_class(__file__)
class StepKwClassification(WizardStep, FORM_CLASS):
    """InaSAFE Wizard Step Classifications."""
    def is_ready_to_next_step(self):
        """Check if the step is complete. If so, there is
        no reason to block the Next button.
        :returns: True if new step may be enabled.
        :rtype: bool
        """
        # Ready as soon as the user has picked any classification.
        return bool(self.selected_classification())
    def get_next_step(self):
        """Find the proper step when user clicks the Next button.
        :returns: The step to be switched to
        :rtype: WizardStep instance or None
        """
        # Classified data goes to the classify step, continuous data to the
        # threshold step; anything else is a programming error upstream.
        layer_mode = self.parent.step_kw_layermode.selected_layermode()
        if layer_mode == layer_mode_classified:
            new_step = self.parent.step_kw_classify
        elif layer_mode == layer_mode_continuous:
            new_step = self.parent.step_kw_threshold
        else:
            message = tr('Layer mode should be continuous or classified')
            raise InvalidWizardStep(message)
        return new_step
    def classifications_for_layer(self):
        """Return a list of valid classifications for a layer.
        :returns: A list where each value represents a valid classification.
        :rtype: list
        """
        subcategory_key = self.parent.step_kw_subcategory.\
            selected_subcategory()['key']
        layer_purpose = self.parent.step_kw_purpose.selected_purpose()
        if layer_purpose in [layer_purpose_hazard, layer_purpose_exposure]:
            classifications = []
            selected_unit = self.parent.step_kw_unit.selected_unit()
            for classification in get_classifications(subcategory_key):
                if selected_unit is None:
                    # we are using classified data, so let's allow all
                    # classifications
                    classifications.append(classification)
                elif 'multiple_units' not in classification:
                    # this classification is not multiple unit aware, so let's
                    # allow it
                    classifications.append(classification)
                elif selected_unit in classification['multiple_units']:
                    # we are using continuous data, and this classification
                    # supports the chosen unit so we allow it
                    classifications.append(classification)
            return classifications
        else:
            # There are no classifications for non exposure and hazard
            # defined yet
            return []
    def on_lstClassifications_itemSelectionChanged(self):
        """Update classification description label and unlock the Next button.
        .. note:: This is an automatic Qt slot
            executed when the field selection changes.
        """
        self.clear_further_steps()
        classification = self.selected_classification()
        # Exit if no selection
        if not classification:
            return
        # Set description label
        self.lblDescribeClassification.setText(classification["description"])
        # Enable the next button
        self.parent.pbnNext.setEnabled(True)
    def selected_classification(self):
        """Obtain the classification selected by user.
        :returns: Metadata of the selected classification.
        :rtype: dict, None
        """
        item = self.lstClassifications.currentItem()
        try:
            return definition(item.data(QtCore.Qt.UserRole))
        except (AttributeError, NameError):
            # No current item, or its stored key is unknown to definition().
            return None
    def clear_further_steps(self):
        """Clear all further steps in order to properly calculate the prev step
        """
        self.parent.step_kw_classify.treeClasses.clear()
    def set_widgets(self):
        """Set widgets on the Classification tab."""
        self.clear_further_steps()
        purpose = self.parent.step_kw_purpose.selected_purpose()['name']
        subcategory = self.parent.step_kw_subcategory.\
            selected_subcategory()['name']
        self.lstClassifications.clear()
        self.lblDescribeClassification.setText('')
        self.lblSelectClassification.setText(
            classification_question % (subcategory, purpose))
        # Populate the list; the classification key is stashed in UserRole so
        # selected_classification() can recover the full definition later.
        classifications = self.classifications_for_layer()
        for classification in classifications:
            if not isinstance(classification, dict):
                classification = definition(classification)
            item = QListWidgetItem(
                classification['name'],
                self.lstClassifications)
            item.setData(QtCore.Qt.UserRole, classification['key'])
            self.lstClassifications.addItem(item)
        # Set values based on existing keywords (if already assigned)
        classification_keyword = self.parent.get_existing_keyword(
            'classification')
        if classification_keyword:
            classifications = []
            for index in xrange(self.lstClassifications.count()):
                item = self.lstClassifications.item(index)
                classifications.append(item.data(QtCore.Qt.UserRole))
            if classification_keyword in classifications:
                self.lstClassifications.setCurrentRow(
                    classifications.index(classification_keyword))
        self.auto_select_one_item(self.lstClassifications)
    @property
    def step_name(self):
        """Get the human friendly name for the wizard step.
        :returns: The name of the wizard step.
        :rtype: str
        """
        return tr('Classification Step')
    def help_content(self):
        """Return the content of help for this step wizard.
        We only needs to re-implement this method in each wizard step.
        :returns: A message object contains help.
        :rtype: m.Message
        """
        message = m.Message()
        message.add(m.Paragraph(tr(
            'In this wizard step: {step_name}, you will be able to set the '
            'classification of the layer that is being assigned in this '
            'wizard.'
        ).format(step_name=self.step_name)))
        return message
| gpl-3.0 |
takeshineshiro/cinder | cinder/api/contrib/backups.py | 7 | 14949 | # Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2014 TrilioData, Inc
# Copyright (c) 2015 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The backups api."""
from oslo_log import log as logging
import webob
from webob import exc
from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import backups as backup_views
from cinder.api import xmlutil
from cinder import backup as backupAPI
from cinder import exception
from cinder.i18n import _, _LI
from cinder import utils
LOG = logging.getLogger(__name__)
def make_backup(elem):
    """Attach the per-backup attributes to an XML template element."""
    for attr in ('id', 'status', 'size', 'container', 'parent_id',
                 'volume_id', 'object_count', 'availability_zone',
                 'created_at', 'name', 'description', 'fail_reason'):
        elem.set(attr)
def make_backup_restore(elem):
    """Attach the restore-response attributes to an XML template element."""
    for attr in ('backup_id', 'volume_id', 'volume_name'):
        elem.set(attr)
def make_backup_export_import_record(elem):
    """Attach the export/import record attributes to an XML template element."""
    for attr in ('backup_service', 'backup_url'):
        elem.set(attr)
class BackupTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        """Build the XML serializer template for a single backup."""
        root = xmlutil.TemplateElement('backup', selector='backup')
        make_backup(root)
        return xmlutil.MasterTemplate(
            root, 1, nsmap={Backups.alias: Backups.namespace})
class BackupsTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        """Build the XML serializer template for a list of backups."""
        root = xmlutil.TemplateElement('backups')
        backup_elem = xmlutil.SubTemplateElement(root, 'backup',
                                                 selector='backups')
        make_backup(backup_elem)
        return xmlutil.MasterTemplate(
            root, 1, nsmap={Backups.alias: Backups.namespace})
class BackupRestoreTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        """Build the XML serializer template for a restore response."""
        root = xmlutil.TemplateElement('restore', selector='restore')
        make_backup_restore(root)
        return xmlutil.MasterTemplate(
            root, 1, nsmap={Backups.alias: Backups.namespace})
class BackupExportImportTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        """Build the XML serializer template for an export/import record."""
        root = xmlutil.TemplateElement('backup-record',
                                       selector='backup-record')
        make_backup_export_import_record(root)
        return xmlutil.MasterTemplate(
            root, 1, nsmap={Backups.alias: Backups.namespace})
class CreateDeserializer(wsgi.MetadataXMLDeserializer):
    def default(self, string):
        """Deserialize an XML create-backup request body."""
        dom = utils.safe_minidom_parse_string(string)
        return {'body': {'backup': self._extract_backup(dom)}}

    def _extract_backup(self, node):
        """Pull the recognized, non-empty attributes off <backup>."""
        backup_node = self.find_first_child_named(node, 'backup')
        wanted = ('container', 'display_name', 'display_description',
                  'volume_id', 'parent_id')
        # getAttribute returns '' for absent attributes, which is falsy.
        return dict((attr, backup_node.getAttribute(attr))
                    for attr in wanted
                    if backup_node.getAttribute(attr))
class RestoreDeserializer(wsgi.MetadataXMLDeserializer):
    def default(self, string):
        """Deserialize an XML restore-backup request body."""
        dom = utils.safe_minidom_parse_string(string)
        return {'body': {'restore': self._extract_restore(dom)}}

    def _extract_restore(self, node):
        """Pull the optional volume_id off the <restore> element."""
        restore_node = self.find_first_child_named(node, 'restore')
        volume_id = restore_node.getAttribute('volume_id')
        return {'volume_id': volume_id} if volume_id else {}
class BackupImportDeserializer(wsgi.MetadataXMLDeserializer):
    def default(self, string):
        """Deserialize an XML import-backup request body."""
        dom = utils.safe_minidom_parse_string(string)
        return {'body': {'backup-record': self._extract_backup(dom)}}

    def _extract_backup(self, node):
        """Pull backup_service/backup_url off the <backup-record> element."""
        backup_node = self.find_first_child_named(node, 'backup-record')
        # getAttribute returns '' for absent attributes, which is falsy.
        return dict((attr, backup_node.getAttribute(attr))
                    for attr in ('backup_service', 'backup_url')
                    if backup_node.getAttribute(attr))
class BackupsController(wsgi.Controller):
    """The Backups API controller for the OpenStack API."""
    _view_builder_class = backup_views.ViewBuilder
    def __init__(self):
        self.backup_api = backupAPI.API()
        super(BackupsController, self).__init__()
    @wsgi.serializers(xml=BackupTemplate)
    def show(self, req, id):
        """Return data about the given backup."""
        LOG.debug('show called for member %s', id)
        context = req.environ['cinder.context']
        try:
            backup = self.backup_api.get(context, backup_id=id)
            # Cache the backup on the request so later middleware can reuse
            # it without another DB round trip.
            req.cache_db_backup(backup)
        except exception.BackupNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)
        return self._view_builder.detail(req, backup)
    def delete(self, req, id):
        """Delete a backup."""
        LOG.debug('Delete called for member %s.', id)
        context = req.environ['cinder.context']
        LOG.info(_LI('Delete backup with id: %s'), id, context=context)
        try:
            backup = self.backup_api.get(context, id)
            self.backup_api.delete(context, backup)
        except exception.BackupNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)
        except exception.InvalidBackup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        # 202: deletion is asynchronous; the backup service completes it.
        return webob.Response(status_int=202)
    @wsgi.serializers(xml=BackupsTemplate)
    def index(self, req):
        """Returns a summary list of backups."""
        return self._get_backups(req, is_detail=False)
    @wsgi.serializers(xml=BackupsTemplate)
    def detail(self, req):
        """Returns a detailed list of backups."""
        return self._get_backups(req, is_detail=True)
    @staticmethod
    def _get_backup_filter_options():
        """Return volume search options allowed by non-admin."""
        return ('name', 'status', 'volume_id')
    def _get_backups(self, req, is_detail):
        """Returns a list of backups, transformed through view builder."""
        context = req.environ['cinder.context']
        filters = req.params.copy()
        marker, limit, offset = common.get_pagination_params(filters)
        sort_keys, sort_dirs = common.get_sort_params(filters)
        utils.remove_invalid_filter_options(context,
                                            filters,
                                            self._get_backup_filter_options())
        # The API exposes 'name' but the DB column is 'display_name'.
        if 'name' in filters:
            filters['display_name'] = filters['name']
            del filters['name']
        backups = self.backup_api.get_all(context, search_opts=filters,
                                          marker=marker,
                                          limit=limit,
                                          offset=offset,
                                          sort_keys=sort_keys,
                                          sort_dirs=sort_dirs,
                                          )
        req.cache_db_backups(backups.objects)
        if is_detail:
            backups = self._view_builder.detail_list(req, backups.objects)
        else:
            backups = self._view_builder.summary_list(req, backups.objects)
        return backups
    # TODO(frankm): Add some checks here including
    # - whether requested volume_id exists so we can return some errors
    #   immediately
    # - maybe also do validation of swift container name
    @wsgi.response(202)
    @wsgi.serializers(xml=BackupTemplate)
    @wsgi.deserializers(xml=CreateDeserializer)
    def create(self, req, body):
        """Create a new backup."""
        LOG.debug('Creating new backup %s', body)
        self.assert_valid_body(body, 'backup')
        context = req.environ['cinder.context']
        backup = body['backup']
        # volume_id is the only mandatory field in the request body.
        try:
            volume_id = backup['volume_id']
        except KeyError:
            msg = _("Incorrect request body format")
            raise exc.HTTPBadRequest(explanation=msg)
        container = backup.get('container', None)
        self.validate_name_and_description(backup)
        name = backup.get('name', None)
        description = backup.get('description', None)
        incremental = backup.get('incremental', False)
        force = backup.get('force', False)
        LOG.info(_LI("Creating backup of volume %(volume_id)s in container"
                     " %(container)s"),
                 {'volume_id': volume_id, 'container': container},
                 context=context)
        try:
            new_backup = self.backup_api.create(context, name, description,
                                                volume_id, container,
                                                incremental, None, force)
        except exception.InvalidVolume as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except exception.VolumeNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)
        except exception.ServiceNotFound as error:
            raise exc.HTTPInternalServerError(explanation=error.msg)
        retval = self._view_builder.summary(req, dict(new_backup))
        return retval
    @wsgi.response(202)
    @wsgi.serializers(xml=BackupRestoreTemplate)
    @wsgi.deserializers(xml=RestoreDeserializer)
    def restore(self, req, id, body):
        """Restore an existing backup to a volume."""
        LOG.debug('Restoring backup %(backup_id)s (%(body)s)',
                  {'backup_id': id, 'body': body})
        self.assert_valid_body(body, 'restore')
        context = req.environ['cinder.context']
        restore = body['restore']
        # Both are optional: without volume_id a new volume is created.
        volume_id = restore.get('volume_id', None)
        name = restore.get('name', None)
        LOG.info(_LI("Restoring backup %(backup_id)s to volume %(volume_id)s"),
                 {'backup_id': id, 'volume_id': volume_id},
                 context=context)
        try:
            new_restore = self.backup_api.restore(context,
                                                  backup_id=id,
                                                  volume_id=volume_id,
                                                  name=name)
        except exception.InvalidInput as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except exception.InvalidVolume as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except exception.InvalidBackup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except exception.BackupNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)
        except exception.VolumeNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)
        except exception.VolumeSizeExceedsAvailableQuota as error:
            raise exc.HTTPRequestEntityTooLarge(
                explanation=error.msg, headers={'Retry-After': 0})
        except exception.VolumeLimitExceeded as error:
            raise exc.HTTPRequestEntityTooLarge(
                explanation=error.msg, headers={'Retry-After': 0})
        retval = self._view_builder.restore_summary(
            req, dict(new_restore))
        return retval
    @wsgi.response(200)
    @wsgi.serializers(xml=BackupExportImportTemplate)
    def export_record(self, req, id):
        """Export a backup."""
        LOG.debug('export record called for member %s.', id)
        context = req.environ['cinder.context']
        try:
            backup_info = self.backup_api.export_record(context, id)
        except exception.BackupNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)
        except exception.InvalidBackup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        retval = self._view_builder.export_summary(
            req, dict(backup_info))
        LOG.debug('export record output: %s.', retval)
        return retval
    @wsgi.response(201)
    @wsgi.serializers(xml=BackupTemplate)
    @wsgi.deserializers(xml=BackupImportDeserializer)
    def import_record(self, req, body):
        """Import a backup."""
        LOG.debug('Importing record from %s.', body)
        self.assert_valid_body(body, 'backup-record')
        context = req.environ['cinder.context']
        import_data = body['backup-record']
        # Verify that body elements are provided
        try:
            backup_service = import_data['backup_service']
            backup_url = import_data['backup_url']
        except KeyError:
            msg = _("Incorrect request body format.")
            raise exc.HTTPBadRequest(explanation=msg)
        LOG.debug('Importing backup using %(service)s and url %(url)s.',
                  {'service': backup_service, 'url': backup_url})
        try:
            new_backup = self.backup_api.import_record(context,
                                                       backup_service,
                                                       backup_url)
        except exception.BackupNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)
        except exception.InvalidBackup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except exception.ServiceNotFound as error:
            raise exc.HTTPInternalServerError(explanation=error.msg)
        retval = self._view_builder.summary(req, dict(new_backup))
        LOG.debug('import record output: %s.', retval)
        return retval
class Backups(extensions.ExtensionDescriptor):
    """Backups support."""

    name = 'Backups'
    alias = 'backups'
    namespace = 'http://docs.openstack.org/volume/ext/backups/api/v1'
    updated = '2012-12-12T00:00:00+00:00'

    def get_resources(self):
        """Expose the backups resource plus its extra collection/member actions."""
        controller = BackupsController()
        resource = extensions.ResourceExtension(
            Backups.alias, controller,
            collection_actions={'detail': 'GET', 'import_record': 'POST'},
            member_actions={'restore': 'POST', 'export_record': 'GET',
                            'action': 'POST'})
        return [resource]
| apache-2.0 |
AZCompTox/AZOrange | orange/OrangeWidgets/Data/OWDataDomain.py | 2 | 19433 | """
<name>Select Attributes</name>
<description>Manual selection of attributes.<br>Aside from a data set with the selected attributes, the widget also outputs an "Attribute List" which can be used with another "Select Attribute" widget to make the same variable selection</description>
<icon>icons/SelectAttributes.png</icon>
<priority>1100</priority>
<contact>Peter Juvan (peter.juvan@fri.uni-lj.si) Changed by Pedro Almeida</contact>
"""
from AZutilities import dataUtilities
from OWTools import *
from OWWidget import *
import OWGUI,OWGUIEx
class OWDataDomain(OWWidget):
contextHandlers = {"": DomainContextHandler("", [ContextField("chosenAttributes",
DomainContextHandler.RequiredList,
selected="selectedChosen", reservoir="inputAttributes"),
ContextField("classAttribute",
DomainContextHandler.RequiredList,
selected="selectedClass", reservoir="inputAttributes"),
ContextField("metaAttributes",
DomainContextHandler.RequiredList,
selected="selectedMeta", reservoir="inputAttributes")
])}
def __init__(self,parent = None, signalManager = None):
OWWidget.__init__(self, parent, signalManager, "Data Domain",wantMainArea = 0) #initialize base class
self.inputs = [("Examples", ExampleTable, self.onDataInput), ("Attribute Subset", AttributeList, self.onAttributeList)]
self.outputs = [("Examples", ExampleTable), ("Classified Examples", ExampleTable), ("Selected Attributes", AttributeList)]
buttonWidth = 50
applyButtonWidth = 101
self.data = None
self.receivedAttrList = []
self.selectedInput = []
self.inputAttributes = []
self.selectedChosen = []
self.chosenAttributes = []
self.selectedClass = []
self.classAttribute = []
self.metaAttributes = []
self.selectedMeta = []
self.loadSettings()
self.usedAttributes = {}
self.userWarned = False
import sip
sip.delete(self.controlArea.layout())
gl=QGridLayout()
self.controlArea.setLayout(gl)
gl.setMargin(0)
boxAvail = OWGUI.widgetBox(self,'Available Attributes')
gl.addWidget(boxAvail, 0,0,3,1)
self.filterInputAttrs = OWGUIEx.lineEditFilter(boxAvail, self, None, useRE = 1, emptyText = "filter attributes...", callback = self.setInputAttributes, caseSensitive = 0)
self.inputAttributesList = OWGUI.listBox(boxAvail, self, "selectedInput", "inputAttributes", callback = self.onSelectionChange, selectionMode = QListWidget.ExtendedSelection)
self.filterInputAttrs.listbox = self.inputAttributesList
vbAttr = OWGUI.widgetBox(self, addToLayout = 0)
gl.addWidget(vbAttr, 0,1)
self.attributesButtonUp = OWGUI.button(vbAttr, self, "Up", self.onAttributesButtonUpClick)
self.attributesButtonUp.setMaximumWidth(buttonWidth)
self.attributesButton = OWGUI.button(vbAttr, self, ">",self.onAttributesButtonClicked)
self.attributesButton.setMaximumWidth(buttonWidth)
self.attributesButtonDown = OWGUI.button(vbAttr, self, "Down", self.onAttributesButtonDownClick)
self.attributesButtonDown.setMaximumWidth(buttonWidth)
boxAttr = OWGUI.widgetBox(self,'Attributes', addToLayout = 0)
gl.addWidget(boxAttr, 0,2)
self.attributesList = OWGUI.listBox(boxAttr, self, "selectedChosen", "chosenAttributes", callback = self.onSelectionChange, selectionMode = QListWidget.ExtendedSelection)
self.classButton = OWGUI.button(self, self, ">", self.onClassButtonClicked, addToLayout = 0)
self.classButton.setMaximumWidth(buttonWidth)
gl.addWidget(self.classButton, 1,1)
boxClass = OWGUI.widgetBox(self,'Class', addToLayout = 0)
boxClass.setFixedHeight(55)
gl.addWidget(boxClass, 1,2)
self.classList = OWGUI.listBox(boxClass, self, "selectedClass", "classAttribute", callback = self.onSelectionChange, selectionMode = QListWidget.ExtendedSelection)
vbMeta = OWGUI.widgetBox(self, addToLayout = 0)
gl.addWidget(vbMeta, 2,1)
self.metaButtonUp = OWGUI.button(vbMeta, self, "Up", self.onMetaButtonUpClick)
self.metaButtonUp.setMaximumWidth(buttonWidth)
self.metaButton = OWGUI.button(vbMeta, self, ">",self.onMetaButtonClicked)
self.metaButton.setMaximumWidth(buttonWidth)
self.metaButtonDown = OWGUI.button(vbMeta, self, "Down", self.onMetaButtonDownClick)
self.metaButtonDown.setMaximumWidth(buttonWidth)
boxMeta = OWGUI.widgetBox(self,'Meta Attributes', addToLayout = 0)
gl.addWidget(boxMeta, 2,2)
self.metaList = OWGUI.listBox(boxMeta, self, "selectedMeta", "metaAttributes", callback = self.onSelectionChange, selectionMode = QListWidget.ExtendedSelection)
boxApply = OWGUI.widgetBox(self, addToLayout = 0, orientation = "horizontal", addSpace = 1) #QHBox(ca)
gl.addWidget(boxApply, 3,0,1,3)
self.applyButton = OWGUI.button(boxApply, self, "Apply", callback = self.setOutput)
self.applyButton.setEnabled(False)
self.applyButton.setMaximumWidth(applyButtonWidth)
self.resetButton = OWGUI.button(boxApply, self, "Reset", callback = self.reset)
self.resetButton.setMaximumWidth(applyButtonWidth)
infoBox = OWGUI.widgetBox(self,"Info" ,addToLayout = 0, orientation = "horizontal", addSpace = 1)
gl.addWidget(infoBox, 5,0,1,3)
OWGUI.widgetLabel(infoBox, 'Aside from a data set with the selected attributes, \nthe widget also outputs an "Attribute List" which can be used with\n another "Select Attribute" widget to make the same variable selection')
gl.setRowStretch(0, 4)
gl.setRowStretch(1, 0)
gl.setRowStretch(2, 2)
self.icons = self.createAttributeIconDict()
self.inChange = False
self.resize(400,480)
def onAttributeList(self, attrList):
##scPA
#The received attrList must be a list being the first element a list of
#selected attributes, the second a list with one elements, the class attribute (may be an empty list)
#and the third element a list of metaAttributes Ex: [[a,b,c,d], [g], [e,f]]
if attrList and type(attrList)==list:
if type(attrList[0]) != list:
self.receivedAttrList =[[attrList]]
else:
self.receivedAttrList = attrList
self.updateSelection()
else:
self.receivedAttrList = []
##ecPA
def onSelectionChange(self):
if not self.inChange:
self.inChange = True
for lb, co in [(self.inputAttributesList, "selectedInput"),
(self.attributesList, "selectedChosen"),
(self.classList, "selectedClass"),
(self.metaList, "selectedMeta")]:
if not lb.hasFocus():
setattr(self, co, [])
self.inChange = False
self.updateInterfaceState()
def onDataInput(self, data): ##scPA Added parameter reloadData##ecPA
self.userWarned = False
self.data = data
self.updateSelection()
def updateSelection(self):
##scPA - Commented because if the data changes, it will assume as the same dada
#if self.data and data and self.data.checksum() == data.checksum() and not reloadData:
# return # we received the same dataset again
self.closeContext()
#local variables for the list boxes. the actual list boxes variables must be only
#changed once. If not done this way, the GUI intergace will not be properly updated!
locatMetaList = []
localAttrList = []
localClassList = []
localInputList = []
if self.data:
domain = self.data.domain
metaIds = domain.getmetas().keys()
metaIds.sort()
self.allAttributes = [(attr.name, attr.varType) for attr in domain] + [(domain[i].name, domain[i].varType) for i in metaIds]
#Make attributes selection based only on name and case insensitive
allAttributesNames = [x[0] for x in self.allAttributes]
##scPA
if self.receivedAttrList:
if len(self.receivedAttrList)>=1 and self.receivedAttrList[0]:
chosenAttr = [x.name for x in self.receivedAttrList[0]]
for attr in chosenAttr:
if attr in allAttributesNames:
localAttrList.append(self.allAttributes[allAttributesNames.index(attr)])
if len(self.receivedAttrList) >= 2 and self.receivedAttrList[1]:
classAttr = self.receivedAttrList[1][0].name
if classAttr in allAttributesNames:
localClassList = [self.allAttributes[allAttributesNames.index(classAttr)]]
if len(self.receivedAttrList)>=3 and self.receivedAttrList[2]:
metaAttr=[x.name for x in self.receivedAttrList[2]]
for attr in metaAttr:
if attr in allAttributesNames:
locatMetaList.append(self.allAttributes[allAttributesNames.index(attr)])
else:
localAttrList = [(a.name, a.varType) for a in domain.attributes]
if domain.classVar:
localClassList = [(domain.classVar.name, domain.classVar.varType)]
locatMetaList = [(a.name, a.varType) for a in domain.getmetas().values()]
if not self.receivedAttrList:
self.openContext("",self.data)
else:
self.metaAttributes = locatMetaList
self.chosenAttributes = localAttrList
self.classAttribute = localClassList
self.inputAttributes = localInputList
self.usedAttributes = dict.fromkeys(self.chosenAttributes + self.classAttribute + self.metaAttributes, 1)
self.setInputAttributes()
self.setOutput()
self.updateInterfaceState()
#self.metaList.triggerUpdate(True)
#self.attributesList.triggerUpdate(True)
#self.inputAttributesList.triggerUpdate(True)
#self.classList.triggerUpdate(True)
def setOutput(self):
if self.data:
self.applyButton.setEnabled(False)
attributes = []
attributes.append([self.data.domain[x[0]] for x in self.chosenAttributes])
if self.classAttribute:
attributes.append([self.data.domain[self.classAttribute[0][0]] ])
classVar = attributes[1][0]
else:
attributes.append([])
classVar = None
attributes.append([self.data.domain[x[0]] for x in self.metaAttributes])
self.send("Selected Attributes", attributes)
domain = orange.Domain(attributes[0], classVar)
for meta in self.metaAttributes:
domain.addmeta(orange.newmetaid(), self.data.domain[meta[0]])
newdata = dataUtilities.DataTable(domain, self.data)
newdata.name = self.data.name
if not self.userWarned and len(newdata.domain.getmetas()) != 0:
QMessageBox.warning( None, "Select - Meta Attributes", "There are meta-attributes present in the dataset.\nThe presence of meta-Attributes in datasets used with Learners/Classifiers\nrequires the use of considerably more ram memory!" , QMessageBox.Ok)
self.userWarned = True
if len(newdata.domain) != 0:
self.send("Examples", newdata)
else:
self.send("Examples", None)
self.send("Classified Examples", None)
return
if classVar:
self.send("Classified Examples", newdata)
else:
self.send("Classified Examples", None)
else:
self.send("Examples", None)
self.send("Classified Examples", None)
def reset(self):
data = self.data
self.data = None
self.onDataInput(data)
def disableButtons(self, *arg):
for b in arg:
b.setEnabled(False)
def setButton(self, button, dir):
button.setText(dir)
button.setEnabled(True)
def updateInterfaceState(self):
if self.selectedInput:
self.setButton(self.attributesButton, ">")
self.setButton(self.metaButton, ">")
self.disableButtons(self.attributesButtonUp, self.attributesButtonDown, self.metaButtonUp, self.metaButtonDown)
if len(self.selectedInput) == 1 and self.inputAttributes[self.selectedInput[0]][1] in [orange.VarTypes.Discrete, orange.VarTypes.Continuous]:
self.setButton(self.classButton, ">")
else:
self.classButton.setEnabled(False)
elif self.selectedChosen:
self.setButton(self.attributesButton, "<")
self.disableButtons(self.classButton, self.metaButton, self.metaButtonUp, self.metaButtonDown)
mini, maxi = min(self.selectedChosen), max(self.selectedChosen)
cons = maxi - mini == len(self.selectedChosen) - 1
self.attributesButtonUp.setEnabled(cons and mini)
self.attributesButtonDown.setEnabled(cons and maxi < len(self.chosenAttributes)-1)
elif self.selectedClass:
self.setButton(self.classButton, "<")
self.disableButtons(self.attributesButtonUp, self.attributesButtonDown, self.metaButtonUp, self.metaButtonDown,
self.attributesButton, self.metaButton)
elif self.selectedMeta:
self.setButton(self.metaButton, "<")
self.disableButtons(self.attributesButton, self.classButton, self.attributesButtonDown, self.attributesButtonUp)
mini, maxi, leni = min(self.selectedMeta), max(self.selectedMeta), len(self.selectedMeta)
cons = maxi - mini == leni - 1
self.metaButtonUp.setEnabled(cons and mini)
self.metaButtonDown.setEnabled(cons and maxi < len(self.metaAttributes)-1)
else:
self.disableButtons(self.attributesButtonUp, self.attributesButtonDown, self.metaButtonUp, self.metaButtonDown,
self.attributesButton, self.metaButton, self.classButton)
def splitSelection(self, alist, selected):
selected.sort()
i, sele = 0, selected[0]
selList, restList = [], []
for j, attr in enumerate(alist):
if j == sele:
selList.append(attr)
i += 1
sele = i<len(selected) and selected[i] or None
else:
restList.append(attr)
return selList, restList
def setInputAttributes(self):
self.selectedInput = []
if self.data:
self.inputAttributes = filter(lambda x:not self.usedAttributes.has_key(x), self.allAttributes)
else:
self.inputAttributes = []
self.filterInputAttrs.setAllListItems()
self.filterInputAttrs.updateListBoxItems(callCallback = 0)
def removeFromUsed(self, attributes):
for attr in attributes:
del self.usedAttributes[attr]
self.setInputAttributes()
def addToUsed(self, attributes):
self.usedAttributes.update(dict.fromkeys(attributes))
self.setInputAttributes()
def onAttributesButtonClicked(self):
if self.selectedInput:
selList, restList = self.splitSelection(self.inputAttributes, self.selectedInput)
self.chosenAttributes = self.chosenAttributes + selList
self.addToUsed(selList)
else:
selList, restList = self.splitSelection(self.chosenAttributes, self.selectedChosen)
self.chosenAttributes = restList
self.removeFromUsed(selList)
self.updateInterfaceState()
self.applyButton.setEnabled(True)
    def onClassButtonClicked(self):
        # ">" direction: make the (single) selected input attribute the new
        # class attribute, releasing any previous one; "<" direction: clear
        # the class attribute and return it to the input pool.
        if self.selectedInput:
            selected = self.inputAttributes[self.selectedInput[0]]
            if self.classAttribute:
                # Release the previously chosen class attribute first.
                self.removeFromUsed(self.classAttribute)
            self.addToUsed([selected])
            self.classAttribute = [selected]
        else:
            self.removeFromUsed(self.classAttribute)
            self.selectedClass = []
            self.classAttribute = []
        self.updateInterfaceState()
        self.applyButton.setEnabled(True)
    def onMetaButtonClicked(self):
        # Warn about the memory cost of meta attributes, once per widget
        # lifetime.
        if not self.userWarned:
            QMessageBox.warning( self, "Select - Meta Attributes", "The presence of meta-Attributes in datasets used with Learners/Classifiers\nrequires the use of considerably more ram memory!" , QMessageBox.Ok)
            self.userWarned = True
        # ">" direction: move selected input attributes to the meta list;
        # "<" direction: remove selected meta attributes back to the pool.
        if self.selectedInput:
            selList, restList = self.splitSelection(self.inputAttributes, self.selectedInput)
            self.metaAttributes = self.metaAttributes + selList
            self.addToUsed(selList)
        else:
            selList, restList = self.splitSelection(self.metaAttributes, self.selectedMeta)
            self.metaAttributes = restList
            self.removeFromUsed(selList)
        self.updateInterfaceState()
        self.applyButton.setEnabled(True)
    def moveSelection(self, labels, selection, dir):
        # Move the selected (contiguous) run of items one position up
        # (dir == -1) or down (dir == +1) in the list attribute named by
        # *labels*; *selection* names the matching list of selected indices.
        # The up/down buttons are only enabled for contiguous selections
        # that stay in range (see updateInterfaceState), so mini-1 / maxi
        # indexing is safe here.
        labs = getattr(self, labels)
        sel = getattr(self, selection)
        mini, maxi = min(sel), max(sel)+1
        if dir == -1:
            # Rotate the item just above the selection to just below it.
            setattr(self, labels, labs[:mini-1] + labs[mini:maxi] + [labs[mini-1]] + labs[maxi:])
        else:
            # Rotate the item just below the selection to just above it.
            setattr(self, labels, labs[:mini] + [labs[maxi]] + labs[mini:maxi] + labs[maxi+1:])
        # Shift the stored selection indices along with the moved items
        # (Python 2: map returns a list).
        setattr(self, selection, map(lambda x:x+dir, sel))
        self.updateInterfaceState()
        self.applyButton.setEnabled(True)
    def onMetaButtonUpClick(self):
        # Thin wrappers around moveSelection: list attribute name,
        # selection attribute name, movement direction.
        self.moveSelection("metaAttributes", "selectedMeta", -1)
    def onMetaButtonDownClick(self):
        self.moveSelection("metaAttributes", "selectedMeta", 1)
    def onAttributesButtonUpClick(self):
        self.moveSelection("chosenAttributes", "selectedChosen", -1)
    def onAttributesButtonDownClick(self):
        self.moveSelection("chosenAttributes", "selectedChosen", 1)
if __name__=="__main__":
    import sys
    # Stand-alone smoke test: load the iris dataset, attach a string meta
    # attribute and show the widget inside a Qt3-era application
    # (setMainWidget/exec_loop are Qt3 APIs).
    data = dataUtilities.DataTable(r'..\..\doc\datasets\iris.tab')
    # add meta attribute
    data.domain.addmeta(orange.newmetaid(), orange.StringVariable("name"))
    for ex in data:
        ex["name"] = str(ex.getclass())
    a=QApplication(sys.argv)
    ow=OWDataDomain()
    a.setMainWidget(ow)
    ow.show()
    ow.onDataInput(data)
    a.exec_loop()
    ow.saveSettings()
| lgpl-3.0 |
omniscale/wmtsproxy | wmtsproxy/wmtsproxy/wmtsparse.py | 2 | 10937 | import re
import sys
from xml.etree import ElementTree as etree
from mapproxy.util.ext.wmsparse.util import resolve_ns
from mapproxy.util.py import reraise_exception
from .exceptions import CapabilitiesError
class WMTSCapabilities(object):
_default_namespace = 'http://www.opengis.net/wmts/1.0'
_namespaces = {
'xlink': 'http://www.w3.org/1999/xlink',
'ows': 'http://www.opengis.net/ows/1.1',
'none': ''
}
    def __init__(self, tree):
        # *tree* is an ElementTree of a parsed capabilities document; reject
        # anything whose root is not a WMTS 1.0 Capabilities element.
        if tree.getroot().tag != '{http://www.opengis.net/wmts/1.0}Capabilities':
            raise CapabilitiesError('Not a WMTS capabilities document')
        self.tree = tree
        # Lazily-built caches for the matching properties below.
        self._service = None
        self._layers = None
        self._matrix_sets = None
        self._operations = None
    def resolve_ns(self, xpath):
        # Expand 'prefix:name' steps in *xpath* using the class namespace
        # table; unprefixed steps get the default WMTS namespace.
        return resolve_ns(xpath, self._namespaces, self._default_namespace)
    def find(self, tree, xpath):
        # ElementTree find/findall/findtext with namespace resolution.
        return tree.find(self.resolve_ns(xpath))
    def findall(self, tree, xpath):
        return tree.findall(self.resolve_ns(xpath))
    def findtext(self, tree, xpath):
        return tree.findtext(self.resolve_ns(xpath))
    def attrib(self, elem, name):
        # Return the attribute value, or None when the attribute is absent.
        try:
            return elem.attrib[self.resolve_ns(name)]
        except KeyError:
            return None
    @property
    def service(self):
        # Service metadata gathered from ows:ServiceIdentification and
        # ows:ServiceProvider; parsed once and cached in self._service.
        if self._service is not None:
            return self._service
        self._service = dict(
            title = self.findtext(self.tree, 'ows:ServiceIdentification/ows:Title'),
            service_type = self.findtext(self.tree, 'ows:ServiceIdentification/ows:ServiceType'),
            service_type_version = self.findtext(self.tree, 'ows:ServiceIdentification/ows:ServiceTypeVersion'),
            provider_name = self.findtext(self.tree, 'ows:ServiceProvider/ows:ProviderName'),
            provider_individual_name = self.findtext(self.tree, 'ows:ServiceProvider/ows:ServiceContact/ows:IndividualName')
        )
        # ProviderSite is optional; only add the key when the element exists.
        elem = self.find(self.tree, 'ows:ServiceProvider/ows:ProviderSite')
        if elem is not None:
            self._service['provider_site'] = self.attrib(elem, 'xlink:href')
        return self._service
    @property
    def operations(self):
        # Map of operation name (e.g. 'GetTile') -> {mode: GET URL}, where
        # the mode comes from the operation's ows:Constraint allowed values
        # (e.g. 'KVP'). Parsed once and cached in self._operations.
        if self._operations is not None:
            return self._operations
        self._operations = {}
        operation_elems = self.findall(self.tree, 'ows:OperationsMetadata/ows:Operation')
        for operation_elem in operation_elems:
            operation = self.attrib(operation_elem, 'none:name')
            self._operations[operation] = {}
            url_elems = self.findall(operation_elem, 'ows:DCP/ows:HTTP/ows:Get')
            for url_elem in url_elems:
                mode = self.findtext(url_elem, 'ows:Constraint/ows:AllowedValues/ows:Value')
                # First URL advertised for a mode wins; skip duplicates.
                if mode in self._operations[operation]:
                    continue
                self._operations[operation][mode] = self.attrib(url_elem, 'xlink:href')
        return self._operations
    @property
    def matrix_sets(self):
        # Map of TileMatrixSet identifier -> {'id', 'crs', 'tile_matrices'},
        # parsed from Contents/TileMatrixSet once and cached.
        if self._matrix_sets is not None:
            return self._matrix_sets
        self._matrix_sets = {}
        tile_matrix_sets_elems = self.findall(self.tree, 'Contents/TileMatrixSet')
        for tile_matrix_sets_elem in tile_matrix_sets_elems:
            identifier = self.findtext(tile_matrix_sets_elem, 'ows:Identifier')
            supported_crs = self.findtext(tile_matrix_sets_elem, 'ows:SupportedCRS')
            tile_matrices = []
            tile_matrix_elems = self.findall(tile_matrix_sets_elem, 'TileMatrix')
            for tile_matrix_elem in tile_matrix_elems:
                tile_width = int(self.findtext(tile_matrix_elem, 'TileWidth'))
                tile_height = int(self.findtext(tile_matrix_elem, 'TileHeight'))
                matrix_width = int(self.findtext(tile_matrix_elem, 'MatrixWidth'))
                matrix_height = int(self.findtext(tile_matrix_elem, 'MatrixHeight'))
                tile_matrices.append(dict(
                    id = self.findtext(tile_matrix_elem, 'ows:Identifier'),
                    # TopLeftCorner is normalized to (x, y) axis order,
                    # depending on the CRS (see top_left_corner_to_coord).
                    top_left = top_left_corner_to_coord(self.findtext(tile_matrix_elem, 'TopLeftCorner'), supported_crs),
                    tile_size = (tile_width, tile_height),
                    grid_size = (matrix_width, matrix_height),
                    scale_denom = float(self.findtext(tile_matrix_elem, 'ScaleDenominator'))
                ))
            self._matrix_sets[identifier] = {
                'id': identifier,
                'crs': supported_crs,
                'tile_matrices': tile_matrices
            }
        return self._matrix_sets
    @property
    def layers(self):
        # Map of layer identifier -> layer metadata dict, parsed from
        # Contents/Layer once and cached in self._layers.
        if self._layers is not None:
            return self._layers
        self._layers = {}
        layer_elems = self.findall(self.tree, 'Contents/Layer')
        if len(layer_elems) == 0:
            raise CapabilitiesError('Document contains no layer')
        for layer_elem in layer_elems:
            layer_id = self.findtext(layer_elem, 'ows:Identifier')
            title = self.findtext(layer_elem, 'ows:Title')
            # WGS84 bounding box flattened to [lower lon, lower lat,
            # upper lon, upper lat] (lower + upper corner pairs).
            _bbox_lower_corner = [float(x) for x in self.findtext(layer_elem, 'ows:WGS84BoundingBox/ows:LowerCorner').split(' ')]
            _bbox_upper_corner = [float(x) for x in self.findtext(layer_elem, 'ows:WGS84BoundingBox/ows:UpperCorner').split(' ')]
            bbox = _bbox_lower_corner + _bbox_upper_corner
            formats = []
            format_elems = self.findall(layer_elem, 'Format')
            for format_elem in format_elems:
                formats.append(format_elem.text)
            info_formats = []
            info_format_elems = self.findall(layer_elem, 'InfoFormat')
            for info_format_elem in info_format_elems:
                info_formats.append(info_format_elem.text)
            styles = []
            default_style = None
            style_elems = self.findall(layer_elem, 'Style')
            for style_elem in style_elems:
                # isDefault is an unqualified attribute; absent means False.
                default = self.attrib(style_elem, 'none:isDefault') == 'true' or False
                style_title = self.findtext(style_elem, 'ows:Title')
                style_id = self.findtext(style_elem, 'ows:Identifier')
                _style = {
                    'id': style_id,
                    'title': style_title,
                    'default': default
                }
                styles.append(_style)
                if _style['default']:
                    default_style = _style
            dimensions = []
            dimension_elems = self.findall(layer_elem, 'Dimension')
            for dimension_elem in dimension_elems:
                dimensions.append({
                    'id': self.findtext(dimension_elem, 'ows:Identifier'),
                    'default': self.findtext(dimension_elem, 'Default'),
                    'current': self.findtext(dimension_elem, 'Current'),
                    'value': self.findtext(dimension_elem, 'Value')
                })
            matrix_sets = []
            matrix_set_elems = self.findall(layer_elem, 'TileMatrixSetLink')
            for matrix_set_elem in matrix_set_elems:
                matrix_set_identifier = self.findtext(matrix_set_elem, 'TileMatrixSet')
                # Every linked matrix set must be declared in Contents.
                if not matrix_set_identifier in self.matrix_sets.keys():
                    raise CapabilitiesError('Matrix set required by layer not defined in capabilities document')
                matrix_sets.append(self.matrix_sets[matrix_set_identifier])
            self._layers[layer_id] = {
                'title': title,
                'bbox': bbox,
                'formats': formats,
                'info_formats': info_formats,
                'matrix_sets': matrix_sets,
                'styles': styles,
                'default_style': default_style,
                'url_template': self._wmts_url_template(layer_elem, dimensions),
                'dimensions': dimensions
            }
        return self._layers
def _exists_operation_mode(self, operation, mode):
return self.operations and operation in self.operations.keys() and mode in self.operations[operation].keys()
    def _wmts_url_template(self, layer_elem, dimensions):
        # Build the tile URL template for a layer. The RESTful ResourceURL
        # template is preferred; otherwise fall back to a KVP GetTile URL
        # when the service advertises one. Returns None if neither exists.
        #
        # Substitution happens in two stages: style/tile_matrix_set/layer/
        # format placeholders use single '%(...)s' and are filled in first,
        # while the per-tile z/y/x placeholders are escaped as '%%(...)s'
        # so they survive the first substitution.
        url_template = None
        resource_elem = self.find(layer_elem, 'ResourceURL')
        if resource_elem is not None:
            url_template = self.attrib(resource_elem, 'none:template')
            url_template = url_template.replace('{Style}', '%(style)s')
            url_template = url_template.replace('{TileMatrixSet}', '%(tile_matrix_set)s')
            url_template = url_template.replace('{TileMatrix}', '%%(z)s')
            url_template = url_template.replace('{TileRow}', '%%(y)s')
            url_template = url_template.replace('{TileCol}', '%%(x)s')
            # Dimension placeholders like '{Time}' are matched
            # case-insensitively.
            url_template = _replace_dimensions(url_template, dimensions)
        elif self._exists_operation_mode('GetTile', 'KVP'):
            url_template = self.operations['GetTile']['KVP']
            url_template += 'SERVICE=WMTS'
            url_template += '&REQUEST=GetTile'
            url_template += '&VERSION=1.0.0'
            url_template += '&LAYER=%(layer)s'
            url_template += '&TILEMATRIXSET=%(tile_matrix_set)s'
            url_template += '&TILEMATRIX=%%(z)s'
            url_template += '&TILEROW=%%(y)s'
            url_template += '&TILECOL=%%(x)s'
            url_template += '&FORMAT=%(format)s'
        return url_template
def _add_dimensions(url_template, dimensions):
dimensions = ''
for dimension in dimensions:
dimensions += '/%%(%s)s' % dimension['id']
return url_template.replace('%(dimensions)s', dimensions)
def _replace_dimensions(url_template, dimensions):
for dimension in dimensions:
dimension_re = re.compile(re.escape('{%s}' % dimension['id']), re.IGNORECASE)
url_template = dimension_re.sub('%%(%s)s' % dimension['id'], url_template)
return url_template
def top_left_corner_to_coord(top_left_corner, crs):
    # Parse the space-separated TopLeftCorner string into a tuple of floats
    # and normalize axis order: for the CRS identifiers listed below the
    # document stores (y, x), so the first two values are swapped to give
    # callers a consistent (x, y) pair.
    swapped_axis_order = (
        'CRS:84',
        'EPSG:900913',
        'EPSG:25831',
        'EPSG:25832',
        'EPSG:25833',
        'urn:ogc:def:crs:EPSG::900913',
        'urn:ogc:def:crs:EPSG::25831',
        'urn:ogc:def:crs:EPSG::25832',
        'urn:ogc:def:crs:EPSG::25833',
        'urn:ogc:def:crs:EPSG:6.18:3:900913',
        'urn:ogc:def:crs:EPSG:6.18:3:25831',
        'urn:ogc:def:crs:EPSG:6.18:3:25832',
        'urn:ogc:def:crs:EPSG:6.18:3:25833',
        'urn:ogc:def:crs:OGC:1.3:CRS84',
    )
    coord = tuple(float(part) for part in top_left_corner.split(' '))
    if crs in swapped_axis_order:
        return (coord[1], coord[0])
    return coord
def parse_capabilities(fileobj):
    # Accept either an open file-like object or a filename/path
    # (basestring makes this Python-2-only code).
    if isinstance(fileobj, basestring):
        fileobj = open(fileobj)
    try:
        tree = etree.parse(fileobj)
    except Exception as ex:
        # Wrap any parse failure in a CapabilitiesError while keeping the
        # original traceback (reraise_exception).
        reraise_exception(CapabilitiesError('Could not open capabilities document', ex.args[0]), sys.exc_info())
    return WMTSCapabilities(tree)
| apache-2.0 |
nishad-jobsglobal/odoo-marriot | addons/hr_attendance/report/__init__.py | 375 | 1071 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import attendance_errors
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
keithroe/vtkoptix | ThirdParty/Twisted/twisted/scripts/tap2deb.py | 27 | 7227 | # -*- test-case-name: twisted.scripts.test.test_tap2deb -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
tap2deb creates Debian packages which wrap .tap files.
"""
import os
import sys
import shutil
import subprocess
from email.utils import formatdate as now
from twisted.python import usage
from twisted.python.filepath import FilePath
class MyOptions(usage.Options):
    # Command-line option declarations for tap2deb, consumed by
    # twisted.python.usage (flag name, short form, default, description).
    optFlags = [["unsigned", "u"]]
    optParameters = [["tapfile", "t", "twistd.tap"],
                     ["maintainer", "m", "",
                      "The maintainer's name and email in a specific format: "
                      "'John Doe <johndoe@example.com>'"],
                     ["protocol", "p", ""],
                     ["description", "e", ""],
                     ["long_description", "l", ""],
                     ["set-version", "V", "1.0"],
                     ["debfile", "d", None],
                     ["type", "y", "tap", "Type of configuration: 'tap', 'xml', "
                      "'source' or 'python' for .tac files"]]
    # Shell-completion metadata for the options above.
    compData = usage.Completions(
        optActions={
            "type": usage.CompleteList(["tap", "xml", "source", "python"]),
            "debfile": usage.CompleteFiles("*.deb")}
        )
    def postOptions(self):
        # --maintainer is mandatory: it ends up in the Debian control and
        # changelog files.
        if not self["maintainer"]:
            raise usage.UsageError("maintainer must be specified.")
# Maps the --type option value to the twistd command-line flag used to load
# the configuration ('--file', '--python', '--source' or '--xml').
type_dict = {
    'tap': 'file',
    'python': 'python',
    'source': 'source',
    'xml': 'xml',
}
def run(args=None):
"""
Parses the configuration options in C{args} and runs C{dpkg-buildpackage}
to create a .deb file.
@param args: List of strings representing the C{tap2deb} configuration
options.
@type args: L{list}
"""
try:
config = MyOptions()
config.parseOptions(args)
except usage.error as ue:
sys.exit("%s: %s" % (sys.argv[0], ue))
tapFile = config['tapfile']
baseTapFile = os.path.basename(config['tapfile'])
protocol = (config['protocol'] or os.path.splitext(baseTapFile)[0])
debFile = config['debfile'] or 'twisted-' + protocol
version = config['set-version']
maintainer = config['maintainer']
description = config['description'] or (
'A Twisted-based server for %(protocol)s' % vars())
longDescription = config['long_description'] or\
'Automatically created by tap2deb'
twistdOption = type_dict[config['type']]
date = now()
directory = debFile + '-' + version
pythonVersion = '%s.%s' % sys.version_info[:2]
buildDir = FilePath('.build').child(directory)
if buildDir.exists():
buildDir.remove()
debianDir = buildDir.child('debian')
debianDir.child('source').makedirs()
shutil.copy(tapFile, buildDir.path)
debianDir.child('README.Debian').setContent(
'''This package was auto-generated by tap2deb\n''')
debianDir.child('conffiles').setContent(
'''\
/etc/init.d/%(debFile)s
/etc/default/%(debFile)s
/etc/%(baseTapFile)s
''' % vars())
debianDir.child('default').setContent(
'''\
pidfile=/var/run/%(debFile)s.pid
rundir=/var/lib/%(debFile)s/
file=/etc/%(tapFile)s
logfile=/var/log/%(debFile)s.log
''' % vars())
debianDir.child('init.d').setContent(
'''\
#!/bin/sh
PATH=/sbin:/bin:/usr/sbin:/usr/bin
pidfile=/var/run/%(debFile)s.pid \
rundir=/var/lib/%(debFile)s/ \
file=/etc/%(tapFile)s \
logfile=/var/log/%(debFile)s.log
[ -r /etc/default/%(debFile)s ] && . /etc/default/%(debFile)s
test -x /usr/bin/twistd%(pythonVersion)s || exit 0
test -r $file || exit 0
test -r /usr/share/%(debFile)s/package-installed || exit 0
case "$1" in
start)
echo -n "Starting %(debFile)s: twistd"
start-stop-daemon --start --quiet --exec /usr/bin/twistd%(pythonVersion)s -- \
--pidfile=$pidfile \
--rundir=$rundir \
--%(twistdOption)s=$file \
--logfile=$logfile
echo "."
;;
stop)
echo -n "Stopping %(debFile)s: twistd"
start-stop-daemon --stop --quiet \
--pidfile $pidfile
echo "."
;;
restart)
$0 stop
$0 start
;;
force-reload)
$0 restart
;;
*)
echo "Usage: /etc/init.d/%(debFile)s {start|stop|restart|force-reload}" >&2
exit 1
;;
esac
exit 0
''' % vars())
debianDir.child('init.d').chmod(0755)
debianDir.child('postinst').setContent(
'''\
#!/bin/sh
update-rc.d %(debFile)s defaults >/dev/null
invoke-rc.d %(debFile)s start
''' % vars())
debianDir.child('prerm').setContent(
'''\
#!/bin/sh
invoke-rc.d %(debFile)s stop
''' % vars())
debianDir.child('postrm').setContent(
'''\
#!/bin/sh
if [ "$1" = purge ]; then
update-rc.d %(debFile)s remove >/dev/null
fi
''' % vars())
debianDir.child('changelog').setContent(
'''\
%(debFile)s (%(version)s) unstable; urgency=low
* Created by tap2deb
-- %(maintainer)s %(date)s
''' % vars())
debianDir.child('control').setContent(
'''\
Source: %(debFile)s
Section: net
Priority: extra
Maintainer: %(maintainer)s
Build-Depends-Indep: debhelper
Standards-Version: 3.5.6
Package: %(debFile)s
Architecture: all
Depends: python%(pythonVersion)s-twisted
Description: %(description)s
%(longDescription)s
''' % vars())
debianDir.child('copyright').setContent(
'''\
This package was auto-debianized by %(maintainer)s on
%(date)s
It was auto-generated by tap2deb
Upstream Author(s):
Moshe Zadka <moshez@twistedmatrix.com> -- tap2deb author
Copyright:
Insert copyright here.
''' % vars())
debianDir.child('dirs').setContent(
'''\
etc/init.d
etc/default
var/lib/%(debFile)s
usr/share/doc/%(debFile)s
usr/share/%(debFile)s
''' % vars())
debianDir.child('rules').setContent(
'''\
#!/usr/bin/make -f
export DH_COMPAT=1
build: build-stamp
build-stamp:
dh_testdir
touch build-stamp
clean:
dh_testdir
dh_testroot
rm -f build-stamp install-stamp
dh_clean
install: install-stamp
install-stamp: build-stamp
dh_testdir
dh_testroot
dh_clean -k
dh_installdirs
# Add here commands to install the package into debian/tmp.
cp %(baseTapFile)s debian/tmp/etc/
cp debian/init.d debian/tmp/etc/init.d/%(debFile)s
cp debian/default debian/tmp/etc/default/%(debFile)s
cp debian/copyright debian/tmp/usr/share/doc/%(debFile)s/
cp debian/README.Debian debian/tmp/usr/share/doc/%(debFile)s/
touch debian/tmp/usr/share/%(debFile)s/package-installed
touch install-stamp
binary-arch: build install
binary-indep: build install
dh_testdir
dh_testroot
dh_strip
dh_compress
dh_installchangelogs
dh_fixperms
dh_installdeb
dh_shlibdeps
dh_gencontrol
dh_md5sums
dh_builddeb
source diff:
@echo >&2 'source and diff are obsolete - use dpkg-source -b'; false
binary: binary-indep binary-arch
.PHONY: build clean binary-indep binary-arch binary install
''' % vars())
debianDir.child('rules').chmod(0755)
args = ["dpkg-buildpackage", "-rfakeroot"]
if config['unsigned']:
args = args + ['-uc', '-us']
# Build deb
job = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, cwd=buildDir.path)
stdout, _ = job.communicate()
| bsd-3-clause |
hkariti/mopidy | tests/mpd/test_dispatcher.py | 17 | 1578 | from __future__ import absolute_import, unicode_literals
import unittest
import pykka
from mopidy import core
from mopidy.internal import deprecation
from mopidy.mpd.dispatcher import MpdDispatcher
from mopidy.mpd.exceptions import MpdAckError
from tests import dummy_backend
class MpdDispatcherTest(unittest.TestCase):
    # Exercises MpdDispatcher request handling against a dummy backend,
    # including unknown and blacklisted commands.
    def setUp(self): # noqa: N802
        config = {
            'mpd': {
                'password': None,
                'command_blacklist': ['disabled'],
            }
        }
        self.backend = dummy_backend.create_proxy()
        self.dispatcher = MpdDispatcher(config=config)
        with deprecation.ignore():
            self.core = core.Core.start(backends=[self.backend]).proxy()
    def tearDown(self): # noqa: N802
        # Stop every pykka actor started in setUp.
        pykka.ActorRegistry.stop_all()
    def test_call_handler_for_unknown_command_raises_exception(self):
        with self.assertRaises(MpdAckError) as cm:
            self.dispatcher._call_handler('an_unknown_command with args')
        self.assertEqual(
            cm.exception.get_mpd_ack(),
            'ACK [5@0] {} unknown command "an_unknown_command"')
    def test_handling_unknown_request_yields_error(self):
        result = self.dispatcher.handle_request('an unhandled request')
        self.assertEqual(result[0], 'ACK [5@0] {} unknown command "an"')
    def test_handling_blacklisted_command(self):
        result = self.dispatcher.handle_request('disabled')
        self.assertEqual(result[0], 'ACK [0@0] {disabled} "disabled" has been '
                                    'disabled in the server')
| apache-2.0 |
Mhynlo/SickRage | lib/pgi/codegen/construct.py | 19 | 5385 | # Copyright 2013 Christoph Reiter
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
"""Create a gobject constructor object for a specific combination
of property name using g_object_new.
Compared to g_object_newv, this saves us two function calls per parameter.
"""
from .backend import get_backend
from .utils import CodeBlock
from pgi.clib.gir import GITypeTag, GIInfoType
from pgi.util import unescape_parameter, import_attribute
class ConstructorSetter(object):
    # Base class for packing one Python value into a GObject construct-time
    # property. Subclasses set TAG to the GITypeTag they handle and py_type
    # to the accepted Python type; instances are created per property.
    TAG = None
    py_type = object
    out_var = ""
    desc = ""
    def setup(self):
        # Optional post-construction hook for subclasses.
        pass
    @classmethod
    def get_class(cls, type_):
        # Hook allowing a subclass to refine the chosen setter class based
        # on the full GI type information.
        return cls
    def __init__(self, prop_name, type_, backend):
        self.type = type_
        self.backend = backend
        self.name = prop_name
    def get_type(self):
        return self.backend.get_type(self.type, self.desc, may_be_null=True)
    def set(self, name):
        # Default: pass the value through unchanged (no packing code).
        return None, name
class BaseInterfaceSetter(ConstructorSetter):
    # Handles GITypeTag.INTERFACE properties; get_class() dispatches to a
    # more specific setter depending on the interface's info type.
    TAG = GITypeTag.INTERFACE
    py_type = object
    def setup(self):
        iface = self.type.get_interface()
        try:
            self.py_type = import_attribute(iface.namespace, iface.name)
        except ImportError:
            # fall back to object
            pass
    @classmethod
    def get_class(cls, type_):
        iface = type_.get_interface()
        iface_type = iface.type.value
        if iface_type in [GIInfoType.ENUM, GIInfoType.FLAGS]:
            return EnumFlagsSetter
        elif iface_type == GIInfoType.OBJECT:
            return ObjectSetter
        elif iface_type == GIInfoType.STRUCT:
            return StructSetter
        raise NotImplementedError(iface.type)
    def set(self, name):
        # Emit packing code for the value and remember the output variable.
        var = self.get_type()
        out = var.pack_in(name)
        self.out_var = out
        return var.block, out
# Concrete INTERFACE setters; the shared behaviour lives in
# BaseInterfaceSetter.
class ObjectSetter(BaseInterfaceSetter):
    pass
class StructSetter(BaseInterfaceSetter):
    pass
class EnumFlagsSetter(BaseInterfaceSetter):
    pass
class BasicSetter(ConstructorSetter):
    # Shared packing implementation for the simple scalar type setters
    # declared below; each subclass only pins TAG and py_type.
    def set(self, name):
        var = self.get_type()
        out = var.pack_in(name)
        self.out_var = out
        return var.block, out
class BoolSetter(BasicSetter):
    TAG = GITypeTag.BOOLEAN
    py_type = int
class Int32Setter(BasicSetter):
    TAG = GITypeTag.INT32
    py_type = int
class UInt32Setter(BasicSetter):
    TAG = GITypeTag.UINT32
    py_type = int
class DoubleSetter(BasicSetter):
    TAG = GITypeTag.DOUBLE
    py_type = float
class UTF8Argument(BasicSetter):
    TAG = GITypeTag.UTF8
    py_type = str
# Module-level registry mapping a GITypeTag value to its setter class,
# populated once at import time by _find_classes().
_classes = {}
def _find_classes():
    # Scan this module's globals for concrete ConstructorSetter subclasses
    # and index them by their TAG.
    global _classes
    for var in globals().values():
        if not isinstance(var, type):
            continue
        if issubclass(var, ConstructorSetter) and var is not ConstructorSetter:
            _classes[var.TAG] = var
_find_classes()
def get_construct_class(arg_type):
    # Look up the setter class registered for the GI type tag, then let it
    # refine the choice (INTERFACE dispatches on the interface kind).
    global _classes
    tag_value = arg_type.tag.value
    try:
        cls = _classes[tag_value]
    except KeyError:
        raise NotImplementedError(
            "%r constructor not implemented" % arg_type.tag)
    else:
        return cls.get_class(arg_type)
def build_constructor_docstring(cls, args):
    # Build a "<module.Class>(name: type, ...) -> <module.Class>" style
    # docstring for the generated constructor.
    from .funcgen import get_type_name
    arg_docs = []
    for arg in args:
        type_name = get_type_name(arg.py_type)
        arg_docs.append(u"%s: %s" % (arg.name, type_name))
    cls_name = u"%s.%s" % (cls.__module__, cls.__name__)
    return u"%s(%s) -> %s" % (cls_name, u", ".join(arg_docs), cls_name)
def _generate_constructor(cls, names, backend):
    # Generate and compile an `_init_` function that packs the listed
    # construct properties and calls the backend's g_object_new wrapper
    # for cls.__gtype__.
    gtype = cls.__gtype__
    specs = cls.props
    body = CodeBlock()
    in_args = []
    instances = []
    # Keep generated temporaries from colliding with the property names.
    backend.var.add_blacklist(names)
    for name in names:
        try:
            spec = getattr(specs, name)
        except AttributeError:
            raise TypeError("Property %r not supported" % name)
        type_ = spec._info.get_type()
        const = get_construct_class(type_)
        real_name = unescape_parameter(name)
        instance = const(real_name, type_, backend)
        instance.setup()
        # Human-readable location used in packing error messages.
        instance.desc = "%s.%s property '%s'" % (
            cls.__module__, cls.__name__, real_name)
        instances.append(instance)
        in_args.append(name)
        block, out = instance.set(name)
        block.write_into(body)
    call_block, return_var = backend.get_constructor(gtype, instances)
    docstring = build_constructor_docstring(cls, instances)
    main, var = backend.parse("""
def _init_($values):
    '''$docstring'''
    $func_body
    $call_block
    return $return_var
""", values=", ".join(in_args), call_block=call_block,
        func_body=body, return_var=return_var, docstring=docstring)
    func = main.compile()["_init_"]
    # Keep the generated code object around for introspection/debugging.
    func._code = main
    return func
def generate_constructor(cls, names):
    """Return a generated constructor for *cls* accepting the given
    property names, in order.

    Results are memoized per class in ``cls._constructors`` keyed by the
    names tuple, since the generated code depends on the gtype/class and
    the argument order. The cache is kept tiny: once it holds more than 3
    entries it is cleared before generating a new constructor.
    """
    cache = cls._constructors
    if names in cache:
        return cache[names]
    elif len(cache) > 3:
        cache.clear()
    backend = get_backend("ctypes")()
    # Bug fix: the generated constructor was never stored in the cache, so
    # the `names in cache` fast path above could never hit.
    constructor = _generate_constructor(cls, names, backend)
    cache[names] = constructor
    return constructor
| gpl-3.0 |
max0d41/ThugBrowser | src/DOM/Sidebar.py | 9 | 1718 | #!/usr/bin/env python
#
# Sidebar.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import logging
log = logging.getLogger("Thug")
class Sidebar(object):
    """Emulation of the browser ``window.sidebar`` object.

    Tracks search providers, search engines, favorite panels and
    microsummary generators registered by scripts.
    """
    def __init__(self):
        self._providers = set()
        self._engines = set()
        self._favorites = set()
        self._generators = set()

    def addMicrosummaryGenerator(self, generatorURL):
        # Register a microsummary generator URL.
        self._generators.add(generatorURL)

    def addPanel(self, title, URL, customizeURL):
        # Register a (non-persistent) sidebar panel.
        self._favorites.add((title, URL, customizeURL))

    def addPersistentPanel(self, title, URL, customizeURL):
        # Persistent panels are tracked the same way as regular ones.
        self._favorites.add((title, URL, customizeURL))

    def addSearchEngine(self, engineURL, iconURL, message, suggestedCategory):
        # Bug fix: this used to write to a misspelled attribute
        # (self._enginess) and raised AttributeError at runtime.
        self._engines.add((engineURL, iconURL, message, suggestedCategory))

    def AddSearchProvider(self, URL):
        self._providers.add(URL)

    def IsSearchProviderInstalled(self, URL):
        if URL in self._providers:
            return 1 # A matching search provider is installed, but it is not the default.
        return 0 # No installed search provider was found with the specified prefix
| gpl-2.0 |
xiaoshaozi52/shadowsocks | shadowsocks/local.py | 1015 | 2248 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import logging
import signal
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from shadowsocks import shell, daemon, eventloop, tcprelay, udprelay, asyncdns
def main():
    # Entry point for the shadowsocks local client: load configuration,
    # optionally daemonize, then run the DNS resolver and the TCP/UDP
    # relays on a single event loop.
    shell.check_python()
    # fix py2exe
    if hasattr(sys, "frozen") and sys.frozen in \
            ("windows_exe", "console_exe"):
        p = os.path.dirname(os.path.abspath(sys.executable))
        os.chdir(p)
    config = shell.get_config(True)
    daemon.daemon_exec(config)
    try:
        logging.info("starting local at %s:%d" %
                     (config['local_address'], config['local_port']))
        dns_resolver = asyncdns.DNSResolver()
        # The third argument (is_local=True) selects client-side behaviour
        # for the relays.
        tcp_server = tcprelay.TCPRelay(config, dns_resolver, True)
        udp_server = udprelay.UDPRelay(config, dns_resolver, True)
        loop = eventloop.EventLoop()
        dns_resolver.add_to_loop(loop)
        tcp_server.add_to_loop(loop)
        udp_server.add_to_loop(loop)
        def handler(signum, _):
            # Graceful shutdown: let in-flight connections finish.
            logging.warn('received SIGQUIT, doing graceful shutting down..')
            tcp_server.close(next_tick=True)
            udp_server.close(next_tick=True)
        # SIGQUIT does not exist on Windows; fall back to SIGTERM there.
        signal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM), handler)
        def int_handler(signum, _):
            sys.exit(1)
        signal.signal(signal.SIGINT, int_handler)
        # Drop privileges after the sockets are bound, if configured.
        daemon.set_user(config.get('user', None))
        loop.run()
    except Exception as e:
        shell.print_exception(e)
        sys.exit(1)
if __name__ == '__main__':
    main()
| apache-2.0 |
atsaki/libcloud | libcloud/utils/logging.py | 64 | 1750 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Taken from https://github.com/Kami/python-extra-log-formatters
from __future__ import absolute_import
import logging
__all__ = [
'ExtraLogFormatter'
]
class ExtraLogFormatter(logging.Formatter):
    """
    Log formatter which appends every "extra" record attribute whose name
    starts with an underscore to the end of the formatted message.

    For example:

        extra={'_id': 'user-1', '_path': '/foo/bar'}

    renders as ``<message> (id=user-1,path=/foo/bar)``.
    """
    def format(self, record):
        underscored = {name: value for name, value in record.__dict__.items()
                       if name.startswith('_')}
        formatted = logging.Formatter.format(self, record)
        return '%s (%s)' % (formatted, self._dict_to_str(underscored))

    def _dict_to_str(self, dictionary):
        # 'key=value' pairs with the leading underscore stripped from keys.
        pairs = ('%s=%s' % (name[1:], str(value))
                 for name, value in dictionary.items())
        return ','.join(pairs)
| apache-2.0 |
piorrro33/pcsx2 | 3rdparty/freetype/src/tools/docmaker/formatter.py | 45 | 6195 | #
# formatter.py
#
# Convert parsed content blocks to a structured document (library file).
#
# Copyright 2002-2016 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
#
# This is the base Formatter class. Its purpose is to convert a content
# processor's data into specific documents (i.e., table of contents, global
# index, and individual API reference indices).
#
# You need to sub-class it to output anything sensible. For example, the
# file `tohtml.py' contains the definition of the `HtmlFormatter' sub-class
# to output HTML.
#
from sources import *
from content import *
from utils import *
################################################################
##
## FORMATTER CLASS
##
class Formatter:
    def __init__( self, processor ):
        # Collect every block known to the content processor and build the
        # global identifier index used for cross references.
        self.processor = processor
        self.identifiers = {}
        self.chapters = processor.chapters
        self.sections = processor.sections.values()
        self.block_index = []
        # store all blocks in a dictionary
        self.blocks = []
        for section in self.sections:
            for block in section.blocks.values():
                self.add_identifier( block.name, block )
                # add enumeration values to the index, since this is useful
                for markup in block.markups:
                    if markup.tag == 'values':
                        for field in markup.fields:
                            self.add_identifier( field.name, block )
        # NOTE: Python 2 -- dict.keys() must return a list for the in-place
        # sort below to work.
        self.block_index = self.identifiers.keys()
        self.block_index.sort( key = index_key )
        # also add section names to dictionary (without making them appear
        # in the index)
        for section in self.sections:
            self.add_identifier( section.name, section )
    def add_identifier( self, name, block ):
        # Register *name* -> *block*; on a duplicate, warn and keep the
        # first definition.
        if name in self.identifiers:
            # duplicate name!
            sys.stderr.write( "WARNING: duplicate definition for"
                             + " '" + name + "' "
                             + "in " + block.location() + ", "
                             + "previous definition in "
                             + self.identifiers[name].location()
                             + "\n" )
        else:
            self.identifiers[name] = block
    #
    # formatting the table of contents
    #
    def toc_enter( self ):
        # Hooks called while walking the table of contents; the base class
        # emits nothing -- subclasses override these to produce output.
        pass
    def toc_chapter_enter( self, chapter ):
        pass
    def toc_section_enter( self, section ):
        pass
    def toc_section_exit( self, section ):
        pass
    def toc_chapter_exit( self, chapter ):
        pass
    def toc_index( self, index_filename ):
        pass
    def toc_exit( self ):
        pass
    def toc_dump( self, toc_filename = None, index_filename = None ):
        # Drive table-of-contents generation through the hooks above,
        # optionally redirecting output to *toc_filename*.
        output = None
        if toc_filename:
            output = open_output( toc_filename )
        self.toc_enter()
        for chap in self.processor.chapters:
            self.toc_chapter_enter( chap )
            for section in chap.sections:
                self.toc_section_enter( section )
                self.toc_section_exit( section )
            self.toc_chapter_exit( chap )
        self.toc_index( index_filename )
        self.toc_exit()
        if output:
            close_output( output )
#
# formatting the index
#
def index_enter( self ):
pass
def index_name_enter( self, name ):
pass
def index_name_exit( self, name ):
pass
def index_exit( self ):
pass
def index_dump( self, index_filename = None ):
output = None
if index_filename:
output = open_output( index_filename )
self.index_enter()
for name in self.block_index:
self.index_name_enter( name )
self.index_name_exit( name )
self.index_exit()
if output:
close_output( output )
#
# formatting a section
#
def section_enter( self, section ):
pass
def block_enter( self, block ):
pass
def markup_enter( self, markup, block = None ):
pass
def field_enter( self, field, markup = None, block = None ):
pass
def field_exit( self, field, markup = None, block = None ):
pass
def markup_exit( self, markup, block = None ):
pass
def block_exit( self, block ):
pass
def section_exit( self, section ):
pass
def section_dump( self, section, section_filename = None ):
output = None
if section_filename:
output = open_output( section_filename )
self.section_enter( section )
for name in section.block_names:
skip_entry = 0
try:
block = self.identifiers[name]
# `block_names' can contain field names also,
# which we filter out
for markup in block.markups:
if markup.tag == 'values':
for field in markup.fields:
if field.name == name:
skip_entry = 1
except:
skip_entry = 1 # this happens e.g. for `/empty/' entries
if skip_entry:
continue
self.block_enter( block )
for markup in block.markups[1:]: # always ignore first markup!
self.markup_enter( markup, block )
for field in markup.fields:
self.field_enter( field, markup, block )
self.field_exit( field, markup, block )
self.markup_exit( markup, block )
self.block_exit( block )
self.section_exit( section )
if output:
close_output( output )
def section_dump_all( self ):
for section in self.sections:
self.section_dump( section )
# eof
| gpl-2.0 |
27629678/Protobuf | gtest/test/gtest_output_test.py | 1733 | 12005 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
# Environment variable that tells Google Test whether to catch exceptions.
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'

IS_WINDOWS = os.name == 'nt'

# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'

PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')

# Each command below is an (extra_environment_dict, argv_list) pair.
# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
                          '--gtest_print_time',
                          '--gtest_internal_skip_environment_and_ad_hoc_tests',
                          '--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
    {}, [PROGRAM_PATH,
         '--gtest_also_run_disabled_tests',
         '--gtest_internal_skip_environment_and_ad_hoc_tests',
         '--gtest_filter=*DISABLED_*'])
# Sharding is exercised through environment variables only.
COMMAND_WITH_SHARDING = (
    {'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
    [PROGRAM_PATH,
     '--gtest_internal_skip_environment_and_ad_hoc_tests',
     '--gtest_filter=PassingTest.*'])

GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
  """Changes all Windows/Mac line endings in s to UNIX line endings."""
  # '\r\n?' matches CRLF first and bare CR otherwise, so every line ends
  # in exactly one '\n'.
  return re.sub(r'\r\n?', '\n', s)
def RemoveLocations(test_output):
  """Removes all file location info from a Google Test program's output.

  Args:
       test_output:  the output of a Google Test program.

  Returns:
       output with every location of the form
       'DIRECTORY/FILE_NAME:LINE_NUMBER: ' or
       'DIRECTORY\\FILE_NAME(LINE_NUMBER): ' replaced by
       'FILE_NAME:#: '.
  """
  location_re = re.compile(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ')
  return location_re.sub(r'\1:#: ', test_output)
def RemoveStackTraceDetails(output):
  """Removes all stack traces from a Google Test program's output."""
  # Non-greedy '*?' stops at the first blank line that ends each trace.
  trace_re = re.compile(r'Stack trace:(.|\n)*?\n\n')
  return trace_re.sub('Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
  """Removes all traces of stack traces from a Google Test program's output."""
  # Non-greedy '*?' stops at the first blank line; the whole trace is dropped.
  trace_re = re.compile(r'Stack trace:(.|\n)*?\n\n')
  return trace_re.sub('', output)
def RemoveTime(output):
  """Removes all time information from a Google Test program's output."""
  time_re = re.compile(r'\(\d+ ms')
  return time_re.sub('(? ms', output)
def RemoveTypeInfoDetails(test_output):
  """Removes compiler-specific type info from Google Test program's output.

  Args:
       test_output:  the output of a Google Test program.

  Returns:
       output with type information normalized to canonical form.
  """
  # some compilers output the name of type 'unsigned int' as 'unsigned';
  # the trailing \b keeps words such as 'unsigned integer' intact (the
  # original pattern would have mangled them).
  return re.sub(r'unsigned int\b', 'unsigned', test_output)
def NormalizeToCurrentPlatform(test_output):
  """Normalizes platform specific output details for easier comparison."""
  # Non-Windows output is already in canonical form.
  if not IS_WINDOWS:
    return test_output
  # Removes the color information that is not present on Windows.
  test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
  # Changes failure message headers into the Windows format.
  test_output = re.sub(r': Failure\n', r': error: ', test_output)
  # Changes file(line_number) to file:line_number.
  test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
  return test_output
def RemoveTestCounts(output):
  """Removes test counts from a Google Test program's output."""
  # Substitutions are applied in order; the earlier, more specific
  # patterns must run before the generic ones.
  replacements = (
      (r'\d+ tests?, listed below', '? tests, listed below'),
      (r'\d+ FAILED TESTS', '? FAILED TESTS'),
      (r'\d+ tests? from \d+ test cases?', '? tests from ? test cases'),
      (r'\d+ tests? from ([a-zA-Z_])', r'? tests from \1'),
      (r'\d+ tests?\.', '? tests.'),
  )
  for pattern, replacement in replacements:
    output = re.sub(pattern, replacement, output)
  return output
def RemoveMatchingTests(test_output, pattern):
  """Removes output of specified tests from a Google Test program's output.

  This function strips not only the beginning and the end of a test but also
  all output in between.

  Args:
    test_output:       A string containing the test output.
    pattern:           A regex string that matches names of test cases or
                       tests to remove.

  Returns:
    Contents of test_output with tests whose names match pattern removed.
  """
  # First drop each whole RUN ... FAILED/OK span for the pattern, then
  # any remaining single lines that still mention it.
  span_re = r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % (
      pattern, pattern)
  stripped = re.sub(span_re, '', test_output)
  return re.sub(r'.*%s.*\n' % pattern, '', stripped)
def NormalizeOutput(output):
  """Normalizes output (the output of gtest_output_test_.exe)."""
  # The normalization order matters: line endings first, then the
  # content-level rewrites.
  for normalize in (ToUnixLineEnding, RemoveLocations,
                    RemoveStackTraceDetails, RemoveTime):
    output = normalize(output)
  return output
def GetShellCommandOutput(env_cmd):
  """Runs a command in a sub-process, and returns its output in a string.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.

  Returns:
    A string with the command's combined standard and diagnostic output.
  """
  extra_env, cmdline = env_cmd
  # Run with the current environment plus the requested overrides.
  environ = os.environ.copy()
  environ.update(extra_env)
  return gtest_test_utils.Subprocess(cmdline, env=environ).output
def GetCommandOutput(env_cmd):
  """Runs a command and returns its normalized output.

  All file location info is stripped off via NormalizeOutput.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.
  """
  extra_env, cmdline = env_cmd
  # Disables exception pop-ups on Windows; work on a copy so the caller's
  # tuple is left untouched.
  environ = dict(extra_env)
  environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
  raw_output = GetShellCommandOutput((environ, cmdline))
  return NormalizeOutput(raw_output)
def GetOutputOfAllCommands():
  """Returns concatenated output from several representative commands."""
  commands = (COMMAND_WITH_COLOR,
              COMMAND_WITH_TIME,
              COMMAND_WITH_DISABLED,
              COMMAND_WITH_SHARDING)
  return ''.join(GetCommandOutput(command) for command in commands)
# Probe the test binary once at import time to find out which optional
# features it was built with; the answers decide how much of the golden
# file can be compared exactly.
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
# Stack trace contents are never compared against the golden file.
SUPPORTS_STACK_TRACES = False

# The golden file is only fully reproducible when every optional feature
# above is available.
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
                            SUPPORTS_TYPED_TESTS and
                            SUPPORTS_THREADS)
class GTestOutputTest(gtest_test_utils.TestCase):
  """Compares gtest_output_test_'s normalized output with the golden file."""

  def RemoveUnsupportedTests(self, test_output):
    """Strips from test_output the sections for features this build lacks."""
    if not SUPPORTS_DEATH_TESTS:
      test_output = RemoveMatchingTests(test_output, 'DeathTest')
    if not SUPPORTS_TYPED_TESTS:
      test_output = RemoveMatchingTests(test_output, 'TypedTest')
      test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
      test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
    if not SUPPORTS_THREADS:
      test_output = RemoveMatchingTests(test_output,
                                        'ExpectFailureWithThreadsTest')
      test_output = RemoveMatchingTests(test_output,
                                        'ScopedFakeTestPartResultReporterTest')
      test_output = RemoveMatchingTests(test_output,
                                        'WorksConcurrently')
    if not SUPPORTS_STACK_TRACES:
      test_output = RemoveStackTraces(test_output)

    return test_output

  def testOutput(self):
    """Runs all commands and diffs their normalized output with the golden."""
    output = GetOutputOfAllCommands()

    # A mis-configured source control system can cause \r appear in EOL
    # sequences when we read the golden file irrespective of an operating
    # system used. Therefore, we need to strip those \r's from newlines
    # unconditionally.  Using `with' also guarantees the handle is closed
    # even if the read fails (the original leaked it).
    with open(GOLDEN_PATH, 'rb') as golden_file:
      golden = ToUnixLineEnding(golden_file.read())

    # We want the test to pass regardless of certain features being
    # supported or not.

    # We still have to remove type name specifics in all cases.
    normalized_actual = RemoveTypeInfoDetails(output)
    normalized_golden = RemoveTypeInfoDetails(golden)

    if CAN_GENERATE_GOLDEN_FILE:
      self.assertEqual(normalized_golden, normalized_actual)
    else:
      normalized_actual = NormalizeToCurrentPlatform(
          RemoveTestCounts(normalized_actual))
      normalized_golden = NormalizeToCurrentPlatform(
          RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))

      # This code is very handy when debugging golden file differences:
      if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
        # `with' flushes and closes each dump file (the original leaked
        # both handles).
        source_dir = gtest_test_utils.GetSourceDir()
        with open(os.path.join(
            source_dir, '_gtest_output_test_normalized_actual.txt'),
            'wb') as dump_file:
          dump_file.write(normalized_actual)
        with open(os.path.join(
            source_dir, '_gtest_output_test_normalized_golden.txt'),
            'wb') as dump_file:
          dump_file.write(normalized_golden)

      self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
  # --gengolden regenerates the golden file instead of running the test.
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    if CAN_GENERATE_GOLDEN_FILE:
      output = GetOutputOfAllCommands()
      # `with' ensures the golden file is flushed and closed even if the
      # write fails (the original leaked the handle).
      with open(GOLDEN_PATH, 'wb') as golden_file:
        golden_file.write(output)
    else:
      message = (
          """Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads).  Please generate the golden file using a binary built
with those features enabled.""")

      sys.stderr.write(message)
      sys.exit(1)
  else:
    gtest_test_utils.Main()
| bsd-3-clause |
turbokongen/home-assistant | homeassistant/components/wink/fan.py | 7 | 3313 | """Support for Wink fans."""
import pywink
from homeassistant.components.fan import (
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SUPPORT_DIRECTION,
SUPPORT_SET_SPEED,
FanEntity,
)
from . import DOMAIN, WinkDevice
# Extra speed names exposed by Wink fans beyond the standard low/medium/high.
SPEED_AUTO = "auto"
SPEED_LOWEST = "lowest"

# Wink fans support changing both direction and speed.
SUPPORTED_FEATURES = SUPPORT_DIRECTION + SUPPORT_SET_SPEED
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Wink platform."""
    # `known_ids` is the shared list, so entities registered during the
    # loop are still seen by later membership checks.
    known_ids = hass.data[DOMAIN]["unique_ids"]
    for fan in pywink.get_fans():
        if fan.object_id() + fan.name() not in known_ids:
            add_entities([WinkFanDevice(fan, hass)])
class WinkFanDevice(WinkDevice, FanEntity):
    """Representation of a Wink fan."""

    async def async_added_to_hass(self):
        """Call when entity is added to hass."""
        self.hass.data[DOMAIN]["entities"]["fan"].append(self)

    def set_direction(self, direction: str) -> None:
        """Set the direction of the fan."""
        self.wink.set_fan_direction(direction)

    def set_speed(self, speed: str) -> None:
        """Set the speed of the fan."""
        self.wink.set_state(True, speed)

    #
    # The fan entity model has changed to use percentages and preset_modes
    # instead of speeds.
    #
    # Please review
    # https://developers.home-assistant.io/docs/core/entity/fan/
    #
    def turn_on(
        self,
        speed: str = None,
        percentage: int = None,
        preset_mode: str = None,
        **kwargs,
    ) -> None:
        """Turn on the fan."""
        self.wink.set_state(True, speed)

    def turn_off(self, **kwargs) -> None:
        """Turn off the fan."""
        self.wink.set_state(False)

    @property
    def is_on(self):
        """Return true if the entity is on."""
        return self.wink.state()

    @property
    def speed(self) -> str:
        """Return the current speed."""
        reported = self.wink.current_fan_speed()
        known_speeds = (SPEED_AUTO, SPEED_LOWEST, SPEED_LOW,
                        SPEED_MEDIUM, SPEED_HIGH)
        # Anything the fan reports outside the known set maps to None.
        if reported in known_speeds:
            return reported
        return None

    @property
    def current_direction(self):
        """Return direction of the fan [forward, reverse]."""
        return self.wink.current_fan_direction()

    @property
    def speed_list(self) -> list:
        """Get the list of available speeds."""
        available = self.wink.fan_speeds()
        ordered = (SPEED_AUTO, SPEED_LOWEST, SPEED_LOW,
                   SPEED_MEDIUM, SPEED_HIGH)
        # Preserve the canonical auto -> high ordering.
        return [speed for speed in ordered if speed in available]

    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        return SUPPORTED_FEATURES
| apache-2.0 |
JSeam2/IsoGraph | genetic/genetic_algo.py | 1 | 10666 | """
Implementation referenced from
https://github.com/handcraftsman/GeneticAlgorithmsWithPython/blob/master/ch02/genetic.py
"""
import random
from qutip import *
import numpy as np
import pandas as pd
from functools import reduce
import datetime
import time
import pickle
import copy
# QUTIP NOTES
# hadamard = "SNOT"
# CZ = "CSIGN"
# RZ = "RZ"
def make_circuit(theta_val, save_image = False):
    """
    Build the parameterized quantum circuit and return its total unitary.

    Input theta values to create quantum circuit
    [layer 1], [layer 2]. so on
    the length of each layer should equal length of
    input state
    The theta list will act as our genome

    theta_val: 2D numpy array of rotation angles, shape (layers, qubits)
    save_image: if True, render the circuit diagram via qutip's `qc.png`

    Returns the product of all gate propagators (a single unitary).
    """
    # one qubit per angle in a layer
    qc = QubitCircuit(N = len(theta_val[0]))
    for i in range(len(theta_val)):
        # ADD H gates on every qubit of this layer
        qc.add_1q_gate("SNOT", start = 0, end = qc.N)
        # add RZ theta gates, one per qubit
        for k in range(len(theta_val[0])):
            # NOTE(review): `start=k, end=k+1` presumably applies the RZ to
            # qubit k only -- confirm against qutip's add_1q_gate semantics.
            qc.add_1q_gate("RZ", start = k, end = k + 1,
                           arg_value = theta_val[i][k],
                           arg_label = theta_val[i][k])
        # entangle neighbouring qubits with controlled-Z ("CSIGN") gates
        for k in range(len(theta_val[0]) - 1):
            qc.add_gate("CSIGN",
                        targets = [k],
                        controls = [k+1])
    # add a hadamard at the end
    qc.add_1q_gate("SNOT", start = 0, end = qc.N)
    # produce image (qutip's `png` is a property with a rendering side effect)
    if save_image:
        qc.png
    # multiply the individual gate unitaries into the full circuit unitary
    return reduce(lambda x, y: x * y, qc.propagators())
def generate_initial_population(N, data, population_size = 20, depth = 5):
    """Create and evaluate a random seed population.

    N               -- number of graph nodes
    data            -- dataset used to score the genes
    population_size -- number of individuals to create
    depth           -- number of circuit layers per gene
    """
    # Each gene is a (depth, 2N + 1) matrix of angles drawn uniformly from
    # [-pi, pi); the extra "+1" column is for the ancilla measurement qubit.
    genes = [np.random.uniform(-np.pi, np.pi, [depth, N * 2 + 1])
             for _ in range(population_size)]
    fitness, acc = get_fitness(genes, data)
    return PopulationPool(genes, fitness, acc)
def generate_children(parent, data, take_best,
                      population_size = 20,
                      mutation_rate = 0.05):
    """Breed a new generation from the best individuals of `parent`.

    parent          -- PopulationPool (must already be sorted best-first)
    data            -- dataset used to score the children
    take_best       -- number of top individuals eligible to breed
    population_size -- number of children to produce
    mutation_rate   -- per-child probability that crossover + random
                       mutation is applied (otherwise the child is a
                       straight copy of parent A)
    """
    child_genes = []
    best_parent_genes = parent.genes[:take_best]
    while len(child_genes) < population_size:
        # randomly pick two (possibly identical) parents
        parentA_gene = random.choice(best_parent_genes)
        parentB_gene = random.choice(best_parent_genes)

        # single crossover point along the layer (depth) axis
        rand_point = random.randint(0, parentA_gene.shape[0])

        if random.random() <= mutation_rate:
            # One-point crossover: layers [0, rand_point) from A, the rest
            # from B.  (The original guard `shape[0] < rand_point` could
            # never be true, and its comma-index `B[rand_point, shape[0]]`
            # was a 2-D element lookup that would have raised IndexError.)
            # np.vstack always allocates a fresh array, so mutating the
            # child below can no longer corrupt the shared parent genes.
            child_gene = np.vstack((parentA_gene[:rand_point],
                                    parentB_gene[rand_point:]))

            # randomly replace about half the entries with new angles
            mask = np.random.randint(0, 2, size=child_gene.shape).astype(bool)
            r = np.random.uniform(-np.pi, np.pi, size=child_gene.shape)
            child_gene[mask] = r[mask]
        else:
            child_gene = parentA_gene
        child_genes.append(child_gene)

    fitness, acc = get_fitness(child_genes, data)
    return PopulationPool(child_genes, fitness, acc)
def evaluate(input_str, circuit):
    """
    Evaluate input sequence of bits
    Include an additional ancilla qubit in input
    for measurement
    """
    # Placeholder -- not implemented.  Per-sample evaluation currently
    # happens inline inside get_fitness().
    pass
def get_fitness(genes, data):
    """
    Score every gene (circuit parameter set) against the dataset.

    gene: list of np array -- each a (depth, 2N+1) matrix of RZ angles
    data: pandas DataFrame from the pkl file with columns "G1", "G2"
          (adjacency matrices) and "is_iso" (0/1 label)

    returns (fitness_list, acc_list): mean logistic loss and
    classification accuracy per gene, in input order
    """
    # total number of samples
    num_sample = data.shape[0]

    # select upper diagonal ignoring zeros in the middle
    # (assumes row 0 exists and all graphs share the same size -- TODO confirm)
    size = data["G1"][0].shape[0]
    upper = np.triu_indices(size, 1)

    # create projector we project to 0 standard basis:
    # |0><0| on the first qubit, identity on the remaining 2*size qubits.
    # NOTE(review): the ancilla bit is *appended last* to the input below,
    # while the |0><0| factor here is first -- confirm the qubit ordering
    # is the one intended.
    projector = basis(2,0) * basis(2,0).dag()
    for i in range(size * 2):
        projector = tensor(projector, identity(2))

    fitness_list = []
    acc_list = []
    for gene in genes:
        loss = 0
        correct = 0
        # make circuit using the genes
        circuit = make_circuit(gene, False)
        for index, row in data.iterrows():
            #if index % 2500 == 0:
            #    print("running {}".format(index))

            # concatenate the upper-triangular bits of both adjacency
            # matrices, then add a |0> to the last qubit as we will use
            # it for measurements
            combined = row["G1"][upper].tolist()[0] + \
                       row["G2"][upper].tolist()[0]
            combined.append("0")
            int_comb = [int(i) for i in combined]
            inputval = bra(int_comb)

            # apply the circuit and form the output density matrix
            result = inputval * circuit
            density = result.dag() * result

            # We will use the logistic regression loss function
            # as we are dealing with a classification problem.
            # expectation here refers to the likelihood of measuring 0
            # on the projected qubit; labels map as expectation <= 0.5 -> 1.
            expectation = expect(projector, density)
            actual = row["is_iso"]
            loss += -1 * actual * np.log(1 - expectation) \
                    - (1 - actual) * np.log(expectation)

            if expectation <= 0.50:
                # this is 1
                prediction = 1
            else:
                prediction = 0

            if prediction == actual:
                correct += 1

        # average the per-sample loss and accuracy for this gene
        ave_loss = loss/num_sample
        fitness_list.append(ave_loss)

        accuracy = correct/num_sample
        acc_list.append(accuracy)

    return fitness_list, acc_list
def get_best(N, data, num_epoch = 10,
             population_size = 20,
             take_best = 5,
             depth = 5,
             mutation_rate = 0.05):
    """Run the genetic search and return the final population's genes.

    N               -- number of graph nodes
    data            -- dataset of graph pairs with is_iso labels
    num_epoch       -- number of generations to evolve
    population_size -- individuals per generation
    take_best       -- how many top individuals breed each generation
    depth           -- number of circuit layers per gene
    mutation_rate   -- per-child probability of crossover + mutation
    """
    assert take_best >= 2
    assert population_size >= 2
    assert take_best <= population_size

    def display(pool):
        print("Time: {} \t Best Score: {} \t Best Acc: {}".format(datetime.datetime.now(),
                                                                  pool.fitness[0],
                                                                  pool.accuracy[0]))

    parent = generate_initial_population(N, data, population_size, depth)
    parent.sort()
    print("Seed Population")
    display(parent)

    for epoch in range(num_epoch):
        child = generate_children(parent, data, take_best, population_size,
                                  mutation_rate)
        child.sort()
        print()
        print("Child")
        print("Epoch {}".format(epoch))
        display(child)

        # Keep whichever generation currently has the lower (better) loss.
        if child.fitness[0] < parent.fitness[0]:
            parent = copy.deepcopy(child)
            print("Parent is now the child, New Parent:")
        else:
            print("Parent retained, Current Parent:")
        display(parent)

    return parent.genes
class PopulationPool:
    """One generation of candidate solutions plus their evaluation results."""

    def __init__(self, genes, fitness, accuracy):
        """
        genes : list of genes (parallel to fitness and accuracy)
        fitness : list of fitness values (lower is better)
        accuracy : list of accuracy values
        """
        self.genes = genes
        self.fitness = fitness
        self.accuracy = accuracy

    def sort(self):
        """Sort genes, fitness and accuracy together by increasing fitness.

        Sorting an index permutation fixes two defects of the original
        `sorted(zip(fitness, genes))` approach: equal fitness values no
        longer fall through to comparing numpy arrays (which raises), and
        `self.fitness` itself is now reordered, so `fitness[0]` really is
        the best score (get_best relies on that).
        """
        order = sorted(range(len(self.fitness)), key=self.fitness.__getitem__)
        self.genes = [self.genes[i] for i in order]
        self.fitness = [self.fitness[i] for i in order]
        self.accuracy = [self.accuracy[i] for i in order]
if __name__ == "__main__":
    print("Start Program")
    df = pd.read_pickle("3_node_10000.pkl")

    # One search per circuit depth; each result is pickled to its own file.
    # (Replaces five copy-pasted blocks that differed only in depth/path.)
    runs = [(10, "save1.pkl"),
            (15, "save2.pkl"),
            (20, "save3.pkl"),
            (25, "save4.pkl"),
            (30, "save5.pkl")]
    for run_index, (depth, out_path) in enumerate(runs):
        print("Depth = {}".format(depth))
        out_genes = get_best(N=3,
                             data = df,
                             num_epoch = 50,
                             population_size = 20,
                             take_best = 5,
                             depth = depth,
                             mutation_rate = 0.05)
        # `with` guarantees the pickle file is flushed and closed.
        with open(out_path, "wb") as f:
            pickle.dump(out_genes, f)
        # separator between runs, matching the original output
        if run_index < len(runs) - 1:
            print("==========================")

    # to open a saved result later:
    #with open("save1.pkl", "rb") as f:
    #    save_genes = pickle.load(f)
| mit |
nathangeffen/tbonline-old | tbonlineproject/external/south/db/postgresql_psycopg2.py | 21 | 2609 |
from django.db import connection, models
from south.db import generic
class DatabaseOperations(generic.DatabaseOperations):

    """
    PsycoPG2 implementation of database operations.

    NOTE: this module is Python 2 code (it uses `print' statements).
    """

    backend_name = "postgres"

    def rename_column(self, table_name, old, new):
        """Rename column `old` of `table_name` to `new` via ALTER TABLE."""
        if old == new:
            # Short-circuit out
            return []
        self.execute('ALTER TABLE %s RENAME COLUMN %s TO %s;' % (
            self.quote_name(table_name),
            self.quote_name(old),
            self.quote_name(new),
        ))

    def rename_table(self, old_table_name, table_name):
        "will rename the table and an associated ID sequence and primary key index"
        # First, rename the table
        generic.DatabaseOperations.rename_table(self, old_table_name, table_name)
        # Then, try renaming the ID sequence
        # (if you're using other AutoFields... your problem, unfortunately)

        # Each rename runs in its own transaction so that a failed sequence
        # or index rename can be rolled back without undoing the table
        # rename itself.
        self.commit_transaction()
        self.start_transaction()
        try:
            generic.DatabaseOperations.rename_table(self, old_table_name+"_id_seq", table_name+"_id_seq")
        except:
            if self.debug:
                print "   ~ No such sequence (ignoring error)"
            self.rollback_transaction()
        else:
            self.commit_transaction()
        self.start_transaction()

        # Rename primary key index, will not rename other indices on
        # the table that are used by django (e.g. foreign keys). Until
        # figure out how, you need to do this yourself.
        try:
            generic.DatabaseOperations.rename_table(self, old_table_name+"_pkey", table_name+ "_pkey")
        except:
            if self.debug:
                print "   ~ No such primary key (ignoring error)"
            self.rollback_transaction()
        else:
            self.commit_transaction()
        self.start_transaction()

    def rename_index(self, old_index_name, index_name):
        "Rename an index individually"
        # PostgreSQL renames indexes with the same ALTER TABLE statement
        # used for tables, so delegate to rename_table.
        generic.DatabaseOperations.rename_table(self, old_index_name, index_name)

    def _db_type_for_alter_column(self, field):
        """
        Returns a field's type suitable for ALTER COLUMN.
        Strips CHECKs from PositiveSmallIntegerField) and PositiveIntegerField
        @param field: The field to generate type for
        """
        super_result = super(DatabaseOperations, self)._db_type_for_alter_column(field)
        # e.g. "integer CHECK (...)" -> keep only the bare "integer" part
        if isinstance(field, models.PositiveSmallIntegerField) or isinstance(field, models.PositiveIntegerField):
            return super_result.split(" ")[0]
        return super_result
| mit |
philippeowagner/django-timesheet | src/timesheet/views.py | 2 | 1599 | import datetime, time
from django.http import Http404
from django.db.models import Sum
from django.template import RequestContext
from django.contrib.auth.models import User
from django.shortcuts import render_to_response
from timesheet.models import Timesheet
def week_archive(request, username, year, week,
                 template_name='timesheet/week_archive.html'):
    """Week archive page of timesheet.

    :param template_name: Add a custom template.
    """
    # Resolve the week boundaries; an unparsable year/week is a 404.
    try:
        week_begining = datetime.date(*time.strptime(year + '-0-' + week,
                                                     '%Y-%w-%U')[:3])
        week_ending = week_begining + datetime.timedelta(days=7)
    except ValueError:
        raise Http404

    # An unknown user is also a 404.
    try:
        user = User.objects.get(username__iexact=username)
    except User.DoesNotExist:
        raise Http404

    week_jobs = Timesheet.objects.filter(person=user, date__gte=week_begining,
                                         date__lt=week_ending).order_by('date', 'time')

    # Build one summary row per day.
    # NOTE(review): `<=` also yields a row for week_ending itself; that row
    # is always empty because the queryset uses date__lt=week_ending --
    # confirm the trailing eighth row is intended.
    timesheet = []
    one_day = datetime.timedelta(days=1)
    day = week_begining
    while day <= week_ending:
        day_jobs = week_jobs.filter(date=day)
        timesheet.append({
            'date': day,
            'jobs': day_jobs,
            'hours': day_jobs.aggregate(Sum('hours'))['hours__sum'],
            'pceo': day_jobs.aggregate(Sum('pceo'))['pceo__sum'],
        })
        day = day + one_day

    week_hours = week_jobs.aggregate(Sum('hours'))['hours__sum']
    week_pceo = week_jobs.aggregate(Sum('pceo'))['pceo__sum']

    context = {
        'timesheet': timesheet,
        'week_begining': week_begining,
        'week_ending': week_ending,
        'week_hours': week_hours,
        'week_pceo': week_pceo,
    }
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request))
| bsd-3-clause |
rackerlabs/base-image-blueprints | debian_9_pvhvm_heat.py | 1 | 5404 | #!/usr/bin/env python3
# 6/27/2019
# Author: Kevin McJunkin
# Use away if this is somehow relevant to ya
import os
import subprocess
import shutil
import stat
# Install required packages via yum
def install_packages():
    """Install the OS-level build dependencies via apt-get.

    Returns True on success; exits the process with status 1 on failure.
    """
    package_list = ['python3-pip',
                    'gcc',
                    'git',
                    'python3-dev',
                    'libyaml-dev',
                    'libssl-dev',
                    'libffi-dev',
                    'libxml2-dev',
                    'libxslt-dev',
                    'puppet']
    print('Installing packages')
    # os.system() signals failure through its exit status rather than an
    # exception, so the original try/except could never detect a failed
    # install; check the return codes explicitly instead.
    if os.system('apt-get update') != 0:
        print('Unsuccessful')
        exit(1)
    print('Installing packages')
    if os.system('apt-get install -y {}'.format(" ".join(package_list))) != 0:
        print('Unsuccessful')
        exit(1)
    print('Successful\n')
    return True
# Install required packages via pip
def pip_down():
    """Install the OpenStack HEAT agent Python requirements via pip3.

    Returns True on success; exits the process with status 1 on failure.
    """
    print('\nInstalling OpenStack HEAT requirements via pip')
    os_list = ['os-collect-config',
               'os-apply-config',
               'os-refresh-config',
               'dib-utils',
               'gitpython']
    # os.system() reports failure through its exit status, not an
    # exception, so the original try/except could never fire; check each
    # step's return code explicitly.
    print('Installing decorator')
    if os.system('pip3 install -U decorator') != 0:
        print('Pip Install Unsuccessful decorator')
        exit(1)
    print('Installing ansible')
    if os.system('pip3 install ansible==2.4.3.0') != 0:
        print('Pip Install Unsuccessful ansible')
        exit(1)
    print('Installing ansible success')
    if os.system('pip3 install {}'.format(" ".join(os_list))) != 0:
        print('Pip Install Unsuccessful {}'.format(" ".join(os_list)))
        exit(1)
    print('Successful')
    return True
# Remove git repo if it exist (should never come up but might as well)
# Clone git repo that has all our configuration files
def git_configuration():
    """Clone the `hotstrap` configuration repo into ./hotstrap/.

    Returns True on success; exits the process with status 1 on failure.
    """
    did_git = False
    try:
        # gitpython only becomes importable after pip_down() has installed
        # it, hence the deferred import.
        import git
        try:
            shutil.rmtree('hotstrap/')
        except OSError:
            # no stale checkout to remove
            pass
        print('\nCloning down configuration files')
        git.Git('./').clone('git://github.com/rockymccamey/hotstrap.git')
        did_git = True
    except Exception as e:
        print('Git configuration failure {}'.format(e))
        exit(1)
    return did_git
# Move configuration files to the proper location on the OS
# ...and use a really ghetto create directory for the move
# chmod files properly
def configurate():
    """Move the cloned configuration files into place and set their modes.

    Returns True on success; exits the process with status 1 on failure.
    """
    # Destination paths (relative to /) of the files cloned into hotstrap/.
    file_list = [
        'opt/stack/os-config-refresh/configure.d/20-os-apply-config',
        'opt/stack/os-config-refresh/configure.d/55-heat-config',
        'usr/bin/heat-config-notify',
        'var/lib/heat-config/hooks/ansible',
        'var/lib/heat-config/hooks/script',
        'var/lib/heat-config/hooks/puppet',
        'etc/os-collect-config.conf',
        'usr/libexec/os-apply-config/templates/var/run/heat-config/heat-config',  # noqa: E501
        'usr/libexec/os-apply-config/templates/etc/os-collect-config.conf'
    ]
    print('Moving configuration files to the proper locations\n\n')
    did_configure = False
    try:
        for file in file_list:
            # create the destination directory tree on demand
            directory = os.path.dirname('/' + file)
            if not os.path.exists(directory):
                os.makedirs(directory)
            print('hotstrap/' + file + '\t->\t' + '/' + file)
            shutil.move('hotstrap/' + file, '/' + file)
        # first three entries (refresh hooks / notify script): rwx owner only
        for i in range(3):
            os.chmod(
                '/' + file_list[i],
                stat.S_IRUSR + stat.S_IWUSR + stat.S_IXUSR)
        # next three entries (heat-config hooks): rwx owner, rx group/other.
        # NOTE(review): entries 6-8 (config/template files) keep whatever
        # mode shutil.move preserved -- confirm that is intentional.
        for i in range(3, 6):
            os.chmod(
                '/' + file_list[i],
                stat.S_IRUSR + stat.S_IWUSR + stat.S_IXUSR + stat.S_IRGRP +
                stat.S_IXGRP + stat.S_IROTH + stat.S_IXOTH)
        did_configure = True
    except Exception:
        print('Configurate failure')
        exit(1)
    return did_configure
# Run os-collect to propagate the config & run it again
# Then run start_config to create/enable the os-collect service
# Also clean up the git repo cause it is dead to us
def jiggle_some_things():
    """Run os-collect-config to propagate the config, then enable its service.

    Returns True on success; exits the process with status 1 on failure.
    """
    try:
        print('\nRunning os-collect-config & ensuring os-collect-config-exist')
        # Run once to propagate the config, show the result, then run again.
        os.system('os-collect-config --one-time --debug')
        os.system('cat /etc/os-collect-config.conf')
        os.system('os-collect-config --one-time --debug')
        print('\nEnsuring everything is running & enabled on boot')
        # start_config_agent.sh creates/enables the os-collect-config service.
        subprocess.call('hotstrap/start_config_agent.sh')
        print('\nCleaning up git folder')
        # The checkout is no longer needed once the files are in place.
        shutil.rmtree('hotstrap/')
        did_jiggle = True
    except Exception:
        print('Jiggle failure')
        exit(1)
    return did_jiggle
# Ensure we don't get rekt by cloud-init next boot
def delete_some_other_things():
    """Remove cloud-init state so it does not re-run on the next boot.

    Deletes cloud-init's instance data, per-once semaphores and logs.
    Returns True on success; exits with status 1 on failure.
    """
    try:
        print('Ensuring no cloud-init references exist')
        # os.system is kept (rather than shutil) because several of these
        # paths rely on shell glob expansion.
        os.system('rm -rf /var/lib/cloud/instance')
        os.system('rm -rf /var/lib/cloud/instances/*')
        os.system('rm -rf /var/lib/cloud/data/*')
        os.system('rm -rf /var/lib/cloud/sem/config_scripts_per_once.once')
        os.system('rm -rf /var/log/cloud-init.log')
        os.system('rm -rf /var/log/cloud-init-output.log')
        print('\n\n\nDone!')
        did_delete = True
    except Exception as exc:
        # Report the actual error instead of silently swallowing it.
        print('Delete failure: %s' % exc)
        exit(1)
    return did_delete
# Bootstrap driver: run every step in order.  Each step exits the process
# itself on failure, so reaching the next call implies the previous one
# succeeded.
did_package = install_packages()
did_pip = pip_down()
did_git = git_configuration()
did_configure = configurate()
did_jiggle = jiggle_some_things()
did_delete = delete_some_other_things()
| apache-2.0 |
timoschwarzer/blendworks | BlendWorks Server/python/Lib/lib2to3/pygram.py | 170 | 1114 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Export the Python grammar and symbols."""
# Python imports
import os
# Local imports
from .pgen2 import token
from .pgen2 import driver
from . import pytree
# The grammar file
_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
"PatternGrammar.txt")
class Symbols(object):

    def __init__(self, grammar):
        """Expose every nonterminal of *grammar* as an attribute.

        Each symbol name found in ``grammar.symbol2number`` becomes an
        attribute of this instance whose value is the symbol's numeric
        type (an int >= 256).
        """
        for name, number in grammar.symbol2number.items():
            setattr(self, name, number)
# Load the full Python grammar and build the module-level symbol tables.
python_grammar = driver.load_grammar(_GRAMMAR_FILE)
python_symbols = Symbols(python_grammar)
# Variant of the grammar with 'print' removed from the keyword table so
# that print() parses as an ordinary function call.
python_grammar_no_print_statement = python_grammar.copy()
del python_grammar_no_print_statement.keywords["print"]
# Separate grammar used for lib2to3's pattern-matching language.
pattern_grammar = driver.load_grammar(_PATTERN_GRAMMAR_FILE)
pattern_symbols = Symbols(pattern_grammar)
| gpl-2.0 |
olevinsky/django-guardian | guardian/utils.py | 10 | 4610 | """
django-guardian helper functions.
Functions defined within this module should be considered as django-guardian's
internal functionality. They are **not** guaranteed to be stable - which means
they actual input parameters/output type may change in future releases.
"""
import os
import logging
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.models import User, AnonymousUser, Group
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseForbidden, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext, TemplateDoesNotExist
from django.utils.http import urlquote
from guardian.conf import settings as guardian_settings
from guardian.exceptions import NotUserNorGroup
from itertools import chain
logger = logging.getLogger(__name__)


def abspath(*p):
    """Join the given path components and return the absolute path.

    A named ``def`` instead of a lambda bound to a name (PEP 8, E731);
    the call signature and behaviour are unchanged.
    """
    return os.path.abspath(os.path.join(*p))
def get_anonymous_user():
    """
    Returns ``User`` instance (not ``AnonymousUser``) depending on
    ``ANONYMOUS_USER_ID`` configuration.
    """
    anonymous_id = guardian_settings.ANONYMOUS_USER_ID
    return User.objects.get(id=anonymous_id)
def get_identity(identity):
    """
    Resolve *identity* into a ``(user, group)`` pair.

    Returns ``(user_obj, None)`` for a ``User`` and ``(None, group_obj)``
    for a ``Group``.  An ``AnonymousUser`` is accepted too, but is first
    swapped for the real anonymous ``User`` so authorization backends can
    treat it uniformly.

    :param identity: either ``User`` or ``Group`` instance

    :raises ``NotUserNorGroup``: if cannot return proper identity instance

    **Examples**::

       >>> user = User.objects.create(username='joe')
       >>> get_identity(user)
       (<User: joe>, None)

       >>> group = Group.objects.create(name='users')
       >>> get_identity(group)
       (None, <Group: users>)

       >>> get_identity("not instance")
       ...
       NotUserNorGroup: User/AnonymousUser or Group instance is required (got )
    """
    if isinstance(identity, AnonymousUser):
        identity = get_anonymous_user()

    if isinstance(identity, User):
        return identity, None
    if isinstance(identity, Group):
        return None, identity

    raise NotUserNorGroup("User/AnonymousUser or Group instance is required "
        "(got %s)" % identity)
def get_403_or_None(request, perms, obj=None, login_url=None,
        redirect_field_name=None, return_403=False, accept_global_perms=False):
    """
    Return ``None`` when ``request.user`` holds every permission in *perms*
    (checked globally first if *accept_global_perms*, then against *obj*);
    otherwise return a 403 response (if *return_403*), raise
    ``PermissionDenied`` (if ``RAISE_403``), or redirect to the login page.
    """
    login_url = login_url or settings.LOGIN_URL
    redirect_field_name = redirect_field_name or REDIRECT_FIELD_NAME
    # Handles both original and with object provided permission check
    # as ``obj`` defaults to None
    has_permissions = False
    # global perms check first (if accept_global_perms)
    if accept_global_perms:
        has_permissions = all(request.user.has_perm(perm) for perm in perms)
    # if still no permission granted, try obj perms
    if not has_permissions:
        has_permissions = all(request.user.has_perm(perm, obj) for perm in perms)
    if not has_permissions:
        if return_403:
            if guardian_settings.RENDER_403:
                # Render the configured 403 template; fall through to a bare
                # HttpResponseForbidden if the template is missing.
                try:
                    response = render_to_response(
                        guardian_settings.TEMPLATE_403, {},
                        RequestContext(request))
                    response.status_code = 403
                    return response
                except TemplateDoesNotExist, e:
                    # In DEBUG surface the missing-template error to the dev.
                    if settings.DEBUG:
                        raise e
            elif guardian_settings.RAISE_403:
                raise PermissionDenied
            return HttpResponseForbidden()
        else:
            # Redirect to login, preserving the originally requested path.
            path = urlquote(request.get_full_path())
            tup = login_url, redirect_field_name, path
            return HttpResponseRedirect("%s?%s=%s" % tup)
def clean_orphan_obj_perms():
    """
    Seeks and removes all object permissions entries pointing at non-existing
    targets.

    Returns number of removed objects.
    """
    from guardian.models import UserObjectPermission
    from guardian.models import GroupObjectPermission

    deleted = 0
    # TODO: optimise
    for perm in chain(UserObjectPermission.objects.all(),
        GroupObjectPermission.objects.all()):
        if perm.content_object is None:
            # Lazy %-style logging args: the message is only formatted when
            # the corresponding log level is actually enabled.
            logger.debug("Removing %s (pk=%d)", perm, perm.pk)
            perm.delete()
            deleted += 1
    logger.info("Total removed orphan object permissions instances: %d",
        deleted)
    return deleted
| bsd-2-clause |
supriyasingh01/github_basics | Internetworking Distributed Project/finalProject/ovs/pox-master/tests/unit/lib/epoll_select_test.py | 7 | 2478 | #!/usr/bin/env python
import unittest
import sys
import os.path
import SocketServer
import threading
import socket
import signal
from copy import copy
sys.path.append(os.path.dirname(__file__) + "/../../..")
from pox.lib.epoll_select import EpollSelect
class TCPEcho(SocketServer.StreamRequestHandler):
    """Request handler that echoes back a single line sent by the client."""
    def handle(self):
        # Read one newline-terminated line and write it straight back.
        data = self.rfile.readline()
        print "got data: %s" % data
        self.wfile.write(data)
class ForkingTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    """TCP server run in a forked child process so the test can kill it."""
    def start(self):
        # Fork: the child serves forever, the parent records the child pid.
        self.pid = os.fork()
        if self.pid == 0:
            # child
            self.serve_forever()
    def stop(self):
        # SIGKILL the serving child; no graceful shutdown is needed in tests.
        os.kill(self.pid, signal.SIGKILL)
def sort_fdlists(rl, wl, xl):
    """Return the three fd lists, each sorted by file descriptor number.

    Entries may be socket-like objects (sorted by ``fileno()``) or raw
    integer descriptors (sorted by their own value).
    """
    def fd_key(obj):
        return obj.fileno() if hasattr(obj, "fileno") else obj

    return (sorted(rl, key=fd_key),
            sorted(wl, key=fd_key),
            sorted(xl, key=fd_key))
class EpollSelectTest(unittest.TestCase):
    """Exercise EpollSelect against a forked TCP echo server."""
    def setUp(self):
        # Fresh selector plus an echo server bound to an ephemeral port.
        self.es = EpollSelect()
        self.server = ForkingTCPServer(("localhost", 0), TCPEcho)
        self.ip, self.port = self.server.server_address
        self.server.start()
    def tearDown(self):
        # Release the epoll fd and kill the forked server child.
        self.es.close()
        self.server.stop()
    def test_create(self):
        # setUp/tearDown alone must succeed.
        pass
    def test_read_one_socket(self):
        c = socket.create_connection( (self.ip, self.port))
        # Nothing received yet, so readability must not be reported.
        ret = self.es.select([c], [], [c], 0.1)
        self.assertEqual(([],[],[]), ret)
        # socket is ready to send?
        ret = self.es.select([c], [c], [c], 0.1)
        self.assertEqual(([],[c],[]), ret)
        # send stuff
        c.send("Hallo\n")
        # now we have something to read, right?
        ret = self.es.select([c], [], [c], 0.5)
        self.assertEqual(([c],[],[]), ret)
    def test_write_more_sockets(self):
        c1 = socket.create_connection( (self.ip, self.port))
        c2 = socket.create_connection( (self.ip, self.port))
        c3 = socket.create_connection( (self.ip, self.port))
        # note don't throw away the socket -- else it will be garbage collected
        raw = c3.fileno()
        # Mix of socket objects and a raw integer fd, in varying subsets.
        seq = [ [c1], [c2], [c1,c2], [c1,c2, raw], [c1], [raw]]
        # Compare fd lists order-insensitively.
        check = lambda a,b: self.assertEqual(sort_fdlists(*a), sort_fdlists(*b))
        #just the writes
        for sockets in seq:
            check(([],sockets,[]),self.es.select(sockets, sockets, sockets, 0))
        # writes and reads in different order
        for sockets in seq:
            check( ([],[],[]), self.es.select(sockets, [], sockets, 0))
            check( ([],sockets,[]), self.es.select(sockets, sockets, sockets, 0))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| cc0-1.0 |
stevenmizuno/QGIS | python/plugins/processing/algs/qgis/QgisAlgorithmProvider.py | 3 | 12156 | # -*- coding: utf-8 -*-
"""
***************************************************************************
QgisAlgorithmProvider.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
try:
import plotly # NOQA
hasPlotly = True
except:
hasPlotly = False
from qgis.core import (QgsApplication,
QgsProcessingProvider)
from processing.script import ScriptUtils
from .QgisAlgorithm import QgisAlgorithm
from .AddTableField import AddTableField
from .Aggregate import Aggregate
from .Aspect import Aspect
from .BasicStatistics import BasicStatisticsForField
from .CheckValidity import CheckValidity
from .ConcaveHull import ConcaveHull
from .CreateAttributeIndex import CreateAttributeIndex
from .CreateConstantRaster import CreateConstantRaster
from .Datasources2Vrt import Datasources2Vrt
from .DefineProjection import DefineProjection
from .Delaunay import Delaunay
from .DeleteColumn import DeleteColumn
from .DeleteDuplicateGeometries import DeleteDuplicateGeometries
from .DeleteHoles import DeleteHoles
from .DensifyGeometries import DensifyGeometries
from .DensifyGeometriesInterval import DensifyGeometriesInterval
from .Difference import Difference
from .EliminateSelection import EliminateSelection
from .ExecuteSQL import ExecuteSQL
from .Explode import Explode
from .ExportGeometryInfo import ExportGeometryInfo
from .ExtendLines import ExtendLines
from .ExtentFromLayer import ExtentFromLayer
from .ExtractSpecificVertices import ExtractSpecificVertices
from .FieldPyculator import FieldsPyculator
from .FieldsCalculator import FieldsCalculator
from .FieldsMapper import FieldsMapper
from .FindProjection import FindProjection
from .GeometryConvert import GeometryConvert
from .GeometryByExpression import GeometryByExpression
from .Grid import Grid
from .Heatmap import Heatmap
from .Hillshade import Hillshade
from .HubDistanceLines import HubDistanceLines
from .HubDistancePoints import HubDistancePoints
from .HypsometricCurves import HypsometricCurves
from .IdwInterpolation import IdwInterpolation
from .ImportIntoPostGIS import ImportIntoPostGIS
from .ImportIntoSpatialite import ImportIntoSpatialite
from .Intersection import Intersection
from .KeepNBiggestParts import KeepNBiggestParts
from .LinesToPolygons import LinesToPolygons
from .MinimumBoundingGeometry import MinimumBoundingGeometry
from .NearestNeighbourAnalysis import NearestNeighbourAnalysis
from .OffsetLine import OffsetLine
from .Orthogonalize import Orthogonalize
from .PointDistance import PointDistance
from .PointOnSurface import PointOnSurface
from .PointsAlongGeometry import PointsAlongGeometry
from .PointsDisplacement import PointsDisplacement
from .PointsFromLines import PointsFromLines
from .PointsFromPolygons import PointsFromPolygons
from .PointsInPolygon import PointsInPolygon
from .PointsLayerFromTable import PointsLayerFromTable
from .PointsToPaths import PointsToPaths
from .PoleOfInaccessibility import PoleOfInaccessibility
from .Polygonize import Polygonize
from .PolygonsToLines import PolygonsToLines
from .PostGISExecuteSQL import PostGISExecuteSQL
from .RandomExtract import RandomExtract
from .RandomExtractWithinSubsets import RandomExtractWithinSubsets
from .RandomPointsAlongLines import RandomPointsAlongLines
from .RandomPointsExtent import RandomPointsExtent
from .RandomPointsLayer import RandomPointsLayer
from .RandomPointsPolygons import RandomPointsPolygons
from .RandomSelection import RandomSelection
from .RandomSelectionWithinSubsets import RandomSelectionWithinSubsets
from .Rasterize import RasterizeAlgorithm
from .RasterCalculator import RasterCalculator
from .RasterLayerStatistics import RasterLayerStatistics
from .RectanglesOvalsDiamondsFixed import RectanglesOvalsDiamondsFixed
from .RectanglesOvalsDiamondsVariable import RectanglesOvalsDiamondsVariable
from .RegularPoints import RegularPoints
from .Relief import Relief
from .ReverseLineDirection import ReverseLineDirection
from .Ruggedness import Ruggedness
from .SelectByAttribute import SelectByAttribute
from .SelectByExpression import SelectByExpression
from .ServiceAreaFromLayer import ServiceAreaFromLayer
from .ServiceAreaFromPoint import ServiceAreaFromPoint
from .SetMValue import SetMValue
from .SetRasterStyle import SetRasterStyle
from .SetVectorStyle import SetVectorStyle
from .SetZValue import SetZValue
from .ShortestPathLayerToPoint import ShortestPathLayerToPoint
from .ShortestPathPointToLayer import ShortestPathPointToLayer
from .ShortestPathPointToPoint import ShortestPathPointToPoint
from .SingleSidedBuffer import SingleSidedBuffer
from .Slope import Slope
from .SnapGeometries import SnapGeometriesToLayer
from .SpatialiteExecuteSQL import SpatialiteExecuteSQL
from .SpatialIndex import SpatialIndex
from .SpatialJoin import SpatialJoin
from .SpatialJoinSummary import SpatialJoinSummary
from .StatisticsByCategories import StatisticsByCategories
from .SumLines import SumLines
from .SymmetricalDifference import SymmetricalDifference
from .TextToFloat import TextToFloat
from .TinInterpolation import TinInterpolation
from .TopoColors import TopoColor
from .TruncateTable import TruncateTable
from .Union import Union
from .UniqueValues import UniqueValues
from .VariableDistanceBuffer import VariableDistanceBuffer
from .VectorSplit import VectorSplit
from .VoronoiPolygons import VoronoiPolygons
from .ZonalStatistics import ZonalStatistics
# Absolute path to the plugin root (two directory levels above this module).
pluginPath = os.path.normpath(os.path.join(
    os.path.split(os.path.dirname(__file__))[0], os.pardir))
class QgisAlgorithmProvider(QgsProcessingProvider):
    """Processing provider exposing the built-in "qgis" algorithms."""

    def __init__(self):
        super().__init__()
        # Algorithms owned by this provider; filled in loadAlgorithms().
        self.algs = []
        # Algorithms registered by third-party code that want to appear
        # under this provider; also registered in loadAlgorithms().
        self.externalAlgs = []

    def getAlgs(self):
        """Instantiate and return the list of all built-in algorithms."""
        algs = [AddTableField(),
                Aggregate(),
                Aspect(),
                BasicStatisticsForField(),
                CheckValidity(),
                ConcaveHull(),
                CreateAttributeIndex(),
                CreateConstantRaster(),
                Datasources2Vrt(),
                DefineProjection(),
                Delaunay(),
                DeleteColumn(),
                DeleteDuplicateGeometries(),
                DeleteHoles(),
                DensifyGeometries(),
                DensifyGeometriesInterval(),
                Difference(),
                EliminateSelection(),
                ExecuteSQL(),
                Explode(),
                ExportGeometryInfo(),
                ExtendLines(),
                ExtentFromLayer(),
                ExtractSpecificVertices(),
                FieldsCalculator(),
                FieldsMapper(),
                FieldsPyculator(),
                FindProjection(),
                GeometryByExpression(),
                GeometryConvert(),
                Grid(),
                Heatmap(),
                Hillshade(),
                HubDistanceLines(),
                HubDistancePoints(),
                HypsometricCurves(),
                IdwInterpolation(),
                ImportIntoPostGIS(),
                ImportIntoSpatialite(),
                Intersection(),
                KeepNBiggestParts(),
                LinesToPolygons(),
                MinimumBoundingGeometry(),
                NearestNeighbourAnalysis(),
                OffsetLine(),
                Orthogonalize(),
                PointDistance(),
                PointOnSurface(),
                PointsAlongGeometry(),
                PointsDisplacement(),
                PointsFromLines(),
                PointsFromPolygons(),
                PointsInPolygon(),
                PointsLayerFromTable(),
                PointsToPaths(),
                PoleOfInaccessibility(),
                Polygonize(),
                PolygonsToLines(),
                PostGISExecuteSQL(),
                RandomExtract(),
                RandomExtractWithinSubsets(),
                RandomPointsAlongLines(),
                RandomPointsExtent(),
                RandomPointsLayer(),
                RandomPointsPolygons(),
                RandomSelection(),
                RandomSelectionWithinSubsets(),
                RasterCalculator(),
                RasterizeAlgorithm(),
                RasterLayerStatistics(),
                RectanglesOvalsDiamondsFixed(),
                RectanglesOvalsDiamondsVariable(),
                RegularPoints(),
                Relief(),
                ReverseLineDirection(),
                Ruggedness(),
                SelectByAttribute(),
                SelectByExpression(),
                ServiceAreaFromLayer(),
                ServiceAreaFromPoint(),
                SetMValue(),
                SetRasterStyle(),
                SetVectorStyle(),
                SetZValue(),
                ShortestPathLayerToPoint(),
                ShortestPathPointToLayer(),
                ShortestPathPointToPoint(),
                SingleSidedBuffer(),
                Slope(),
                SnapGeometriesToLayer(),
                SpatialiteExecuteSQL(),
                SpatialIndex(),
                SpatialJoin(),
                SpatialJoinSummary(),
                StatisticsByCategories(),
                SumLines(),
                SymmetricalDifference(),
                TextToFloat(),
                TinInterpolation(),
                TopoColor(),
                TruncateTable(),
                Union(),
                UniqueValues(),
                VariableDistanceBuffer(),
                VectorSplit(),
                VoronoiPolygons(),
                ZonalStatistics()
                ]
        # Plot-based algorithms are only available when the optional
        # plotly package imported successfully at module load.
        if hasPlotly:
            from .BarPlot import BarPlot
            from .BoxPlot import BoxPlot
            from .MeanAndStdDevPlot import MeanAndStdDevPlot
            from .PolarPlot import PolarPlot
            from .RasterLayerHistogram import RasterLayerHistogram
            from .VectorLayerHistogram import VectorLayerHistogram
            from .VectorLayerScatterplot import VectorLayerScatterplot
            from .VectorLayerScatterplot3D import VectorLayerScatterplot3D

            algs.extend([BarPlot(),
                         BoxPlot(),
                         MeanAndStdDevPlot(),
                         PolarPlot(),
                         RasterLayerHistogram(),
                         VectorLayerHistogram(),
                         VectorLayerScatterplot(),
                         VectorLayerScatterplot3D()])

        # to store algs added by 3rd party plugins as scripts
        #folder = os.path.join(os.path.dirname(__file__), 'scripts')
        #scripts = ScriptUtils.loadFromFolder(folder)
        #for script in scripts:
        #    script.allowEdit = False
        #algs.extend(scripts)

        return algs

    def id(self):
        """Provider id, used as the prefix of algorithm ids ('qgis:...')."""
        return 'qgis'

    def name(self):
        """Human-readable provider name shown in the toolbox."""
        return 'QGIS'

    def icon(self):
        """Provider icon from the active theme."""
        return QgsApplication.getThemeIcon("/providerQgis.svg")

    def svgIconPath(self):
        """Path to the provider's SVG icon."""
        return QgsApplication.iconPath("providerQgis.svg")

    def loadAlgorithms(self):
        """Register the built-in algorithms plus any external additions."""
        self.algs = self.getAlgs()
        for a in self.algs:
            self.addAlgorithm(a)
        for a in self.externalAlgs:
            self.addAlgorithm(a)

    def supportsNonFileBasedOutput(self):
        """These algorithms can write to non-file outputs (e.g. memory layers)."""
        return True
| gpl-2.0 |
joelpmichael/peer2backup | libs/key.py | 1 | 10615 | # -*- coding: utf_8 -*-
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
# Crypto Key Database
import os
import sqlite3
import shutil
import uuid
import random
import base64
import Crypto.PublicKey.RSA
import Crypto.Random.random
import Crypto.Cipher.PKCS1_OAEP
import Crypto.Hash.SHA256
# increment keydb_version on DB schema changes
keydb_version = 2017090101
# main key db
class KeyDb:
    """SQLite-backed store of RSA key pairs.

    Public keys live in the ``pubkey`` table (with an expiry timestamp);
    private keys live in ``privkey``, optionally protected by a password
    that is itself stored encrypted under a parent key's public key.
    """
    def __init__(self,dbpath):
        # Path to the sqlite database file; created on first use.
        self._dbpath = dbpath
        # create new DB if it doesn't exist
        if not os.path.isfile(self._dbpath):
            db_create(self._dbpath)
        # connect to db
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        # enable cell size checking
        c.execute('PRAGMA cell_size_check = 1')
        # optimize and quick-check on open
        c.execute('PRAGMA quick_check')
        check_result = c.fetchone()[0]
        if check_result != 'ok':
            raise ValueError("DB Check failed: " + check_result)
        c.execute('PRAGMA optimize')
        # check current db version against code version
        # perform upgrade if necessary
        c.execute('PRAGMA user_version')
        current_db_version = c.fetchone()[0]
        conn.close()
        if current_db_version < keydb_version:
            self._Upgrade(current_db_version)
    def New(self,parent_key_id=None,bits=2048,password=None,expiry='+2 years'):
        """Generate a new RSA key pair, store both halves, return its UUID.

        If *parent_key_id* is given, *password* protects the private key
        and is stored base64-encoded after encryption under the parent's
        public key.
        """
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        new_uuid = str(uuid.uuid4())
        key_priv = Crypto.PublicKey.RSA.generate(bits)
        key_pub = key_priv.publickey()
        store_password = None
        if parent_key_id:
            store_password = base64.standard_b64encode(self.Encrypt(parent_key_id,password))
        # Defensive: clear any rows that might already use this UUID.
        c.execute('DELETE FROM pubkey WHERE key_id=?', (new_uuid,))
        c.execute('DELETE FROM privkey WHERE key_id=?', (new_uuid,))
        c.execute('INSERT INTO pubkey (key_id, key_expiry, key) \
                  VALUES (?, datetime(\'now\', ?), ?)',
                  (new_uuid, expiry, key_pub.exportKey(),)
                  )
        c.execute('INSERT INTO privkey (key_id, key_unlock_key_id, key_unlock_password, key) \
                  VALUES (?, ?, ?, ?)',
                  (new_uuid, parent_key_id, store_password, key_priv.exportKey(passphrase=password),)
                  )
        conn.close()
        return new_uuid
    def Del(self,key_id):
        # NOTE(review): stub -- opens and closes the DB without deleting
        # anything; implementation pending.
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        conn.close()
    def Check(self,key_id):
        # NOTE(review): stub -- no check is performed yet.
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        conn.close()
    def HttpImport(self,data):
        # NOTE(review): stub -- not implemented.
        pass
    def HttpExport(self,data):
        # NOTE(review): stub -- not implemented.
        pass
    def ImportPubkey(self,key_id,key):
        # NOTE(review): stub -- opens and closes the DB without importing.
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        conn.close()
    def ExportPubkey(self,key_id):
        # NOTE(review): stub -- opens and closes the DB without exporting.
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        conn.close()
    def Encrypt(self,key_id,data):
        """Encrypt *data* (a str) with the unexpired public key *key_id*.

        Returns the raw ciphertext bytes; raises ValueError if the key is
        missing or expired.
        """
        # RSA PubKey Encryption of data
        # fetch public key
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        c.execute('SELECT key FROM pubkey WHERE key_id = ? AND key_expiry > datetime(\'now\')', (key_id,))
        row = c.fetchone()
        key_pub = None
        if not row:
            raise ValueError("Key not found in database")
        # create RSA key object
        key_pub = Crypto.PublicKey.RSA.importKey(row[0])
        # RSA encryption
        # NOTE(review): OAEP limits plaintext length relative to the key
        # size; confirm callers only encrypt short strings (passwords).
        cipher = Crypto.Cipher.PKCS1_OAEP.new(key_pub, hashAlgo=Crypto.Hash.SHA256)
        message = cipher.encrypt(data.encode('utf-8'))
        conn.close()
        return message
    def Decrypt(self,key_id,password,data):
        """Decrypt *data* with private key *key_id* unlocked by *password*.

        Returns the plaintext as str; raises ValueError if the key is
        missing or cannot be loaded.
        """
        # RSA PubKey Decryption of data
        # fetch public key
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        c.execute('SELECT key FROM privkey WHERE key_id = ?', (key_id,))
        row = c.fetchone()
        key_priv = None
        if not row:
            raise ValueError("Key not found in database")
        # create RSA key object
        key = row[0]
        key_priv = Crypto.PublicKey.RSA.importKey(key,passphrase=password)
        if not key_priv:
            raise ValueError("Key could not be loaded, bad password?")
        # RSA encryption
        cipher = Crypto.Cipher.PKCS1_OAEP.new(key_priv, hashAlgo=Crypto.Hash.SHA256)
        message = cipher.decrypt(data)
        conn.close()
        return message.decode('utf-8')
    def Sign(self,key_id,password,data):
        # NOTE(review): stub -- no signature is produced yet.
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        conn.close()
    def Verify(self,key_id,data):
        # NOTE(review): stub -- no verification is performed yet.
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        conn.close()
    def KeyPassword(self,key_id):
        # return the password stored in the db for the key (should be encrypted)
        # Returns the base64-decoded (still parent-key-encrypted) blob, or
        # None when the key has no stored unlock password.
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        c.execute('SELECT key_unlock_password FROM privkey WHERE key_id = ?', (key_id,))
        row = c.fetchone()
        conn.close()
        if not row:
            return None
        else:
            return base64.standard_b64decode(row[0])
    def _Upgrade(self,current_db_version):
        """Bring the schema from *current_db_version* up to keydb_version.

        Existing (non-new) databases are backed up under an exclusive lock
        and integrity-checked before any migration runs.
        """
        # connect to DB handle
        conn = sqlite3.connect(self._dbpath)
        conn.isolation_level = None
        c = conn.cursor()
        # current_db_version == 0 means DB is brand new
        # If not brand new, back it up and perform full checks
        if current_db_version > 0:
            c.execute('PRAGMA database_list')
            dbpath = c.fetchone()[2]
            # back up DB before modifying
            # lock the entire DB
            # see https://sqlite.org/pragma.html#pragma_locking_mode
            c.execute('PRAGMA locking_mode = EXCLUSIVE')
            # write some data to obtain an exclusive lock
            c.execute('CREATE TABLE __temp_upgrade (temp INT)')
            c.execute('INSERT INTO __temp_upgrade (temp) values (1)')
            c.execute('SELECT * FROM __temp_upgrade')
            c.execute('DROP TABLE __temp_upgrade')
            c.execute('PRAGMA query_only = 1')
            # copy DB file while we have an exclusive lock
            backupdbpath = dbpath + '-backup-v' + str(current_db_version)
            shutil.copyfile(dbpath, backupdbpath)
            # unlock & write again to release exclusive lock
            c.execute('PRAGMA query_only = 0')
            c.execute('PRAGMA locking_mode = NORMAL')
            c.execute('CREATE TABLE __temp_upgrade (temp INT)')
            c.execute('INSERT INTO __temp_upgrade (temp) values (1)')
            c.execute('SELECT * FROM __temp_upgrade')
            c.execute('DROP TABLE __temp_upgrade')
            # perform integrity check
            c.execute('PRAGMA integrity_check')
            check_result = c.fetchone()[0]
            if check_result != 'ok':
                raise ValueError("DB Check failed: " + check_result)
        # perform upgrades
        # IMPORTANT: upgrades are performed IN ORDER
        # remember to set current_db_version to the new version
        # Example:
        #if current_db_version < 2017090101:
        #    c.execute('CREATE TABLE foo(bar INT, baz TEXT)')
        #    c.execute('PRAGMA user_version = 2017090101')
        #    current_db_version = 2017090101
        #
        #if current_db_version < 2017090102:
        #    c.execute('alter table foo add column blah text')
        #    c.execute('PRAGMA user_version = 2017090102')
        #    current_db_version = 2017090102
        # version 2017090101
        # initial version
        if current_db_version < 2017090101:
            c.execute('CREATE TABLE privkey (key_id TEXT PRIMARY KEY NOT NULL, key TEXT, key_unlock_key_id TEXT, key_unlock_password TEXT)')
            c.execute('CREATE TABLE pubkey (key_id TEXT PRIMARY KEY NOT NULL, key TEXT, key_expiry TEXT)')
            c.execute('PRAGMA user_version = 2017090101')
            current_db_version = 2017090101
        # End of upgrades, run an optimize and vacuum too
        c.execute('PRAGMA optimize')
        c.execute('VACUUM')
        conn.close()
# in-memory password storage scrambling function for key passwords
class KeyPw:
    """In-memory scrambling of key passwords.

    Generates random passwords and encrypts/decrypts them with a
    session-lifetime RSA key pair so plaintext passwords are not kept
    around in memory longer than necessary.
    """
    def __init__(self):
        # possible characters for randomly-generated passwords (typable ASCII)
        self.pwchars = list('~!@#$%^&*()_+1234567890-=QWERTYUIOP{}|qwertyuiop[]\\ASDFGHJKL:"asdfghjkl;\'ZXCVBNM<>?zxcvbnm,./ ')
        # create RSA key pair to use during this session to encrypt key passwords
        self._session_key_priv = Crypto.PublicKey.RSA.generate(1024)
        self._session_key_pub = self._session_key_priv.publickey()
    def New(self,length=32):
        # generate password of length (default 32) characters from list in self.pwchars
        # max length is 128 characters (1024 bits in session RSA key)
        # NOTE(review): PyCrypto's RSA .size() returns (bits - 1), so this
        # cap is effectively 127, not 128 -- confirm against the installed
        # Crypto version.
        maxbytes = self._session_key_priv.size() / 8
        if length > maxbytes:
            raise ValueError("Length must not be larger than RSA key size")
        new_password = []
        for i in range(length):
            # Crypto.Random.random.choice: cryptographically strong RNG.
            new_password.append(Crypto.Random.random.choice(self.pwchars))
        newpw = ''.join(new_password)
        return newpw
    def SessionEncrypt(self,plainpw):
        # Encrypt a plaintext password under the session public key.
        cipher = Crypto.Cipher.PKCS1_OAEP.new(self._session_key_pub, hashAlgo=Crypto.Hash.SHA256)
        message = cipher.encrypt(plainpw.encode('utf-8'))
        return message
    def SessionDecrypt(self,encpw):
        # Recover a plaintext password encrypted by SessionEncrypt().
        cipher = Crypto.Cipher.PKCS1_OAEP.new(self._session_key_priv, hashAlgo=Crypto.Hash.SHA256)
        message = cipher.decrypt(encpw)
        return message.decode('utf-8')
def db_create(dbpath):
    """Create a fresh, empty key database at *dbpath*.

    Leaves ``user_version`` at 0 so the first upgrade pass can tell the
    file is brand new (and skip the backup step), enables cell size
    checking, fixes the page size and text encoding, and runs a VACUUM
    so the page size takes effect.
    """
    connection = sqlite3.connect(dbpath)
    connection.isolation_level = None  # autocommit mode
    cursor = connection.cursor()
    for statement in (
            'PRAGMA user_version = 0',
            'PRAGMA cell_size_check = 1',
            'PRAGMA page_size = 4096',
            'PRAGMA encoding = "UTF-8"',
            'VACUUM'):
        cursor.execute(statement)
    connection.close()
| apache-2.0 |
atul-bhouraskar/django | django/contrib/auth/admin.py | 89 | 8687 | from django.conf import settings
from django.conf.urls import url
from django.contrib import admin, messages
from django.contrib.admin.options import IS_POPUP_VAR
from django.contrib.admin.utils import unquote
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.forms import (
AdminPasswordChangeForm, UserChangeForm, UserCreationForm,
)
from django.contrib.auth.models import Group, User
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.db import transaction
from django.http import Http404, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text
from django.utils.html import escape
from django.utils.translation import ugettext, ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
csrf_protect_m = method_decorator(csrf_protect)
sensitive_post_parameters_m = method_decorator(sensitive_post_parameters())
@admin.register(Group)
class GroupAdmin(admin.ModelAdmin):
    """Admin options for the built-in ``Group`` model."""
    search_fields = ('name',)
    ordering = ('name',)
    filter_horizontal = ('permissions',)

    def formfield_for_manytomany(self, db_field, request=None, **kwargs):
        """Pre-select content types for the permissions queryset."""
        if db_field.name == 'permissions':
            qs = kwargs.get('queryset', db_field.remote_field.model.objects)
            # Avoid a major performance hit resolving permission names which
            # triggers a content_type load:
            kwargs['queryset'] = qs.select_related('content_type')
        return super(GroupAdmin, self).formfield_for_manytomany(
            db_field, request=request, **kwargs)
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
add_form_template = 'admin/auth/user/add_form.html'
change_user_password_template = None
fieldsets = (
(None, {'fields': ('username', 'password')}),
(_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'password1', 'password2'),
}),
)
form = UserChangeForm
add_form = UserCreationForm
change_password_form = AdminPasswordChangeForm
list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff')
list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups')
search_fields = ('username', 'first_name', 'last_name', 'email')
ordering = ('username',)
filter_horizontal = ('groups', 'user_permissions',)
def get_fieldsets(self, request, obj=None):
if not obj:
return self.add_fieldsets
return super(UserAdmin, self).get_fieldsets(request, obj)
def get_form(self, request, obj=None, **kwargs):
"""
Use special form during user creation
"""
defaults = {}
if obj is None:
defaults['form'] = self.add_form
defaults.update(kwargs)
return super(UserAdmin, self).get_form(request, obj, **defaults)
def get_urls(self):
return [
url(
r'^(.+)/password/$',
self.admin_site.admin_view(self.user_change_password),
name='auth_user_password_change',
),
] + super(UserAdmin, self).get_urls()
def lookup_allowed(self, lookup, value):
# See #20078: we don't want to allow any lookups involving passwords.
if lookup.startswith('password'):
return False
return super(UserAdmin, self).lookup_allowed(lookup, value)
@sensitive_post_parameters_m
@csrf_protect_m
@transaction.atomic
def add_view(self, request, form_url='', extra_context=None):
# It's an error for a user to have add permission but NOT change
# permission for users. If we allowed such users to add users, they
# could create superusers, which would mean they would essentially have
# the permission to change users. To avoid the problem entirely, we
# disallow users from adding users if they don't have change
# permission.
if not self.has_change_permission(request):
if self.has_add_permission(request) and settings.DEBUG:
# Raise Http404 in debug mode so that the user gets a helpful
# error message.
raise Http404(
'Your user does not have the "Change user" permission. In '
'order to add users, Django requires that your user '
'account have both the "Add user" and "Change user" '
'permissions set.')
raise PermissionDenied
if extra_context is None:
extra_context = {}
username_field = self.model._meta.get_field(self.model.USERNAME_FIELD)
defaults = {
'auto_populated_fields': (),
'username_help_text': username_field.help_text,
}
extra_context.update(defaults)
return super(UserAdmin, self).add_view(request, form_url,
extra_context)
    @sensitive_post_parameters_m
    def user_change_password(self, request, id, form_url=''):
        """
        Render and process the admin "change password" form for the user
        with primary key ``id``.

        Raises PermissionDenied without the change permission, Http404 when
        no such user exists. On a valid POST: saves the form, records a
        change log entry, keeps the current session authenticated and
        redirects to the user's change page.
        """
        if not self.has_change_permission(request):
            raise PermissionDenied
        user = self.get_object(request, unquote(id))
        if user is None:
            raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
                'name': force_text(self.model._meta.verbose_name),
                'key': escape(id),
            })
        if request.method == 'POST':
            form = self.change_password_form(user, request.POST)
            if form.is_valid():
                form.save()
                change_message = self.construct_change_message(request, form, None)
                self.log_change(request, user, change_message)
                msg = ugettext('Password changed successfully.')
                messages.success(request, msg)
                # Changing a password rotates the session auth hash; refresh
                # it so an admin changing their own password stays logged in.
                update_session_auth_hash(request, form.user)
                return HttpResponseRedirect(
                    reverse(
                        '%s:auth_%s_change' % (
                            self.admin_site.name,
                            user._meta.model_name,
                        ),
                        args=(user.pk,),
                    )
                )
        else:
            form = self.change_password_form(user)
        # Invalid POSTs fall through to re-render the bound form with errors.
        fieldsets = [(None, {'fields': list(form.base_fields)})]
        adminForm = admin.helpers.AdminForm(form, fieldsets, {})
        context = {
            'title': _('Change password: %s') % escape(user.get_username()),
            'adminForm': adminForm,
            'form_url': form_url,
            'form': form,
            'is_popup': (IS_POPUP_VAR in request.POST or
                         IS_POPUP_VAR in request.GET),
            'add': True,
            'change': False,
            'has_delete_permission': False,
            'has_change_permission': True,
            'has_absolute_url': False,
            'opts': self.model._meta,
            'original': user,
            'save_as': False,
            'show_save': True,
        }
        context.update(admin.site.each_context(request))
        request.current_app = self.admin_site.name
        return TemplateResponse(request,
                                self.change_user_password_template or
                                'admin/auth/user/change_password.html',
                                context)
def response_add(self, request, obj, post_url_continue=None):
"""
Determines the HttpResponse for the add_view stage. It mostly defers to
its superclass implementation but is customized because the User model
has a slightly different workflow.
"""
# We should allow further modification of the user just added i.e. the
# 'Save' button should behave like the 'Save and continue editing'
# button except in two scenarios:
# * The user has pressed the 'Save and add another' button
# * We are adding a user in a popup
if '_addanother' not in request.POST and IS_POPUP_VAR not in request.POST:
request.POST['_continue'] = 1
return super(UserAdmin, self).response_add(request, obj,
post_url_continue)
| bsd-3-clause |
sffjunkie/astral | src/test/test_sun_elevation_adjustment.py | 1 | 1206 | # -*- coding: utf-8 -*-
from datetime import datetime
import freezegun
import pytest
from almost_equal import datetime_almost_equal
from astral.geocoder import database, lookup
from astral.location import Location
from astral.sun import adjust_to_horizon, adjust_to_obscuring_feature, sunrise
class TestElevationAdjustment:
    """Tests for the sun-elevation adjustment helpers.

    ``adjust_to_horizon`` maps an observer elevation (presumably metres —
    confirm against astral docs) to an angle; ``adjust_to_obscuring_feature``
    maps an (elevation, distance) tuple describing an obscuring feature to
    the angle it subtends.
    """

    def test_Float_Positive(self):
        adjustment = adjust_to_horizon(12000)
        assert adjustment == pytest.approx(3.517744168209966)

    def test_Float_Negative(self):
        # Negative elevation produces no adjustment.
        adjustment = adjust_to_horizon(-1)
        assert adjustment == pytest.approx(0)

    def test_Tuple_0(self):
        # Zero elevation difference: zero angle regardless of distance.
        adjustment = adjust_to_obscuring_feature((0.0, 100.0))
        assert adjustment == 0.0

    def test_Tuple_45deg(self):
        adjustment = adjust_to_obscuring_feature((10.0, 10.0))
        assert adjustment == pytest.approx(45.0)

    def test_Tuple_30deg(self):
        # NOTE(review): the name says 30deg but the expected value is
        # ~53.13deg (= atan(4/3), not atan(3/4)) — verify the intended
        # argument order and consider renaming the test.
        adjustment = adjust_to_obscuring_feature((3.0, 4.0))
        assert adjustment == pytest.approx(53.130102354156)

    def test_Tuple_neg45deg(self):
        # Feature below the observer: the angle is negative.
        adjustment = adjust_to_obscuring_feature((-10.0, 10.0))
        assert adjustment == pytest.approx(-45.0)
| apache-2.0 |
t3dev/odoo | addons/l10n_in/report/exempted_gst_report.py | 1 | 4273 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, tools
class L10nInExemptedReport(models.Model):
    """SQL-view model behind the Indian GST exempted-supplies statistics.

    Each record mirrors one account move line, classified into intra-/
    inter-state supply types and with the line amount bucketed into
    nil-rated / exempted / non-GST columns depending on the taxes applied
    to the line.
    """
    _name = "l10n_in.exempted.report"
    _description = "Exempted Gst Supplied Statistics"
    # Read-only reporting model: no table is created by the ORM; a database
    # view is installed by init() below.
    _auto = False

    account_move_id = fields.Many2one('account.move', string="Account Move")
    partner_id = fields.Many2one('res.partner', string="Customer")
    out_supply_type = fields.Char(string="Outward Supply Type")
    in_supply_type = fields.Char(string="Inward Supply Type")
    nil_rated_amount = fields.Float("Nil rated supplies")
    exempted_amount = fields.Float("Exempted")
    non_gst_supplies = fields.Float("Non GST Supplies")
    date = fields.Date("Date")
    company_id = fields.Many2one('res.company', string="Company")
    journal_id = fields.Many2one('account.journal', string="Journal")

    def _select(self):
        """Return the SELECT part of the view definition.

        Amounts are sign-flipped for sale journals so outgoing supplies
        show positive. A line counts as nil-rated or exempted when any of
        its taxes belongs to the corresponding l10n_in tax group, and as a
        non-GST supply when it carries no taxes at all.

        NOTE(review): ``price_total`` is selected but has no matching field
        declared on the model — confirm whether it is intentional.
        """
        select_str = """SELECT aml.id AS id,
            aml.partner_id AS partner_id,
            aml.date_maturity AS date,
            aml.balance * (CASE WHEN aj.type = 'sale' THEN -1 ELSE 1 END) AS price_total,
            am.journal_id,
            aj.company_id,
            aml.move_id as account_move_id,
            (CASE WHEN p.state_id = cp.state_id
                THEN (CASE WHEN p.vat IS NOT NULL
                    THEN 'Intra-State supplies to registered persons'
                    ELSE 'Intra-State supplies to unregistered persons'
                    END)
                WHEN p.state_id != cp.state_id
                THEN (CASE WHEN p.vat IS NOT NULL
                    THEN 'Inter-State supplies to registered persons'
                    ELSE 'Inter-State supplies to unregistered persons'
                    END)
                END) AS out_supply_type,
            (CASE WHEN p.state_id = cp.state_id
                THEN 'Intra-State supplies'
                WHEN p.state_id != cp.state_id
                THEN 'Inter-State supplies'
                END) AS in_supply_type,
            (CASE WHEN (
                SELECT MAX(account_tax_id) FROM account_move_line_account_tax_rel
                JOIN account_tax at ON at.id = account_tax_id
                WHERE account_move_line_id = aml.id AND at.tax_group_id IN
                ((SELECT res_id FROM ir_model_data WHERE module='l10n_in' AND name='nil_rated_group'))
                ) IS NOT NULL
                THEN aml.balance * (CASE WHEN aj.type = 'sale' THEN -1 ELSE 1 END)
                ELSE 0
                END) AS nil_rated_amount,
            (CASE WHEN (
                SELECT MAX(account_tax_id) FROM account_move_line_account_tax_rel
                JOIN account_tax at ON at.id = account_tax_id
                WHERE account_move_line_id = aml.id AND at.tax_group_id IN
                ((SELECT res_id FROM ir_model_data WHERE module='l10n_in' AND name='exempt_group'))
                ) IS NOT NULL
                THEN aml.balance * (CASE WHEN aj.type = 'sale' THEN -1 ELSE 1 END)
                ELSE 0
                END) AS exempted_amount,
            (CASE WHEN (
                SELECT MAX(account_tax_id) FROM account_move_line_account_tax_rel
                WHERE account_move_line_id = aml.id
                ) IS NULL
                THEN aml.balance * (CASE WHEN aj.type = 'sale' THEN -1 ELSE 1 END)
                ELSE 0
                END) AS non_gst_supplies
            """
        return select_str

    def _from(self):
        """Return the FROM/WHERE part of the view definition.

        Restricts to non-tax move lines ('other' accounts, no tax_line_id).
        ``cp`` is the company's partner and ``p`` the move's partner; they
        are compared in _select() to decide intra- vs inter-state.
        """
        from_str = """FROM account_move_line aml
            JOIN account_move am ON am.id = aml.move_id
            JOIN account_account aa ON aa.id = aml.account_id
            JOIN account_journal aj ON aj.id = am.journal_id
            JOIN res_company c ON c.id = aj.company_id
            LEFT JOIN res_partner cp ON cp.id = c.partner_id
            LEFT JOIN res_partner p ON p.id = am.partner_id
            LEFT JOIN res_country pc ON pc.id = p.country_id
            WHERE aa.internal_type = 'other' and aml.tax_line_id IS NULL
            """
        return from_str

    @api.model_cr
    def init(self):
        # (Re)create the SQL view backing this model; _auto = False above
        # prevents the ORM from creating a table with the same name.
        tools.drop_view_if_exists(self.env.cr, self._table)
        self._cr.execute("""CREATE OR REPLACE VIEW %s AS (%s %s)""" % (
            self._table, self._select(), self._from()))
| gpl-3.0 |
asadziach/tensorflow | tensorflow/contrib/learn/python/learn/graph_actions.py | 14 | 28607 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High level operations on graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import threading
import time
import numpy as np
from six import reraise
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.ops import ops as contrib_ops
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import session_manager as session_manager_lib
from tensorflow.python.training import summary_io
from tensorflow.python.training import supervisor as tf_supervisor
from tensorflow.python.util.deprecation import deprecated
# Singleton for SummaryWriter per logdir folder.
# NOTE(review): _SUMMARY_WRITERS and its lock appear unused by the helpers
# visible in this module, which delegate to summary_io.SummaryWriterCache
# instead — confirm before removing.
_SUMMARY_WRITERS = {}

# Lock protecting _SUMMARY_WRITERS
_summary_writer_lock = threading.Lock()

# Decorator applied to every public function below: the whole graph_actions
# API is scheduled for deletion in favor of tf.train.* utilities.
_graph_action_deprecation = deprecated(
    '2017-02-15',
    'graph_actions.py will be deleted. Use tf.train.* utilities instead. '
    'You can use learn/estimators/estimator.py as an example.')
@_graph_action_deprecation
def clear_summary_writers():
  """Clear cached summary writers. Currently only used for unit tests."""
  # Delegates to the global SummaryWriterCache shared with get_summary_writer.
  return summary_io.SummaryWriterCache.clear()
def get_summary_writer(logdir):
  """Returns single SummaryWriter per logdir in current run.

  Args:
    logdir: str, folder to write summaries.

  Returns:
    Existing `SummaryWriter` object or new one if never wrote to given
    directory.
  """
  # Caching is handled by SummaryWriterCache, keyed on the logdir path.
  return summary_io.SummaryWriterCache.get(logdir)
def _make_saver(graph, keep_checkpoint_max=5):
  """Create a sharded `Saver` for the graph's variables and saveable
  objects, or return `None` when there is nothing to save."""
  saveables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
  saveables = saveables + graph.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS)
  if not saveables:
    return None
  return tf_saver.Saver(
      saveables, sharded=True, max_to_keep=keep_checkpoint_max)
def _restore_from_checkpoint(session, graph, checkpoint_path, saver=None):
  """Restore variables in `session` from `checkpoint_path`, building a
  saver from `graph` when none is supplied."""
  logging.info('Loading model from checkpoint: %s.', checkpoint_path)
  saver = saver or _make_saver(graph)
  if not saver:
    logging.info('No variables found in graph, not creating Saver() object.')
    return
  saver.restore(session, checkpoint_path)
def _run_with_monitors(session, step, tensors, feed_dict, monitors):
  """Runs session for given tensors with monitor callbacks.

  Monitors may request extra fetches via `step_begin`; after the run each
  monitor's `step_end` is invoked and may request a stop.

  Returns:
    A `(outputs, should_stop)` pair. `outputs` maps each fetch (the tensor
    name for `Tensor` fetches, the raw fetch otherwise) to its value.
  """
  for monitor in monitors:
    tensors += monitor.step_begin(step)
  # Collapse duplicate fetches requested by several monitors.
  tensors = list(set(tensors))

  fetched = session.run(tensors, feed_dict=feed_dict)
  keys = [t.name if isinstance(t, ops.Tensor) else t for t in tensors]
  outputs = dict(zip(keys, fetched))

  # Collect all step_end results first so every monitor's hook runs even
  # after one of them requests a stop.
  stop_requests = [monitor.step_end(step, outputs) for monitor in monitors]
  return outputs, any(stop_requests)
@_graph_action_deprecation
def train(graph,
          output_dir,
          train_op,
          loss_op,
          global_step_tensor=None,
          init_op=None,
          init_feed_dict=None,
          init_fn=None,
          log_every_steps=10,
          supervisor_is_chief=True,
          supervisor_master='',
          supervisor_save_model_secs=600,
          keep_checkpoint_max=5,
          supervisor_save_summaries_steps=100,
          feed_fn=None,
          steps=None,
          fail_on_nan_loss=True,
          monitors=None,
          max_steps=None):
  """Train a model.

  Given `graph`, a directory to write outputs to (`output_dir`), and some ops,
  run a training loop. The given `train_op` performs one step of training on the
  model. The `loss_op` represents the objective function of the training. It is
  expected to increment the `global_step_tensor`, a scalar integer tensor
  counting training steps. This function uses `Supervisor` to initialize the
  graph (from a checkpoint if one is available in `output_dir`), write summaries
  defined in the graph, and write regular checkpoints as defined by
  `supervisor_save_model_secs`.

  Training continues until `global_step_tensor` evaluates to `max_steps`, or, if
  `fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the
  program is terminated with exit code 1.

  Args:
    graph: A graph to train. It is expected that this graph is not in use
      elsewhere.
    output_dir: A directory to write outputs to.
    train_op: An op that performs one training step when run.
    loss_op: A scalar loss tensor.
    global_step_tensor: A tensor representing the global step. If none is given,
      one is extracted from the graph using the same logic as in `Supervisor`.
    init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
      default.
    init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
      This feed dictionary will be used when `init_op` is evaluated.
    init_fn: Optional callable passed to Supervisor to initialize the model.
    log_every_steps: Output logs regularly. The logs contain timing data and the
      current loss.
    supervisor_is_chief: Whether the current process is the chief supervisor in
      charge of restoring the model and running standard services.
    supervisor_master: The master string to use when preparing the session.
    supervisor_save_model_secs: Save a checkpoint every
      `supervisor_save_model_secs` seconds when training.
    keep_checkpoint_max: The maximum number of recent checkpoint files to
      keep. As new files are created, older files are deleted. If None or 0,
      all checkpoint files are kept. This is simply passed as the max_to_keep
      arg to tf.train.Saver constructor.
    supervisor_save_summaries_steps: Save summaries every
      `supervisor_save_summaries_steps` seconds when training.
    feed_fn: A function that is called every iteration to produce a `feed_dict`
      passed to `session.run` calls. Optional.
    steps: Trains for this many steps (e.g. current global step + `steps`).
    fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`
      evaluates to `NaN`. If false, continue training as if nothing happened.
    monitors: List of `BaseMonitor` subclass instances. Used for callbacks
      inside the training loop.
    max_steps: Number of total steps for which to train model. If `None`,
      train forever. Two calls fit(steps=100) means 200 training iterations.
      On the other hand two calls of fit(max_steps=100) means, second call
      will not do any iteration since first call did all 100 steps.

  Returns:
    The final loss value.

  Raises:
    ValueError: If `output_dir`, `train_op`, `loss_op`, or `global_step_tensor`
      is not provided. See `tf.contrib.framework.get_global_step` for how we
      look up the latter if not provided explicitly.
    NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever
      evaluates to `NaN`.
    ValueError: If both `steps` and `max_steps` are not `None`.
  """
  # A parameter-server restart aborts the session; training resumes from the
  # last checkpoint, so retry indefinitely instead of failing the job.
  while True:
    try:
      return _train_internal(graph,
                             output_dir,
                             train_op,
                             loss_op,
                             global_step_tensor,
                             init_op,
                             init_feed_dict,
                             init_fn,
                             log_every_steps,
                             supervisor_is_chief,
                             supervisor_master,
                             supervisor_save_model_secs,
                             keep_checkpoint_max,
                             supervisor_save_summaries_steps,
                             feed_fn,
                             steps,
                             fail_on_nan_loss,
                             monitors,
                             max_steps)
    except errors.AbortedError:
      # Happens when PS restarts, keep training.
      logging.warning('Training got Aborted error. Keep training.')
def _train_internal(graph,
                    output_dir,
                    train_op,
                    loss_op,
                    global_step_tensor,
                    init_op,
                    init_feed_dict,
                    init_fn,
                    log_every_steps,
                    supervisor_is_chief,
                    supervisor_master,
                    supervisor_save_model_secs,
                    keep_checkpoint_max,
                    supervisor_save_summaries_steps,
                    feed_fn,
                    steps,
                    fail_on_nan_loss,
                    monitors,
                    max_steps):
  """See train."""
  # Validate arguments up front: steps/max_steps are mutually exclusive and
  # output_dir, train_op and loss_op are mandatory.
  if (steps is not None) and (max_steps is not None):
    raise ValueError('Can not provide both steps and max_steps.')
  if not output_dir:
    raise ValueError('Output directory should be non-empty %s.' % output_dir)
  if train_op is None:
    raise ValueError('Missing train_op.')
  if loss_op is None:
    raise ValueError('Missing loss_op.')

  # Graph-construction phase: resolve the global step, build default
  # monitors and let monitors add any ops they need.
  with graph.as_default():
    global_step_tensor = contrib_variables.assert_or_get_global_step(
        graph, global_step_tensor)
    if global_step_tensor is None:
      raise ValueError('No "global_step" was provided or found in the graph.')

    # Get current step.
    try:
      start_step = load_variable(output_dir, global_step_tensor.name)
    except (errors.NotFoundError, ValueError):
      # No usable checkpoint yet: start from step 0.
      start_step = 0

    summary_writer = (get_summary_writer(output_dir)
                      if supervisor_is_chief else None)

    # Add default chief monitors if none were provided.
    if not monitors:
      monitors = monitors_lib.get_default_monitors(
          loss_op=loss_op,
          summary_op=logging_ops.get_summary_op(),
          save_summary_steps=supervisor_save_summaries_steps,
          summary_writer=summary_writer) if supervisor_is_chief else []

    # TODO(ipolosukhin): Replace all functionality of Supervisor
    # with Chief-Exclusive Monitors.
    if not supervisor_is_chief:
      # Prune list of monitor to the ones runnable on all workers.
      monitors = [monitor for monitor in monitors if monitor.run_on_all_workers]

    if max_steps is None:
      max_steps = (start_step + steps) if steps else None
    # Start monitors, can create graph parts.
    for monitor in monitors:
      monitor.begin(max_steps=max_steps)

  # The Supervisor owns checkpoint restore/saving and standard services;
  # summary writing is handled by the monitors (summary_op=None).
  supervisor = tf_supervisor.Supervisor(
      graph,
      init_op=init_op or tf_supervisor.Supervisor.USE_DEFAULT,
      init_feed_dict=init_feed_dict,
      is_chief=supervisor_is_chief,
      logdir=output_dir,
      saver=_make_saver(graph, keep_checkpoint_max),
      global_step=global_step_tensor,
      summary_op=None,
      summary_writer=summary_writer,
      save_model_secs=supervisor_save_model_secs,
      init_fn=init_fn)
  session = supervisor.PrepareSession(master=supervisor_master,
                                      start_standard_services=True)
  supervisor.StartQueueRunners(session)

  with session:
    get_current_step = lambda: session.run(global_step_tensor)

    start_step = get_current_step()
    last_step = start_step
    last_log_step = start_step
    loss_value = None
    logging.info('Training steps [%d,%s)', last_step, 'inf'
                 if max_steps is None else str(max_steps))

    # excinfo holds any unexpected exception so a final checkpoint/summary
    # can still be written before it is re-raised in the finally block.
    excinfo = None
    try:
      while not supervisor.ShouldStop() and (
          (max_steps is None) or (last_step < max_steps)):
        start_time = time.time()
        feed_dict = feed_fn() if feed_fn is not None else None

        outputs, should_stop = _run_with_monitors(
            session, last_step + 1, [train_op, loss_op], feed_dict, monitors)

        loss_value = outputs[loss_op.name]
        if np.isnan(loss_value):
          failure_message = 'Model diverged with loss = NaN.'
          if fail_on_nan_loss:
            logging.error(failure_message)
            raise monitors_lib.NanLossDuringTrainingError()
          else:
            logging.warning(failure_message)

        if should_stop:
          break

        this_step = get_current_step()
        if this_step <= last_step:
          logging.error(
              'Global step was not incremented by train op at step %s'
              ': new step %d', last_step, this_step)

        last_step = this_step
        is_last_step = (max_steps is not None) and (last_step >= max_steps)
        if is_last_step or (last_step - last_log_step >= log_every_steps):
          logging.info(
              'training step %d, loss = %.5f (%.3f sec/batch).',
              last_step, loss_value, float(time.time() - start_time))
          last_log_step = last_step
    except errors.OutOfRangeError as e:
      logging.warn('Got exception during tf.learn training loop possibly '
                   'due to exhausted input queue %s.', e)
    except StopIteration:
      logging.info('Exhausted input iterarator.')
    except BaseException as e:  # pylint: disable=broad-except
      # Hold on to any other exceptions while we try recording a final
      # checkpoint and summary.
      excinfo = sys.exc_info()
    finally:
      try:
        # Call supervisor.Stop() from within a try block because it re-raises
        # exceptions thrown by the supervised threads.
        supervisor.Stop(close_summary_writer=False)

        # Save one last checkpoint and summaries
        # TODO(wicke): This should be handled by Supervisor

        # In case we encountered an exception in the try block before we updated
        # last_step, update it here (again).
        last_step = get_current_step()
        if supervisor_is_chief:
          ckpt_path = supervisor.save_path
          logging.info('Saving checkpoint for step %d to checkpoint: %s.',
                       last_step, ckpt_path)
          supervisor.saver.save(session, ckpt_path, global_step=last_step)

        # Finish monitors.
        for monitor in monitors:
          monitor.end()

      # catch OutOfRangeError which is thrown when queue is out of data (and for
      # other reasons as well).
      except errors.OutOfRangeError as e:
        logging.warn('OutOfRangeError in tf.learn final checkpoint possibly '
                     'due to exhausted input queue. Note: summary_op is not '
                     'expected to trigger dequeues. %s.', e)
      except BaseException as e:  # pylint: disable=broad-except
        # If we don't already have an exception to re-raise, raise this one.
        if not excinfo:
          raise
        # Otherwise, log this one and raise the other in the finally block.
        logging.error('Got exception during tf.learn final checkpoint %s.', e)
      finally:
        if excinfo:
          reraise(*excinfo)
  return loss_value
def _get_first_op_from_collection(collection_name):
  """Return the first op registered under `collection_name`, or None."""
  elements = ops.get_collection(collection_name)
  return elements[0] if elements else None
def _get_saver():
  """Return the graph's cached `Saver`, creating and caching one on demand."""
  cached = _get_first_op_from_collection(ops.GraphKeys.SAVERS)
  if cached is not None:
    return cached
  if not variables.global_variables():
    # Nothing to save: leave the collection empty and return None.
    return None
  saver = tf_saver.Saver()
  ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
  return saver
def _get_ready_op():
  """Return (and cache) the op that reports uninitialized variables."""
  cached = _get_first_op_from_collection(ops.GraphKeys.READY_OP)
  if cached is not None:
    return cached
  ready_op = variables.report_uninitialized_variables()
  ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
  return ready_op
def _get_local_init_op():
  """Return (and cache) the op initializing local variables and tables."""
  cached = _get_first_op_from_collection(ops.GraphKeys.LOCAL_INIT_OP)
  if cached is not None:
    return cached
  local_init_op = control_flow_ops.group(
      variables.local_variables_initializer(),
      data_flow_ops.tables_initializer())
  ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
  return local_init_op
def _eval_results_to_str(eval_results):
return ', '.join('%s = %s' % (k, v) for k, v in sorted(eval_results.items()))
def _write_summary_results(output_dir, eval_results, current_global_step):
  """Writes eval results into summary file in given dir.

  Args:
    output_dir: Directory the summary event file is written to.
    eval_results: Dict mapping tag names to numeric values. May be `None`
      (evaluate() can finish without producing results), in which case
      nothing is written.
    current_global_step: Global step the results correspond to.
  """
  if eval_results is None:
    # Bug fix: the original iterated `None` (TypeError) when evaluation
    # produced no results; skip writing instead.
    logging.info('Skipping summary for step %d, no eval results.',
                 current_global_step)
    return
  logging.info('Saving evaluation summary for step %d: %s', current_global_step,
               _eval_results_to_str(eval_results))
  summary_writer = get_summary_writer(output_dir)
  summary = summary_pb2.Summary()
  for key, result in eval_results.items():
    if result is None:
      continue
    if isinstance(result, (np.float32, float)):
      # Bug fix: only add a Value entry for results we actually record; the
      # original added an empty tagged Value even for skipped non-floats.
      value = summary.value.add()
      value.tag = key
      value.simple_value = float(result)
    else:
      logging.warn('Skipping summary for %s, must be a float or np.float32.',
                   key)
  summary_writer.add_summary(summary, current_global_step)
  summary_writer.flush()
@_graph_action_deprecation
def evaluate(graph,
             output_dir,
             checkpoint_path,
             eval_dict,
             update_op=None,
             global_step_tensor=None,
             supervisor_master='',
             log_every_steps=10,
             feed_fn=None,
             max_steps=None):
  """Evaluate a model loaded from a checkpoint.

  Given `graph`, a directory to write summaries to (`output_dir`), a checkpoint
  to restore variables from, and a `dict` of `Tensor`s to evaluate, run an eval
  loop for `max_steps` steps, or until an exception (generally, an
  end-of-input signal from a reader operation) is raised from running
  `eval_dict`.

  In each step of evaluation, all tensors in the `eval_dict` are evaluated, and
  every `log_every_steps` steps, they are logged. At the very end of evaluation,
  a summary is evaluated (finding the summary ops using `Supervisor`'s logic)
  and written to `output_dir`.

  Args:
    graph: A `Graph` to train. It is expected that this graph is not in use
      elsewhere.
    output_dir: A string containing the directory to write a summary to.
    checkpoint_path: A string containing the path to a checkpoint to restore.
      Can be `None` if the graph doesn't require loading any variables.
    eval_dict: A `dict` mapping string names to tensors to evaluate. It is
      evaluated in every logging step. The result of the final evaluation is
      returned. If `update_op` is None, then it's evaluated in every step. If
      `max_steps` is `None`, this should depend on a reader that will raise an
      end-of-input exception when the inputs are exhausted.
    update_op: A `Tensor` which is run in every step.
    global_step_tensor: A `Variable` containing the global step. If `None`,
      one is extracted from the graph using the same logic as in `Supervisor`.
      Used to place eval summaries on training curves.
    supervisor_master: The master string to use when preparing the session.
    log_every_steps: Integer. Output logs every `log_every_steps` evaluation
      steps. The logs contain the `eval_dict` and timing information.
    feed_fn: A function that is called every iteration to produce a `feed_dict`
      passed to `session.run` calls. Optional.
    max_steps: Integer. Evaluate `eval_dict` this many times.

  Returns:
    A tuple `(eval_results, global_step)`:
    eval_results: A `dict` mapping `string` to numeric values (`int`, `float`)
      that are the result of running eval_dict in the last step. `None` if no
      eval steps were run.
    global_step: The global step this evaluation corresponds to.

  Raises:
    ValueError: if `output_dir` is empty.
  """
  if not output_dir:
    raise ValueError('Output directory should be non-empty %s.' % output_dir)
  with graph.as_default():
    global_step_tensor = contrib_variables.assert_or_get_global_step(
        graph, global_step_tensor)

    # Create or get summary op, global_step and saver.
    saver = _get_saver()
    local_init_op = _get_local_init_op()
    ready_for_local_init_op = _get_first_op_from_collection(
        ops.GraphKeys.READY_FOR_LOCAL_INIT_OP)
    ready_op = _get_ready_op()

    session_manager = session_manager_lib.SessionManager(
        local_init_op=local_init_op,
        ready_op=ready_op,
        ready_for_local_init_op=ready_for_local_init_op)
    # `initialized` is False when the session could not be restored from
    # checkpoint_path (handled below).
    session, initialized = session_manager.recover_session(
        master=supervisor_master,
        saver=saver,
        checkpoint_dir=checkpoint_path)

    # Start queue runners.
    coord = coordinator.Coordinator()
    threads = queue_runner.start_queue_runners(session, coord)

  with session:
    if not initialized:
      logging.warning('Failed to initialize from %s.', checkpoint_path)
      # TODO(ipolosukhin): This should be failing, but old code relies on that.
      session.run(variables.global_variables_initializer())
      if checkpoint_path:
        _restore_from_checkpoint(session, graph, checkpoint_path, saver)

    current_global_step = session.run(global_step_tensor)
    eval_results = None
    # TODO(amodei): Fix this to run through the eval set exactly once.
    step = 0
    # eval_step tracks the step at which eval_dict was last evaluated, so
    # it is not re-run needlessly for logging or the final result.
    eval_step = None
    feed_dict = None
    logging.info('Eval steps [%d,%s) for training step %d.', step,
                 'inf' if max_steps is None
                 else str(max_steps), current_global_step)
    try:
      try:
        while (max_steps is None) or (step < max_steps):
          step += 1
          start_time = time.time()
          feed_dict = feed_fn() if feed_fn is not None else None
          if update_op is not None:
            session.run(update_op, feed_dict=feed_dict)
          else:
            eval_results = session.run(eval_dict, feed_dict=feed_dict)
            eval_step = step

          # TODO(wicke): We should assert that the global step hasn't changed.
          if step % log_every_steps == 0:
            if eval_step is None or step != eval_step:
              eval_results = session.run(eval_dict, feed_dict=feed_dict)
              eval_step = step
            duration = time.time() - start_time
            logging.info('Results after %d steps (%.3f sec/batch): %s.',
                         step, float(duration),
                         _eval_results_to_str(eval_results))
      finally:
        # Ensure eval_dict has been evaluated at the final step before
        # shutting anything down.
        if eval_results is None or step != eval_step:
          eval_results = session.run(eval_dict, feed_dict=feed_dict)
          eval_step = step
        # Stop session first, before queue runners.
        session.close()

        # Stop queue runners.
        try:
          coord.request_stop()
          coord.join(threads, stop_grace_period_secs=120)
        except (RuntimeError, errors.CancelledError) as e:
          logging.warning('Coordinator didn\'t stop cleanly: %s', e)

    # catch OutOfRangeError which is thrown when queue is out of data (and for
    # other reasons as well).
    except errors.OutOfRangeError as e:
      if max_steps is None:
        logging.info('Input queue is exhausted.')
      else:
        logging.warn('Input queue is exhausted: %s.', e)
    # catch StopIteration which is thrown is DataReader is out of data.
    except StopIteration as e:
      if max_steps is None:
        logging.info('Input iterator is exhausted.')
      else:
        logging.warn('Input iterator is exhausted: %s.', e)

  # Save summaries for this evaluation.
  _write_summary_results(output_dir, eval_results, current_global_step)

  return eval_results, current_global_step
@_graph_action_deprecation
def run_n(output_dict, feed_dict=None, restore_checkpoint_path=None, n=1):
  """Run `output_dict` tensors `n` times, with the same `feed_dict` each run.

  Args:
    output_dict: A `dict` mapping string names to tensors to run. Must all be
      from the same graph.
    feed_dict: `dict` of input values to feed each run.
    restore_checkpoint_path: A string containing the path to a checkpoint to
      restore.
    n: Number of times to repeat.

  Returns:
    A list of `n` `dict` objects, each containing values read from `output_dict`
    tensors.
  """
  # itertools.repeat is always truthy, so run_feeds_iter's feed_dicts
  # validation passes even for n=0 (which then yields an empty list).
  return run_feeds(
      output_dict=output_dict,
      feed_dicts=itertools.repeat(feed_dict, n),
      restore_checkpoint_path=restore_checkpoint_path)
@_graph_action_deprecation
def run_feeds_iter(output_dict, feed_dicts, restore_checkpoint_path=None):
  """Run `output_dict` tensors with each input in `feed_dicts`.

  If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,
  init all variables.

  Args:
    output_dict: A `dict` mapping string names to `Tensor` objects to run.
      Tensors must all be from the same graph.
    feed_dicts: Iterable of `dict` objects of input values to feed.
    restore_checkpoint_path: A string containing the path to a checkpoint to
      restore.

  Yields:
    A sequence of dicts of values read from `output_dict` tensors, one item
    yielded for each item in `feed_dicts`. Keys are the same as `output_dict`,
    values are the results read from the corresponding `Tensor` in
    `output_dict`.

  Raises:
    ValueError: if `output_dict` or `feed_dicts` is None or empty.
  """
  if not output_dict:
    raise ValueError('output_dict is invalid: %s.' % output_dict)
  if not feed_dicts:
    raise ValueError('feed_dicts is invalid: %s.' % feed_dicts)

  # Infer the graph from the requested outputs; all fetches must share it.
  graph = contrib_ops.get_graph_from_inputs(output_dict.values())
  with graph.as_default() as g:
    with tf_session.Session('') as session:
      session.run(
          resources.initialize_resources(resources.shared_resources() +
                                         resources.local_resources()))
      # Restore from checkpoint when given, otherwise run all initializers.
      if restore_checkpoint_path:
        _restore_from_checkpoint(session, g, restore_checkpoint_path)
      else:
        session.run(variables.global_variables_initializer())
        session.run(variables.local_variables_initializer())
        session.run(data_flow_ops.tables_initializer())
      coord = coordinator.Coordinator()
      threads = None
      try:
        threads = queue_runner.start_queue_runners(session, coord=coord)
        for f in feed_dicts:
          yield session.run(output_dict, f)
      finally:
        # Always signal the coordinator — even when the consumer abandons
        # the generator — so queue-runner threads exit.
        coord.request_stop()
        if threads:
          coord.join(threads, stop_grace_period_secs=120)
@_graph_action_deprecation
def run_feeds(*args, **kwargs):
  """See run_feeds_iter(). Returns a `list` instead of an iterator."""
  # Eagerly drains the generator: the session is created, used and closed
  # before this returns.
  return list(run_feeds_iter(*args, **kwargs))
@_graph_action_deprecation
def infer(restore_checkpoint_path, output_dict, feed_dict=None):
  """Restore graph from `restore_checkpoint_path` and run `output_dict` tensors.

  If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,
  init all variables.

  Args:
    restore_checkpoint_path: A string containing the path to a checkpoint to
      restore.
    output_dict: A `dict` mapping string names to `Tensor` objects to run.
      Tensors must all be from the same graph.
    feed_dict: `dict` object mapping `Tensor` objects to input values to feed.

  Returns:
    Dict of values read from `output_dict` tensors. Keys are the same as
    `output_dict`, values are the results read from the corresponding `Tensor`
    in `output_dict`.

  Raises:
    ValueError: if `output_dict` or `feed_dicts` is None or empty.
  """
  # Single-shot wrapper over run_feeds: wrap the one feed dict in a list
  # ([None] when absent) and return the first (only) result.
  return run_feeds(output_dict=output_dict,
                   feed_dicts=[feed_dict] if feed_dict is not None else [None],
                   restore_checkpoint_path=restore_checkpoint_path)[0]
| apache-2.0 |
cgundogan/RIOT | tests/gnrc_sock_udp/tests/01-run.py | 39 | 3105 | #!/usr/bin/env python3
# Copyright (C) 2016 Kaspar Schleiser <kaspar@schleiser.de>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import sys
from testrunner import run
def testfunc(child):
child.expect_exact(u"Calling test_sock_udp_create__EADDRINUSE()")
child.expect_exact(u"Calling test_sock_udp_create__EAFNOSUPPORT()")
child.expect_exact(u"Calling test_sock_udp_create__EINVAL_addr()")
child.expect_exact(u"Calling test_sock_udp_create__EINVAL_netif()")
child.expect_exact(u"Calling test_sock_udp_create__no_endpoints()")
child.expect_exact(u"Calling test_sock_udp_create__only_local()")
child.expect_exact(u"Calling test_sock_udp_create__only_local_reuse_ep()")
child.expect_exact(u"Calling test_sock_udp_create__only_remote()")
child.expect_exact(u"Calling test_sock_udp_create__full()")
child.expect_exact(u"Calling test_sock_udp_recv__EADDRNOTAVAIL()")
child.expect_exact(u"Calling test_sock_udp_recv__EAGAIN()")
child.expect_exact(u"Calling test_sock_udp_recv__ENOBUFS()")
child.expect_exact(u"Calling test_sock_udp_recv__EPROTO()")
child.expect_exact(u"Calling test_sock_udp_recv__ETIMEDOUT()")
child.expect_exact(u" * Calling sock_udp_recv()")
child.expect(r" \* \(timed out with timeout \d+\)")
child.expect_exact(u"Calling test_sock_udp_recv__socketed()")
child.expect_exact(u"Calling test_sock_udp_recv__socketed_with_remote()")
child.expect_exact(u"Calling test_sock_udp_recv__unsocketed()")
child.expect_exact(u"Calling test_sock_udp_recv__unsocketed_with_remote()")
child.expect_exact(u"Calling test_sock_udp_recv__with_timeout()")
child.expect_exact(u"Calling test_sock_udp_recv__non_blocking()")
child.expect_exact(u"Calling test_sock_udp_send__EAFNOSUPPORT()")
child.expect_exact(u"Calling test_sock_udp_send__EINVAL_addr()")
child.expect_exact(u"Calling test_sock_udp_send__EINVAL_netif()")
child.expect_exact(u"Calling test_sock_udp_send__EINVAL_port()")
child.expect_exact(u"Calling test_sock_udp_send__ENOTCONN()")
child.expect_exact(u"Calling test_sock_udp_send__socketed_no_local_no_netif()")
child.expect_exact(u"Calling test_sock_udp_send__socketed_no_netif()")
child.expect_exact(u"Calling test_sock_udp_send__socketed_no_local()")
child.expect_exact(u"Calling test_sock_udp_send__socketed()")
child.expect_exact(u"Calling test_sock_udp_send__socketed_other_remote()")
child.expect_exact(u"Calling test_sock_udp_send__unsocketed_no_local_no_netif()")
child.expect_exact(u"Calling test_sock_udp_send__unsocketed_no_netif()")
child.expect_exact(u"Calling test_sock_udp_send__unsocketed_no_local()")
child.expect_exact(u"Calling test_sock_udp_send__unsocketed()")
child.expect_exact(u"Calling test_sock_udp_send__no_sock_no_netif()")
child.expect_exact(u"Calling test_sock_udp_send__no_sock()")
child.expect_exact(u"ALL TESTS SUCCESSFUL")
if __name__ == "__main__":
    # Propagate the testrunner's exit status so CI reflects the result.
    sys.exit(run(testfunc))
| lgpl-2.1 |
onto/sonata | sonata/misc.py | 1 | 6358 |
import os
import subprocess
import re
import locale
import logging
import sys
logger = logging.getLogger(__name__)
def convert_time(raw):
    """Convert a duration in seconds to 'h:mm:ss' or 'm:ss'.

    The hours field is omitted when zero, and the leading field never has
    a padded zero ('0:59', '1:01', '1:01:01').
    """
    # divmod with integer arithmetic keeps this correct on both Python 2
    # and Python 3 (the original relied on '/' truncating, which yields
    # floats on Python 3 and only worked via '%02d' coercion).
    minutes, seconds = divmod(int(raw), 60)
    hours, minutes = divmod(minutes, 60)
    if hours:
        return '%d:%02d:%02d' % (hours, minutes, seconds)
    return '%d:%02d' % (minutes, seconds)
def bold(s):
    """Wrap *s* in <b></b> markup unless it is already wrapped."""
    text = str(s)
    if text.startswith('<b>') and text.endswith('</b>'):
        return s
    return '<b>%s</b>' % s
def unbold(s):
    """Strip a surrounding <b></b> pair from *s*, if present."""
    text = str(s)
    if not (text.startswith('<b>') and text.endswith('</b>')):
        return s
    return s[3:-4]
def escape_html(s):
    """Escape &, <, > and " in *s* for safe use in HTML/Pango markup.

    The entity names had been destroyed (each replace() mapped a character
    onto itself, a no-op); restored from the function's stated intent.
    """
    # & needs to be escaped first, before more are introduced:
    s = s.replace('&', '&amp;')
    s = s.replace('<', '&lt;')
    s = s.replace('>', '&gt;')
    s = s.replace('"', '&quot;')
    return s
def unescape_html(s):
s = s.replace('<', '<')
s = s.replace('>', '>')
s = s.replace('"', '"')
s = s.replace(' ', ' ')
# & needs to be unescaped last, so it can't get unescaped twice
s = s.replace('&', '&')
# FIXME why did we have this too? s = s.replace('amp;', '&')
return s
def wiki_to_html(s):
    """Translate wiki quote markup (''italic'', '''bold''') into HTML tags."""
    # XXX Should we depend on a library to do this or get
    # html from the services?
    # Longest marker first, so bold-italic is not eaten by the bold rule.
    substitutions = (
        (r"'''''(.*?)'''''", r"<i><b>\1</b></i>"),
        (r"'''(.*?)'''", r"<b>\1</b>"),
        (r"''(.*?)''", r"<i>\1</i>"),
    )
    for pattern, replacement in substitutions:
        s = re.sub(pattern, replacement, s)
    return s
def strip_all_slashes(s):
    """Remove backslashes, forward slashes and double quotes from *s*.

    Returns "" when *s* is None.
    """
    if s is None:
        return ""
    for forbidden in ('\\', '/', '"'):
        s = s.replace(forbidden, "")
    return s
def _rmgeneric(path, __func__):
try:
__func__(path)
except OSError:
pass
def is_binary(f):
    """Return True when the text *f* contains a NUL byte."""
    return '\0' in f  # found null byte
def link_markup(s, enclose_in_parentheses, small, linkcolor):
    """Wrap *s* in Pango span markup styled like a hyperlink label."""
    color = linkcolor if linkcolor else "blue"  # no theme color, default to blue..
    if enclose_in_parentheses:
        s = "(%s)" % s
    if small:
        s = "<small>%s</small>" % s
    return "<span color='%s'>%s</span>" % (color, s)
def iunique(iterable, key=id):
    """Yield items of *iterable* in order, skipping duplicates by key(item)."""
    seen = set()
    for item in iterable:
        marker = key(item)
        if marker in seen:
            continue
        seen.add(marker)
        yield item
def remove_list_duplicates(inputlist, case=True):
    """Return *inputlist* with duplicates removed, original order preserved.

    With case=False, items whose repr() differs only in case count as
    duplicates (repr() lets this also handle lists of tuples).
    """
    # Note that we can't use list(set(inputlist))
    # because we want the inputlist order preserved.
    if case:
        key = lambda x: x
    else:
        # FIXME: Doesn't correctly compare uppercase and
        # lowercase unicode
        key = lambda x: repr(x).lower()
    # Dedup inline (same algorithm the iunique() helper uses).
    seen = set()
    result = []
    for item in inputlist:
        marker = key(item)
        if marker not in seen:
            seen.add(marker)
            result.append(item)
    return result
the_re = re.compile('^the ')


def lower_no_the(s):
    """Lowercase *s* and strip a leading "the " (e.g. for sorting artists).

    The Python 2-only unicode()/str() round-trip was removed: it raised
    NameError on Python 3 and was a no-op for ASCII input on Python 2.
    """
    return the_re.sub('', s.lower())
def create_dir(dirname):
    """Create *dirname* (with ~ expanded) and any missing parents.

    Best-effort: permission errors and create races are silently ignored,
    matching the original behaviour callers rely on.
    """
    path = os.path.expanduser(dirname)  # expand once instead of twice
    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except (IOError, OSError):
            # e.g. another process created it between the check and here
            pass
def remove_file(filename):
    """Delete *filename* if it exists; ignore deletion failures.

    The bare ``except`` is narrowed to OSError so KeyboardInterrupt and
    SystemExit are no longer swallowed.
    """
    if os.path.exists(filename):
        try:
            os.remove(filename)
        except OSError:
            pass
def remove_dir_recursive(path):
    """Best-effort removal of everything below *path*.

    Files are deleted, subdirectories are emptied then removed; *path*
    itself is left in place (empty). Failed deletions are ignored.
    """
    if not os.path.isdir(path):
        return
    for entry in os.listdir(path):
        fullpath = os.path.join(path, entry)
        if os.path.isfile(fullpath):
            try:
                os.remove(fullpath)
            except OSError:
                pass
        elif os.path.isdir(fullpath):
            remove_dir_recursive(fullpath)
            try:
                os.rmdir(fullpath)
            except OSError:
                pass
def file_exists_insensitive(filename):
    """Return a path on disk matching *filename* case-insensitively.

    If *filename* exists as-is it is returned unchanged; otherwise the
    containing directory is scanned for a case-insensitive match. Returns
    *filename* unchanged when nothing matches or the directory is
    unreadable.
    """
    if os.path.exists(filename):
        return filename
    # Anchor the pattern with '$': re.match only anchors the start, so
    # without it 'foo.txt' would also match 'foo.txt.bak'.
    regexp = re.compile(re.escape(filename) + r'$', re.IGNORECASE)
    path = os.path.dirname(filename)
    try:
        files = os.listdir(path)
    except OSError:
        return filename
    for x in files:
        fullpath = os.path.join(path, x)
        if regexp.match(fullpath):
            return fullpath
    return filename
def browser_load(docslink, browser, window):
    """Open *docslink* in a web browser; return True on success, False if
    no browser could be launched.

    *browser* is the user-configured command (may be empty); when empty a
    list of common browser commands is tried in order.
    `window` is unused here; presumably kept for call-site symmetry with
    other UI helpers — TODO confirm.
    """
    if browser and browser.strip():
        browsers = [browser.strip()]
    else:
        browsers = ["gnome-open", # default, we are a "gnome" app
                    "x-www-browser", # default on Debian-based systems
                    "exo-open",
                    "kfmclient openURL",
                    "firefox",
                    "mozilla",
                    "opera",
                    "chromium-browser"]
    for browser in browsers:
        try:
            # split() so commands with arguments (e.g. "kfmclient openURL")
            # become proper argv lists; no shell involved.
            subprocess.Popen(browser.split() + [docslink])
            break # done
        except OSError:
            pass # try next
    else: # none worked
        return False
    return True
def file_from_utf8(filename):
    """Best-effort conversion of *filename* from UTF-8 via GLib.

    Falls back to returning *filename* unchanged when gobject is missing
    or the conversion fails. (Previously the import was outside the try,
    so a missing gobject raised ImportError despite the best-effort
    intent; the bare except is also narrowed to Exception.)
    """
    try:
        import gobject
        return gobject.filename_from_utf8(filename)
    except Exception:
        return filename
def is_lang_rtl(window):
    """Return True when *window*'s Pango context reports a right-to-left
    base text direction (i.e. the UI language is RTL)."""
    import pango
    # Check if a RTL (right-to-left) language:
    return window.get_pango_context().get_base_dir() == pango.DIRECTION_RTL
def sanitize_musicdir(mdir):
    """Expand ~ in *mdir*; return '' for empty/None input."""
    if not mdir:
        return ''
    return os.path.expanduser(mdir)
def mpd_env_vars():
    """Read MPD connection settings from the environment.

    Returns a (host, port, password) tuple; each element is None when the
    corresponding variable is unset. MPD_HOST may have the form
    "password@host".
    """
    host = None
    port = None
    password = None
    if 'MPD_HOST' in os.environ:
        if '@' in os.environ['MPD_HOST']:
            # Split on the first '@' only, so a password containing '@'
            # no longer raises "too many values to unpack".
            password, host = os.environ['MPD_HOST'].split('@', 1)
        else:
            host = os.environ['MPD_HOST']
    if 'MPD_PORT' in os.environ:
        port = int(os.environ['MPD_PORT'])
    return (host, port, password)
def get_files_recursively(dirname):
filenames = []
os.path.walk(dirname, _get_files_recursively, filenames)
return filenames
def _get_files_recursively(filenames, dirname, files):
filenames.extend([os.path.join(dirname, f) for f in files])
def setlocale():
    """Initialise the process locale; log and exit(1) if it is unusable.

    The bare ``except`` is narrowed to locale.Error (the exception
    locale.setlocale raises), so KeyboardInterrupt/SystemExit are no
    longer swallowed and turned into exit(1).
    """
    try:
        locale.setlocale(locale.LC_ALL, "")
        # XXX this makes python-mpd correctly return lowercase
        # keys for, e.g., playlistinfo() with a turkish locale:
        locale.setlocale(locale.LC_CTYPE, "C")
    except locale.Error:
        logger.exception("Failed to set locale")
        sys.exit(1)
| gpl-3.0 |
AdrianaDinca/bitcoin | qa/rpc-tests/bip65-cltv.py | 62 | 3311 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test the CHECKLOCKTIMEVERIFY (BIP65) soft-fork logic
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class BIP65Test(BitcoinTestFramework):
    """Exercise BIP65 (CHECKLOCKTIMEVERIFY) version-4 block activation.

    Node 1 mines version=3 ("old") blocks, node 2 mines version=4 ("new")
    blocks, and node 0 observes. The test walks the soft-fork thresholds:
    old blocks stay valid until 950 of the last 1000 blocks are version 4,
    after which a version=3 block must be rejected.
    """

    def __init__(self):
        super().__init__()
        self.num_nodes = 3
        self.setup_clean_chain = False

    def setup_network(self):
        # -blockversion pins the block version each miner produces.
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, []))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=3"]))
        self.nodes.append(start_node(2, self.options.tmpdir, ["-blockversion=4"]))
        connect_nodes(self.nodes[1], 0)
        connect_nodes(self.nodes[2], 0)
        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        # cnt tracks the expected height as seen by observer node 0.
        cnt = self.nodes[0].getblockcount()

        # Mine some old-version blocks
        # NOTE(review): generate(200) together with `cnt += 100` and the
        # `cnt + 100` check below accounts for all 200 blocks, but the
        # comment and error message say 100 — confirm which is intended.
        self.nodes[1].generate(200)
        cnt += 100
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 100):
            raise AssertionError("Failed to mine 100 version=3 blocks")

        # Mine 750 new-version blocks
        for i in range(15):
            self.nodes[2].generate(50)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 850):
            raise AssertionError("Failed to mine 750 version=4 blocks")

        # TODO: check that new CHECKLOCKTIMEVERIFY rules are not enforced

        # Mine 1 new-version block
        self.nodes[2].generate(1)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 851):
            raise AssertionError("Failed to mine a version=4 blocks")

        # TODO: check that new CHECKLOCKTIMEVERIFY rules are enforced

        # Mine 198 new-version blocks
        for i in range(2):
            self.nodes[2].generate(99)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 1049):
            raise AssertionError("Failed to mine 198 version=4 blocks")

        # Mine 1 old-version block: still accepted (949 of last 1000 are v4).
        self.nodes[1].generate(1)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 1050):
            raise AssertionError("Failed to mine a version=3 block after 949 version=4 blocks")

        # Mine 1 new-version blocks
        self.nodes[2].generate(1)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 1051):
            raise AssertionError("Failed to mine a version=4 block")

        # Mine 1 old-version blocks: now past the 950/1000 threshold, so the
        # generate RPC must fail with a JSONRPCException.
        try:
            self.nodes[1].generate(1)
            raise AssertionError("Succeeded to mine a version=3 block after 950 version=4 blocks")
        except JSONRPCException:
            pass
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 1051):
            raise AssertionError("Accepted a version=3 block after 950 version=4 blocks")

        # Mine 1 new-version blocks
        self.nodes[2].generate(1)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 1052):
            raise AssertionError("Failed to mine a version=4 block")
if __name__ == '__main__':
    # BitcoinTestFramework.main() parses options, runs the test and exits.
    BIP65Test().main()
| mit |
cloudcopy/seahub | thirdpart/rest_framework/filters.py | 5 | 1757 | from rest_framework.compat import django_filters
FilterSet = django_filters and django_filters.FilterSet or None
class BaseFilterBackend(object):
    """
    A base class from which all filter backend classes should inherit.
    """

    def filter_queryset(self, request, queryset, view):
        """
        Return a filtered queryset.

        Subclasses must override this; the base implementation raises.
        """
        raise NotImplementedError(".filter_queryset() must be overridden.")
class DjangoFilterBackend(BaseFilterBackend):
    """
    A filter backend that uses django-filter.
    """
    # None when django-filter is not installed (see module-level FilterSet).
    default_filter_set = FilterSet

    def __init__(self):
        assert django_filters, 'Using DjangoFilterBackend, but django-filter is not installed'

    def get_filter_class(self, view):
        """
        Return the django-filters `FilterSet` used to filter the queryset.

        An explicit `filter_class` on the view wins (its model must match
        the view's model); otherwise one is generated on the fly from
        `filter_fields`; otherwise None (no filtering).
        """
        filter_class = getattr(view, 'filter_class', None)
        filter_fields = getattr(view, 'filter_fields', None)
        view_model = getattr(view, 'model', None)

        if filter_class:
            filter_model = filter_class.Meta.model
            assert issubclass(filter_model, view_model), \
                'FilterSet model %s does not match view model %s' % \
                (filter_model, view_model)
            return filter_class

        if filter_fields:
            # Generate a FilterSet bound to the view's model and fields.
            class AutoFilterSet(self.default_filter_set):
                class Meta:
                    model = view_model
                    fields = filter_fields
            return AutoFilterSet

        return None

    def filter_queryset(self, request, queryset, view):
        # Pass the queryset through unfiltered when the view declares no
        # filter configuration.
        filter_class = self.get_filter_class(view)

        if filter_class:
            return filter_class(request.GET, queryset=queryset)

        return queryset
| apache-2.0 |
vincepandolfo/django | tests/save_delete_hooks/models.py | 409 | 1030 | """
Adding hooks before/after saving and deleting
To execute arbitrary code around ``save()`` and ``delete()``, just subclass
the methods.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Person(models.Model):
    """Model whose save()/delete() overrides log before/after markers in
    ``self.data`` so tests can assert that the hooks ran in order."""

    first_name = models.CharField(max_length=20)
    last_name = models.CharField(max_length=20)

    def __init__(self, *args, **kwargs):
        super(Person, self).__init__(*args, **kwargs)
        # Per-instance event log; not a database field.
        self.data = []

    def __str__(self):
        return "%s %s" % (self.first_name, self.last_name)

    def save(self, *args, **kwargs):
        self.data.append("Before save")
        # Call the "real" save() method
        super(Person, self).save(*args, **kwargs)
        self.data.append("After save")

    def delete(self):
        self.data.append("Before deletion")
        # Call the "real" delete() method
        super(Person, self).delete()
        self.data.append("After deletion")
| bsd-3-clause |
gopal1cloud/neutron | neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py | 9 | 7125 | # Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import mock
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.common import utils
from neutron.db import agents_db
from neutron.openstack.common import timeutils
from neutron.tests import base
class TestDhcpAgentNotifyAPI(base.BaseTestCase):
    """Unit tests for DhcpAgentNotifyAPI's scheduling/notification logic.

    All messaging side effects (_fanout_message / _cast_message) and the
    LOG object are patched out, so each test only asserts call counts.
    """

    def setUp(self):
        super(TestDhcpAgentNotifyAPI, self).setUp()
        self.notifier = (
            dhcp_rpc_agent_api.DhcpAgentNotifyAPI(plugin=mock.Mock()))

        mock_util_p = mock.patch.object(utils, 'is_extension_supported')
        mock_log_p = mock.patch.object(dhcp_rpc_agent_api, 'LOG')
        mock_fanout_p = mock.patch.object(self.notifier, '_fanout_message')
        mock_cast_p = mock.patch.object(self.notifier, '_cast_message')
        self.mock_util = mock_util_p.start()
        self.mock_log = mock_log_p.start()
        self.mock_fanout = mock_fanout_p.start()
        self.mock_cast = mock_cast_p.start()

    def _test__schedule_network(self, network,
                                new_agents=None, existing_agents=None,
                                expected_casts=0, expected_warnings=0):
        # Drive _schedule_network and check the combined agent list plus the
        # number of casts and warning logs it produced.
        self.notifier.plugin.schedule_network.return_value = new_agents
        agents = self.notifier._schedule_network(
            mock.ANY, network, existing_agents)
        if new_agents is None:
            new_agents = []
        self.assertEqual(new_agents + existing_agents, agents)
        self.assertEqual(expected_casts, self.mock_cast.call_count)
        self.assertEqual(expected_warnings, self.mock_log.warn.call_count)

    def test__schedule_network(self):
        # Newly scheduled agent -> one cast, no warnings.
        agent = agents_db.Agent()
        agent.admin_state_up = True
        agent.heartbeat_timestamp = timeutils.utcnow()
        network = {'id': 'foo_net_id'}
        self._test__schedule_network(network,
                                     new_agents=[agent], existing_agents=[],
                                     expected_casts=1, expected_warnings=0)

    def test__schedule_network_no_existing_agents(self):
        # Already-hosted network: nothing new to schedule, no cast.
        agent = agents_db.Agent()
        agent.admin_state_up = True
        agent.heartbeat_timestamp = timeutils.utcnow()
        network = {'id': 'foo_net_id'}
        self._test__schedule_network(network,
                                     new_agents=None, existing_agents=[agent],
                                     expected_casts=0, expected_warnings=0)

    def test__schedule_network_no_new_agents(self):
        # No agent available at all -> warning logged.
        network = {'id': 'foo_net_id'}
        self._test__schedule_network(network,
                                     new_agents=None, existing_agents=[],
                                     expected_casts=0, expected_warnings=1)

    def _test__get_enabled_agents(self, network,
                                  agents=None, port_count=0,
                                  expected_warnings=0, expected_errors=0):
        # Drive _get_enabled_agents and check the returned agents plus the
        # warning/error log counts.
        self.notifier.plugin.get_ports_count.return_value = port_count
        enabled_agents = self.notifier._get_enabled_agents(
            mock.ANY, network, agents, mock.ANY, mock.ANY)
        self.assertEqual(agents, enabled_agents)
        self.assertEqual(expected_warnings, self.mock_log.warn.call_count)
        self.assertEqual(expected_errors, self.mock_log.error.call_count)

    def test__get_enabled_agents(self):
        agent = agents_db.Agent()
        agent.admin_state_up = True
        agent.heartbeat_timestamp = timeutils.utcnow()
        network = {'id': 'foo_network_id'}
        self._test__get_enabled_agents(network, agents=[agent])

    def test__get_enabled_agents_with_inactive_ones(self):
        agent1 = agents_db.Agent()
        agent1.admin_state_up = True
        agent1.heartbeat_timestamp = timeutils.utcnow()
        agent2 = agents_db.Agent()
        agent2.admin_state_up = True
        # This is effectively an inactive agent
        agent2.heartbeat_timestamp = datetime.datetime(2000, 1, 1, 0, 0)
        network = {'id': 'foo_network_id'}
        self._test__get_enabled_agents(network,
                                       agents=[agent1, agent2],
                                       expected_warnings=1, expected_errors=0)

    def test__get_enabled_agents_with_notification_required(self):
        # Ports exist but no agent hosts the network -> error logged.
        network = {'id': 'foo_network_id', 'subnets': ['foo_subnet_id']}
        self._test__get_enabled_agents(network, [], port_count=20,
                                       expected_warnings=0, expected_errors=1)

    def test__notify_agents_fanout_required(self):
        # network_delete_end is broadcast to all agents via fanout.
        self.notifier._notify_agents(mock.ANY,
                                     'network_delete_end',
                                     mock.ANY, 'foo_network_id')
        self.assertEqual(1, self.mock_fanout.call_count)

    def _test__notify_agents(self, method,
                             expected_scheduling=0, expected_casts=0):
        # Patch scheduling and agent lookup so only the dispatch decision
        # (schedule? cast?) for the given method is observed.
        with mock.patch.object(self.notifier, '_schedule_network') as f:
            with mock.patch.object(self.notifier, '_get_enabled_agents') as g:
                agent = agents_db.Agent()
                agent.admin_state_up = True
                agent.heartbeat_timestamp = timeutils.utcnow()
                g.return_value = [agent]
                self.notifier._notify_agents(mock.Mock(), method,
                                             mock.ANY, 'foo_network_id')
                self.assertEqual(expected_scheduling, f.call_count)
                self.assertEqual(expected_casts, self.mock_cast.call_count)

    def test__notify_agents_cast_required_with_scheduling(self):
        self._test__notify_agents('port_create_end',
                                  expected_scheduling=1, expected_casts=1)

    def test__notify_agents_cast_required_wo_scheduling_on_port_update(self):
        self._test__notify_agents('port_update_end',
                                  expected_scheduling=0, expected_casts=1)

    def test__notify_agents_cast_required_wo_scheduling_on_subnet_create(self):
        self._test__notify_agents('subnet_create_end',
                                  expected_scheduling=0, expected_casts=1)

    def test__notify_agents_no_action(self):
        self._test__notify_agents('network_create_end',
                                  expected_scheduling=0, expected_casts=0)

    def test__fanout_message(self):
        self.notifier._fanout_message(mock.ANY, mock.ANY, mock.ANY)
        self.assertEqual(1, self.mock_fanout.call_count)

    def test__cast_message(self):
        self.notifier._cast_message(mock.ANY, mock.ANY, mock.ANY)
        self.assertEqual(1, self.mock_cast.call_count)
| apache-2.0 |
40223229/2015cd_midterm | gear.py | 204 | 19237 | #@+leo-ver=5-thin
#@+node:office.20150407074720.1: * @file gear.py
#@@language python
#@@tabwidth -4
#@+<<declarations>>
#@+node:office.20150407074720.2: ** <<declarations>> (application)
#@@language python
import cherrypy
import os
import sys
# This program computes the face width of a spur gear; the database link
# uses pybean with SQLite.
# Import the pybean module and the Store / SQLiteWriter helpers it provides.
from pybean import Store, SQLiteWriter
import math
# Determine the directory containing this file (on Windows the result has
# a trailing backslash).
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# Add that directory to the module search path.
sys.path.append(_curdir)
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
    # while program is executed in OpenShift
    download_root_dir = os.environ['OPENSHIFT_DATA_DIR']
    data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
    # while program is executed in localhost
    download_root_dir = _curdir + "/local_data/"
    data_dir = _curdir + "/local_data/"
# Schema of the gear-design tables (documentation-only string below).
'''
lewis.db 中有兩個資料表, steel 與 lewis
CREATE TABLE steel (
serialno INTEGER,
unsno TEXT,
aisino TEXT,
treatment TEXT,
yield_str INTEGER,
tensile_str INTEGER,
stretch_ratio INTEGER,
sectional_shr INTEGER,
brinell INTEGER
);
CREATE TABLE lewis (
serialno INTEGER PRIMARY KEY
NOT NULL,
gearno INTEGER,
type1 NUMERIC,
type4 NUMERIC,
type3 NUMERIC,
type2 NUMERIC
);
'''
#@-<<declarations>>
#@+others
#@+node:office.20150407074720.3: ** class Gear
class Gear(object):
#@+others
#@+node:office.20150407074720.4: *3* __init__
def __init__(self):
# hope to create downloads and images directories
if not os.path.isdir(download_root_dir+"downloads"):
try:
os.makedirs(download_root_dir+"downloads")
except:
print("mkdir error")
if not os.path.isdir(download_root_dir+"images"):
try:
os.makedirs(download_root_dir+"images")
except:
print("mkdir error")
if not os.path.isdir(download_root_dir+"tmp"):
try:
os.makedirs(download_root_dir+"tmp")
except:
print("mkdir error")
    #@+node:office.20150407074720.5: *3* default
    @cherrypy.expose
    def default(self, attr='default', *args, **kwargs):
        # Catch-all handler: redirect any unknown URL back to the index page.
        raise cherrypy.HTTPRedirect("/")
    #@+node:office.20150407074720.6: *3* index
    # The index page drives the rest of the workflow.
    @cherrypy.expose
    def index(self, *args, **kwargs):
        """Render the gear-design input form, with the material drop-down
        populated from the steel table of lewis.db."""
        # Connect to the database file and fetch all material rows.
        try:
            # Build the Store object for the database file; frozen=True
            # disables dynamic table creation.
            # The app runs from its own directory, so lewis.db is
            # referenced relative to _curdir.
            SQLite連結 = Store(SQLiteWriter(_curdir+"/lewis.db", frozen=True))
            #material = SQLite連結.find_one("steel","serialno = ?",[序號])
            # str(SQLite連結.count("steel")) would return 70, i.e. the steel
            # table holds 70 rows.
            material = SQLite連結.find("steel")
            # material is an iterator over the steel rows.
            '''
            outstring = ""
            for material_item in material:
                outstring += str(material_item.serialno) + ":" + material_item.unsno + "_" + material_item.treatment + "<br />"
            return outstring
            '''
        except:
            return "抱歉! 資料庫無法連線<br />"
        outstring = '''
<form id=entry method=post action="gear_width">
請填妥下列參數,以完成適當的齒尺寸大小設計。<br />
馬達馬力:<input type=text name=horsepower id=horsepower value=100 size=10>horse power<br />
馬達轉速:<input type=text name=rpm id=rpm value=1120 size=10>rpm<br />
齒輪減速比: <input type=text name=ratio id=ratio value=4 size=10><br />
齒形:<select name=toothtype id=toothtype>
<option value=type1>壓力角20度,a=0.8,b=1.0
<option value=type2>壓力角20度,a=1.0,b=1.25
<option value=type3>壓力角25度,a=1.0,b=1.25
<option value=type4>壓力角25度,a=1.0,b=1.35
</select><br />
安全係數:<input type=text name=safetyfactor id=safetyfactor value=3 size=10><br />
齒輪材質:<select name=material_serialno id=material_serialno>
'''
        # One <option> per material row: "UNS - <unsno> - <treatment>".
        for material_item in material:
            outstring += "<option value=" + str(material_item.serialno) + ">UNS - " + \
                material_item.unsno + " - " + material_item.treatment
        outstring += "</select><br />"
        outstring += "小齒輪齒數:<input type=text name=npinion id=npinion value=18 size=10><br />"
        outstring += "<input type=submit id=submit value=進行運算>"
        outstring += "</form>"
        return outstring
    #@+node:office.20150407074720.7: *3* interpolation
    @cherrypy.expose
    def interpolation(self, small_gear_no=18, gear_type=1):
        """Linearly interpolate the Lewis form factor for a tooth count not
        present in the lewis table.

        NOTE(review): this returns a *string* (str(round(...))), and
        gear_width divides by the returned value — confirm whether a
        float() conversion is missing at the call site.
        """
        SQLite連結 = Store(SQLiteWriter(_curdir+"/lewis.db", frozen=True))
        # Interpolate between the two nearest table entries.
        # Nearest entry with a larger tooth count (smallest gearno above
        # the target).
        lewis_factor = SQLite連結.find_one("lewis","gearno > ?",[small_gear_no])
        if(gear_type == 1):
            larger_formfactor = lewis_factor.type1
        elif(gear_type == 2):
            larger_formfactor = lewis_factor.type2
        elif(gear_type == 3):
            larger_formfactor = lewis_factor.type3
        else:
            larger_formfactor = lewis_factor.type4
        larger_toothnumber = lewis_factor.gearno
        # Nearest entry with a smaller tooth count (largest gearno below
        # the target).
        lewis_factor = SQLite連結.find_one("lewis","gearno < ? order by gearno DESC",[small_gear_no])
        if(gear_type == 1):
            smaller_formfactor = lewis_factor.type1
        elif(gear_type == 2):
            smaller_formfactor = lewis_factor.type2
        elif(gear_type == 3):
            smaller_formfactor = lewis_factor.type3
        else:
            smaller_formfactor = lewis_factor.type4
        smaller_toothnumber = lewis_factor.gearno
        calculated_factor = larger_formfactor + (small_gear_no - larger_toothnumber) * (larger_formfactor - smaller_formfactor) / (larger_toothnumber - smaller_toothnumber)
        # Return only five decimal places.
        return str(round(calculated_factor, 5))
    #@+node:office.20150407074720.8: *3* gear_width
    # Rewritten as the face-width design function.
    @cherrypy.expose
    def gear_width(self, horsepower=100, rpm=1000, ratio=4, toothtype=1, safetyfactor=2, material_serialno=1, npinion=18):
        """Iteratively size the gear face width from power, speed, material
        strength and the Lewis form factor.

        NOTE(review): the form posts toothtype as strings like "type1",
        which never equal the integers compared against below, so the
        pressure-angle / form-factor branches always take the else path —
        confirm intended handling of the POSTed values.
        """
        SQLite連結 = Store(SQLiteWriter(_curdir+"/lewis.db", frozen=True))
        outstring = ""
        # Pressure angle follows the selected tooth profile.
        if(toothtype == 1 or toothtype == 2):
            壓力角 = 20
        else:
            壓力角 = 25
        # Minimum tooth count follows the pressure angle.
        if(壓力角== 20):
            最小齒數 = 18
        else:
            最小齒數 = 12
        # Clamp the pinion up to the minimum tooth count.
        if int(npinion) <= 最小齒數:
            npinion = 最小齒數
        # Tooth counts of 400 or more are treated as a rack.
        if int(npinion) >= 400:
            npinion = 400
        # Look up the strength of the selected material.
        # The steel table stores yield strength in kpsi, so multiply the
        # looked-up value by 1000.
        # Build the Store object; frozen=True disables dynamic table creation.
        #SQLite連結 = Store(SQLiteWriter("lewis.db", frozen=True))
        # Bind the steel table.
        steel = SQLite連結.new("steel")
        # Query the material row by serial number.
        #material = SQLite連結.find_one("steel","unsno=? and treatment=?",[unsno, treatment])
        material = SQLite連結.find_one("steel","serialno=?",[material_serialno])
        # (debug) row count of the steel table:
        #print(SQLite連結.count("steel"))
        #print (material.yield_str)
        strengthstress = material.yield_str*1000
        # Look up the Lewis form factor from the pinion tooth count and
        # tooth profile.
        # First check whether the table has an exact entry.
        on_table = SQLite連結.count("lewis","gearno=?",[npinion])
        if on_table == 1:
            # Exact match: use the tabulated value directly.
            #print("直接運算")
            #print(on_table)
            lewis_factor = SQLite連結.find_one("lewis","gearno=?",[npinion])
            #print(lewis_factor.type1)
            # Pick the column matching the tooth profile.
            if(toothtype == 1):
                formfactor = lewis_factor.type1
            elif(toothtype == 2):
                formfactor = lewis_factor.type2
            elif(toothtype == 3):
                formfactor = lewis_factor.type3
            else:
                formfactor = lewis_factor.type4
        else:
            # No exact entry: interpolate between the neighbouring rows.
            #print("必須內插")
            #print(interpolation(npinion, gear_type))
            # NOTE(review): self.interpolation() returns a *string*;
            # dividing by formfactor below would then raise TypeError —
            # probably needs float(...) here. Confirm.
            formfactor = self.interpolation(npinion, toothtype)
        # Start the sizing iteration.
        ngear = int(npinion) * int(ratio)
        # Key design choice: prefer an integral diametral pitch where
        # possible; sweep i upward in 0.1 steps until the face width falls
        # between 3 and 5 circular pitches, or give up after 5000 tries.
        counter = 0
        i = 0.1
        facewidth = 0
        circularpitch = 0
        while (facewidth <= 3 * circularpitch or facewidth >= 5 * circularpitch):
            diametralpitch = i
            #circularpitch = 3.14159/diametralpitch
            circularpitch = math.pi/diametralpitch
            pitchdiameter = int(npinion)/diametralpitch
            #pitchlinevelocity = 3.14159*pitchdiameter*rpm/12
            pitchlinevelocity = math.pi*pitchdiameter * float(rpm)/12
            transmittedload = 33000*float(horsepower)/pitchlinevelocity
            velocityfactor = 1200/(1200 + pitchlinevelocity)
            # formfactor is Lewis form factor
            # formfactor need to get from table 13-3 and determined ty teeth number and type of tooth
            # formfactor = 0.293
            # 90 is the value get from table corresponding to material type
            facewidth = transmittedload*diametralpitch*float(safetyfactor)/velocityfactor/formfactor/strengthstress
            if(counter>5000):
                outstring += "超過5000次的設計運算,仍無法找到答案!<br />"
                outstring += "可能所選用的傳遞功率過大,或無足夠強度的材料可以使用!<br />"
                # leave the while loop
                break
            i += 0.1
            counter += 1
        facewidth = round(facewidth, 4)
        if(counter<5000):
            # (test) the cube Pro/Web.Link page can be loaded instead:
            #outstring = self.cube_weblink()
            # Load the gear Pro/Web.Link page, then append the result text.
            outstring = self.gear_weblink()
            outstring += "進行"+str(counter)+"次重複運算後,得到合用的facewidth值為:"+str(facewidth)
        return outstring
    #@+node:office.20150407074720.9: *3* cube_weblink
    @cherrypy.expose
    def cube_weblink(self):
        """Return an HTML page whose embedded Pro/Web.Link JavaScript opens
        cube.prt in the client's Creo/Pro-E session, sweeps its a1/a2
        parameters, regenerates, reads the volume and saves each variant.
        (The script body, including its Chinese comments, runs client-side
        and is emitted verbatim.)"""
        outstring = '''<script type="text/javascript" src="/static/weblink/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/wl_header.js">
document.writeln ("Error loading Pro/Web.Link header!");
</script>
<script type="text/javascript" language="JavaScript">
// 若第三輸入為 false, 表示僅載入 session, 但是不顯示
// ret 為 model open return
var ret = document.pwl.pwlMdlOpen("cube.prt", "v:/tmp", false);
if (!ret.Status) {
alert("pwlMdlOpen failed (" + ret.ErrorCode + ")");
}
//將 ProE 執行階段設為變數 session
var session = pfcGetProESession();
// 在視窗中打開零件檔案, 並且顯示出來
var window = session.OpenFile(pfcCreate("pfcModelDescriptor").CreateFromFileName("cube.prt"));
var solid = session.GetModel("cube.prt",pfcCreate("pfcModelType").MDL_PART);
var length,width,myf,myn,i,j,volume,count,d1Value,d2Value;
// 將模型檔中的 length 變數設為 javascript 中的 length 變數
length = solid.GetParam("a1");
// 將模型檔中的 width 變數設為 javascript 中的 width 變數
width = solid.GetParam("a2");
//改變零件尺寸
//myf=20;
//myn=20;
volume=0;
count=0;
try
{
// 以下採用 URL 輸入對應變數
//createParametersFromArguments ();
// 以下則直接利用 javascript 程式改變零件參數
for(i=0;i<=5;i++)
{
//for(j=0;j<=2;j++)
//{
myf=20.0;
myn=10.0+i*0.5;
// 設定變數值, 利用 ModelItem 中的 CreateDoubleParamValue 轉換成 Pro/Web.Link 所需要的浮點數值
d1Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myf);
d2Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myn);
// 將處理好的變數值, 指定給對應的零件變數
length.Value = d1Value;
width.Value = d2Value;
//零件尺寸重新設定後, 呼叫 Regenerate 更新模型
solid.Regenerate(void null);
//利用 GetMassProperty 取得模型的質量相關物件
properties = solid.GetMassProperty(void null);
//volume = volume + properties.Volume;
volume = properties.Volume;
count = count + 1;
alert("執行第"+count+"次,零件總體積:"+volume);
// 將零件存為新檔案
var newfile = document.pwl.pwlMdlSaveAs("cube.prt", "v:/tmp", "cube"+count+".prt");
if (!newfile.Status) {
alert("pwlMdlSaveAs failed (" + newfile.ErrorCode + ")");
}
//} // 內圈 for 迴圈
} //外圈 for 迴圈
//alert("共執行:"+count+"次,零件總體積:"+volume);
//alert("零件體積:"+properties.Volume);
//alert("零件體積取整數:"+Math.round(properties.Volume));
}
catch(err)
{
alert ("Exception occurred: "+pfcGetExceptionType (err));
}
</script>
'''
        return outstring
    #@+node:office.20150407074720.10: *3* gear_weblink
    @cherrypy.expose
    def gear_weblink(self, facewidth=5, n=18):
        """Return an HTML page whose embedded Pro/Web.Link JavaScript opens
        gear.prt client-side, sweeps the n / face_width parameters,
        regenerates, reads the volume and saves each variant.

        NOTE(review): the facewidth/n arguments are not interpolated into
        the script (the loop uses its own hard-coded myf/myn sweep) —
        confirm whether they were meant to parameterise it.
        """
        outstring = '''<script type="text/javascript" src="/static/weblink/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/wl_header.js">// <![CDATA[
document.writeln ("Error loading Pro/Web.Link header!");
// ]]></script>
<script type="text/javascript" language="JavaScript">// <![CDATA[
if (!pfcIsWindows()) netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect");
// 若第三輸入為 false, 表示僅載入 session, 但是不顯示
// ret 為 model open return
var ret = document.pwl.pwlMdlOpen("gear.prt", "v:/", false);
if (!ret.Status) {
alert("pwlMdlOpen failed (" + ret.ErrorCode + ")");
}
//將 ProE 執行階段設為變數 session
var session = pfcGetProESession();
// 在視窗中打開零件檔案, 並且顯示出來
var window = session.OpenFile(pfcCreate("pfcModelDescriptor").CreateFromFileName("gear.prt"));
var solid = session.GetModel("gear.prt",pfcCreate("pfcModelType").MDL_PART);
var length,width,myf,myn,i,j,volume,count,d1Value,d2Value;
// 將模型檔中的 length 變數設為 javascript 中的 length 變數
length = solid.GetParam("n");
// 將模型檔中的 width 變數設為 javascript 中的 width 變數
width = solid.GetParam("face_width");
//改變零件尺寸
//myf=20;
//myn=20;
volume=0;
count=0;
try
{
// 以下採用 URL 輸入對應變數
//createParametersFromArguments ();
// 以下則直接利用 javascript 程式改變零件參數
for(i=0;i<=5;i++)
{
//for(j=0;j<=2;j++)
//{
myf=25+i*2;
myn=10.0+i*0.5;
// 設定變數值, 利用 ModelItem 中的 CreateDoubleParamValue 轉換成 Pro/Web.Link 所需要的浮點數值
//d1Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myf);
d1Value = pfcCreate ("MpfcModelItem").CreateIntParamValue(myf);
d2Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myn);
// 將處理好的變數值, 指定給對應的零件變數
length.Value = d1Value;
width.Value = d2Value;
//零件尺寸重新設定後, 呼叫 Regenerate 更新模型
solid.Regenerate(void null);
//利用 GetMassProperty 取得模型的質量相關物件
properties = solid.GetMassProperty(void null);
//volume = volume + properties.Volume;
volume = properties.Volume;
count = count + 1;
alert("執行第"+count+"次,零件總體積:"+volume);
// 將零件存為新檔案
var newfile = document.pwl.pwlMdlSaveAs("gear.prt", "v:/", "mygear_"+count+".prt");
if (!newfile.Status) {
alert("pwlMdlSaveAs failed (" + newfile.ErrorCode + ")");
}
//} // 內圈 for 迴圈
} //外圈 for 迴圈
//alert("共執行:"+count+"次,零件總體積:"+volume);
//alert("零件體積:"+properties.Volume);
//alert("零件體積取整數:"+Math.round(properties.Volume));
}
catch(err)
{
alert ("Exception occurred: "+pfcGetExceptionType (err));
}
// ]]></script>
'''
        return outstring
    #@-others
#@-others
root = Gear()
# setup static, images and downloads directories
application_conf = {
    '/static': {
        'tools.staticdir.on': True,
        'tools.staticdir.dir': _curdir + "/static"},
    '/images': {
        'tools.staticdir.on': True,
        'tools.staticdir.dir': data_dir + "/images"},
    '/downloads': {
        'tools.staticdir.on': True,
        'tools.staticdir.dir': data_dir + "/downloads"}
}
# if inOpenshift ('OPENSHIFT_REPO_DIR' exists in environment variables) or not inOpenshift
if __name__ == '__main__':
    if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
        # operate in OpenShift: expose a WSGI application object.
        application = cherrypy.Application(root, config = application_conf)
    else:
        # operate in localhost: run CherryPy's built-in server.
        cherrypy.quickstart(root, config = application_conf)
#@-leo
| gpl-2.0 |
jnewland/home-assistant | tests/components/zwave/test_sensor.py | 11 | 5058 | """Test Z-Wave sensor."""
from homeassistant.components.zwave import const, sensor
import homeassistant.const
from tests.mock.zwave import (
MockNode, MockValue, MockEntityValues, value_changed)
def test_get_device_detects_none(mock_openzwave):
    """get_device yields None for a node with no recognized command class."""
    plain_node = MockNode()
    entity_values = MockEntityValues(
        primary=MockValue(data=0, node=plain_node))
    assert sensor.get_device(
        node=plain_node, values=entity_values, node_config={}) is None
def test_get_device_detects_alarmsensor(mock_openzwave):
    """Alarm command classes produce a ZWaveAlarmSensor."""
    alarm_node = MockNode(
        command_classes=[const.COMMAND_CLASS_ALARM,
                         const.COMMAND_CLASS_SENSOR_ALARM])
    entity_values = MockEntityValues(
        primary=MockValue(data=0, node=alarm_node))
    result = sensor.get_device(
        node=alarm_node, values=entity_values, node_config={})
    assert isinstance(result, sensor.ZWaveAlarmSensor)
def test_get_device_detects_multilevelsensor(mock_openzwave):
    """Multilevel-sensor nodes map to a force-updating ZWaveMultilevelSensor."""
    ml_node = MockNode(
        command_classes=[const.COMMAND_CLASS_SENSOR_MULTILEVEL,
                         const.COMMAND_CLASS_METER])
    entity_values = MockEntityValues(
        primary=MockValue(data=0, node=ml_node))
    result = sensor.get_device(
        node=ml_node, values=entity_values, node_config={})
    assert isinstance(result, sensor.ZWaveMultilevelSensor)
    assert result.force_update
def test_get_device_detects_multilevel_meter(mock_openzwave):
    """A decimal meter value also yields a ZWaveMultilevelSensor."""
    meter_node = MockNode(command_classes=[const.COMMAND_CLASS_METER])
    decimal_value = MockValue(
        data=0, node=meter_node, type=const.TYPE_DECIMAL)
    result = sensor.get_device(
        node=meter_node,
        values=MockEntityValues(primary=decimal_value),
        node_config={})
    assert isinstance(result, sensor.ZWaveMultilevelSensor)
def test_multilevelsensor_value_changed_temp_fahrenheit(mock_openzwave):
    """Fahrenheit readings round to whole degrees and track value changes."""
    zw_node = MockNode(
        command_classes=[const.COMMAND_CLASS_SENSOR_MULTILEVEL,
                         const.COMMAND_CLASS_METER])
    primary = MockValue(data=190.95555, units='F', node=zw_node)
    entity = sensor.get_device(
        node=zw_node, values=MockEntityValues(primary=primary),
        node_config={})
    assert entity.state == 191.0
    assert entity.unit_of_measurement == homeassistant.const.TEMP_FAHRENHEIT
    primary.data = 197.95555
    value_changed(primary)
    assert entity.state == 198.0
def test_multilevelsensor_value_changed_temp_celsius(mock_openzwave):
    """Celsius readings round to one decimal and track value changes."""
    zw_node = MockNode(
        command_classes=[const.COMMAND_CLASS_SENSOR_MULTILEVEL,
                         const.COMMAND_CLASS_METER])
    primary = MockValue(data=38.85555, units='C', node=zw_node)
    entity = sensor.get_device(
        node=zw_node, values=MockEntityValues(primary=primary),
        node_config={})
    assert entity.state == 38.9
    assert entity.unit_of_measurement == homeassistant.const.TEMP_CELSIUS
    primary.data = 37.95555
    value_changed(primary)
    assert entity.state == 38.0
def test_multilevelsensor_value_changed_other_units(mock_openzwave):
    """Non-temperature readings round to two decimals, unit passed through."""
    zw_node = MockNode(
        command_classes=[const.COMMAND_CLASS_SENSOR_MULTILEVEL,
                         const.COMMAND_CLASS_METER])
    primary = MockValue(data=190.95555, units='kWh', node=zw_node)
    entity = sensor.get_device(
        node=zw_node, values=MockEntityValues(primary=primary),
        node_config={})
    assert entity.state == 190.96
    assert entity.unit_of_measurement == 'kWh'
    primary.data = 197.95555
    value_changed(primary)
    assert entity.state == 197.96
def test_multilevelsensor_value_changed_integer(mock_openzwave):
    """Integer readings are reported unrounded and track value changes."""
    zw_node = MockNode(
        command_classes=[const.COMMAND_CLASS_SENSOR_MULTILEVEL,
                         const.COMMAND_CLASS_METER])
    primary = MockValue(data=5, units='counts', node=zw_node)
    entity = sensor.get_device(
        node=zw_node, values=MockEntityValues(primary=primary),
        node_config={})
    assert entity.state == 5
    assert entity.unit_of_measurement == 'counts'
    primary.data = 6
    value_changed(primary)
    assert entity.state == 6
def test_alarm_sensor_value_changed(mock_openzwave):
    """Alarm sensor reports raw values and reacts to value_changed events."""
    alarm_node = MockNode(
        command_classes=[const.COMMAND_CLASS_ALARM,
                         const.COMMAND_CLASS_SENSOR_ALARM])
    primary = MockValue(data=12.34, node=alarm_node, units='%')
    entity = sensor.get_device(
        node=alarm_node, values=MockEntityValues(primary=primary),
        node_config={})
    assert entity.state == 12.34
    assert entity.unit_of_measurement == '%'
    primary.data = 45.67
    value_changed(primary)
    assert entity.state == 45.67
| apache-2.0 |
2013Commons/HUE-SHARK | desktop/core/ext-py/south/south/management/commands/datamigration.py | 6 | 4513 | """
Data migration creation command
"""
import sys
import os
import re
from optparse import make_option
try:
set
except NameError:
from sets import Set as set
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.db import models
from django.conf import settings
from south.migration import Migrations
from south.exceptions import NoMigrations
from south.creator import freezer
class Command(BaseCommand):
    """Management command that creates a skeleton South data migration.

    Renders MIGRATION_TEMPLATE for the given app (optionally freezing
    extra apps into it) and either writes it into the app's migrations
    directory or prints it to stdout when the name is '-'.
    """

    option_list = BaseCommand.option_list + (
        make_option('--freeze', action='append', dest='freeze_list', type='string',
                    help='Freeze the specified app(s). Provide an app name with each; use the option multiple times for multiple apps'),
        make_option('--stdout', action='store_true', dest='stdout', default=False,
                    help='Print the migration to stdout instead of writing it to a file.'),
    )
    help = "Creates a new template data migration for the given app"
    usage_str = "Usage: ./manage.py datamigration appname migrationname [--stdout] [--freeze appname]"

    def handle(self, app=None, name="", freeze_list=None, stdout=False, verbosity=1, **options):
        """Validate arguments, render the migration, and write or print it.

        self.error() never returns (it calls sys.exit), so each failed
        validation below aborts the command.
        """
        # Any supposed lists that are None become empty lists
        freeze_list = freeze_list or []
        # --stdout means name = -
        if stdout:
            name = "-"
        # Only allow valid names
        if re.search('[^_\w]', name) and name != "-":
            self.error("Migration names should contain only alphanumeric characters and underscores.")
        # if not name, there's an error
        if not name:
            self.error("You must provide a name for this migration\n" + self.usage_str)
        if not app:
            self.error("You must provide an app to create a migration for.\n" + self.usage_str)
        # Get the Migrations for this app (creating the migrations dir if needed)
        try:
            migrations = Migrations(app)
        except NoMigrations:
            Migrations.create_migrations_directory(app, verbose=verbosity > 0)
            migrations = Migrations(app)
        # See what filename is next in line. We assume they use numbers.
        new_filename = migrations.next_filename(name)
        # Work out which apps to freeze
        apps_to_freeze = self.calc_frozen_apps(migrations, freeze_list)
        # So, what's in this file, then?
        file_contents = MIGRATION_TEMPLATE % {
            "frozen_models": freezer.freeze_apps_to_string(apps_to_freeze),
            "complete_apps": apps_to_freeze and "complete_apps = [%s]" % (", ".join(map(repr, apps_to_freeze))) or ""
        }
        # - is a special name which means 'print to stdout'
        if name == "-":
            print file_contents
        # Write the migration file if the name isn't -
        else:
            fp = open(os.path.join(migrations.migrations_dir(), new_filename), "w")
            fp.write(file_contents)
            fp.close()
            print >>sys.stderr, "Created %s." % new_filename

    def calc_frozen_apps(self, migrations, freeze_list):
        """
        Works out, from the current app, settings, and the command line options,
        which apps should be frozen.
        """
        apps_to_freeze = []
        for to_freeze in freeze_list:
            # App labels never contain dots; a dotted path means the caller
            # passed a full module path by mistake.
            if "." in to_freeze:
                self.error("You cannot freeze %r; you must provide an app label, like 'auth' or 'books'." % to_freeze)
            # Make sure it's a real app
            if not models.get_app(to_freeze):
                self.error("You cannot freeze %r; it's not an installed app." % to_freeze)
            # OK, it's fine
            apps_to_freeze.append(to_freeze)
        # Unless disabled in settings, the app being migrated is always frozen.
        if getattr(settings, 'SOUTH_AUTO_FREEZE_APP', True):
            apps_to_freeze.append(migrations.app_label())
        return apps_to_freeze

    def error(self, message, code=1):
        """
        Prints the error, and exits with the given code.
        """
        print >>sys.stderr, message
        sys.exit(code)
MIGRATION_TEMPLATE = """# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
def backwards(self, orm):
"Write your backwards methods here."
models = %(frozen_models)s
%(complete_apps)s
""" | apache-2.0 |
Cadair/ginga | ginga/web/bokehw/CanvasRenderBokeh.py | 3 | 5041 | #
# CanvasRenderBokeh.py -- for rendering into a Bokeh widget
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
import numpy
from . import BokehHelp
from ginga.canvas.mixins import *
# force registration of all canvas types
import ginga.canvas.types.all
# Bokeh imports
from bokeh.plotting import figure
class RenderContext(object):
    """Adapts Ginga canvas drawing calls to Bokeh plotting primitives.

    Holds the current pen (line), brush (fill) and font state; the
    draw_* methods translate Ginga shape coordinates into calls on the
    viewer's Bokeh figure via a BokehContext helper.
    """

    def __init__(self, viewer):
        self.viewer = viewer
        self.shape = None
        # TODO: encapsulate this drawable
        self.cr = BokehHelp.BokehContext(self.viewer.figure)
        # Drawing state consumed by the draw_* methods below.
        self.pen = None
        self.brush = None
        self.font = None

    def set_line_from_shape(self, shape):
        """Derive the pen (line style) from a Ginga shape's attributes."""
        # TODO: support line width and style
        alpha = getattr(shape, 'alpha', 1.0)
        self.pen = self.cr.get_pen(shape.color, alpha=alpha)

    def set_fill_from_shape(self, shape):
        """Derive the brush (fill) from a shape; None when not filled."""
        fill = getattr(shape, 'fill', False)
        if fill:
            # fillcolor overrides the outline color when present.
            if hasattr(shape, 'fillcolor') and shape.fillcolor:
                color = shape.fillcolor
            else:
                color = shape.color
            # fillalpha overrides the general alpha when present.
            alpha = getattr(shape, 'alpha', 1.0)
            alpha = getattr(shape, 'fillalpha', alpha)
            self.brush = self.cr.get_brush(color, alpha=alpha)
        else:
            self.brush = None

    def set_font_from_shape(self, shape):
        """Derive the font from a shape; scale to the viewer when no size set."""
        if hasattr(shape, 'font'):
            if hasattr(shape, 'fontsize') and shape.fontsize is not None:
                fontsize = shape.fontsize
            else:
                fontsize = shape.scale_font(self.viewer)
            alpha = getattr(shape, 'alpha', 1.0)
            self.font = self.cr.get_font(shape.font, fontsize, shape.color,
                                         alpha=alpha)
        else:
            self.font = None

    def initialize_from_shape(self, shape, line=True, fill=True, font=True):
        """Populate pen/brush/font state from *shape* in one call."""
        if line:
            self.set_line_from_shape(shape)
        if fill:
            self.set_fill_from_shape(shape)
        if font:
            self.set_font_from_shape(shape)

    def set_line(self, color, alpha=1.0, linewidth=1, style='solid'):
        """Set the pen explicitly rather than from a shape."""
        # TODO: support style
        self.pen = self.cr.get_pen(color, alpha=alpha, linewidth=linewidth,
                                   linestyle=style)

    def set_fill(self, color, alpha=1.0):
        """Set the brush explicitly; color=None disables filling."""
        if color is None:
            self.brush = None
        else:
            self.brush = self.cr.get_brush(color, alpha=alpha)

    def set_font(self, fontname, fontsize):
        """Set the font explicitly (always black, fully opaque)."""
        self.font = self.cr.get_font(fontname, fontsize, 'black',
                                     alpha=1.0)

    def text_extents(self, text):
        """Return the rendered width/height of *text* in the current font."""
        return self.cr.text_extents(text, self.font)

    ##### DRAWING OPERATIONS #####
    # Each draw method resets the BokehContext kwargs (cr.init), applies the
    # relevant pen/brush/font state, then emits one Bokeh glyph call.
    # Bokeh glyph APIs take sequences, hence the single-element lists.

    def draw_text(self, cx, cy, text, rot_deg=0.0):
        """Draw *text* at (cx, cy), rotated by rot_deg degrees."""
        self.cr.init(angle=[numpy.radians(rot_deg)])
        self.cr.update_font(self.pen, self.font)
        self.cr.plot.text(x=[cx], y=[cy], text=[text], **self.cr.kwdargs)

    def draw_polygon(self, cpoints):
        """Draw a closed polygon through the (x, y) pairs in *cpoints*."""
        self.cr.init()
        self.cr.update_patch(self.pen, self.brush)
        xy = numpy.array(cpoints)
        self.cr.plot.patches(xs=[xy.T[0]], ys=[xy.T[1]], **self.cr.kwdargs)

    def draw_circle(self, cx, cy, cradius):
        """Draw a circle centered at (cx, cy) with radius cradius."""
        self.cr.init()
        self.cr.update_patch(self.pen, self.brush)
        self.cr.plot.circle(x=[cx], y=[cy], radius=[cradius],
                            **self.cr.kwdargs)

    def draw_bezier_curve(self, verts):
        """Draw a cubic Bezier; *verts* rows are endpoint, ctl1, ctl2, endpoint."""
        self.cr.init()
        self.cr.update_line(self.pen)
        cx, cy = verts.T[0], verts.T[1]
        self.cr.plot.bezier(x0=[cx[0]], y0=[cy[0]],
                            x1=[cx[3]], y1=[cy[3]],
                            cx0=[cx[1]], cy0=[cy[1]],
                            cx1=[cx[2]], cy1=[cy[2]],
                            **self.cr.kwdargs)

    def draw_ellipse(self, cx, cy, cxradius, cyradius, theta):
        """Draw an ellipse; theta is the rotation angle in degrees."""
        self.cr.init()
        self.cr.update_patch(self.pen, self.brush)
        # Bokeh's oval takes full width/height, so double the radii.
        self.cr.plot.oval(x=[cx], y=[cy],
                          width=[cxradius*2.0], height=[cyradius*2.0],
                          angle=[numpy.radians(theta)], **self.cr.kwdargs)

    def draw_line(self, cx1, cy1, cx2, cy2):
        """Draw a straight line segment from (cx1, cy1) to (cx2, cy2)."""
        self.cr.init()
        self.cr.update_line(self.pen)
        self.cr.plot.line(x=[cx1, cx2], y=[cy1, cy2], **self.cr.kwdargs)

    def draw_path(self, cpoints):
        """Draw an open polyline through the (x, y) pairs in *cpoints*."""
        self.cr.init()
        self.cr.update_line(self.pen)
        xy = numpy.array(cpoints)
        self.cr.plot.line(x=xy.T[0], y=xy.T[1], **self.cr.kwdargs)
class CanvasRenderer(object):
    """Factory for RenderContext objects bound to a Ginga viewer."""

    def __init__(self, viewer):
        self.viewer = viewer

    def setup_cr(self, shape):
        """Return a new RenderContext initialized from *shape*."""
        context = RenderContext(self.viewer)
        context.initialize_from_shape(shape)
        return context

    def get_dimensions(self, shape):
        """Return the rendered text extents of *shape*'s text."""
        context = self.setup_cr(shape)
        context.set_font_from_shape(shape)
        return context.text_extents(shape.text)
#END
| bsd-3-clause |
mspublic/OpenLTE | LTE_fdd_dl_file_scan/src/__init__.py | 8 | 1748 | #
# Copyright 2008,2009 Free Software Foundation, Inc.
#
# This application is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The presence of this file turns this directory into a Python package
'''
This is the GNU Radio LTE_fdd_dl_fs module.
'''
# ----------------------------------------------------------------
# Temporary workaround for ticket:181 (swig+python problem)
# Some SWIG-generated extension modules need their shared library loaded
# with RTLD_GLOBAL so symbols resolve across modules. Temporarily OR that
# flag into the interpreter's dlopen flags around the swig import below,
# then restore the original flags afterwards.
import sys
_RTLD_GLOBAL = 0
try:
    # Python 2.x location of the flag constant (the 'dl' module).
    from dl import RTLD_GLOBAL as _RTLD_GLOBAL
except ImportError:
    try:
        # Fallback location on platforms without 'dl'.
        from DLFCN import RTLD_GLOBAL as _RTLD_GLOBAL
    except ImportError:
        # Neither module available: leave _RTLD_GLOBAL at 0 (workaround off).
        pass
if _RTLD_GLOBAL != 0:
    _dlopenflags = sys.getdlopenflags()
    sys.setdlopenflags(_dlopenflags|_RTLD_GLOBAL)
# ----------------------------------------------------------------
# import swig generated symbols into the LTE_fdd_dl_fs namespace
from LTE_fdd_dl_fs import *
# import any pure python here
#
# ----------------------------------------------------------------
# Tail of workaround
if _RTLD_GLOBAL != 0:
    sys.setdlopenflags(_dlopenflags) # Restore original flags
# ----------------------------------------------------------------
| agpl-3.0 |
sburnett/seattle | nodemanager/timeout_xmlrpclib.py | 3 | 1051 | # This code is almost verbatim from:
# http://blog.bjola.ca/2007/08/using-timeout-with-xmlrpclib.html
try:
import xmlrpclib
from xmlrpclib import *
except ImportError:
# Python 3.0 portability fix...
import xmlrpc.client as xmlrpclib
from xmlrpc.client import *
import httplib
def Server(url, *args, **kwargs):
    """Build an xmlrpclib server proxy whose HTTP connection times out.

    Accepts an optional timeout= keyword (seconds, default 20), which is
    consumed here rather than forwarded to xmlrpclib.
    """
    transport = TimeoutTransport()
    transport.timeout = kwargs.pop('timeout', 20)
    kwargs['transport'] = transport
    return xmlrpclib.Server(url, *args, **kwargs)

ServerProxy = Server
class TimeoutTransport(xmlrpclib.Transport):
    # Transport that opens TimeoutHTTP connections so XML-RPC calls
    # cannot block indefinitely on an unresponsive server.
    def make_connection(self, host):
        conn = TimeoutHTTP(host)
        # self.timeout is assigned by Server() before any request is made.
        conn.set_timeout(self.timeout)
        return conn
class TimeoutHTTPConnection(httplib.HTTPConnection):
    # HTTPConnection that applies a socket timeout right after connecting.
    # The 'timeout' attribute is set externally by TimeoutHTTP.set_timeout()
    # before connect() runs.
    def connect(self):
        httplib.HTTPConnection.connect(self)
        self.sock.settimeout(self.timeout)
class TimeoutHTTP(httplib.HTTP):
    # Old-style httplib.HTTP wrapper that uses the timeout-aware connection
    # class above (xmlrpclib's Transport in Python 2 speaks HTTP/0.9-style
    # via httplib.HTTP rather than HTTPConnection directly).
    _connection_class = TimeoutHTTPConnection
    def set_timeout(self, timeout):
        # Stash the timeout on the underlying connection for connect().
        self._conn.timeout = timeout
| mit |
trafi/gyp | pylib/gyp/input.py | 292 | 114315 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of types that are treated as linkable.
linkable_types = ['executable', 'shared_library', 'loadable_module']
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
'destination',
'files',
'include_dirs',
'inputs',
'libraries',
'outputs',
'sources',
]
path_sections = set()
# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}
def IsPathSection(section):
  """Return True if |section| names a list whose values are paths.

  A section is a path section if, after stripping any trailing merge
  operator characters ('=+?!'), it is listed in the module-global
  path_sections set, or it matches the pattern '_(dir|file|path)s?$'.
  """
  # If section ends in one of the '=+?!' characters, it's applied to a section
  # without the trailing characters. '/' is notably absent from this list,
  # because there's no way for a regular expression to be treated as a path.
  # Bug fix: guard on non-emptiness. ''[-1:] is '' and '' is "in" every
  # string, so the previous unguarded loop spun forever on an empty name
  # (or a name consisting solely of '=+?!' characters).
  while section and section[-1] in '=+?!':
    section = section[:-1]
  if section in path_sections:
    return True
  # Sections matching the regexp '_(dir|file|path)s?$' are also
  # considered PathSections. Using manual string matching since that
  # is much faster than the regexp and this can be called hundreds of
  # thousands of times so micro performance matters.
  if "_" in section:
    tail = section[-6:]
    if tail[-1] == 's':
      tail = tail[:-1]
    if tail[-5:] in ('_file', '_path'):
      return True
    return tail[-4:] == '_dir'
  return False
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
# Sections that must exist inside targets and not configurations.
'actions',
'configurations',
'copies',
'default_configuration',
'dependencies',
'dependencies_original',
'libraries',
'postbuilds',
'product_dir',
'product_extension',
'product_name',
'product_prefix',
'rules',
'run_as',
'sources',
'standalone_static_library',
'suppress_wildcard',
'target_name',
'toolset',
'toolsets',
'type',
# Sections that can be found inside targets or configurations, but that
# should not be propagated from targets into their configurations.
'variables',
]
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
'actions',
'all_dependent_settings',
'configurations',
'dependencies',
'direct_dependent_settings',
'libraries',
'link_settings',
'sources',
'standalone_static_library',
'target_name',
'type',
]
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
# Paths for converting filelist paths to output paths: {
# toplevel,
# qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
  """Return a list of all build files included into build_file_path.

  The returned list will contain build_file_path as well as all other files
  that it included, either directly or indirectly. Note that the list may
  contain files that were included into a conditional section that evaluated
  to false and was not merged into build_file_path's dict.

  aux_data is a dict containing a key for each build file or included build
  file. Those keys provide access to dicts whose "included" keys contain
  lists of all other files included by the build file.

  included should be left at its default None value by external callers. It
  is used for recursion.

  The returned list will not contain any duplicate entries. Each build file
  in the list will be relative to the current directory.
  """
  # Idiom fix: identity comparison with None ('is', not '==').
  if included is None:
    included = []
  # Already visited: stop here so include cycles cannot recurse forever.
  if build_file_path in included:
    return included
  included.append(build_file_path)
  for included_build_file in aux_data[build_file_path].get('included', []):
    GetIncludedBuildFiles(included_build_file, aux_data, included)
  return included
def CheckedEval(file_contents):
  """Return the eval of a gyp file.
  The gyp file is restricted to dictionaries and lists only, and
  repeated keys are not allowed.
  Note that this is slower than eval() is.
  """
  # Parse with the (Python 2) 'compiler' module and unwrap the expected
  # Module -> Stmt -> Discard chain to reach the single top-level
  # expression, which CheckNode then validates recursively.
  ast = compiler.parse(file_contents)
  assert isinstance(ast, Module)
  c1 = ast.getChildren()
  # A Module's children are (docstring, Stmt); a gyp file has no docstring.
  assert c1[0] is None
  assert isinstance(c1[1], Stmt)
  c2 = c1[1].getChildren()
  assert isinstance(c2[0], Discard)
  c3 = c2[0].getChildren()
  # Exactly one expression is expected in the file.
  assert len(c3) == 1
  return CheckNode(c3[0], [])
def CheckNode(node, keypath):
  """Recursively convert a restricted compiler AST node to Python data.

  Only Dict, List and Const nodes are accepted; duplicate dictionary keys
  raise GypError. |keypath| tracks the path of keys/indices from the root
  for error messages.
  """
  if isinstance(node, Dict):
    # Dict children alternate: key Const, value node, key Const, value node...
    c = node.getChildren()
    dict = {}
    for n in range(0, len(c), 2):
      assert isinstance(c[n], Const)
      key = c[n].getChildren()[0]
      if key in dict:
        raise GypError("Key '" + key + "' repeated at level " +
                       repr(len(keypath) + 1) + " with key path '" +
                       '.'.join(keypath) + "'")
      kp = list(keypath)  # Make a copy of the list for descending this node.
      kp.append(key)
      dict[key] = CheckNode(c[n + 1], kp)
    return dict
  elif isinstance(node, List):
    c = node.getChildren()
    children = []
    for index, child in enumerate(c):
      kp = list(keypath)  # Copy list.
      kp.append(repr(index))
      children.append(CheckNode(child, kp))
    return children
  elif isinstance(node, Const):
    # Leaf value (string/number); return it unchanged.
    return node.getChildren()[0]
  else:
    raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
                    "': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
                     is_target, check):
  """Read, evaluate and cache one .gyp/.gypi file.

  Results are memoized in |data| keyed by path. |check| selects the strict
  CheckedEval parser over a sandboxed eval. When |is_target| is true the
  caller-supplied |includes| list is merged in as well as the file's own
  'includes' section.
  """
  # Memoized: this file was already loaded.
  if build_file_path in data:
    return data[build_file_path]
  if os.path.exists(build_file_path):
    build_file_contents = open(build_file_path).read()
  else:
    raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
  build_file_data = None
  try:
    if check:
      build_file_data = CheckedEval(build_file_contents)
    else:
      # eval with no builtins: the file may only contain literal data.
      build_file_data = eval(build_file_contents, {'__builtins__': None},
                             None)
  except SyntaxError, e:
    # Attribute the syntax error to the gyp file rather than this module.
    e.filename = build_file_path
    raise
  except Exception, e:
    gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
    raise
  if type(build_file_data) is not dict:
    raise GypError("%s does not evaluate to a dictionary." % build_file_path)
  data[build_file_path] = build_file_data
  aux_data[build_file_path] = {}
  # Scan for includes and merge them in.
  if ('skip_includes' not in build_file_data or
      not build_file_data['skip_includes']):
    try:
      if is_target:
        LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                      aux_data, includes, check)
      else:
        LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                      aux_data, None, check)
    except Exception, e:
      gyp.common.ExceptionAppend(e,
                                 'while reading includes of ' +
                                 build_file_path)
      raise
  return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
                                  includes, check):
  """Merge included files into |subdict| and recurse into its children.

  |includes| is an optional extra list of include paths (already relative
  to the current directory) that is processed before the dict's own
  'includes' section. Inclusion order determines merge order.
  """
  includes_list = []
  if includes != None:
    includes_list.extend(includes)
  if 'includes' in subdict:
    for include in subdict['includes']:
      # "include" is specified relative to subdict_path, so compute the real
      # path to include by appending the provided "include" to the directory
      # in which subdict_path resides.
      relative_include = \
          os.path.normpath(os.path.join(os.path.dirname(subdict_path),
                                        include))
      includes_list.append(relative_include)
    # Unhook the includes list, it's no longer needed.
    del subdict['includes']
  # Merge in the included files.
  for include in includes_list:
    if not 'included' in aux_data[subdict_path]:
      aux_data[subdict_path]['included'] = []
    aux_data[subdict_path]['included'].append(include)
    gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
    MergeDicts(subdict,
               LoadOneBuildFile(include, data, aux_data, None, False, check),
               subdict_path, include)
  # Recurse into subdictionaries.
  for k, v in subdict.iteritems():
    if type(v) is dict:
      LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
                                    None, check)
    elif type(v) is list:
      LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
                                    check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
  """Walk a list, processing 'includes' in any dicts found inside it."""
  for entry in sublist:
    # Exact type checks (not isinstance) mirror how gyp data is validated.
    entry_type = type(entry)
    if entry_type is dict:
      LoadBuildFileIncludesIntoDict(entry, sublist_path, data, aux_data, None,
                                    check)
    elif entry_type is list:
      LoadBuildFileIncludesIntoList(entry, sublist_path, data, aux_data, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
  """Expand each target's 'toolsets' list into per-toolset target copies.

  A target listing N toolsets becomes N targets, each with a single
  'toolset' key. Also recurses into 'conditions' entries, since targets
  (and toolsets) may be defined conditionally.
  """
  if 'targets' in data:
    target_list = data['targets']
    new_target_list = []
    for target in target_list:
      # If this target already has an explicit 'toolset', and no 'toolsets'
      # list, don't modify it further.
      if 'toolset' in target and 'toolsets' not in target:
        new_target_list.append(target)
        continue
      if multiple_toolsets:
        toolsets = target.get('toolsets', ['target'])
      else:
        # Generator supports only one toolset; force the default.
        toolsets = ['target']
      # Make sure this 'toolsets' definition is only processed once.
      if 'toolsets' in target:
        del target['toolsets']
      if len(toolsets) > 0:
        # Optimization: only do copies if more than one toolset is specified.
        for build in toolsets[1:]:
          new_target = gyp.simple_copy.deepcopy(target)
          new_target['toolset'] = build
          new_target_list.append(new_target)
        # The original dict is reused for the first toolset.
        target['toolset'] = toolsets[0]
        new_target_list.append(target)
    data['targets'] = new_target_list
  if 'conditions' in data:
    for condition in data['conditions']:
      if type(condition) is list:
        # condition[0] is the expression; the rest are then/else dicts.
        for condition_dict in condition[1:]:
          if type(condition_dict) is dict:
            ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
                        depth, check, load_dependencies):
  """Load one target .gyp file, expand it, and collect its dependencies.

  Performs DEPTH setup, include merging, early variable/condition
  processing, toolset expansion and target_defaults merging. When
  |load_dependencies| is true, dependencies are loaded recursively and
  False is returned for an already-loaded file; otherwise returns a
  (build_file_path, dependencies) tuple for the parallel loader.
  """
  # If depth is set, predefine the DEPTH variable to be a relative path from
  # this build file's directory to the directory identified by depth.
  if depth:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
    if d == '':
      variables['DEPTH'] = '.'
    else:
      variables['DEPTH'] = d.replace('\\', '/')
  # The 'target_build_files' key is only set when loading target build files in
  # the non-parallel code path, where LoadTargetBuildFile is called
  # recursively. In the parallel code path, we don't need to check whether the
  # |build_file_path| has already been loaded, because the 'scheduled' set in
  # ParallelState guarantees that we never load the same |build_file_path|
  # twice.
  if 'target_build_files' in data:
    if build_file_path in data['target_build_files']:
      # Already loaded.
      return False
    data['target_build_files'].add(build_file_path)
  gyp.DebugOutput(gyp.DEBUG_INCLUDES,
                  "Loading Target Build File '%s'", build_file_path)
  build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
                                     includes, True, check)
  # Store DEPTH for later use in generators.
  build_file_data['_DEPTH'] = depth
  # Set up the included_files key indicating which .gyp files contributed to
  # this target dict.
  if 'included_files' in build_file_data:
    raise GypError(build_file_path + ' must not contain included_files key')
  included = GetIncludedBuildFiles(build_file_path, aux_data)
  build_file_data['included_files'] = []
  for included_file in included:
    # included_file is relative to the current directory, but it needs to
    # be made relative to build_file_path's directory.
    included_relative = \
        gyp.common.RelativePath(included_file,
                                os.path.dirname(build_file_path))
    build_file_data['included_files'].append(included_relative)
  # Do a first round of toolsets expansion so that conditions can be defined
  # per toolset.
  ProcessToolsetsInDict(build_file_data)
  # Apply "pre"/"early" variable expansions and condition evaluations.
  ProcessVariablesAndConditionsInDict(
      build_file_data, PHASE_EARLY, variables, build_file_path)
  # Since some toolsets might have been defined conditionally, perform
  # a second round of toolsets expansion now.
  ProcessToolsetsInDict(build_file_data)
  # Look at each project's target_defaults dict, and merge settings into
  # targets.
  if 'target_defaults' in build_file_data:
    if 'targets' not in build_file_data:
      raise GypError("Unable to find targets in build file %s" %
                     build_file_path)
    index = 0
    while index < len(build_file_data['targets']):
      # This procedure needs to give the impression that target_defaults is
      # used as defaults, and the individual targets inherit from that.
      # The individual targets need to be merged into the defaults. Make
      # a deep copy of the defaults for each target, merge the target dict
      # as found in the input file into that copy, and then hook up the
      # copy with the target-specific data merged into it as the replacement
      # target dict.
      old_target_dict = build_file_data['targets'][index]
      new_target_dict = gyp.simple_copy.deepcopy(
          build_file_data['target_defaults'])
      MergeDicts(new_target_dict, old_target_dict,
                 build_file_path, build_file_path)
      build_file_data['targets'][index] = new_target_dict
      index += 1
    # No longer needed.
    del build_file_data['target_defaults']
  # Look for dependencies. This means that dependency resolution occurs
  # after "pre" conditionals and variable expansion, but before "post" -
  # in other words, you can't put a "dependencies" section inside a "post"
  # conditional within a target.
  dependencies = []
  if 'targets' in build_file_data:
    for target_dict in build_file_data['targets']:
      if 'dependencies' not in target_dict:
        continue
      for dependency in target_dict['dependencies']:
        dependencies.append(
            gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
  if load_dependencies:
    for dependency in dependencies:
      try:
        LoadTargetBuildFile(dependency, data, aux_data, variables,
                            includes, depth, check, load_dependencies)
      except Exception, e:
        gyp.common.ExceptionAppend(
            e, 'while loading dependencies of %s' % build_file_path)
        raise
  else:
    return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
                            build_file_path, variables,
                            includes, depth, check,
                            generator_input_info):
  """Wrapper around LoadTargetBuildFile for parallel processing.
  This wrapper is used when LoadTargetBuildFile is executed in
  a worker process.

  Returns (build_file_path, build_file_data, dependencies) on success,
  or None on failure (errors are reported to stderr here, since
  exceptions do not propagate usefully across the process boundary).
  """
  try:
    # Let the parent process handle Ctrl-C; workers ignore it.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    # Apply globals so that the worker process behaves the same.
    for key, value in global_flags.iteritems():
      globals()[key] = value
    SetGeneratorGlobals(generator_input_info)
    result = LoadTargetBuildFile(build_file_path, per_process_data,
                                 per_process_aux_data, variables,
                                 includes, depth, check, False)
    if not result:
      return result
    (build_file_path, dependencies) = result
    # We can safely pop the build_file_data from per_process_data because it
    # will never be referenced by this process again, so we don't need to keep
    # it in the cache.
    build_file_data = per_process_data.pop(build_file_path)
    # This gets serialized and sent back to the main process via a pipe.
    # It's handled in LoadTargetBuildFileCallback.
    return (build_file_path,
            build_file_data,
            dependencies)
  except GypError, e:
    sys.stderr.write("gyp: %s\n" % e)
    return None
  except Exception, e:
    print >>sys.stderr, 'Exception:', e
    print >>sys.stderr, traceback.format_exc()
    return None
class ParallelProcessingError(Exception):
  """Raised when parallel build-file loading fails in a worker process."""
  pass
class ParallelState(object):
  """Class to keep track of state when processing input files in parallel.
  If build files are loaded in parallel, use this to keep track of
  state during farming out and processing parallel jobs. It's stored
  in a global so that the callback function can have access to it.
  """

  def __init__(self):
    # The multiprocessing pool.
    self.pool = None
    # The condition variable used to protect this object and notify
    # the main loop when there might be more data to process.
    self.condition = None
    # The "data" dict that was passed to LoadTargetBuildFileParallel
    self.data = None
    # The number of parallel calls outstanding; decremented when a response
    # was received.
    self.pending = 0
    # The set of all build files that have been scheduled, so we don't
    # schedule the same one twice.
    self.scheduled = set()
    # A list of dependency build file paths that haven't been scheduled yet.
    self.dependencies = []
    # Flag to indicate if there was an error in a child process.
    self.error = False

  def LoadTargetBuildFileCallback(self, result):
    """Handle the results of running LoadTargetBuildFile in another process.
    """
    # Runs on a pool helper thread in the main process; all shared state
    # is mutated under self.condition.
    self.condition.acquire()
    if not result:
      # Worker reported failure (it already printed the error).
      self.error = True
      self.condition.notify()
      self.condition.release()
      return
    (build_file_path0, build_file_data0, dependencies0) = result
    self.data[build_file_path0] = build_file_data0
    self.data['target_build_files'].add(build_file_path0)
    # Queue any dependencies that haven't been scheduled yet.
    for new_dependency in dependencies0:
      if new_dependency not in self.scheduled:
        self.scheduled.add(new_dependency)
        self.dependencies.append(new_dependency)
    self.pending -= 1
    # Wake the main loop: there may be new work or we may be done.
    self.condition.notify()
    self.condition.release()
self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
                                 check, generator_input_info):
  """Loads |build_files| and their dependencies using a process pool.

  Workers run CallLoadTargetBuildFile; each result is merged into |data| by
  ParallelState.LoadTargetBuildFileCallback, which may also append newly
  discovered dependency build files for scheduling.  Calls sys.exit(1) if any
  worker reported an error.
  """
  parallel_state = ParallelState()
  parallel_state.condition = threading.Condition()
  # Make copies of the build_files argument that we can modify while working.
  parallel_state.dependencies = list(build_files)
  parallel_state.scheduled = set(build_files)
  parallel_state.pending = 0
  parallel_state.data = data

  try:
    parallel_state.condition.acquire()
    # Loop until every scheduled build file has been loaded and no calls are
    # outstanding.  The callback (run in this process by the pool) appends to
    # dependencies and decrements pending under the same condition.
    while parallel_state.dependencies or parallel_state.pending:
      if parallel_state.error:
        break
      if not parallel_state.dependencies:
        # Nothing to schedule yet; wait for a callback to notify us.
        parallel_state.condition.wait()
        continue

      dependency = parallel_state.dependencies.pop()

      parallel_state.pending += 1
      # Snapshot the module-level flags so the worker process can replicate
      # this process's configuration (see CallLoadTargetBuildFile).
      global_flags = {
        'path_sections': globals()['path_sections'],
        'non_configuration_keys': globals()['non_configuration_keys'],
        'multiple_toolsets': globals()['multiple_toolsets']}

      # Lazily create the pool so it is only spun up when there is work.
      if not parallel_state.pool:
        parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
      parallel_state.pool.apply_async(
          CallLoadTargetBuildFile,
          args = (global_flags, dependency,
                  variables, includes, depth, check, generator_input_info),
          callback = parallel_state.LoadTargetBuildFileCallback)
  except KeyboardInterrupt, e:
    parallel_state.pool.terminate()
    raise e
    # NOTE(review): the condition is not released on this path; acceptable
    # only because the exception propagates and the process exits.

  parallel_state.condition.release()

  parallel_state.pool.close()
  parallel_state.pool.join()
  parallel_state.pool = None

  if parallel_state.error:
    sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple.  For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS = set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}


def FindEnclosingBracketGroup(input_str):
  """Return (start, end) of the first balanced bracket group in input_str.

  end is one past the matching closing bracket.  Returns (-1, -1) when the
  string contains no bracket group, or the brackets are unbalanced or
  mismatched.
  """
  open_stack = []
  group_start = -1
  for position, char in enumerate(input_str):
    if char in LBRACKETS:
      open_stack.append(char)
      if group_start == -1:
        group_start = position
    elif char in BRACKETS:
      # A closer with nothing open, or one that doesn't pair with the most
      # recent opener, means the string is malformed.
      if not open_stack:
        return (-1, -1)
      if open_stack.pop() != BRACKETS[char]:
        return (-1, -1)
      if not open_stack:
        # The first opener just got closed: that's the enclosing group.
        return (group_start, position + 1)
  return (-1, -1)
def IsStrCanonicalInt(string):
  """Returns True if |string| is in its canonical integer form.

  The canonical form is such that str(int(string)) == string: no leading
  zeros, no "-0", no whitespace, no sign other than a leading '-'.
  """
  # This function is called a lot so for maximum performance, avoid
  # involving regexps which would otherwise make the code much
  # shorter.  Regexps would need twice the time of this function.
  if type(string) is not str or not string:
    return False
  if string == "0":
    return True
  body = string[1:] if string[0] == "-" else string
  if not body:
    # A bare "-" is not an integer.
    return False
  # Canonical integers other than "0" never start with '0' (or '-0').
  if not ("1" <= body[0] <= "9"):
    return False
  return body.isdigit()
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
# Named groups: 'replace' is the whole reference, 'type' is the sigil plus
# any !/@/| modifiers, 'command_string' an optional interpreter name,
# 'is_array' a possible leading '[', and 'content' the variable/command text.
early_variable_re = re.compile(
    r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
    r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
    r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# Global cache of results from running commands so they don't have to be run
# more than once.  Keyed by (command string, directory); see ExpandVariables.
cached_command_results = {}
def FixupPlatformCommand(cmd):
  """Translates POSIX-isms in |cmd| to their Windows equivalents.

  Currently only rewrites a leading "cat " to "type " on win32.  |cmd| may be
  a command string or an argv-style list (only the first element is fixed up).
  """
  if sys.platform != 'win32':
    return cmd
  if type(cmd) is list:
    return [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
  return re.sub('^cat ', 'type ', cmd)
# Expansion phases (see ExpandVariables and ProcessConditionsInDict):
PHASE_EARLY = 0     # '<' variable references; 'conditions' sections.
PHASE_LATE = 1      # '>' variable references; 'target_conditions' sections.
PHASE_LATELATE = 2  # '^' variable references; no conditions are processed.
def ExpandVariables(input, phase, variables, build_file):
  """Expands GYP variable and command references in |input| for |phase|.

  |input| may be a string, an int, or a list (lists are handled by recursing
  on the output).  |phase| selects the active syntax: PHASE_EARLY expands
  '<' references, PHASE_LATE '>', PHASE_LATELATE '^'.  |variables| maps
  variable names to values; |build_file| is the path of the file being
  processed, used for relative paths and error messages.

  Returns the expanded value.  Strings that are canonical integers are
  returned as ints, and '@' expansions can turn a string into a list.
  Raises GypError for undefined variables, bad expansions, and failed
  command executions.
  """
  # Look for the pattern that gets expanded into variables
  if phase == PHASE_EARLY:
    variable_re = early_variable_re
    expansion_symbol = '<'
  elif phase == PHASE_LATE:
    variable_re = late_variable_re
    expansion_symbol = '>'
  elif phase == PHASE_LATELATE:
    variable_re = latelate_variable_re
    expansion_symbol = '^'
  else:
    assert False

  input_str = str(input)
  if IsStrCanonicalInt(input_str):
    return int(input_str)

  # Do a quick scan to determine if an expensive regex search is warranted.
  if expansion_symbol not in input_str:
    return input_str

  # Get the entire list of matches as a list of MatchObject instances.
  # (using findall here would return strings instead of MatchObjects).
  matches = list(variable_re.finditer(input_str))
  if not matches:
    return input_str

  output = input_str
  # Reverse the list of matches so that replacements are done right-to-left.
  # That ensures that earlier replacements won't mess up the string in a
  # way that causes later calls to find the earlier substituted text instead
  # of what's intended for replacement.
  matches.reverse()
  for match_group in matches:
    match = match_group.groupdict()
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
    # match['replace'] is the substring to look for, match['type']
    # is the character code for the replacement type (< > <! >! <| >| <@
    # >@ <!@ >!@), match['is_array'] contains a '[' for command
    # arrays, and match['content'] is the name of the variable (< >)
    # or command to run (<! >!). match['command_string'] is an optional
    # command string. Currently, only 'pymod_do_main' is supported.

    # run_command is true if a ! variant is used.
    run_command = '!' in match['type']
    command_string = match['command_string']

    # file_list is true if a | variant is used.
    file_list = '|' in match['type']

    # Capture these now so we can adjust them later.
    replace_start = match_group.start('replace')
    replace_end = match_group.end('replace')

    # Find the ending paren, and re-evaluate the contained string.
    (c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])

    # Adjust the replacement range to match the entire command
    # found by FindEnclosingBracketGroup (since the variable_re
    # probably doesn't match the entire command if it contained
    # nested variables).
    replace_end = replace_start + c_end

    # Find the "real" replacement, matching the appropriate closing
    # paren, and adjust the replacement start and end.
    replacement = input_str[replace_start:replace_end]

    # Figure out what the contents of the variable parens are.
    contents_start = replace_start + c_start + 1
    contents_end = replace_end - 1
    contents = input_str[contents_start:contents_end]

    # Do filter substitution now for <|().
    # Admittedly, this is different than the evaluation order in other
    # contexts. However, since filtration has no chance to run on <|(),
    # this seems like the only obvious way to give them access to filters.
    if file_list:
      processed_variables = gyp.simple_copy.deepcopy(variables)
      ProcessListFiltersInDict(contents, processed_variables)
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase,
                                 processed_variables, build_file)
    else:
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase, variables, build_file)

    # Strip off leading/trailing whitespace so that variable matches are
    # simpler below (and because they are rarely needed).
    contents = contents.strip()

    # expand_to_list is true if an @ variant is used.  In that case,
    # the expansion should result in a list.  Note that the caller
    # is to be expecting a list in return, and not all callers do
    # because not all are working in list context.  Also, for list
    # expansions, there can be no other text besides the variable
    # expansion in the input string.
    expand_to_list = '@' in match['type'] and input_str == replacement

    if run_command or file_list:
      # Find the build file's directory, so commands can be run or file lists
      # generated relative to it.
      build_file_dir = os.path.dirname(build_file)
      if build_file_dir == '' and not file_list:
        # If build_file is just a leaf filename indicating a file in the
        # current directory, build_file_dir might be an empty string.  Set
        # it to None to signal to subprocess.Popen that it should run the
        # command in the current directory.
        build_file_dir = None

      # Support <|(listfile.txt ...) which generates a file
      # containing items from a gyp list, generated at gyp time.
      # This works around actions/rules which have more inputs than will
      # fit on the command line.
      if file_list:
        if type(contents) is list:
          contents_list = contents
        else:
          contents_list = contents.split(' ')
        replacement = contents_list[0]
        if os.path.isabs(replacement):
          raise GypError('| cannot handle absolute paths, got "%s"' % replacement)

        if not generator_filelist_paths:
          path = os.path.join(build_file_dir, replacement)
        else:
          if os.path.isabs(build_file_dir):
            toplevel = generator_filelist_paths['toplevel']
            rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
          else:
            rel_build_file_dir = build_file_dir
          qualified_out_dir = generator_filelist_paths['qualified_out_dir']
          path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
          gyp.common.EnsureDirExists(path)

        replacement = gyp.common.RelativePath(path, build_file_dir)
        f = gyp.common.WriteOnDiff(path)
        for i in contents_list[1:]:
          f.write('%s\n' % i)
        f.close()

      elif run_command:
        use_shell = True
        if match['is_array']:
          contents = eval(contents)
          use_shell = False

        # Check for a cached value to avoid executing commands, or generating
        # file lists more than once.  The cache key contains the command to be
        # run as well as the directory to run it from, to account for commands
        # that depend on their current directory.
        # TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
        # someone could author a set of GYP files where each time the command
        # is invoked it produces different output by design.  When the need
        # arises, the syntax should be extended to support no caching off a
        # command's output so it is run every time.
        cache_key = (str(contents), build_file_dir)
        cached_value = cached_command_results.get(cache_key, None)
        if cached_value is None:
          gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                          "Executing command '%s' in directory '%s'",
                          contents, build_file_dir)

          replacement = ''

          if command_string == 'pymod_do_main':
            # <!pymod_do_main(modulename param eters) loads |modulename| as a
            # python module and then calls that module's DoMain() function,
            # passing ["param", "eters"] as a single list argument.  For modules
            # that don't load quickly, this can be faster than
            # <!(python modulename param eters).  Do this in |build_file_dir|.
            oldwd = os.getcwd()  # Python doesn't like os.open('.'): no fchdir.
            if build_file_dir:  # build_file_dir may be None (see above).
              os.chdir(build_file_dir)
            try:
              parsed_contents = shlex.split(contents)
              try:
                py_module = __import__(parsed_contents[0])
              except ImportError as e:
                # NOTE(review): the two literals below concatenate without a
                # space ("...pymod_do_mainmodule"); message text kept as-is.
                raise GypError("Error importing pymod_do_main"
                               "module (%s): %s" % (parsed_contents[0], e))
              replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
            finally:
              os.chdir(oldwd)
            assert replacement != None
          elif command_string:
            raise GypError("Unknown command string '%s' in '%s'." %
                           (command_string, contents))
          else:
            # Fix up command with platform specific workarounds.
            contents = FixupPlatformCommand(contents)
            p = subprocess.Popen(contents, shell=use_shell,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE,
                                 cwd=build_file_dir)

            p_stdout, p_stderr = p.communicate('')

            if p.wait() != 0 or p_stderr:
              sys.stderr.write(p_stderr)
              # Simulate check_call behavior, since check_call only exists
              # in python 2.5 and later.
              raise GypError("Call to '%s' returned exit status %d." %
                             (contents, p.returncode))
            replacement = p_stdout.rstrip()

          cached_command_results[cache_key] = replacement
        else:
          gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                          "Had cache value for command '%s' in directory '%s'",
                          contents,build_file_dir)
          replacement = cached_value

    else:
      if not contents in variables:
        if contents[-1] in ['!', '/']:
          # In order to allow cross-compiles (nacl) to happen more naturally,
          # we will allow references to >(sources/) etc. to resolve to
          # an empty list if undefined. This allows actions to:
          # 'action!': [
          #   '>@(_sources!)',
          # ],
          # 'action/': [
          #   '>@(_sources/)',
          # ],
          replacement = []
        else:
          raise GypError('Undefined variable ' + contents +
                         ' in ' + build_file)
      else:
        replacement = variables[contents]

    if type(replacement) is list:
      for item in replacement:
        if not contents[-1] == '/' and type(item) not in (str, int):
          raise GypError('Variable ' + contents +
                         ' must expand to a string or list of strings; ' +
                         'list contains a ' +
                         item.__class__.__name__)
      # Run through the list and handle variable expansions in it.  Since
      # the list is guaranteed not to contain dicts, this won't do anything
      # with conditions sections.
      ProcessVariablesAndConditionsInList(replacement, phase, variables,
                                          build_file)
    elif type(replacement) not in (str, int):
      raise GypError('Variable ' + contents +
                     ' must expand to a string or list of strings; ' +
                     'found a ' + replacement.__class__.__name__)

    if expand_to_list:
      # Expanding in list context.  It's guaranteed that there's only one
      # replacement to do in |input_str| and that it's this replacement.  See
      # above.
      if type(replacement) is list:
        # If it's already a list, make a copy.
        output = replacement[:]
      else:
        # Split it the same way sh would split arguments.
        output = shlex.split(str(replacement))
    else:
      # Expanding in string context.
      encoded_replacement = ''
      if type(replacement) is list:
        # When expanding a list into string context, turn the list items
        # into a string in a way that will work with a subprocess call.
        #
        # TODO(mark): This isn't completely correct.  This should
        # call a generator-provided function that observes the
        # proper list-to-argument quoting rules on a specific
        # platform instead of just calling the POSIX encoding
        # routine.
        encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
      else:
        encoded_replacement = replacement

      output = output[:replace_start] + str(encoded_replacement) + \
               output[replace_end:]
    # Prepare for the next match iteration.
    input_str = output

  if output == input:
    gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                    "Found only identity matches on %r, avoiding infinite "
                    "recursion.",
                    output)
  else:
    # Look for more matches now that we've replaced some, to deal with
    # expanding local variables (variables defined in the same
    # variables block as this one).
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
    if type(output) is list:
      if output and type(output[0]) is list:
        # Leave output alone if it's a list of lists.
        # We don't want such lists to be stringified.
        pass
      else:
        new_output = []
        for item in output:
          new_output.append(
              ExpandVariables(item, phase, variables, build_file))
        output = new_output
    else:
      output = ExpandVariables(output, phase, variables, build_file)

  # Convert all strings that are canonically-represented integers into integers.
  if type(output) is list:
    for index in xrange(0, len(output)):
      if IsStrCanonicalInt(output[index]):
        output[index] = int(output[index])
  elif IsStrCanonicalInt(output):
    output = int(output)

  return output
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
# Maps condition-expression text to the code object produced by
# compile(..., '<string>', 'eval'); populated by EvalSingleCondition.
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
  """Returns the dict that should be used or None if the result was
  that nothing should be used.

  |condition| is a list of [cond_expr, true_dict, (optional) false_dict, ...]
  entries; only the first expression evaluated while no result has been
  chosen yet determines the outcome.
  """
  if type(condition) is not list:
    raise GypError(conditions_key + ' must be a list')
  if len(condition) < 2:
    # It's possible that condition[0] won't work in which case this
    # attempt will raise its own IndexError.  That's probably fine.
    raise GypError(conditions_key + ' ' + condition[0] +
                   ' must be at least length 2, not ' + str(len(condition)))

  index = 0
  result = None
  while index < len(condition):
    cond_expr = condition[index]
    true_dict = condition[index + 1]
    if type(true_dict) is not dict:
      raise GypError('{} {} must be followed by a dictionary, not {}'.format(
          conditions_key, cond_expr, type(true_dict)))
    false_dict = None
    if len(condition) > index + 2 and type(condition[index + 2]) is dict:
      # A trailing dict is the false branch; nothing may follow it.
      false_dict = condition[index + 2]
      index += 3
      if index != len(condition):
        raise GypError('{} {} has {} unexpected trailing items'.format(
            conditions_key, cond_expr, len(condition) - index))
    else:
      index += 2
    # Only the first un-decided condition is evaluated.
    if result is None:
      result = EvalSingleCondition(
          cond_expr, true_dict, false_dict, phase, variables, build_file)
  return result
def EvalSingleCondition(
    cond_expr, true_dict, false_dict, phase, variables, build_file):
  """Returns true_dict if cond_expr evaluates to true, and false_dict
  otherwise."""
  # Do expansions on the condition itself.  Since the condition can naturally
  # contain variable references without needing to resort to GYP expansion
  # syntax, this is of dubious value for variables, but someone might want to
  # use a command expansion directly inside a condition.
  cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
                                       build_file)
  if type(cond_expr_expanded) not in (str, int):
    raise ValueError(
        'Variable expansion in this context permits str and int ' + \
        'only, found ' + cond_expr_expanded.__class__.__name__)

  try:
    # Compile each distinct expression only once; reuse the code object for
    # every later evaluation of the same text.
    ast_code = cached_conditions_asts.get(cond_expr_expanded)
    if ast_code is None:
      ast_code = compile(cond_expr_expanded, '<string>', 'eval')
      cached_conditions_asts[cond_expr_expanded] = ast_code
    # Evaluate with builtins disabled; |variables| provides the names.
    if eval(ast_code, {'__builtins__': None}, variables):
      return true_dict
    return false_dict
  except SyntaxError as e:
    syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
                               'at character %d.' %
                               (str(e.args[0]), e.text, build_file, e.offset),
                               e.filename, e.lineno, e.offset, e.text)
    raise syntax_error
  except NameError as e:
    gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
                               (cond_expr_expanded, build_file))
    raise GypError(e)
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
  """Evaluates the conditions section of the_dict and merges the results.

  Processes a 'conditions' (PHASE_EARLY) or 'target_conditions' (PHASE_LATE)
  section; PHASE_LATELATE has no conditions to process.

  Each item in a conditions list consists of cond_expr, a string expression
  evaluated as the condition, and true_dict, a dict that will be merged into
  the_dict if cond_expr evaluates to true.  Optionally, a third item,
  false_dict, may be present.  false_dict is merged into the_dict if
  cond_expr evaluates to false.

  Any dict merged into the_dict will be recursively processed for nested
  conditionals and other expansions, also according to phase, immediately
  prior to being merged.
  """
  if phase == PHASE_EARLY:
    conditions_key = 'conditions'
  elif phase == PHASE_LATE:
    conditions_key = 'target_conditions'
  elif phase == PHASE_LATELATE:
    return
  else:
    assert False

  if conditions_key not in the_dict:
    return

  # Unhook the conditions list; it's no longer needed once processed.
  conditions_list = the_dict.pop(conditions_key)

  for condition in conditions_list:
    merge_dict = EvalCondition(condition, conditions_key, phase, variables,
                               build_file)
    if merge_dict is not None:
      # Expand variables and nested conditionals in the merge_dict before
      # merging it.
      ProcessVariablesAndConditionsInDict(merge_dict, phase,
                                          variables, build_file)
      MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
  """Adds "automatic" variables from the_dict to |variables|.

  Any key in the_dict with a plain string, int, or list value becomes an
  automatic variable named "_" + key.
  """
  for key, value in the_dict.iteritems():
    if type(value) in (str, int, list):
      variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  """Loads variable definitions from the_dict's "variables" sub-dict.

  Any key in the_dict's "variables" dict, if it has one, becomes a
  variable.  The variable name is the key name in the "variables" dict.
  Variables that end with the % character are set only if they are unset in
  the variables dict.  the_dict_key is the name of the key that accesses
  the_dict in the_dict's parent dict.  If the_dict's parent is not a dict
  (it could be a list or it could be parentless because it is a root dict),
  the_dict_key will be None.
  """
  # .items() iterates identically to the py2-only .iteritems() here.
  for key, value in the_dict.get('variables', {}).items():
    # Only plain values can become variables.
    if type(value) not in (str, int, list):
      continue
    if key.endswith('%'):
      variable_name = key[:-1]
      if variable_name in variables:
        # If the variable is already set, don't set it.
        continue
      # BUG FIX: this was "the_dict_key is 'variables'", a string identity
      # comparison that only worked because CPython interns short literals;
      # equality is the intended check.
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
        value = the_dict[variable_name]
    else:
      variable_name = key
    variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
                                        build_file, the_dict_key=None):
  """Handle all variable and command expansion and conditional evaluation.

  This function is the public entry point for all variable expansions and
  conditional evaluations.  The variables_in dictionary will not be modified
  by this function.

  Args:
    the_dict: the dict to process, modified in place.
    phase: PHASE_EARLY, PHASE_LATE, or PHASE_LATELATE.
    variables_in: mapping of variable names to values; copied before use.
    build_file: path of the build file the_dict came from, for expansion
        context and error messages.
    the_dict_key: the key accessing the_dict in its parent dict, or None.
  """
  # Make a copy of the variables_in dict that can be modified during the
  # loading of automatics and the loading of the variables dict.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)

  if 'variables' in the_dict:
    # Make sure all the local variables are added to the variables
    # list before we process them so that you can reference one
    # variable from another.  They will be fully expanded by recursion
    # in ExpandVariables.
    for key, value in the_dict['variables'].iteritems():
      variables[key] = value

    # Handle the associated variables dict first, so that any variable
    # references within can be resolved prior to using them as variables.
    # Pass a copy of the variables dict to avoid having it be tainted.
    # Otherwise, it would have extra automatics added for everything that
    # should just be an ordinary variable in this scope.
    ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
                                        variables, build_file, 'variables')

  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  for key, value in the_dict.iteritems():
    # Skip "variables", which was already processed if present.
    if key != 'variables' and type(value) is str:
      expanded = ExpandVariables(value, phase, variables, build_file)
      if type(expanded) not in (str, int):
        raise ValueError(
            'Variable expansion in this context permits str and int ' + \
            'only, found ' + expanded.__class__.__name__ + ' for ' + key)
      the_dict[key] = expanded

  # Variable expansion may have resulted in changes to automatics.  Reload.
  # TODO(mark): Optimization: only reload if no changes were made.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Process conditions in this dict.  This is done after variable expansion
  # so that conditions may take advantage of expanded variables.  For example,
  # if the_dict contains:
  #   {'type':       '<(library_type)',
  #    'conditions': [['_type=="static_library"', { ... }]]},
  # _type, as used in the condition, will only be set to the value of
  # library_type if variable expansion is performed before condition
  # processing.  However, condition processing should occur prior to recursion
  # so that variables (both automatic and "variables" dict type) may be
  # adjusted by conditions sections, merged into the_dict, and have the
  # intended impact on contained dicts.
  #
  # This arrangement means that a "conditions" section containing a
  # "variables" section will only have those variables effective in subdicts,
  # not in the_dict.  The workaround is to put a "conditions" section within
  # a "variables" section.  For example:
  #   {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will not result in "IS_MAC" being appended to the "defines" list in the
  # current scope but would result in it being appended to the "defines" list
  # within "my_subdict".  By comparison:
  #   {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will append "IS_MAC" to both "defines" lists.

  # Evaluate conditions sections, allowing variable expansions within them
  # as well as nested conditionals.  This will process a 'conditions' or
  # 'target_conditions' section, perform appropriate merging and recursive
  # conditional and variable processing, and then remove the conditions section
  # from the_dict if it is present.
  ProcessConditionsInDict(the_dict, phase, variables, build_file)

  # Conditional processing may have resulted in changes to automatics or the
  # variables dict.  Reload.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Recurse into child dicts, or process child lists which may result in
  # further recursion into descendant dicts.
  for key, value in the_dict.iteritems():
    # Skip "variables" and string values, which were already processed if
    # present.
    if key == 'variables' or type(value) is str:
      continue
    if type(value) is dict:
      # Pass a copy of the variables dict so that subdicts can't influence
      # parents.
      ProcessVariablesAndConditionsInDict(value, phase, variables,
                                          build_file, key)
    elif type(value) is list:
      # The list itself can't influence the variables dict, and
      # ProcessVariablesAndConditionsInList will make copies of the variables
      # dict if it needs to pass it to something that can influence it.  No
      # copy is necessary here.
      ProcessVariablesAndConditionsInList(value, phase, variables,
                                          build_file)
    elif type(value) is not int:
      raise TypeError('Unknown type ' + value.__class__.__name__ + \
                      ' for ' + key)
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
                                        build_file):
  """Expands variables in each item of the_list, in place.

  String items are expanded with ExpandVariables; dict and list items are
  processed recursively.  A string item that expands to a list is spliced
  into the_list at its position.  Raises ValueError when an expansion
  produces a type other than str, int, or list, and TypeError for any item
  that is not a dict, list, str, or int.
  """
  # Iterate using an index so that new values can be assigned into the_list.
  index = 0
  while index < len(the_list):
    item = the_list[index]
    if type(item) is dict:
      # Make a copy of the variables dict so that it won't influence anything
      # outside of its own scope.
      ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
    elif type(item) is list:
      ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
    elif type(item) is str:
      expanded = ExpandVariables(item, phase, variables, build_file)
      if type(expanded) in (str, int):
        the_list[index] = expanded
      elif type(expanded) is list:
        the_list[index:index+1] = expanded
        index += len(expanded)
        # index now identifies the next item to examine.  Continue right now
        # without falling into the index increment below.
        continue
      else:
        # BUG FIX: the int |index| was concatenated directly into the message
        # ("... at ' + index"), which raised TypeError instead of the intended
        # ValueError; wrap it in str().
        raise ValueError(
            'Variable expansion in this context permits strings and ' + \
            'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
            str(index))
    elif type(item) is not int:
      # BUG FIX: same str(index) fix as above so the TypeError carries its
      # intended message rather than failing while being built.
      raise TypeError('Unknown type ' + item.__class__.__name__ + \
                      ' at index ' + str(index))
    index = index + 1
def BuildTargetsDict(data):
  """Builds a dict mapping fully-qualified target names to their target dicts.

  |data| is a dict mapping loaded build files by pathname relative to the
  current directory.  Values in |data| are build file contents.  For each
  |data| value with a "targets" key, the value of the "targets" key is taken
  as a list containing target dicts.  Each target's fully-qualified name is
  constructed from the pathname of the build file (|data| key) and its
  "target_name" property.  These fully-qualified names are used as the keys
  in the returned dict.  These keys provide access to the target dicts,
  the dicts in the "targets" lists.
  """
  result = {}
  for build_file in data['target_build_files']:
    build_file_dict = data[build_file]
    for target_dict in build_file_dict.get('targets', []):
      qualified = gyp.common.QualifiedTarget(build_file,
                                             target_dict['target_name'],
                                             target_dict['toolset'])
      if qualified in result:
        raise GypError('Duplicate target definitions for ' + qualified)
      result[qualified] = target_dict
  return result
def QualifyDependencies(targets):
  """Make dependency links fully-qualified relative to the current directory.

  |targets| is a dict mapping fully-qualified target names to their target
  dicts.  For each target in this dict, keys known to contain dependency
  links are examined, and any dependencies referenced will be rewritten
  so that they are fully-qualified and relative to the current directory.
  All rewritten dependencies are suitable for use as keys to |targets| or a
  similar dict.
  """
  # Each dependency section has plain, '!' (exclusion), and '/' (regex)
  # flavors.
  all_dependency_sections = [dep + op
                             for dep in dependency_sections
                             for op in ('', '!', '/')]

  for target, target_dict in targets.iteritems():
    target_build_file = gyp.common.BuildFile(target)
    toolset = target_dict['toolset']
    for dependency_key in all_dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      for index, dep in enumerate(dependencies):
        dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
            target_build_file, dep, toolset)
        if not multiple_toolsets:
          # Ignore toolset specification in the dependency if it is specified.
          dep_toolset = toolset
        dependency = gyp.common.QualifiedTarget(dep_file,
                                                dep_target,
                                                dep_toolset)
        # Rewrite the entry in place with its fully-qualified form.
        dependencies[index] = dependency

        # Make sure anything appearing in a list other than "dependencies"
        # also appears in the "dependencies" list.
        if dependency_key != 'dependencies' and \
           dependency not in target_dict['dependencies']:
          raise GypError('Found ' + dependency + ' in ' + dependency_key +
                         ' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
  """Expands dependencies specified as build_file:*.

  For each target in |targets|, examines sections containing links to other
  targets.  If any such section contains a link of the form build_file:*, it
  is taken as a wildcard link, and is expanded to list each target in
  build_file.  The |data| dict provides access to build file dicts.

  Any target that does not wish to be included by wildcard can provide an
  optional "suppress_wildcard" key in its target dict.  When present and
  true, a wildcard dependency link will not include such targets.

  All dependency names, including the keys to |targets| and the values in each
  dependency list, must be qualified when this function is called.
  """
  for target, target_dict in targets.iteritems():
    toolset = target_dict['toolset']
    target_build_file = gyp.common.BuildFile(target)
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])

      # Loop this way instead of "for dependency in" or "for index in xrange"
      # because the dependencies list will be modified within the loop body.
      index = 0
      while index < len(dependencies):
        (dependency_build_file, dependency_target, dependency_toolset) = \
            gyp.common.ParseQualifiedTarget(dependencies[index])
        if dependency_target != '*' and dependency_toolset != '*':
          # Not a wildcard.  Keep it moving.
          index = index + 1
          continue

        if dependency_build_file == target_build_file:
          # It's an error for a target to depend on all other targets in
          # the same file, because a target cannot depend on itself.
          raise GypError('Found wildcard in ' + dependency_key + ' of ' +
                         target + ' referring to same build file')

        # Take the wildcard out and adjust the index so that the next
        # dependency in the list will be processed the next time through the
        # loop.
        del dependencies[index]
        index = index - 1

        # Loop through the targets in the other build file, adding them to
        # this target's list of dependencies in place of the removed
        # wildcard.
        dependency_target_dicts = data[dependency_build_file]['targets']
        for dependency_target_dict in dependency_target_dicts:
          # suppress_wildcard opts a target out of wildcard expansion.
          if int(dependency_target_dict.get('suppress_wildcard', False)):
            continue
          dependency_target_name = dependency_target_dict['target_name']
          if (dependency_target != '*' and
              dependency_target != dependency_target_name):
            continue
          dependency_target_toolset = dependency_target_dict['toolset']
          if (dependency_toolset != '*' and
              dependency_toolset != dependency_target_toolset):
            continue
          dependency = gyp.common.QualifiedTarget(dependency_build_file,
                                                  dependency_target_name,
                                                  dependency_target_toolset)
          # Insert after the current position so expansions keep their order.
          index = index + 1
          dependencies.insert(index, dependency)

        index = index + 1
def Unify(l):
  """Removes duplicate elements from l, keeping the first element."""
  seen = set()
  unique = []
  for element in l:
    if element not in seen:
      seen.add(element)
      unique.append(element)
  return unique
def RemoveDuplicateDependencies(targets):
  """Makes sure every dependency appears only once in all targets's dependency
  lists."""
  for _, target_dict in targets.iteritems():
    for dependency_key in dependency_sections:
      deps = target_dict.get(dependency_key)
      if deps:
        target_dict[dependency_key] = Unify(deps)
def Filter(l, item):
  """Returns a new list with every occurrence of item removed from l.

  l itself is not modified and the surviving elements keep their order.
  """
  # The previous implementation routed every element through a dict via
  # res.setdefault(e, e), which added nothing for filtering (setdefault(e, e)
  # always yields e for the first occurrence) but silently replaced later
  # equal-but-distinct elements with the first-seen object.  A plain
  # comprehension expresses the documented contract directly.
  return [e for e in l if e != item]
def RemoveSelfDependencies(targets):
  """Remove self dependencies from targets that have the prune_self_dependency
  variable set."""
  for target_name, target_dict in targets.iteritems():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      for t in dependencies:
        # Only a dependency on the target itself is of interest here.
        if t != target_name:
          continue
        # Honor the opt-in flag on the target's variables.
        if targets[t].get('variables', {}).get('prune_self_dependency', 0):
          target_dict[dependency_key] = Filter(dependencies, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
  """Remove dependencies having the 'link_dependency' attribute from the 'none'
  targets."""
  for target_name, target_dict in targets.iteritems():
    # Only 'none' targets are affected.  The target's type is invariant over
    # its dependencies, so test it once here instead of once per dependency.
    if target_dict.get('type', None) != 'none':
      continue
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      for t in dependencies:
        if targets[t].get('variables', {}).get('link_dependency', 0):
          target_dict[dependency_key] = \
              Filter(target_dict[dependency_key], t)
class DependencyGraphNode(object):
  """A node in the dependency graph built over targets (or .gyp files).

  Attributes:
    ref: A reference to an object that this DependencyGraphNode represents.
    dependencies: List of DependencyGraphNodes on which this one depends.
    dependents: List of DependencyGraphNodes that depend on this one.
  """

  class CircularException(GypError):
    """Raised when a cycle is detected in the dependency graph."""
    pass

  def __init__(self, ref):
    self.ref = ref
    self.dependencies = []
    self.dependents = []

  def __repr__(self):
    return '<DependencyGraphNode: %r>' % self.ref

  def FlattenToList(self):
    """Returns a topologically-sorted list of the refs reachable from self."""
    # flat_list is the sorted list of dependencies - actually, the list items
    # are the "ref" attributes of DependencyGraphNodes.  Every target will
    # appear in flat_list after all of its dependencies, and before all of its
    # dependents.
    flat_list = OrderedSet()

    # in_degree_zeros is the list of DependencyGraphNodes that have no
    # dependencies not in flat_list.  Initially, it is a copy of the children
    # of this node, because when the graph was built, nodes with no
    # dependencies were made implicit dependents of the root node.
    in_degree_zeros = set(self.dependents[:])

    while in_degree_zeros:
      # Nodes in in_degree_zeros have no dependencies not in flat_list, so they
      # can be appended to flat_list.  Take these nodes out of in_degree_zeros
      # as work progresses, so that the next node to process from the list can
      # always be accessed at a consistent position.
      node = in_degree_zeros.pop()
      flat_list.add(node.ref)

      # Look at dependents of the node just added to flat_list.  Some of them
      # may now belong in in_degree_zeros.
      for node_dependent in node.dependents:
        is_in_degree_zero = True
        # TODO: We want to check through the
        # node_dependent.dependencies list but if it's long and we
        # always start at the beginning, then we get O(n^2) behaviour.
        for node_dependent_dependency in node_dependent.dependencies:
          if node_dependent_dependency.ref not in flat_list:
            # The dependent one or more dependencies not in flat_list.  There
            # will be more chances to add it to flat_list when examining
            # it again as a dependent of those other dependencies, provided
            # that there are no cycles.
            is_in_degree_zero = False
            break

        if is_in_degree_zero:
          # All of the dependent's dependencies are already in flat_list.  Add
          # it to in_degree_zeros where it will be processed in a future
          # iteration of the outer loop.
          in_degree_zeros.add(node_dependent)

    return list(flat_list)

  def FindCycles(self):
    """
    Returns a list of cycles in the graph, where each cycle is its own list.
    """
    results = []
    visited = set()

    def Visit(node, path):
      # Walk dependents depth-first; a child already on the current path
      # closes a cycle.
      for child in node.dependents:
        if child in path:
          results.append([child] + path[:path.index(child) + 1])
        elif child not in visited:
          visited.add(child)
          Visit(child, [child] + path)

    visited.add(self)
    Visit(self, [self])

    return results

  def DirectDependencies(self, dependencies=None):
    """Returns a list of just direct dependencies."""
    if dependencies is None:
      dependencies = []

    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref is not None and dependency.ref not in dependencies:
        dependencies.append(dependency.ref)

    return dependencies

  def _AddImportedDependencies(self, targets, dependencies=None):
    """Given a list of direct dependencies, adds indirect dependencies that
    other dependencies have declared to export their settings.

    This method does not operate on self.  Rather, it operates on the list
    of dependencies in the |dependencies| argument.  For each dependency in
    that list, if any declares that it exports the settings of one of its
    own dependencies, those dependencies whose settings are "passed through"
    are added to the list.  As new items are added to the list, they too will
    be processed, so it is possible to import settings through multiple levels
    of dependencies.

    This method is not terribly useful on its own, it depends on being
    "primed" with a list of direct dependencies such as one provided by
    DirectDependencies.  DirectAndImportedDependencies is intended to be the
    public entry point.
    """
    if dependencies is None:
      dependencies = []

    index = 0
    while index < len(dependencies):
      dependency = dependencies[index]
      dependency_dict = targets[dependency]
      # Add any dependencies whose settings should be imported to the list
      # if not already present.  Newly-added items will be checked for
      # their own imports when the list iteration reaches them.
      # Rather than simply appending new items, insert them after the
      # dependency that exported them.  This is done to more closely match
      # the depth-first method used by DeepDependencies.
      add_index = 1
      for imported_dependency in \
          dependency_dict.get('export_dependent_settings', []):
        if imported_dependency not in dependencies:
          dependencies.insert(index + add_index, imported_dependency)
          add_index = add_index + 1
      index = index + 1

    return dependencies

  def DirectAndImportedDependencies(self, targets, dependencies=None):
    """Returns a list of a target's direct dependencies and all indirect
    dependencies that a dependency has advertised settings should be exported
    through the dependency for.
    """
    dependencies = self.DirectDependencies(dependencies)
    return self._AddImportedDependencies(targets, dependencies)

  def DeepDependencies(self, dependencies=None):
    """Returns an OrderedSet of all of a target's dependencies, recursively."""
    if dependencies is None:
      # Using a list to get ordered output and a set to do fast "is it
      # already added" checks.
      dependencies = OrderedSet()

    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref is None:
        continue
      if dependency.ref not in dependencies:
        dependencies.add(dependency.ref)
        dependency.DeepDependencies(dependencies)

    return dependencies

  def _LinkDependenciesInternal(self, targets, include_shared_libraries,
                                dependencies=None, initial=True):
    """Returns an OrderedSet of dependency targets that are linked
    into this target.

    This function has a split personality, depending on the setting of
    |initial|.  Outside callers should always leave |initial| at its default
    setting.

    When adding a target to the list of dependencies, this function will
    recurse into itself with |initial| set to False, to collect dependencies
    that are linked into the linkable target for which the list is being built.

    If |include_shared_libraries| is False, the resulting dependencies will not
    include shared_library targets that are linked into this target.
    """
    if dependencies is None:
      # Using a list to get ordered output and a set to do fast "is it
      # already added" checks.
      dependencies = OrderedSet()

    # Check for None, corresponding to the root node.
    if self.ref is None:
      return dependencies

    # It's kind of sucky that |targets| has to be passed into this function,
    # but that's presently the easiest way to access the target dicts so that
    # this function can find target types.

    if 'target_name' not in targets[self.ref]:
      raise GypError("Missing 'target_name' field in target.")

    if 'type' not in targets[self.ref]:
      raise GypError("Missing 'type' field in target %s" %
                     targets[self.ref]['target_name'])

    target_type = targets[self.ref]['type']

    is_linkable = target_type in linkable_types

    if initial and not is_linkable:
      # If this is the first target being examined and it's not linkable,
      # return an empty list of link dependencies, because the link
      # dependencies are intended to apply to the target itself (initial is
      # True) and this target won't be linked.
      return dependencies

    # Don't traverse 'none' targets if explicitly excluded.
    if (target_type == 'none' and
        not targets[self.ref].get('dependencies_traverse', True)):
      dependencies.add(self.ref)
      return dependencies

    # Executables and loadable modules are already fully and finally linked.
    # Nothing else can be a link dependency of them, there can only be
    # dependencies in the sense that a dependent target might run an
    # executable or load the loadable_module.
    if not initial and target_type in ('executable', 'loadable_module'):
      return dependencies

    # Shared libraries are already fully linked.  They should only be included
    # in |dependencies| when adjusting static library dependencies (in order to
    # link against the shared_library's import lib), but should not be included
    # in |dependencies| when propagating link_settings.
    # The |include_shared_libraries| flag controls which of these two cases we
    # are handling.
    if (not initial and target_type == 'shared_library' and
        not include_shared_libraries):
      return dependencies

    # The target is linkable, add it to the list of link dependencies.
    if self.ref not in dependencies:
      dependencies.add(self.ref)
      if initial or not is_linkable:
        # If this is a subsequent target and it's linkable, don't look any
        # further for linkable dependencies, as they'll already be linked into
        # this target linkable.  Always look at dependencies of the initial
        # target, and always look at dependencies of non-linkables.
        for dependency in self.dependencies:
          dependency._LinkDependenciesInternal(targets,
                                               include_shared_libraries,
                                               dependencies, False)

    return dependencies

  def DependenciesForLinkSettings(self, targets):
    """
    Returns a list of dependency targets whose link_settings should be merged
    into this target.
    """

    # TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
    # link_settings are propagated.  So for now, we will allow it, unless the
    # 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
    # False.  Once chrome is fixed, we can remove this flag.
    include_shared_libraries = \
        targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
    return self._LinkDependenciesInternal(targets, include_shared_libraries)

  def DependenciesToLinkAgainst(self, targets):
    """
    Returns a list of dependency targets that are linked into this target.
    """
    return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
  """Builds the target dependency graph and a topologically-sorted flat list.

  Returns a two-item list: a dict mapping each target name to its
  DependencyGraphNode, and a flat list of target names ordered so that every
  target appears after all of its dependencies.  Raises
  DependencyGraphNode.CircularException if the graph contains a cycle.
  """
  # Create a DependencyGraphNode for each target.  Put it into a dict for easy
  # access.  (The previous version also computed an unused
  # gyp.common.BuildFile(target) here; that dead local has been removed.)
  dependency_nodes = {}
  for target in targets.iterkeys():
    if target not in dependency_nodes:
      dependency_nodes[target] = DependencyGraphNode(target)

  # Set up the dependency links.  Targets that have no dependencies are treated
  # as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for target, spec in targets.iteritems():
    target_node = dependency_nodes[target]
    dependencies = spec.get('dependencies')
    if not dependencies:
      target_node.dependencies = [root_node]
      root_node.dependents.append(target_node)
    else:
      for dependency in dependencies:
        dependency_node = dependency_nodes.get(dependency)
        if not dependency_node:
          raise GypError("Dependency '%s' not found while "
                         "trying to load target %s" % (dependency, target))
        target_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(target_node)

  flat_list = root_node.FlattenToList()

  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(targets):
    if not root_node.dependents:
      # If all targets have dependencies, add the first target as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      target = targets.keys()[0]
      target_node = dependency_nodes[target]
      target_node.dependencies.append(root_node)
      root_node.dependents.append(target_node)

    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in dependency graph detected:\n' + '\n'.join(cycles))

  return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
  """Checks that the .gyp files the targets come from do not form a cycle.

  Raises DependencyGraphNode.CircularException describing every detected
  cycle, or returns None when the file-level graph is acyclic.
  """
  # Create a DependencyGraphNode for each gyp file containing a target.  Put
  # it into a dict for easy access.
  dependency_nodes = {}
  for target in targets.iterkeys():
    build_file = gyp.common.BuildFile(target)
    if build_file not in dependency_nodes:
      dependency_nodes[build_file] = DependencyGraphNode(build_file)

  # Set up the dependency links.
  for target, spec in targets.iteritems():
    build_file = gyp.common.BuildFile(target)
    build_file_node = dependency_nodes[build_file]
    target_dependencies = spec.get('dependencies', [])
    for dependency in target_dependencies:
      try:
        dependency_build_file = gyp.common.BuildFile(dependency)
      except GypError as e:
        gyp.common.ExceptionAppend(
            e, 'while computing dependencies of .gyp file %s' % build_file)
        raise
      if dependency_build_file == build_file:
        # A .gyp file is allowed to refer back to itself.
        continue
      dependency_node = dependency_nodes.get(dependency_build_file)
      if not dependency_node:
        raise GypError("Dependency '%s' not found" % dependency_build_file)
      if dependency_node not in build_file_node.dependencies:
        build_file_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(build_file_node)

  # Files that have no dependencies are treated as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for build_file_node in dependency_nodes.itervalues():
    if len(build_file_node.dependencies) == 0:
      build_file_node.dependencies.append(root_node)
      root_node.dependents.append(build_file_node)

  flat_list = root_node.FlattenToList()

  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(dependency_nodes):
    if not root_node.dependents:
      # If all files have dependencies, add the first file as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      file_node = dependency_nodes.values()[0]
      file_node.dependencies.append(root_node)
      root_node.dependents.append(file_node)
    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
  """Merges each target's dependent-settings dicts, chosen by |key|, into the
  targets that depend on them.  |key| must be one of all_dependent_settings,
  direct_dependent_settings, or link_settings."""
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    node = dependency_nodes[target]

    # Pick the dependency set appropriate to the settings key being merged.
    if key == 'all_dependent_settings':
      dependencies = node.DeepDependencies()
    elif key == 'direct_dependent_settings':
      dependencies = node.DirectAndImportedDependencies(targets)
    elif key == 'link_settings':
      dependencies = node.DependenciesForLinkSettings(targets)
    else:
      raise GypError("DoDependentSettings doesn't know how to determine "
                     'dependencies for ' + key)

    # Fold each dependency's settings dict (if it has one) into this target.
    for dependency in dependencies:
      dependency_dict = targets[dependency]
      if key not in dependency_dict:
        continue
      dependency_build_file = gyp.common.BuildFile(dependency)
      MergeDicts(target_dict, dependency_dict[key],
                 build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    sort_dependencies):
  """Recomputes target "dependencies" properties.

  For each static library target, removes "dependencies" entries referring to
  other static libraries, unless the dependency has the "hard_dependency"
  attribute set.  For each linkable target, adds a "dependencies" entry
  referring to all of the target's computed list of link dependencies
  (including static libraries) if no such entry is already present.  When
  |sort_dependencies| is true, each resulting "dependencies" list is reordered
  from dependents to dependencies.  Operates on the target dicts in place.
  """
  for target in flat_list:
    target_dict = targets[target]
    target_type = target_dict['type']

    if target_type == 'static_library':
      if not 'dependencies' in target_dict:
        continue

      # Preserve the pre-adjustment list for generators that need it.
      target_dict['dependencies_original'] = target_dict.get(
          'dependencies', [])[:]

      # A static library should not depend on another static library unless
      # the dependency relationship is "hard," which should only be done when
      # a dependent relies on some side effect other than just the build
      # product, like a rule or action output.  Further, if a target has a
      # non-hard dependency, but that dependency exports a hard dependency,
      # the non-hard dependency can safely be removed, but the exported hard
      # dependency must be added to the target to keep the same dependency
      # ordering.
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
      index = 0
      while index < len(dependencies):
        dependency = dependencies[index]
        dependency_dict = targets[dependency]

        # Remove every non-hard static library dependency and remove every
        # non-static library dependency that isn't a direct dependency.
        if (dependency_dict['type'] == 'static_library' and \
            not dependency_dict.get('hard_dependency', False)) or \
           (dependency_dict['type'] != 'static_library' and \
            not dependency in target_dict['dependencies']):
          # Take the dependency out of the list, and don't increment index
          # because the next dependency to analyze will shift into the index
          # formerly occupied by the one being removed.
          del dependencies[index]
        else:
          index = index + 1

      # Update the dependencies. If the dependencies list is empty, it's not
      # needed, so unhook it.
      if len(dependencies) > 0:
        target_dict['dependencies'] = dependencies
      else:
        del target_dict['dependencies']

    elif target_type in linkable_types:
      # Get a list of dependency targets that should be linked into this
      # target.  Add them to the dependencies list if they're not already
      # present.

      link_dependencies = \
          dependency_nodes[target].DependenciesToLinkAgainst(targets)
      for dependency in link_dependencies:
        # A target never links against itself.
        if dependency == target:
          continue
        if not 'dependencies' in target_dict:
          target_dict['dependencies'] = []
        if not dependency in target_dict['dependencies']:
          target_dict['dependencies'].append(dependency)
    # Sort the dependencies list in the order from dependents to dependencies.
    # e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
    # Note: flat_list is already sorted in the order from dependencies to
    # dependents.
    if sort_dependencies and 'dependencies' in target_dict:
      target_dict['dependencies'] = [dep for dep in reversed(flat_list)
                                     if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
# Matches any item whose first character (or first character after an opening
# quote) is one of the special markers - / $ < > ^ that exempt the item from
# path adjustment; see the character table in MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
  """Rebases the path |item|, which is relative to the build file |fro_file|
  it came from, so that it is relative to the build file |to_file| it is being
  merged into.  Returns |item| unmodified when no adjustment is needed.
  """
  # If item is a relative path, it's relative to the build file dict that it's
  # coming from.  Fix it up to make it relative to the build file dict that
  # it's going into.
  # Exception: any |item| that begins with these special characters is
  # returned without modification.
  #   /   Used when a path is already absolute (shortcut optimization;
  #       such paths would be returned as absolute anyway)
  #   $   Used for build environment variables
  #   -   Used for some build environment flags (such as -lapr-1 in a
  #       "libraries" section)
  #   <   Used for our own variable and command expansions (see ExpandVariables)
  #   >   Used for our own variable and command expansions (see ExpandVariables)
  #   ^   Used for our own variable and command expansions (see ExpandVariables)
  #   "/' Used when a value is quoted.  If these are present, then we
  #       check the second character instead.
  if to_file == fro_file or exception_re.match(item):
    return item
  else:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    ret = os.path.normpath(os.path.join(
        gyp.common.RelativePath(os.path.dirname(fro_file),
                                os.path.dirname(to_file)),
        item)).replace('\\', '/')
    # normpath strips a trailing slash, which may be meaningful (directory
    # values); restore it.  endswith() is safe on an empty item, where the
    # previous item[-1] test raised IndexError.
    if item.endswith('/'):
      ret += '/'
    return ret
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  """Merges the list |fro| into the list |to|, in place.

  |to_file| and |fro_file| are the build files the lists came from; when
  |is_paths| is true, string items from |fro| are rebased with
  MakePathRelative so they stay correct in |to_file|'s context.  When
  |append| is true, new items go at the end of |to|; otherwise they are
  prepended, preserving |fro|'s relative order.  "Singleton" items (strings
  not beginning with '-', plus ints) appear at most once in the result;
  dict and list items are deep-copied via MergeDicts/MergeLists.
  """
  # Python documentation recommends objects which do not support hash
  # set this value to None. Python library objects follow this rule.
  is_hashable = lambda val: val.__hash__

  # If x is hashable, returns whether x is in s. Else returns whether x is in l.
  def is_in_set_or_list(x, s, l):
    if is_hashable(x):
      return x in s
    return x in l

  prepend_index = 0

  # Make membership testing of hashables in |to| (in particular, strings)
  # faster.
  hashable_to_set = set(x for x in to if is_hashable(x))
  for item in fro:
    singleton = False
    if type(item) in (str, int):
      # The cheap and easy case.
      if is_paths:
        to_item = MakePathRelative(to_file, fro_file, item)
      else:
        to_item = item

      if not (type(item) is str and item.startswith('-')):
        # Any string that doesn't begin with a "-" is a singleton - it can
        # only appear once in a list, to be enforced by the list merge append
        # or prepend.
        singleton = True
    elif type(item) is dict:
      # Make a copy of the dictionary, continuing to look for paths to fix.
      # The other intelligent aspects of merge processing won't apply because
      # item is being merged into an empty dict.
      to_item = {}
      MergeDicts(to_item, item, to_file, fro_file)
    elif type(item) is list:
      # Recurse, making a copy of the list.  If the list contains any
      # descendant dicts, path fixing will occur.  Note that here, custom
      # values for is_paths and append are dropped; those are only to be
      # applied to |to| and |fro|, not sublists of |fro|.  append shouldn't
      # matter anyway because the new |to_item| list is empty.
      to_item = []
      MergeLists(to_item, item, to_file, fro_file)
    else:
      raise TypeError(
          'Attempt to merge list item of unsupported type ' + \
          item.__class__.__name__)

    if append:
      # If appending a singleton that's already in the list, don't append.
      # This ensures that the earliest occurrence of the item will stay put.
      if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
        to.append(to_item)
        if is_hashable(to_item):
          hashable_to_set.add(to_item)
    else:
      # If prepending a singleton that's already in the list, remove the
      # existing instance and proceed with the prepend.  This ensures that the
      # item appears at the earliest possible position in the list.
      while singleton and to_item in to:
        to.remove(to_item)

      # Don't just insert everything at index 0.  That would prepend the new
      # items to the list in reverse order, which would be an unwelcome
      # surprise.
      to.insert(prepend_index, to_item)
      if is_hashable(to_item):
        hashable_to_set.add(to_item)
      prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
  """Merges the dict |fro| into the dict |to|, in place, with copy semantics.

  |to_file| and |fro_file| are the build files the dicts came from and are
  used to rebase relative paths in path-typed sections.  Scalar values
  overwrite; dicts merge recursively; lists merge under a policy chosen by
  the from-key's trailing character (=, +, ?, or none - see below).  Raises
  TypeError on incompatible value types and GypError on conflicting list
  policies.
  """
  # I wanted to name the parameter "from" but it's a Python keyword...
  for k, v in fro.iteritems():
    # It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
    # copy semantics.  Something else may want to merge from the |fro| dict
    # later, and having the same dict ref pointed to twice in the tree isn't
    # what anyone wants considering that the dicts may subsequently be
    # modified.
    if k in to:
      bad_merge = False
      if type(v) in (str, int):
        if type(to[k]) not in (str, int):
          bad_merge = True
      elif type(v) is not type(to[k]):
        bad_merge = True

      if bad_merge:
        raise TypeError(
            'Attempt to merge dict value of type ' + v.__class__.__name__ + \
            ' into incompatible type ' + to[k].__class__.__name__ + \
            ' for key ' + k)
    if type(v) in (str, int):
      # Overwrite the existing value, if any.  Cheap and easy.
      is_path = IsPathSection(k)
      if is_path:
        to[k] = MakePathRelative(to_file, fro_file, v)
      else:
        to[k] = v
    elif type(v) is dict:
      # Recurse, guaranteeing copies will be made of objects that require it.
      if not k in to:
        to[k] = {}
      MergeDicts(to[k], v, to_file, fro_file)
    elif type(v) is list:
      # Lists in dicts can be merged with different policies, depending on
      # how the key in the "from" dict (k, the from-key) is written.
      #
      # If the from-key has          ...the to-list will have this action
      # this character appended:...  applied when receiving the from-list:
      #                           =  replace
      #                           +  prepend
      #                           ?  set, only if to-list does not yet exist
      #                      (none)  append
      #
      # This logic is list-specific, but since it relies on the associated
      # dict key, it's checked in this dict-oriented function.
      ext = k[-1]
      append = True
      if ext == '=':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '?']
        to[list_base] = []
      elif ext == '+':
        list_base = k[:-1]
        lists_incompatible = [list_base + '=', list_base + '?']
        append = False
      elif ext == '?':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '=', list_base + '+']
      else:
        list_base = k
        lists_incompatible = [list_base + '=', list_base + '?']

      # Some combinations of merge policies appearing together are meaningless.
      # It's stupid to replace and append simultaneously, for example.  Append
      # and prepend are the only policies that can coexist.
      for list_incompatible in lists_incompatible:
        if list_incompatible in fro:
          raise GypError('Incompatible list policies ' + k + ' and ' +
                         list_incompatible)

      if list_base in to:
        if ext == '?':
          # If the key ends in "?", the list will only be merged if it doesn't
          # already exist.
          continue
        elif type(to[list_base]) is not list:
          # This may not have been checked above if merging in a list with an
          # extension character.
          raise TypeError(
              'Attempt to merge dict value of type ' + v.__class__.__name__ + \
              ' into incompatible type ' + to[list_base].__class__.__name__ + \
              ' for key ' + list_base + '(' + k + ')')
      else:
        to[list_base] = []

      # Call MergeLists, which will make copies of objects that require it.
      # MergeLists can recurse back into MergeDicts, although this will be
      # to make copies of dicts (with paths fixed), there will be no
      # subsequent dict "merging" once entering a list because lists are
      # always replaced, appended to, or prepended to.
      is_paths = IsPathSection(list_base)
      MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
    else:
      raise TypeError(
          'Attempt to merge dict value of unsupported type ' + \
          v.__class__.__name__ + ' for key ' + k)
def MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, visited):
  """Recursively merges |configuration| from |target_dict| - parents first,
  via its inherit_from chain - into |new_configuration_dict|."""
  # Skip configurations already merged along this path; this also guards
  # against inheritance cycles.
  if configuration in visited:
    return

  this_config = target_dict['configurations'][configuration]

  # Merge parents first so that this configuration's own settings win.
  for parent in this_config.get('inherit_from', []):
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, parent, visited + [configuration])

  # Layer this configuration on top of whatever the parents produced.
  MergeDicts(new_configuration_dict, this_config, build_file, build_file)

  # 'abstract' only controls inheritance; don't let it leak into the result.
  new_configuration_dict.pop('abstract', None)
def SetUpConfigurations(target, target_dict):
  """Flattens |target|'s configurations in place.

  Each concrete configuration inherits the target-level settings and its
  inherit_from parents, abstract configurations are dropped, and settings
  that moved into the configurations are removed from the target-level dict.
  Raises GypError if a configuration contains a key from
  invalid_configuration_keys.
  """
  # key_suffixes is a list of key suffixes that might appear on key names.
  # These suffixes are handled in conditional evaluations (for =, +, and ?)
  # and rules/exclude processing (for ! and /).  Keys with these suffixes
  # should be treated the same as keys without.
  key_suffixes = ['=', '+', '?', '!', '/']

  build_file = gyp.common.BuildFile(target)

  # Provide a single configuration by default if none exists.
  # TODO(mark): Signal an error if default_configurations exists but
  # configurations does not.
  if not 'configurations' in target_dict:
    target_dict['configurations'] = {'Default': {}}
  if not 'default_configuration' in target_dict:
    # Pick the alphabetically-first concrete (non-abstract) configuration.
    concrete = [i for (i, config) in target_dict['configurations'].iteritems()
                if not config.get('abstract')]
    target_dict['default_configuration'] = sorted(concrete)[0]

  merged_configurations = {}
  configs = target_dict['configurations']
  for (configuration, old_configuration_dict) in configs.iteritems():
    # Skip abstract configurations (saves work only).
    if old_configuration_dict.get('abstract'):
      continue
    # Configurations inherit (most) settings from the enclosing target scope.
    # Get the inheritance relationship right by making a copy of the target
    # dict.
    new_configuration_dict = {}
    for (key, target_val) in target_dict.iteritems():
      key_ext = key[-1:]
      if key_ext in key_suffixes:
        key_base = key[:-1]
      else:
        key_base = key
      if not key_base in non_configuration_keys:
        new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)

    # Merge in configuration (with all its parents first).
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, [])

    merged_configurations[configuration] = new_configuration_dict

  # Put the new configurations back into the target dict as a configuration.
  for configuration in merged_configurations.keys():
    target_dict['configurations'][configuration] = (
        merged_configurations[configuration])

  # Now drop all the abstract ones.
  for configuration in target_dict['configurations'].keys():
    old_configuration_dict = target_dict['configurations'][configuration]
    if old_configuration_dict.get('abstract'):
      del target_dict['configurations'][configuration]

  # Now that all of the target's configurations have been built, go through
  # the target dict's keys and remove everything that's been moved into a
  # "configurations" section.
  delete_keys = []
  for key in target_dict:
    key_ext = key[-1:]
    if key_ext in key_suffixes:
      key_base = key[:-1]
    else:
      key_base = key
    if not key_base in non_configuration_keys:
      delete_keys.append(key)
  for key in delete_keys:
    del target_dict[key]

  # Check the configurations to see if they contain invalid keys.
  for configuration in target_dict['configurations'].keys():
    configuration_dict = target_dict['configurations'][configuration]
    for key in configuration_dict.keys():
      if key in invalid_configuration_keys:
        raise GypError('%s not allowed in the %s configuration, found in '
                       'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
  """Process regular expression and exclusion-based filters on lists.

  An exclusion list is in a dict key named with a trailing "!", like
  "sources!".  Every item in such a list is removed from the associated
  main list, which in this example, would be "sources".  Removed items are
  placed into a "sources_excluded" list in the dict.

  Regular expression (regex) filters are contained in dict keys named with a
  trailing "/", such as "sources/" to operate on the "sources" list.  Regex
  filters in a dict take the form:
    'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
                  ['include', '_mac\\.cc$'] ],
  The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
  _win.cc.  The second filter then includes all files ending in _mac.cc that
  are now or were once in the "sources" list.  Items matching an "exclude"
  filter are subject to the same processing as would occur if they were listed
  by name in an exclusion list (ending in "!").  Items matching an "include"
  filter are brought back into the main list if previously excluded by an
  exclusion list or exclusion regex filter.  Subsequent matching "exclude"
  patterns can still cause items to be excluded after matching an "include".

  |name| is used only in error messages.  the_dict is modified in place: the
  "!" and "/" keys are consumed (deleted), and a "<list>_excluded" key is
  added whenever anything was excluded.
  """

  # Look through the dictionary for any lists whose keys end in "!" or "/".
  # These are lists that will be treated as exclude lists and regular
  # expression-based exclude/include lists.  Collect the lists that are
  # needed first, looking for the lists that they operate on, and assemble
  # then into |lists|.  This is done in a separate loop up front, because
  # the _included and _excluded keys need to be added to the_dict, and that
  # can't be done while iterating through it.
  lists = []
  del_lists = []
  # Safe to use iteritems() here: this loop only queues deletions (in
  # |del_lists|) rather than mutating the_dict while iterating.
  for key, value in the_dict.iteritems():
    operation = key[-1]
    if operation != '!' and operation != '/':
      continue

    if type(value) is not list:
      raise ValueError(name + ' key ' + key + ' must be list, not ' + \
                       value.__class__.__name__)

    list_key = key[:-1]
    if list_key not in the_dict:
      # This happens when there's a list like "sources!" but no corresponding
      # "sources" list.  Since there's nothing for it to operate on, queue up
      # the "sources!" list for deletion now.
      del_lists.append(key)
      continue

    if type(the_dict[list_key]) is not list:
      value = the_dict[list_key]
      raise ValueError(name + ' key ' + list_key + \
                       ' must be list, not ' + \
                       value.__class__.__name__ + ' when applying ' + \
                       {'!': 'exclusion', '/': 'regex'}[operation])

    if not list_key in lists:
      lists.append(list_key)

  # Delete the lists that are known to be unneeded at this point.
  for del_list in del_lists:
    del the_dict[del_list]

  for list_key in lists:
    the_list = the_dict[list_key]

    # Initialize the list_actions list, which is parallel to the_list.  Each
    # item in list_actions identifies whether the corresponding item in
    # the_list should be excluded, unconditionally preserved (included), or
    # whether no exclusion or inclusion has been applied.  Items for which
    # no exclusion or inclusion has been applied (yet) have value -1, items
    # excluded have value 0, and items included have value 1.  Includes and
    # excludes override previous actions.  All items in list_actions are
    # initialized to -1 because no excludes or includes have been processed
    # yet.
    list_actions = list((-1,) * len(the_list))

    exclude_key = list_key + '!'
    if exclude_key in the_dict:
      # Exclusion-list matching is by exact equality, unlike the regex
      # filters below which match by pattern search.
      for exclude_item in the_dict[exclude_key]:
        for index in xrange(0, len(the_list)):
          if exclude_item == the_list[index]:
            # This item matches the exclude_item, so set its action to 0
            # (exclude).
            list_actions[index] = 0

      # The "whatever!" list is no longer needed, dump it.
      del the_dict[exclude_key]

    regex_key = list_key + '/'
    if regex_key in the_dict:
      # Filters are applied in order, so a later 'exclude' can re-exclude an
      # item that an earlier 'include' brought back (and vice versa).
      for regex_item in the_dict[regex_key]:
        [action, pattern] = regex_item
        pattern_re = re.compile(pattern)

        if action == 'exclude':
          # This item matches an exclude regex, so set its value to 0 (exclude).
          action_value = 0
        elif action == 'include':
          # This item matches an include regex, so set its value to 1 (include).
          action_value = 1
        else:
          # This is an action that doesn't make any sense.
          raise ValueError('Unrecognized action ' + action + ' in ' + name + \
                           ' key ' + regex_key)

        for index in xrange(0, len(the_list)):
          list_item = the_list[index]
          if list_actions[index] == action_value:
            # Even if the regex matches, nothing will change so continue (regex
            # searches are expensive).
            continue
          if pattern_re.search(list_item):
            # Regular expression match.
            list_actions[index] = action_value

      # The "whatever/" list is no longer needed, dump it.
      del the_dict[regex_key]

    # Add excluded items to the excluded list.
    #
    # Note that exclude_key ("sources!") is different from excluded_key
    # ("sources_excluded").  The exclude_key list is input and it was already
    # processed and deleted; the excluded_key list is output and it's about
    # to be created.
    excluded_key = list_key + '_excluded'
    if excluded_key in the_dict:
      raise GypError(name + ' key ' + excluded_key +
                     ' must not be present prior '
                     ' to applying exclusion/regex filters for ' + list_key)

    excluded_list = []

    # Go backwards through the list_actions list so that as items are deleted,
    # the indices of items that haven't been seen yet don't shift.  That means
    # that things need to be prepended to excluded_list to maintain them in the
    # same order that they existed in the_list.
    for index in xrange(len(list_actions) - 1, -1, -1):
      if list_actions[index] == 0:
        # Dump anything with action 0 (exclude).  Keep anything with action 1
        # (include) or -1 (no include or exclude seen for the item).
        excluded_list.insert(0, the_list[index])
        del the_list[index]

    # If anything was excluded, put the excluded list into the_dict at
    # excluded_key.
    if len(excluded_list) > 0:
      the_dict[excluded_key] = excluded_list

  # Now recurse into subdicts and lists that may contain dicts.  Only the
  # values are mutated here, so iterating with iteritems() is safe.
  for key, value in the_dict.iteritems():
    if type(value) is dict:
      ProcessListFiltersInDict(key, value)
    elif type(value) is list:
      ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
  """Recursively applies list filters to every dict found inside the_list.

  Scalar entries are left untouched; dict entries are handed to
  ProcessListFiltersInDict and nested lists are walked recursively.
  """
  for entry in the_list:
    entry_type = type(entry)
    if entry_type is dict:
      ProcessListFiltersInDict(name, entry)
    elif entry_type is list:
      ProcessListFiltersInList(name, entry)
def ValidateTargetType(target, target_dict):
  """Ensures the 'type' field on the target is one of the known types.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec.

  Raises a GypError if the type is unknown, or if
  'standalone_static_library' is set on a non-static_library target.
  """
  VALID_TARGET_TYPES = ('executable', 'loadable_module',
                        'static_library', 'shared_library',
                        'none')
  declared_type = target_dict.get('type', None)
  if declared_type not in VALID_TARGET_TYPES:
    raise GypError("Target %s has an invalid target type '%s'. "
                   "Must be one of %s." %
                   (target, declared_type, '/'.join(VALID_TARGET_TYPES)))
  # standalone_static_library only makes sense for static libraries.
  wants_standalone = target_dict.get('standalone_static_library', 0)
  if wants_standalone and declared_type != 'static_library':
    raise GypError('Target %s has type %s but standalone_static_library flag is'
                   ' only valid for static_library type.' % (target,
                                                             declared_type))
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
  """Ensures that the rules sections in target_dict are valid and consistent,
  and determines which sources they apply to.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec containing "rules" and "sources" lists.
    extra_sources_for_rules: a list of keys to scan for rule matches in
        addition to 'sources'.
  """
  # Map rule names and (dot-stripped) extensions back to their rule dicts so
  # that duplicates can be reported.
  seen_names = {}
  seen_extensions = {}
  for rule in target_dict.get('rules', []):
    # Make sure that there's no conflict among rule names and extensions.
    rule_name = rule['rule_name']
    if rule_name in seen_names:
      raise GypError('rule %s exists in duplicate, target %s' %
                     (rule_name, target))
    seen_names[rule_name] = rule

    extension = rule['extension']
    if extension.startswith('.'):
      extension = extension[1:]
    if extension in seen_extensions:
      raise GypError(('extension %s associated with multiple rules, ' +
                      'target %s rules %s and %s') %
                     (extension, target,
                      seen_extensions[extension]['rule_name'],
                      rule_name))
    seen_extensions[extension] = rule

    # Make sure rule_sources isn't already there. It's going to be
    # created below if needed.
    if 'rule_sources' in rule:
      raise GypError(
          'rule_sources must not exist in input, target %s rule %s' %
          (target, rule_name))

    # Collect every source (from 'sources' plus any generator-specified
    # extra keys) whose extension matches this rule's.
    matching_sources = []
    for source_key in ['sources'] + list(extra_sources_for_rules):
      for source in target_dict.get(source_key, []):
        source_extension = os.path.splitext(source)[1]
        if source_extension.startswith('.'):
          source_extension = source_extension[1:]
        if source_extension == extension:
          matching_sources.append(source)

    if matching_sources:
      rule['rule_sources'] = matching_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
  """Checks that an optional 'run_as' section is well-formed.

  A 'run_as' value must be a dict containing a list-valued 'action', plus
  optional 'working_directory' (string) and 'environment' (dict) entries.
  Targets without a 'run_as' section are accepted as-is.
  """
  target_name = target_dict.get('target_name')
  run_as = target_dict.get('run_as')
  if not run_as:
    return

  def _complain(message):
    # Every run_as complaint names the same target and build file.
    raise GypError(message % (target_name, build_file))

  if type(run_as) is not dict:
    _complain("The 'run_as' in target %s from file %s should be a "
              "dictionary.")
  action = run_as.get('action')
  if not action:
    _complain("The 'run_as' in target %s from file %s must have an "
              "'action' section.")
  if type(action) is not list:
    _complain("The 'action' for 'run_as' in target %s from file %s "
              "must be a list.")
  working_directory = run_as.get('working_directory')
  if working_directory and type(working_directory) is not str:
    _complain("The 'working_directory' for 'run_as' in target %s "
              "in file %s should be a string.")
  environment = run_as.get('environment')
  if environment and type(environment) is not dict:
    _complain("The 'environment' for 'run_as' in target %s "
              "in file %s should be a dictionary.")
def ValidateActionsInTarget(target, target_dict, build_file):
  '''Validates the inputs to the actions in a target.

  Each action must carry an 'action_name', an 'inputs' list (possibly
  empty), and, when an 'action' command is present, a non-empty first
  command element.
  '''
  target_name = target_dict.get('target_name')
  for action in target_dict.get('actions', []):
    if not action.get('action_name'):
      raise GypError("Anonymous action in target %s.  "
                     "An action must have an 'action_name' field." %
                     target_name)
    if action.get('inputs', None) is None:
      raise GypError('Action in target %s has no inputs.' % target_name)
    command = action.get('action')
    if command and not command[0]:
      raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
  """Given dict the_dict, recursively converts all integers into strings.

  Both values and keys are converted; nested dicts and lists are handled
  recursively.  The conversion happens in place.
  """
  # Snapshot the items so that string keys reinserted below are never
  # revisited during the walk.
  for key, value in list(the_dict.items()):
    value_type = type(value)
    if value_type is int:
      value = str(value)
      the_dict[key] = value
    elif value_type is dict:
      TurnIntIntoStrInDict(value)
    elif value_type is list:
      TurnIntIntoStrInList(value)

    # Re-key integer keys as their string form.
    if type(key) is int:
      del the_dict[key]
      the_dict[str(key)] = value
def TurnIntIntoStrInList(the_list):
  """Given list the_list, recursively converts all integers into strings.

  Nested dicts and lists are handled recursively; the conversion happens
  in place.
  """
  # enumerate() replaces the original xrange()-based index loop: behavior is
  # identical under Python 2, but it no longer relies on the Python-2-only
  # xrange builtin and is the idiomatic way to walk indexed items.
  for index, item in enumerate(the_list):
    if type(item) is int:
      the_list[index] = str(item)
    elif type(item) is dict:
      TurnIntIntoStrInDict(item)
    elif type(item) is list:
      TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
                         data):
  """Return only the targets that are deep dependencies of |root_targets|."""
  # Resolve each requested root into its fully qualified form(s).
  qualified_root_targets = []
  for root in root_targets:
    root = root.strip()
    matches = gyp.common.FindQualifiedTargets(root, flat_list)
    if not matches:
      raise GypError("Could not find target %s" % root)
    qualified_root_targets.extend(matches)

  # Keep each root plus everything it transitively depends on.
  wanted_targets = {}
  for root in qualified_root_targets:
    wanted_targets[root] = targets[root]
    for dependency in dependency_nodes[root].DeepDependencies():
      wanted_targets[dependency] = targets[dependency]
  wanted_flat_list = [t for t in flat_list if t in wanted_targets]

  # Prune unwanted targets from each build_file's data dict.
  for build_file in data['target_build_files']:
    if 'targets' not in data[build_file]:
      continue
    data[build_file]['targets'] = [
        t for t in data[build_file]['targets']
        if gyp.common.QualifiedTarget(
            build_file, t['target_name'], t['toolset']) in wanted_targets]

  return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
  """Verify that no two targets in the same directory share the same name.

  Arguments:
    targets: A list of targets in the form 'path/to/file.gyp:target_name'.

  Raises a GypError naming both .gyp files on the first collision found.
  """
  # Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
  used = {}
  for target in targets:
    # Separate out 'path/to/file.gyp, 'target_name' from
    # 'path/to/file.gyp:target_name'.
    path, name = target.rsplit(':', 1)
    # Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
    # Named gyp_file (not 'gyp') so the local does not shadow the
    # module-level 'gyp' package import.
    subdir, gyp_file = os.path.split(path)
    # Use '.' for the current directory '', so that the error messages make
    # more sense.
    if not subdir:
      subdir = '.'
    # Prepare a key like 'path/to:target_name'.
    key = subdir + ':' + name
    if key in used:
      # Complain if this target is already used.
      raise GypError('Duplicate target name "%s" in directory "%s" used both '
                     'in "%s" and "%s".' % (name, subdir, gyp_file, used[key]))
    used[key] = gyp_file
def SetGeneratorGlobals(generator_input_info):
  """Seeds module-level settings from the generator's input info.

  Sets up path_sections and non_configuration_keys with the default data
  plus the generator-specific additions, and records toolset/filelist
  capabilities.
  """
  global path_sections, non_configuration_keys
  global multiple_toolsets, generator_filelist_paths

  path_sections = set(base_path_sections)
  path_sections.update(generator_input_info['path_sections'])

  non_configuration_keys = base_non_configuration_keys[:]
  non_configuration_keys.extend(generator_input_info['non_configuration_keys'])

  multiple_toolsets = generator_input_info[
      'generator_supports_multiple_toolsets']

  generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
         circular_check, parallel, root_targets):
  """Loads, merges, and fully post-processes a set of .gyp build files.

  Arguments:
    build_files: list of paths to the root .gyp files to load.
    variables: dict of variables available during expansion phases.
    includes: .gypi files to merge into every loaded build file.
    depth: value used when resolving depth-relative paths.
    generator_input_info: dict of generator capabilities and settings;
        consumed by SetGeneratorGlobals and consulted throughout.
    check: whether to syntax-check build files as they are loaded.
    circular_check: whether to reject cross-file circular dependencies.
    parallel: whether to load build files with multiple processes.
    root_targets: if non-empty, prune everything that is not a deep
        dependency of these targets.

  Returns:
    [flat_list, targets, data]: flat_list is the list of qualified target
    names produced by BuildDependencyList (possibly pruned), targets maps
    qualified names to fully processed target dicts, and data maps build
    file paths to their processed contents.
  """
  SetGeneratorGlobals(generator_input_info)
  # A generator can have other lists (in addition to sources) be processed
  # for rules.
  extra_sources_for_rules = generator_input_info['extra_sources_for_rules']

  # Load build files.  This loads every target-containing build file into
  # the |data| dictionary such that the keys to |data| are build file names,
  # and the values are the entire build file contents after "early" or "pre"
  # processing has been done and includes have been resolved.
  # NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
  # well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
  # track of the keys corresponding to "target" files.
  data = {'target_build_files': set()}
  # Normalize paths everywhere.  This is important because paths will be
  # used as keys to the data dict and for references between input files.
  build_files = set(map(os.path.normpath, build_files))
  if parallel:
    LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
                                 check, generator_input_info)
  else:
    aux_data = {}
    for build_file in build_files:
      try:
        LoadTargetBuildFile(build_file, data, aux_data,
                            variables, includes, depth, check, True)
      except Exception, e:
        # Annotate the exception with the file being loaded before
        # propagating it.
        gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
        raise

  # Build a dict to access each target's subdict by qualified name.
  targets = BuildTargetsDict(data)

  # Fully qualify all dependency links.
  QualifyDependencies(targets)

  # Remove self-dependencies from targets that have 'prune_self_dependencies'
  # set to 1.
  RemoveSelfDependencies(targets)

  # Expand dependencies specified as build_file:*.
  ExpandWildcardDependencies(targets, data)

  # Remove all dependencies marked as 'link_dependency' from the targets of
  # type 'none'.
  RemoveLinkDependenciesFromNoneTargets(targets)

  # Apply exclude (!) and regex (/) list filters only for dependency_sections.
  for target_name, target_dict in targets.iteritems():
    # Pull the dependency-related keys (and their "!"/"/" variants) into a
    # temporary dict so that ProcessListFiltersInDict does not touch any
    # other lists yet; full filtering happens later, after configurations
    # have been set up.
    tmp_dict = {}
    for key_base in dependency_sections:
      for op in ('', '!', '/'):
        key = key_base + op
        if key in target_dict:
          tmp_dict[key] = target_dict[key]
          del target_dict[key]
    ProcessListFiltersInDict(target_name, tmp_dict)
    # Write the results back to |target_dict|.
    for key in tmp_dict:
      target_dict[key] = tmp_dict[key]

  # Make sure every dependency appears at most once.
  RemoveDuplicateDependencies(targets)

  if circular_check:
    # Make sure that any targets in a.gyp don't contain dependencies in other
    # .gyp files that further depend on a.gyp.
    VerifyNoGYPFileCircularDependencies(targets)

  [dependency_nodes, flat_list] = BuildDependencyList(targets)

  if root_targets:
    # Remove, from |targets| and |flat_list|, the targets that are not deep
    # dependencies of the targets specified in |root_targets|.
    targets, flat_list = PruneUnwantedTargets(
        targets, flat_list, dependency_nodes, root_targets, data)

  # Check that no two targets in the same directory have the same name.
  VerifyNoCollidingTargets(flat_list)

  # Handle dependent settings of various types.
  for settings_type in ['all_dependent_settings',
                        'direct_dependent_settings',
                        'link_settings']:
    DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)

    # Take out the dependent settings now that they've been published to all
    # of the targets that require them.
    for target in flat_list:
      if settings_type in targets[target]:
        del targets[target][settings_type]

  # Make sure static libraries don't declare dependencies on other static
  # libraries, but that linkables depend on all unlinked static libraries
  # that they need so that their link steps will be correct.
  gii = generator_input_info
  if gii['generator_wants_static_library_dependencies_adjusted']:
    AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    gii['generator_wants_sorted_dependencies'])

  # Apply "post"/"late"/"target" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATE, variables, build_file)

  # Move everything that can go into a "configurations" section into one.
  for target in flat_list:
    target_dict = targets[target]
    SetUpConfigurations(target, target_dict)

  # Apply exclude (!) and regex (/) list filters.
  for target in flat_list:
    target_dict = targets[target]
    ProcessListFiltersInDict(target, target_dict)

  # Apply "latelate" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATELATE, variables, build_file)

  # Make sure that the rules make sense, and build up rule_sources lists as
  # needed.  Not all generators will need to use the rule_sources lists, but
  # some may, and it seems best to build the list in a common spot.
  # Also validate actions and run_as elements in targets.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ValidateTargetType(target, target_dict)
    ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
    ValidateRunAsInTarget(target, target_dict, build_file)
    ValidateActionsInTarget(target, target_dict, build_file)

  # Generators might not expect ints.  Turn them into strs.
  TurnIntIntoStrInDict(data)

  # TODO(mark): Return |data| for now because the generator needs a list of
  # build files that came in.  In the future, maybe it should just accept
  # a list, and not the whole data dict.
  return [flat_list, targets, data]
| bsd-3-clause |
daafgo/CourseBuilder-Xapi | modules/review/peer.py | 4 | 10698 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Internal implementation details of the peer review subsystem.
Public classes, including domain objects, can be found in domain.py and
models/student_work.py. Entities declared here should not be used by external
clients.
"""
__author__ = [
'johncox@google.com (John Cox)',
]
from models import counters
from models import models
from models import student_work
from modules.review import domain
from google.appengine.ext import db
# Performance counter bumped whenever ReviewSummary.increment_count() refuses
# to push the aggregate step count past domain.MAX_UNREMOVED_REVIEW_STEPS.
COUNTER_INCREMENT_COUNT_COUNT_AGGREGATE_EXCEEDED_MAX = counters.PerfCounter(
    'gcb-pr-increment-count-count-aggregate-exceeded-max',
    ('number of times increment_count() failed because the new aggregate of '
     'the counts would have exceeded domain.MAX_UNREMOVED_REVIEW_STEPS'))
class ReviewSummary(student_work.BaseEntity):
    """Object that tracks the aggregate state of reviews for a submission."""

    # UTC last modification timestamp.
    change_date = db.DateTimeProperty(auto_now=True, required=True)
    # UTC create date.
    create_date = db.DateTimeProperty(auto_now_add=True, required=True)

    # Strong counters. Callers should never manipulate these directly. Instead,
    # use decrement|increment_count.

    # Count of ReviewStep entities for this submission currently in state
    # STATE_ASSIGNED.
    assigned_count = db.IntegerProperty(default=0, required=True)
    # Count of ReviewStep entities for this submission currently in state
    # STATE_COMPLETED.
    completed_count = db.IntegerProperty(default=0, required=True)
    # Count of ReviewStep entities for this submission currently in state
    # STATE_EXPIRED.
    expired_count = db.IntegerProperty(default=0, required=True)

    # Key of the student who wrote the submission being reviewed.
    reviewee_key = student_work.KeyProperty(
        kind=models.Student.kind(), required=True)
    # Key of the submission being reviewed.
    submission_key = student_work.KeyProperty(
        kind=student_work.Submission.kind(), required=True)
    # Identifier of the unit this review is a part of.
    unit_id = db.StringProperty(required=True)

    def __init__(self, *args, **kwargs):
        """Constructs a new ReviewSummary.

        The datastore key_name is derived from submission_key, so that key
        must be passed and key_name must not be set by the caller.
        """
        assert not kwargs.get('key_name'), (
            'Setting key_name manually not supported')
        submission_key = kwargs.get('submission_key')
        assert submission_key, 'Missing required submission_key property'
        kwargs['key_name'] = self.key_name(submission_key)
        super(ReviewSummary, self).__init__(*args, **kwargs)

    @classmethod
    def key_name(cls, submission_key):
        """Creates a key_name string for datastore operations."""
        return '(review_summary:%s)' % submission_key.id_or_name()

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Constructs a version of the entity's key that is safe for export."""
        # Recover the unit and reviewee components encoded in the key name,
        # rebuild the submission key from them, transform it, and re-derive
        # this entity's key from the transformed (safe) submission key.
        _, _, unit_id, unsafe_reviewee_key_name = cls._split_key(db_key.name())
        unsafe_reviewee_key = db.Key.from_path(
            models.Student.kind(), unsafe_reviewee_key_name)
        unsafe_submission_key = student_work.Submission.get_key(
            unit_id, unsafe_reviewee_key)
        safe_submission_key = student_work.Submission.safe_key(
            unsafe_submission_key, transform_fn)
        return db.Key.from_path(cls.kind(), cls.key_name(safe_submission_key))

    def _check_count(self):
        """Raises db.BadValueError if no further steps may be counted.

        Called before incrementing: refuses the increment when the current
        aggregate of the three state counters has already reached
        domain.MAX_UNREMOVED_REVIEW_STEPS.
        """
        count_sum = (
            self.assigned_count + self.completed_count + self.expired_count)
        if count_sum >= domain.MAX_UNREMOVED_REVIEW_STEPS:
            COUNTER_INCREMENT_COUNT_COUNT_AGGREGATE_EXCEEDED_MAX.inc()
            raise db.BadValueError(
                'Unable to increment %s to %s; max is %s' % (
                    self.kind(), count_sum, domain.MAX_UNREMOVED_REVIEW_STEPS))

    def decrement_count(self, state):
        """Decrements the count for the given state enum; does not save.

        Args:
            state: string. State indicating counter to decrement; must be one
                of domain.REVIEW_STATES.

        Raises:
            ValueError: if state not in domain.REVIEW_STATES.
        """
        if state == domain.REVIEW_STATE_ASSIGNED:
            self.assigned_count -= 1
        elif state == domain.REVIEW_STATE_COMPLETED:
            self.completed_count -= 1
        elif state == domain.REVIEW_STATE_EXPIRED:
            self.expired_count -= 1
        else:
            raise ValueError('%s not in %s' % (state, domain.REVIEW_STATES))

    def increment_count(self, state):
        """Increments the count for the given state enum; does not save.

        Args:
            state: string. State indicating counter to increment; must be one
                of domain.REVIEW_STATES.

        Raises:
            db.BadValueError: if incrementing the counter would cause the sum
                of all *_counts to exceed domain.MAX_UNREMOVED_REVIEW_STEPS.
            ValueError: if state not in domain.REVIEW_STATES
        """
        if state not in domain.REVIEW_STATES:
            raise ValueError('%s not in %s' % (state, domain.REVIEW_STATES))
        self._check_count()
        if state == domain.REVIEW_STATE_ASSIGNED:
            self.assigned_count += 1
        elif state == domain.REVIEW_STATE_COMPLETED:
            self.completed_count += 1
        elif state == domain.REVIEW_STATE_EXPIRED:
            self.expired_count += 1

    def for_export(self, transform_fn):
        """Creates a version of the entity that is safe for export."""
        model = super(ReviewSummary, self).for_export(transform_fn)
        model.reviewee_key = models.Student.safe_key(
            model.reviewee_key, transform_fn)
        model.submission_key = student_work.Submission.safe_key(
            model.submission_key, transform_fn)
        return model
class ReviewStep(student_work.BaseEntity):
    """Object that represents a single state of a review."""

    # Audit trail information.

    # Identifier for the kind of thing that did the assignment. Used to
    # distinguish between assignments done by humans and those done by the
    # review subsystem.
    assigner_kind = db.StringProperty(
        choices=domain.ASSIGNER_KINDS, required=True)
    # UTC last modification timestamp.
    change_date = db.DateTimeProperty(auto_now=True, required=True)
    # UTC create date.
    create_date = db.DateTimeProperty(auto_now_add=True, required=True)

    # Repeated data to allow filtering/ordering in queries.

    # Key of the submission being reviewed.
    submission_key = student_work.KeyProperty(
        kind=student_work.Submission.kind(), required=True)
    # Unit this review step is part of.
    unit_id = db.StringProperty(required=True)

    # State information.

    # State of this review step.
    state = db.StringProperty(choices=domain.REVIEW_STATES, required=True)
    # Whether or not the review has been removed. By default removed entities
    # are ignored for most queries.
    removed = db.BooleanProperty(default=False)

    # Pointers that tie the work and people involved together.

    # Key of the Review associated with this step.
    review_key = student_work.KeyProperty(kind=student_work.Review.kind())
    # Key of the associated ReviewSummary.
    review_summary_key = student_work.KeyProperty(kind=ReviewSummary.kind())
    # Key of the Student being reviewed.
    reviewee_key = student_work.KeyProperty(kind=models.Student.kind())
    # Key of the Student doing this review.
    reviewer_key = student_work.KeyProperty(kind=models.Student.kind())

    def __init__(self, *args, **kwargs):
        """Constructs a new ReviewStep.

        The datastore key_name is derived from the (submission, reviewer)
        pair, so both submission_key and reviewer_key must be passed and
        key_name must not be set by the caller.
        """
        assert not kwargs.get('key_name'), (
            'Setting key_name manually not supported')
        reviewer_key = kwargs.get('reviewer_key')
        submission_key = kwargs.get('submission_key')
        assert reviewer_key, 'Missing required reviewer_key property'
        assert submission_key, 'Missing required submission_key property'
        kwargs['key_name'] = self.key_name(submission_key, reviewer_key)
        super(ReviewStep, self).__init__(*args, **kwargs)

    @classmethod
    def key_name(cls, submission_key, reviewer_key):
        """Creates a key_name string for datastore operations."""
        return '(review_step:%s:%s)' % (
            submission_key.id_or_name(), reviewer_key.id_or_name())

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Constructs a version of the entity's key that is safe for export."""
        # Validates that the key name has the expected shape.
        cls._split_key(db_key.name())
        name = db_key.name().strip('()')
        # Split '(review_step:<submission_key_name>:<reviewer>)' back into
        # its submission and reviewer components.
        unsafe_submission_key_name, unsafe_reviewer_id_or_name = name.split(
            ':', 1)[1].rsplit(':', 1)
        unsafe_reviewer_key = db.Key.from_path(
            models.Student.kind(), unsafe_reviewer_id_or_name)
        safe_reviewer_key = models.Student.safe_key(
            unsafe_reviewer_key, transform_fn)

        # Treating as module-protected. pylint: disable=protected-access
        _, unit_id, unsafe_reviewee_key_name = (
            student_work.Submission._split_key(unsafe_submission_key_name))
        unsafe_reviewee_key = db.Key.from_path(
            models.Student.kind(), unsafe_reviewee_key_name)
        unsafe_submission_key = student_work.Submission.get_key(
            unit_id, unsafe_reviewee_key)
        safe_submission_key = student_work.Submission.safe_key(
            unsafe_submission_key, transform_fn)

        # Rebuild this entity's key from the transformed (safe) components.
        return db.Key.from_path(
            cls.kind(), cls.key_name(safe_submission_key, safe_reviewer_key))

    def for_export(self, transform_fn):
        """Creates a version of the entity that is safe for export."""
        model = super(ReviewStep, self).for_export(transform_fn)
        model.review_key = student_work.Review.safe_key(
            model.review_key, transform_fn)
        model.review_summary_key = ReviewSummary.safe_key(
            model.review_summary_key, transform_fn)
        model.reviewee_key = models.Student.safe_key(
            model.reviewee_key, transform_fn)
        model.reviewer_key = models.Student.safe_key(
            model.reviewer_key, transform_fn)
        model.submission_key = student_work.Submission.safe_key(
            model.submission_key, transform_fn)
        return model
| apache-2.0 |
Eksmo/calibre | src/calibre/ebooks/conversion/plugins/djvu_input.py | 5 | 3369 | # -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL 3'
__copyright__ = '2011, Anthon van der Neut <anthon@mnt.org>'
__docformat__ = 'restructuredtext en'
import os
from subprocess import Popen, PIPE
from cStringIO import StringIO
from calibre.customize.conversion import InputFormatPlugin, OptionRecommendation
class DJVUInput(InputFormatPlugin):

    name = 'DJVU Input'
    author = 'Anthon van der Neut'
    description = 'Convert OCR-ed DJVU files (.djvu) to HTML'
    file_types = set(['djvu', 'djv'])

    options = set([
        OptionRecommendation(name='use_djvutxt', recommended_value=True,
            help=_('Try to use the djvutxt program and fall back to pure '
                'python implementation if it fails or is not available')),
    ])

    def convert(self, stream, options, file_ext, log, accelerators):
        """Converts a DJVU stream into an OEB book.

        Extracts the (OCR-ed) text layer -- via the external djvutxt binary
        when allowed and present, otherwise via the pure-python DJVUFile
        parser -- turns the text into basic HTML, feeds that through the HTML
        input plugin, and finally attaches metadata read from the original
        stream.
        """
        from calibre.ebooks.txt.processor import convert_basic

        stdout = StringIO()
        ppdjvu = True
        # using djvutxt is MUCH faster, should make it an option
        if options.use_djvutxt and os.path.exists('/usr/bin/djvutxt'):
            from calibre.ptempfile import PersistentTemporaryFile
            try:
                # djvutxt needs a real file on disk, so spool the stream out.
                fp = PersistentTemporaryFile(suffix='.djvu', prefix='djv_input')
                filename = fp._name
                fp.write(stream.read())
                fp.close()
                cmd = ['djvutxt', filename]
                stdout.write(Popen(cmd, stdout=PIPE, close_fds=True).communicate()[0])
                os.remove(filename)
                ppdjvu = False
            except:
                stream.seek(0) # retry with the pure python converter
        if ppdjvu:
            from calibre.ebooks.djvu.djvu import DJVUFile
            x = DJVUFile(stream)
            x.get_text(stdout)

        # \037 (unit separator) marks paragraph breaks in the extracted text.
        html = convert_basic(stdout.getvalue().replace(b"\n", b' ').replace(
            b'\037', b'\n\n'))
        # Run the HTMLized text through the html processing plugin.
        from calibre.customize.ui import plugin_for_input_format
        html_input = plugin_for_input_format('html')
        for opt in html_input.options:
            setattr(options, opt.option.name, opt.recommended_value)
        options.input_encoding = 'utf-8'
        base = os.getcwdu()
        # NOTE(review): the 'txtz' check looks inherited from the TXT input
        # plugin; this plugin only registers djvu/djv -- confirm it is needed.
        if file_ext != 'txtz' and hasattr(stream, 'name'):
            base = os.path.dirname(stream.name)
        # Pick an index*.html name that does not clobber an existing file.
        # NOTE(review): on a collision the fallback 'index%d.html' name is
        # relative to the CWD, not |base| -- confirm this is intended.
        fname = os.path.join(base, 'index.html')
        c = 0
        while os.path.exists(fname):
            c += 1
            fname = 'index%d.html'%c
        htmlfile = open(fname, 'wb')
        with htmlfile:
            htmlfile.write(html.encode('utf-8'))
        # Suspend debug-pipeline output while the nested HTML conversion runs.
        odi = options.debug_pipeline
        options.debug_pipeline = None
        # Generate oeb from html conversion.
        with open(htmlfile.name, 'rb') as f:
            oeb = html_input.convert(f, options, 'html', log,
                    {})
        options.debug_pipeline = odi
        os.remove(htmlfile.name)
        # Set metadata from file.
        from calibre.customize.ui import get_file_type_metadata
        from calibre.ebooks.oeb.transforms.metadata import meta_info_to_oeb_metadata
        mi = get_file_type_metadata(stream, file_ext)
        meta_info_to_oeb_metadata(mi, oeb.metadata, log)

        return oeb
| gpl-3.0 |
pravisankar/solum | solum/api/controllers/v1/datamodel/extension.py | 6 | 1867 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from wsme import types as wtypes
from solum.api.controllers import common_types
from solum.api.controllers.v1.datamodel import types as api_types
class Extension(api_types.Base):
    """The Extension resource represents changes that the Provider has added
    onto a Platform in addition to the ones supplied by Solum by default.

    This may include additional protocol semantics, resource types,
    application lifecycle states, resource attributes, etc. Anything may be
    added, as long as it does not contradict the base functionality offered
    by Solum.
    """

    # wsme attribute: free-form version string of the extension.
    version = wtypes.text
    "Version of the extension."

    # wsme attribute: URI pointing at the extension's documentation.
    documentation = common_types.Uri
    "Documentation URI to the extension."

    @classmethod
    def sample(cls):
        # Representative instance used for generated API samples/docs.
        # uri/name/type/tags/project_id/user_id/description are presumably
        # declared on api_types.Base -- only version and documentation are
        # declared here (TODO confirm against api_types).
        return cls(uri='http://example.com/v1/extensions/logstash',
                   name='logstash',
                   type='extension',
                   tags=['large'],
                   project_id='1dae5a09ef2b4d8cbf3594b0eb4f6b94',
                   user_id='55f41cf46df74320b9486a35f5d28a11',
                   description='This logstash extension provides a tool for '
                               'managing your application events and logs.',
                   version='2.13',
                   documentation='http://example.com/docs/ext/logstash')
| apache-2.0 |
rbejar/odrl-ogc-cache-policies | owslib/iso.py | 1 | 30051 | # -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2009 Tom Kralidis
#
# Authors : Tom Kralidis <tomkralidis@gmail.com>
# Angelos Tzotsos <tzotsos@gmail.com>
#
# Contact email: tomkralidis@gmail.com
# =============================================================================
""" ISO metadata parser """
from owslib.etree import etree
from owslib import util
from owslib.namespaces import Namespaces
# default variables
def get_namespaces():
    """Build the prefix->URI mapping used by every parser in this module.

    The default (None) prefix is bound to the gmd namespace so that
    unprefixed lookups resolve against ISO 19115 metadata elements.
    """
    registry = Namespaces()
    prefixes = ["gco", "gmd", "gml", "gml32", "gmx", "gts", "srv", "xlink"]
    mapping = registry.get_namespaces(prefixes)
    mapping[None] = registry.get_namespace("gmd")
    return mapping


# Module-wide namespace map shared by all parser classes below.
namespaces = get_namespaces()
class MD_Metadata(object):
    """ Process gmd:MD_Metadata (top-level ISO 19139 metadata record) """
    def __init__(self, md):
        # Keep a serialised copy of the source XML for callers.
        if hasattr(md, 'getroot'):  # standalone document
            self.xml = etree.tostring(md.getroot())
        else:  # part of a larger document
            self.xml = etree.tostring(md)

        # Simple character-string header fields.
        val = md.find(util.nspath_eval('gmd:fileIdentifier/gco:CharacterString', namespaces))
        self.identifier = util.testXMLValue(val)

        val = md.find(util.nspath_eval('gmd:parentIdentifier/gco:CharacterString', namespaces))
        self.parentidentifier = util.testXMLValue(val)

        val = md.find(util.nspath_eval('gmd:language/gco:CharacterString', namespaces))
        self.language = util.testXMLValue(val)

        val = md.find(util.nspath_eval('gmd:dataSetURI/gco:CharacterString', namespaces))
        self.dataseturi = util.testXMLValue(val)

        val = md.find(util.nspath_eval('gmd:language/gmd:LanguageCode', namespaces))
        self.languagecode = util.testXMLValue(val)

        val = md.find(util.nspath_eval('gmd:dateStamp/gco:Date', namespaces))
        self.datestamp = util.testXMLValue(val)
        if not self.datestamp:
            # Fall back to the DateTime encoding of the date stamp.
            val = md.find(util.nspath_eval('gmd:dateStamp/gco:DateTime', namespaces))
            self.datestamp = util.testXMLValue(val)

        self.charset = _testCodeListValue(md.find(util.nspath_eval('gmd:characterSet/gmd:MD_CharacterSetCode', namespaces)))
        self.hierarchy = _testCodeListValue(md.find(util.nspath_eval('gmd:hierarchyLevel/gmd:MD_ScopeCode', namespaces)))

        # All metadata-level responsible-party contacts.
        self.contact = []
        for i in md.findall(util.nspath_eval('gmd:contact/gmd:CI_ResponsibleParty', namespaces)):
            o = CI_ResponsibleParty(i)
            self.contact.append(o)

        val = md.find(util.nspath_eval('gmd:dateStamp/gco:DateTime', namespaces))
        self.datetimestamp = util.testXMLValue(val)

        val = md.find(util.nspath_eval('gmd:metadataStandardName/gco:CharacterString', namespaces))
        self.stdname = util.testXMLValue(val)

        val = md.find(util.nspath_eval('gmd:metadataStandardVersion/gco:CharacterString', namespaces))
        self.stdver = util.testXMLValue(val)

        val = md.find(util.nspath_eval('gmd:referenceSystemInfo/gmd:MD_ReferenceSystem', namespaces))
        if val is not None:
            self.referencesystem = MD_ReferenceSystem(val)
        else:
            self.referencesystem = None

        # TODO: merge .identificationinfo into .identification
        #warnings.warn(
        #    'the .identification and .serviceidentification properties will merge into '
        #    '.identification being a list of properties. This is currently implemented '
        #    'in .identificationinfo. '
        #    'Please see https://github.com/geopython/OWSLib/issues/38 for more information',
        #    FutureWarning)
        # Legacy single-value identification: dataset form wins over service.
        val = md.find(util.nspath_eval('gmd:identificationInfo/gmd:MD_DataIdentification', namespaces))
        val2 = md.find(util.nspath_eval('gmd:identificationInfo/srv:SV_ServiceIdentification', namespaces))

        if val is not None:
            self.identification = MD_DataIdentification(val, 'dataset')
            self.serviceidentification = None
        elif val2 is not None:
            self.identification = MD_DataIdentification(val2, 'service')
            self.serviceidentification = SV_ServiceIdentification(val2)
        else:
            self.identification = None
            self.serviceidentification = None

        # New-style list: one parsed object per identificationInfo child,
        # dispatched on the child's concrete element name.
        self.identificationinfo = []
        for idinfo in md.findall(util.nspath_eval('gmd:identificationInfo', namespaces)):
            val = list(idinfo)[0]
            tagval = util.xmltag_split(val.tag)
            if tagval == 'MD_DataIdentification':
                self.identificationinfo.append(MD_DataIdentification(val, 'dataset'))
            elif tagval == 'MD_ServiceIdentification':
                self.identificationinfo.append(MD_DataIdentification(val, 'service'))
            elif tagval == 'SV_ServiceIdentification':
                self.identificationinfo.append(SV_ServiceIdentification(val))

        val = md.find(util.nspath_eval('gmd:distributionInfo/gmd:MD_Distribution', namespaces))
        if val is not None:
            self.distribution = MD_Distribution(val)
        else:
            self.distribution = None

        val = md.find(util.nspath_eval('gmd:dataQualityInfo/gmd:DQ_DataQuality', namespaces))
        if val is not None:
            self.dataquality = DQ_DataQuality(val)
        else:
            self.dataquality = None
class CI_Date(object):
    """Parse a gmd:CI_Date element into ``date`` and ``type``."""

    def __init__(self, md):
        # The date may be encoded as gco:Date or gco:DateTime; use
        # whichever element is present, preferring the plain Date form.
        self.date = None
        for xpath in ('gmd:date/gco:Date', 'gmd:date/gco:DateTime'):
            element = md.find(util.nspath_eval(xpath, namespaces))
            if element is not None:
                self.date = util.testXMLValue(element)
                break

        code = md.find(util.nspath_eval('gmd:dateType/gmd:CI_DateTypeCode', namespaces))
        self.type = _testCodeListValue(code)
class CI_ResponsibleParty(object):
    """Parse a gmd:CI_ResponsibleParty element (contact information)."""

    def __init__(self, md):
        contact_prefix = 'gmd:contactInfo/gmd:CI_Contact/'
        tel_prefix = contact_prefix + 'gmd:phone/gmd:CI_Telephone/'
        addr_prefix = contact_prefix + 'gmd:address/gmd:CI_Address/'
        # Plain character-string children, mapped attribute -> XPath.
        text_fields = (
            ('name', 'gmd:individualName/gco:CharacterString'),
            ('organization', 'gmd:organisationName/gco:CharacterString'),
            ('position', 'gmd:positionName/gco:CharacterString'),
            ('phone', tel_prefix + 'gmd:voice/gco:CharacterString'),
            ('fax', tel_prefix + 'gmd:facsimile/gco:CharacterString'),
            ('address', addr_prefix + 'gmd:deliveryPoint/gco:CharacterString'),
            ('city', addr_prefix + 'gmd:city/gco:CharacterString'),
            ('region', addr_prefix + 'gmd:administrativeArea/gco:CharacterString'),
            ('postcode', addr_prefix + 'gmd:postalCode/gco:CharacterString'),
            ('country', addr_prefix + 'gmd:country/gco:CharacterString'),
            ('email', addr_prefix + 'gmd:electronicMailAddress/gco:CharacterString'),
        )
        for attr, xpath in text_fields:
            element = md.find(util.nspath_eval(xpath, namespaces))
            setattr(self, attr, util.testXMLValue(element))

        resource = md.find(util.nspath_eval(
            contact_prefix + 'gmd:onlineResource/gmd:CI_OnlineResource', namespaces))
        self.onlineresource = CI_OnlineResource(resource) if resource is not None else None

        self.role = _testCodeListValue(md.find(util.nspath_eval('gmd:role/gmd:CI_RoleCode', namespaces)))
class MD_DataIdentification(object):
    """Parse gmd:MD_DataIdentification (or srv:SV_ServiceIdentification
    when ``identtype`` is 'service') into citation, constraint, keyword,
    resolution and extent attributes.
    """
    def __init__(self, md, identtype):
        # 'dataset' or 'service', as chosen by the caller.
        self.identtype = identtype
        val = md.find(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
        self.title = util.testXMLValue(val)

        val = md.find(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:alternateTitle/gco:CharacterString', namespaces))
        self.alternatetitle = util.testXMLValue(val)

        val = md.find(util.nspath_eval('gmd:aggregationInfo', namespaces))
        self.aggregationinfo = util.testXMLValue(val)

        self.date = []
        self.datetype = []  # kept for interface compatibility; never populated here
        for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:date/gmd:CI_Date', namespaces)):
            self.date.append(CI_Date(i))

        self.uselimitation = []
        for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_Constraints/gmd:useLimitation/gco:CharacterString', namespaces)):
            val = util.testXMLValue(i)
            if val is not None:
                self.uselimitation.append(val)

        self.accessconstraints = []
        for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_RestrictionCode', namespaces)):
            val = _testCodeListValue(i)
            if val is not None:
                self.accessconstraints.append(val)

        self.classification = []
        for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_ClassificationCode', namespaces)):
            val = _testCodeListValue(i)
            if val is not None:
                self.classification.append(val)

        self.otherconstraints = []
        for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:otherConstraints/gco:CharacterString', namespaces)):
            val = util.testXMLValue(i)
            if val is not None:
                self.otherconstraints.append(val)

        self.securityconstraints = []
        for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_SecurityConstraints/gmd:useLimitation', namespaces)):
            val = util.testXMLValue(i)
            if val is not None:
                self.securityconstraints.append(val)

        self.useconstraints = []
        for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:useConstraints/gmd:MD_RestrictionCode', namespaces)):
            val = _testCodeListValue(i)
            if val is not None:
                self.useconstraints.append(val)

        self.denominators = []
        for i in md.findall(util.nspath_eval('gmd:spatialResolution/gmd:MD_Resolution/gmd:equivalentScale/gmd:MD_RepresentativeFraction/gmd:denominator/gco:Integer', namespaces)):
            val = util.testXMLValue(i)
            if val is not None:
                self.denominators.append(val)

        # distance and uom are parallel lists (one uom per distance value).
        self.distance = []
        self.uom = []
        for i in md.findall(util.nspath_eval('gmd:spatialResolution/gmd:MD_Resolution/gmd:distance/gco:Distance', namespaces)):
            val = util.testXMLValue(i)
            if val is not None:
                self.distance.append(val)
                self.uom.append(i.get("uom"))

        self.resourcelanguage = []
        for i in md.findall(util.nspath_eval('gmd:language/gmd:LanguageCode', namespaces)):
            val = _testCodeListValue(i)
            if val is not None:
                self.resourcelanguage.append(val)

        # ROBUSTNESS FIX: these were previously assigned only inside the
        # conditional below, so consumers could hit AttributeError.
        self.creator = None
        self.publisher = None
        self.originator = None
        val = md.find(util.nspath_eval('gmd:pointOfContact/gmd:CI_ResponsibleParty/gmd:organisationName', namespaces))
        if val is not None:
            val2 = val.find(util.nspath_eval('gmd:role/gmd:CI_RoleCode', namespaces))
            if val2 is not None:
                # BUG FIX: the role code was previously read from `val` (the
                # organisationName element) instead of `val2` (the role
                # element), so none of the comparisons below could ever match.
                clv = _testCodeListValue(val2)
                if clv == 'originator':
                    self.creator = util.testXMLValue(val)
                elif clv == 'publisher':
                    self.publisher = util.testXMLValue(val)
                elif clv == 'contributor':
                    self.originator = util.testXMLValue(val)

        val = md.find(util.nspath_eval('gmd:edition/gco:CharacterString', namespaces))
        self.edition = util.testXMLValue(val)

        val = md.find(util.nspath_eval('gmd:abstract/gco:CharacterString', namespaces))
        self.abstract = util.testXMLValue(val)

        val = md.find(util.nspath_eval('gmd:purpose/gco:CharacterString', namespaces))
        self.purpose = util.testXMLValue(val)

        self.status = _testCodeListValue(md.find(util.nspath_eval('gmd:status/gmd:MD_ProgressCode', namespaces)))

        self.contact = []
        for i in md.findall(util.nspath_eval('gmd:pointOfContact/gmd:CI_ResponsibleParty', namespaces)):
            o = CI_ResponsibleParty(i)
            self.contact.append(o)

        # One dict per descriptiveKeywords group: type, thesaurus info and
        # the keyword strings themselves.
        self.keywords = []
        for i in md.findall(util.nspath_eval('gmd:descriptiveKeywords', namespaces)):
            mdkw = {}
            mdkw['type'] = _testCodeListValue(i.find(util.nspath_eval('gmd:MD_Keywords/gmd:type/gmd:MD_KeywordTypeCode', namespaces)))
            mdkw['thesaurus'] = {}

            val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
            mdkw['thesaurus']['title'] = util.testXMLValue(val)

            val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date', namespaces))
            mdkw['thesaurus']['date'] = util.testXMLValue(val)

            val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode', namespaces))
            mdkw['thesaurus']['datetype'] = util.testXMLValue(val)

            mdkw['keywords'] = []
            for k in i.findall(util.nspath_eval('gmd:MD_Keywords/gmd:keyword', namespaces)):
                val = k.find(util.nspath_eval('gco:CharacterString', namespaces))
                if val is not None:
                    val2 = util.testXMLValue(val)
                    if val2 is not None:
                        mdkw['keywords'].append(val2)
            self.keywords.append(mdkw)

        self.topiccategory = []
        for i in md.findall(util.nspath_eval('gmd:topicCategory/gmd:MD_TopicCategoryCode', namespaces)):
            val = util.testXMLValue(i)
            if val is not None:
                self.topiccategory.append(val)

        val = md.find(util.nspath_eval('gmd:supplementalInformation/gco:CharacterString', namespaces))
        self.supplementalinformation = util.testXMLValue(val)

        # There may be multiple geographicElement, create an extent
        # from the one containing either an EX_GeographicBoundingBox or EX_BoundingPolygon.
        # The schema also specifies an EX_GeographicDescription. This is not implemented yet.
        # ROBUSTNESS FIX: default the extent attributes so they exist even
        # when the record carries no gmd:extent/srv:extent elements.
        self.extent = EX_Extent(None)
        self.bbox = self.extent.boundingBox
        self.temporalextent_start = None
        self.temporalextent_end = None
        val = None
        val2 = None
        val3 = None
        extents = md.findall(util.nspath_eval('gmd:extent', namespaces))
        extents.extend(md.findall(util.nspath_eval('srv:extent', namespaces)))
        for extent in extents:
            if val is None:
                for e in extent.findall(util.nspath_eval('gmd:EX_Extent/gmd:geographicElement', namespaces)):
                    if e.find(util.nspath_eval('gmd:EX_GeographicBoundingBox', namespaces)) is not None or e.find(util.nspath_eval('gmd:EX_BoundingPolygon', namespaces)) is not None:
                        val = e
                        break
                self.extent = EX_Extent(val)
                self.bbox = self.extent.boundingBox  # for backwards compatibility

            # Temporal extent: accept either GML 3.1 or GML 3.2 TimePeriod.
            if val2 is None:
                val2 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:beginPosition', namespaces))
            if val2 is None:
                val2 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml32:TimePeriod/gml32:beginPosition', namespaces))
            self.temporalextent_start = util.testXMLValue(val2)

            if val3 is None:
                val3 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:endPosition', namespaces))
            if val3 is None:
                val3 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml32:TimePeriod/gml32:endPosition', namespaces))
            self.temporalextent_end = util.testXMLValue(val3)
class MD_Distributor(object):
    """Parse a gmd:MD_Distributor wrapper element."""

    def __init__(self, md):
        party = md.find(util.nspath_eval(
            'gmd:MD_Distributor/gmd:distributorContact/gmd:CI_ResponsibleParty', namespaces))
        self.contact = CI_ResponsibleParty(party) if party is not None else None

        transfer_path = ('gmd:MD_Distributor/gmd:distributorTransferOptions/'
                         'gmd:MD_DigitalTransferOptions/gmd:onLine/gmd:CI_OnlineResource')
        self.online = [CI_OnlineResource(resource)
                       for resource in md.findall(util.nspath_eval(transfer_path, namespaces))]
class MD_Distribution(object):
    """Parse a gmd:MD_Distribution element (format, distributors, links)."""

    def __init__(self, md):
        fmt_prefix = 'gmd:distributionFormat/gmd:MD_Format/'
        self.format = util.testXMLValue(md.find(
            util.nspath_eval(fmt_prefix + 'gmd:name/gco:CharacterString', namespaces)))
        self.version = util.testXMLValue(md.find(
            util.nspath_eval(fmt_prefix + 'gmd:version/gco:CharacterString', namespaces)))

        self.distributor = [MD_Distributor(dist)
                            for dist in md.findall(util.nspath_eval('gmd:distributor', namespaces))]

        online_path = 'gmd:transferOptions/gmd:MD_DigitalTransferOptions/gmd:onLine/gmd:CI_OnlineResource'
        self.online = [CI_OnlineResource(resource)
                       for resource in md.findall(util.nspath_eval(online_path, namespaces))]
class DQ_DataQuality(object):
    ''' process DQ_DataQuality'''
    def __init__(self, md):
        # Conformance-result citation titles, one per DQ_DomainConsistency report.
        self.conformancetitle = []
        for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces)):
            val = util.testXMLValue(i)
            if val is not None:
                self.conformancetitle.append(val)

        self.conformancedate = []
        for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date', namespaces)):
            val = util.testXMLValue(i)
            if val is not None:
                self.conformancedate.append(val)

        self.conformancedatetype = []
        for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode', namespaces)):
            val = _testCodeListValue(i)
            if val is not None:
                self.conformancedatetype.append(val)

        # Pass/fail degree of each conformance result (boolean text).
        self.conformancedegree = []
        for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:pass/gco:Boolean', namespaces)):
            val = util.testXMLValue(i)
            if val is not None:
                self.conformancedegree.append(val)

        val = md.find(util.nspath_eval('gmd:lineage/gmd:LI_Lineage/gmd:statement/gco:CharacterString', namespaces))
        self.lineage = util.testXMLValue(val)

        # First specification title (same XPath as conformancetitle's first hit).
        val = md.find(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
        self.specificationtitle = util.testXMLValue(val)

        # NOTE(review): these are gmd:CI_Date *elements* (containers, not
        # leaf values); testXMLValue on them may always yield None -- confirm
        # whether .../gmd:date/gco:Date was intended.
        self.specificationdate = []
        for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date', namespaces)):
            val = util.testXMLValue(i)
            if val is not None:
                self.specificationdate.append(val)
class SV_ServiceIdentification(object):
    """ process SV_ServiceIdentification (ISO 19119 service metadata) """
    def __init__(self, md):
        # Mirrors MD_DataIdentification.identtype so both can be handled
        # uniformly by callers.
        self.identtype = 'service'
        val = md.find(util.nspath_eval('srv:serviceType/gco:LocalName', namespaces))
        self.type = util.testXMLValue(val)

        val = md.find(util.nspath_eval('srv:serviceTypeVersion/gco:CharacterString', namespaces))
        self.version = util.testXMLValue(val)

        val = md.find(util.nspath_eval('srv:accessProperties/gmd:MD_StandardOrderProcess/gmd:fees/gco:CharacterString', namespaces))
        self.fees = util.testXMLValue(val)

        # NOTE(review): despite the name, this holds an EX_Extent wrapper,
        # not a bare bounding box.
        val = md.find(util.nspath_eval('srv:extent/gmd:EX_Extent', namespaces))
        if val is not None:
            self.bbox = EX_Extent(val)
        else:
            self.bbox = None

        self.couplingtype = _testCodeListValue(md.find(util.nspath_eval('gmd:couplingType/gmd:SV_CouplingType', namespaces)))

        # Operations advertised by the service: one dict per operation with
        # its name, DCP bindings and connect-point online resources.
        self.operations = []
        for i in md.findall(util.nspath_eval('srv:containsOperations', namespaces)):
            tmp = {}
            val = i.find(util.nspath_eval('srv:SV_OperationMetadata/srv:operationName/gco:CharacterString', namespaces))
            tmp['name'] = util.testXMLValue(val)
            tmp['dcplist'] = []
            for d in i.findall(util.nspath_eval('srv:SV_OperationMetadata/srv:DCP', namespaces)):
                tmp2 = _testCodeListValue(d.find(util.nspath_eval('srv:DCPList', namespaces)))
                tmp['dcplist'].append(tmp2)

            tmp['connectpoint'] = []
            for d in i.findall(util.nspath_eval('srv:SV_OperationMetadata/srv:connectPoint', namespaces)):
                tmp3 = d.find(util.nspath_eval('gmd:CI_OnlineResource', namespaces))
                tmp['connectpoint'].append(CI_OnlineResource(tmp3))
            self.operations.append(tmp)

        # Datasets this service operates on, captured via xlink attributes.
        self.operateson = []
        for i in md.findall(util.nspath_eval('srv:operatesOn', namespaces)):
            tmp = {}
            tmp['uuidref'] = i.attrib.get('uuidref')
            tmp['href'] = i.attrib.get(util.nspath_eval('xlink:href', namespaces))
            tmp['title'] = i.attrib.get(util.nspath_eval('xlink:title', namespaces))
            self.operateson.append(tmp)
class CI_OnlineResource(object):
    """Parse a gmd:CI_OnlineResource element (a link to a resource)."""

    def __init__(self, md):
        text_fields = (
            ('url', 'gmd:linkage/gmd:URL'),
            ('protocol', 'gmd:protocol/gco:CharacterString'),
            ('name', 'gmd:name/gco:CharacterString'),
            ('description', 'gmd:description/gco:CharacterString'),
        )
        for attr, xpath in text_fields:
            element = md.find(util.nspath_eval(xpath, namespaces))
            setattr(self, attr, util.testXMLValue(element))

        self.function = _testCodeListValue(
            md.find(util.nspath_eval('gmd:function/gmd:CI_OnLineFunctionCode', namespaces)))
class EX_GeographicBoundingBox(object):
    """Parse a gmd:EX_GeographicBoundingBox into minx/maxx/miny/maxy."""

    def __init__(self, md):
        corners = (
            ('minx', 'gmd:westBoundLongitude'),
            ('maxx', 'gmd:eastBoundLongitude'),
            ('miny', 'gmd:southBoundLatitude'),
            ('maxy', 'gmd:northBoundLatitude'),
        )
        for attr, tag in corners:
            element = md.find(util.nspath_eval(tag + '/gco:Decimal', namespaces))
            setattr(self, attr, util.testXMLValue(element))
class EX_Polygon(object):
    """Parse a gml32:Polygon into exterior and interior coordinate rings.

    ``exterior_ring`` is a list of numeric coordinate tuples, or None when
    the polygon has no exterior gml32:LinearRing; ``interior_rings`` is a
    (possibly empty) list of such coordinate lists.
    """

    def __init__(self, md):
        # ROBUSTNESS FIX: previously this attribute was only assigned when
        # an exterior ring was present, so consumers could hit
        # AttributeError on polygons without one.
        self.exterior_ring = None
        linear_ring = md.find(util.nspath_eval('gml32:Polygon/gml32:exterior/gml32:LinearRing', namespaces))
        if linear_ring is not None:
            self.exterior_ring = self._coordinates_for_ring(linear_ring)

        interior_ring_elements = md.findall(util.nspath_eval('gml32:Polygon/gml32:interior', namespaces))
        self.interior_rings = []
        for iring_element in interior_ring_elements:
            linear_ring = iring_element.find(util.nspath_eval('gml32:LinearRing', namespaces))
            # ROBUSTNESS FIX: skip malformed interior elements that lack a
            # LinearRing instead of crashing on None.
            if linear_ring is not None:
                self.interior_rings.append(self._coordinates_for_ring(linear_ring))

    def _coordinates_for_ring(self, linear_ring):
        """Return the ring's gml32:pos positions as a list of float tuples."""
        coordinates = []
        positions = linear_ring.findall(util.nspath_eval('gml32:pos', namespaces))
        for pos in positions:
            tokens = pos.text.split()
            coords = tuple([float(t) for t in tokens])
            coordinates.append(coords)
        return coordinates
class EX_GeographicBoundingPolygon(object):
    """Parse a gmd:EX_BoundingPolygon into a list of EX_Polygon objects."""

    def __init__(self, md):
        extent_code = md.find(util.nspath_eval('gmd:extentTypeCode', namespaces))
        self.is_extent = util.testXMLValue(extent_code)
        self.polygons = [EX_Polygon(polygon_element)
                         for polygon_element
                         in md.findall(util.nspath_eval('gmd:polygon', namespaces))]
class EX_Extent(object):
    """ process EX_Extent """
    def __init__(self, md):
        # Defaults so both attributes always exist, even when md is None.
        self.boundingBox = None
        self.boundingPolygon = None
        if md is not None:
            bboxElement = md.find(util.nspath_eval('gmd:EX_GeographicBoundingBox', namespaces))
            if bboxElement is not None:
                self.boundingBox = EX_GeographicBoundingBox(bboxElement)

            polygonElement = md.find(util.nspath_eval('gmd:EX_BoundingPolygon', namespaces))
            if polygonElement is not None:
                self.boundingPolygon = EX_GeographicBoundingPolygon(polygonElement)

            # NOTE(review): unlike the two attributes above, description_code
            # is left unset when md is None -- callers passing None must not
            # read it; consider defaulting it to None as well.
            val = md.find(util.nspath_eval('gmd:EX_GeographicDescription/gmd:geographicIdentifier/gmd:MD_Identifier/gmd:code/gco:CharacterString', namespaces))
            self.description_code = util.testXMLValue(val)
class MD_ReferenceSystem(object):
    """Parse a gmd:MD_ReferenceSystem element into its identifier code."""

    def __init__(self, md):
        code_path = 'gmd:referenceSystemIdentifier/gmd:RS_Identifier/gmd:code/gco:CharacterString'
        self.code = util.testXMLValue(md.find(util.nspath_eval(code_path, namespaces)))
def _testCodeListValue(elpath):
    """Return an element's @codeListValue attribute, else its text, else None."""
    if elpath is None:
        return None
    # Prefer the codeListValue attribute; the True flag tells testXMLValue
    # it is handling an attribute value rather than an element.
    attr_value = util.testXMLValue(elpath.attrib.get('codeListValue'), True)
    if attr_value is not None:
        return attr_value
    # Fall back to the element's text content.
    return util.testXMLValue(elpath)
class CodelistCatalogue(object):
    """Parse a gmx:CT_CodelistCatalogue document.

    Exposes the catalogue header fields plus ``dictionaries``: a nested
    mapping of codelist-dictionary id -> {'description', 'identifier',
    'entries': {entry id -> {'description', 'identifier', 'codespace'}}}.
    """
    def __init__(self, ct):
        val = ct.find(util.nspath_eval('gmx:name/gco:CharacterString', namespaces))
        self.name = util.testXMLValue(val)
        val = ct.find(util.nspath_eval('gmx:scope/gco:CharacterString', namespaces))
        self.scope = util.testXMLValue(val)
        val = ct.find(util.nspath_eval('gmx:fieldOfApplication/gco:CharacterString', namespaces))
        self.fieldapp = util.testXMLValue(val)
        val = ct.find(util.nspath_eval('gmx:versionNumber/gco:CharacterString', namespaces))
        self.version = util.testXMLValue(val)
        val = ct.find(util.nspath_eval('gmx:versionDate/gco:Date', namespaces))
        self.date = util.testXMLValue(val)

        self.dictionaries = {}
        for i in ct.findall(util.nspath_eval('gmx:codelistItem/gmx:CodeListDictionary', namespaces)):
            id = i.attrib.get(util.nspath_eval('gml32:id', namespaces))
            self.dictionaries[id] = {}
            val = i.find(util.nspath_eval('gml32:description', namespaces))
            self.dictionaries[id]['description'] = util.testXMLValue(val)
            val = i.find(util.nspath_eval('gml32:identifier', namespaces))
            self.dictionaries[id]['identifier'] = util.testXMLValue(val)
            self.dictionaries[id]['entries'] = {}
            for j in i.findall(util.nspath_eval('gmx:codeEntry', namespaces)):
                id2 = j.find(util.nspath_eval('gmx:CodeDefinition', namespaces)).attrib.get(util.nspath_eval('gml32:id', namespaces))
                self.dictionaries[id]['entries'][id2] = {}
                val = j.find(util.nspath_eval('gmx:CodeDefinition/gml32:description', namespaces))
                self.dictionaries[id]['entries'][id2]['description'] = util.testXMLValue(val)
                val = j.find(util.nspath_eval('gmx:CodeDefinition/gml32:identifier', namespaces))
                self.dictionaries[id]['entries'][id2]['identifier'] = util.testXMLValue(val)
                val = j.find(util.nspath_eval('gmx:CodeDefinition', namespaces)).attrib.get('codeSpace')
                self.dictionaries[id]['entries'][id2]['codespace'] = util.testXMLValue(val, True)

    def getcodelistdictionaries(self):
        """Return the ids of all codelist dictionaries in the catalogue."""
        return self.dictionaries.keys()

    def getcodedefinitionidentifiers(self, cdl):
        """Return the identifiers of every entry of dictionary ``cdl``,
        or None when the dictionary id is unknown."""
        # BUG FIX: dict.has_key() was removed in Python 3; the `in`
        # membership test is equivalent and works on Python 2 and 3.
        if cdl in self.dictionaries:
            return [entry['identifier']
                    for entry in self.dictionaries[cdl]['entries'].values()]
        return None
| mit |
bourne015/kernel-3.0-s5pv210 | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
# Per-thread bookkeeping while a FUTEX_WAIT is in flight.
thread_thislock = {}    # tid -> futex address the thread is blocked on
thread_blocktime = {}   # tid -> timestamp (ns) when the thread blocked
# Long-lived aggregates.
# BUG FIX: process_names was initialised twice; the duplicate is removed.
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
                              nr, uaddr, op, val, utime, uaddr2, val3):
    """Record the moment a thread blocks on a futex (FUTEX_WAIT entry)."""
    futex_cmd = op & FUTEX_CMD_MASK
    if futex_cmd == FUTEX_WAIT:
        # Remember who is waiting, on which address, and since when.
        process_names[tid] = comm
        thread_thislock[tid] = uaddr
        thread_blocktime[tid] = nsecs(s, ns)
    # Other commands (e.g. the WAKE side) are of no interest to this report.
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
                             nr, ret):
    """On futex-syscall exit, fold the thread's wait time into lock_waits.

    Threads with no pending FUTEX_WAIT entry (nothing recorded in
    thread_blocktime) are ignored.
    """
    # BUG FIX: dict.has_key() does not exist on Python 3; the `in`
    # operator is the equivalent test on both Python 2 and 3.
    if tid in thread_blocktime:
        elapsed = nsecs(s, ns) - thread_blocktime[tid]
        add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
        del thread_blocktime[tid]
        del thread_thislock[tid]
def trace_begin():
    # Announce interactive usage; the summary itself is printed by trace_end().
    print "Press control+C to stop and show the summary"
def trace_end():
    # lock_waits maps (tid, futex address) -> (min, max, avg, count) stats in ns.
    for (tid, lock) in lock_waits:
        # NOTE(review): `min`/`max` shadow the builtins; harmless here but
        # worth renaming.
        min, max, avg, count = lock_waits[tid, lock]
        print "%s[%d] lock %x contended %d times, %d avg ns" % \
              (process_names[tid], tid, lock, count, avg)
| gpl-2.0 |
CalthorpeAnalytics/urbanfootprint | footprint/main/models/analysis_module/core_module/core_revert_to_base_condition.py | 1 | 5853 |
# UrbanFootprint v1.5
# Copyright (C) 2017 Calthorpe Analytics
#
# This file is part of UrbanFootprint version 1.5
#
# UrbanFootprint is distributed under the terms of the GNU General
# Public License version 3, as published by the Free Software Foundation. This
# code is distributed WITHOUT ANY WARRANTY, without implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License v3 for more details; see <http://www.gnu.org/licenses/>.
__author__ = 'calthorpe_analytics'
def core_end_state_revert_to_base_condition(end_state, base):
    """Reset an end-state feature so it matches its base condition.

    Every demographic, dwelling-unit, employment, building square-footage,
    irrigated square-footage and parcel-acreage attribute is copied from
    ``base`` onto ``end_state``; the end-state-only references
    (``built_form_base`` and ``land_development_category``) are cleared,
    and the record is saved.
    """
    copied_fields = (
        'built_form_key', 'built_form_id', 'intersection_density_sqmi',
        'pop', 'hh', 'du', 'du_detsf', 'du_attsf', 'du_mf',
        'emp', 'emp_ret', 'emp_off', 'emp_pub', 'emp_ind', 'emp_ag',
        'emp_military',
        'du_detsf_sl', 'du_detsf_ll', 'du_mf2to4', 'du_mf5p',
        'emp_retail_services', 'emp_restaurant', 'emp_accommodation',
        'emp_arts_entertainment', 'emp_other_services',
        'emp_office_services', 'emp_public_admin', 'emp_education',
        'emp_medical_services', 'emp_manufacturing', 'emp_wholesale',
        'emp_transport_warehousing', 'emp_utilities', 'emp_construction',
        'emp_agriculture', 'emp_extraction',
        'bldg_sqft_detsf_sl', 'bldg_sqft_detsf_ll', 'bldg_sqft_attsf',
        'bldg_sqft_mf', 'bldg_sqft_retail_services', 'bldg_sqft_restaurant',
        'bldg_sqft_accommodation', 'bldg_sqft_arts_entertainment',
        'bldg_sqft_other_services', 'bldg_sqft_office_services',
        'bldg_sqft_public_admin', 'bldg_sqft_education',
        'bldg_sqft_medical_services', 'bldg_sqft_wholesale',
        'bldg_sqft_transport_warehousing',
        'residential_irrigated_sqft', 'commercial_irrigated_sqft',
        'acres_parcel_res', 'acres_parcel_res_detsf',
        'acres_parcel_res_detsf_sl', 'acres_parcel_res_detsf_ll',
        'acres_parcel_res_attsf', 'acres_parcel_res_mf',
        'acres_parcel_emp', 'acres_parcel_emp_off', 'acres_parcel_emp_ret',
        'acres_parcel_emp_ind', 'acres_parcel_emp_ag',
        'acres_parcel_emp_mixed', 'acres_parcel_emp_military',
        'acres_parcel_mixed', 'acres_parcel_mixed_w_off',
        'acres_parcel_mixed_no_off', 'acres_parcel_no_use',
    )
    for field in copied_fields:
        setattr(end_state, field, getattr(base, field))
    # These only carry meaning on a painted end state; reverting clears them.
    end_state.built_form_base = None
    end_state.land_development_category = None
    end_state.save()
def core_increment_revert_to_base_condition(increment):
    """Reset an increment record to the no-change base condition and save it.

    Clears the built-form/category descriptors and zeroes every population,
    household, dwelling-unit and employment tally.
    """
    increment.built_form_key = None
    increment.built_form_id = None
    increment.land_development_category = None
    increment.refill_flag = False
    # Zero all numeric tallies in one pass rather than one assignment per line.
    for field in (
            'pop', 'hh', 'du', 'emp',
            'du_detsf', 'du_detsf_ll', 'du_detsf_sl', 'du_attsf', 'du_mf',
            'emp_ret', 'emp_retail_services', 'emp_restaurant',
            'emp_accommodation', 'emp_arts_entertainment', 'emp_other_services',
            'emp_off', 'emp_office_services', 'emp_medical_services',
            'emp_pub', 'emp_education', 'emp_public_admin',
            'emp_ind', 'emp_wholesale', 'emp_transport_warehousing',
            'emp_manufacturing', 'emp_utilities', 'emp_construction',
            'emp_ag', 'emp_agriculture', 'emp_extraction',
            'emp_military'):
        setattr(increment, field, 0)
    increment.save()
| gpl-3.0 |
laiqiqi886/kbengine | kbe/res/scripts/common/Lib/test/test_pep277.py | 73 | 7135 | # Test the Unicode versions of normal file functions
# open, os.open, os.stat. os.listdir, os.rename, os.remove, os.mkdir, os.chdir, os.rmdir
import os
import sys
import unittest
import warnings
from unicodedata import normalize
from test import support
# Candidate filenames mixing ASCII, Latin-1, Greek, Cyrillic, Japanese,
# Hebrew and CJK code points; the test classes create one file per name.
filenames = [
    '1_abc',
    '2_ascii',
    '3_Gr\xfc\xdf-Gott',
    '4_\u0393\u03b5\u03b9\u03ac-\u03c3\u03b1\u03c2',
    '5_\u0417\u0434\u0440\u0430\u0432\u0441\u0442\u0432\u0443\u0439\u0442\u0435',
    '6_\u306b\u307d\u3093',
    '7_\u05d4\u05e9\u05e7\u05e6\u05e5\u05e1',
    '8_\u66e8\u66e9\u66eb',
    '9_\u66e8\u05e9\u3093\u0434\u0393\xdf',
    # Specific code points: fn, NFC(fn) and NFKC(fn) all differents
    '10_\u1fee\u1ffd',
    ]

# Mac OS X decomposes Unicode names, using Normal Form D.
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
# "However, most volume formats do not follow the exact specification for
# these normal forms. For example, HFS Plus uses a variant of Normal Form D
# in which U+2000 through U+2FFF, U+F900 through U+FAFF, and U+2F800 through
# U+2FAFF are not decomposed."
if sys.platform != 'darwin':
    filenames.extend([
        # Specific code points: NFC(fn), NFD(fn), NFKC(fn) and NFKD(fn) all differents
        '11_\u0385\u03d3\u03d4',
        '12_\u00a8\u0301\u03d2\u0301\u03d2\u0308', # == NFD('\u0385\u03d3\u03d4')
        '13_\u0020\u0308\u0301\u038e\u03ab', # == NFKC('\u0385\u03d3\u03d4')
        '14_\u1e9b\u1fc1\u1fcd\u1fce\u1fcf\u1fdd\u1fde\u1fdf\u1fed',
        # Specific code points: fn, NFC(fn) and NFKC(fn) all differents
        '15_\u1fee\u1ffd\ufad1',
        '16_\u2000\u2000\u2000A',
        '17_\u2001\u2001\u2001A',
        '18_\u2003\u2003\u2003A', # == NFC('\u2001\u2001\u2001A')
        '19_\u0020\u0020\u0020A', # '\u0020' == ' ' == NFKC('\u2000') ==
                                  # NFKC('\u2001') == NFKC('\u2003')
        ])

# Is it Unicode-friendly?
# Skip the whole module unless every candidate name is representable in the
# filesystem encoding (always true on NT+ and UTF-8 filesystems).
if not os.path.supports_unicode_filenames:
    fsencoding = sys.getfilesystemencoding()
    try:
        for name in filenames:
            name.encode(fsencoding)
    except UnicodeEncodeError:
        raise unittest.SkipTest("only NT+ and systems with "
                                "Unicode-friendly filesystem encoding")
class UnicodeFileTests(unittest.TestCase):
    """Exercise file operations with non-ASCII names.

    Subclasses set ``normal_form`` to store the files under a specific
    Unicode normalization; all tests must pass regardless of the form.
    """
    files = set(filenames)
    normal_form = None

    def setUp(self):
        # Create TESTFN and populate it with one file per candidate name.
        try:
            os.mkdir(support.TESTFN)
        except FileExistsError:
            pass
        self.addCleanup(support.rmtree, support.TESTFN)
        files = set()
        for name in self.files:
            name = os.path.join(support.TESTFN, self.norm(name))
            with open(name, 'wb') as f:
                f.write((name+'\n').encode("utf-8"))
            os.stat(name)
            files.add(name)
        self.files = files

    def norm(self, s):
        # Apply the subclass's normalization form, if any.
        if self.normal_form:
            return normalize(self.normal_form, s)
        return s

    def _apply_failure(self, fn, filename,
                       expected_exception=FileNotFoundError,
                       check_filename=True):
        # Assert fn(filename) raises expected_exception and (optionally)
        # that the exception's .filename is the name that was passed in.
        with self.assertRaises(expected_exception) as c:
            fn(filename)
        exc_filename = c.exception.filename
        if check_filename:
            self.assertEqual(exc_filename, filename, "Function '%s(%a) failed "
                             "with bad filename in the exception: %a" %
                             (fn.__name__, filename, exc_filename))

    def test_failures(self):
        # Pass non-existing Unicode filenames all over the place.
        for name in self.files:
            name = "not_" + name
            self._apply_failure(open, name)
            self._apply_failure(os.stat, name)
            self._apply_failure(os.chdir, name)
            self._apply_failure(os.rmdir, name)
            self._apply_failure(os.remove, name)
            self._apply_failure(os.listdir, name)

    if sys.platform == 'win32':
        # Windows is lunatic. Issue #13366.
        _listdir_failure = NotADirectoryError, FileNotFoundError
    else:
        _listdir_failure = NotADirectoryError

    def test_open(self):
        # Each name must support write + stat; listdir on a regular file fails.
        for name in self.files:
            f = open(name, 'wb')
            f.write((name+'\n').encode("utf-8"))
            f.close()
            os.stat(name)
            self._apply_failure(os.listdir, name, self._listdir_failure)

    # Skip the test on darwin, because darwin does normalize the filename to
    # NFD (a variant of Unicode NFD form). Normalize the filename to NFC, NFKC,
    # NFKD in Python is useless, because darwin will normalize it later and so
    # open(), os.stat(), etc. don't raise any exception.
    @unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')
    def test_normalize(self):
        # A name in any *other* normalization form must not resolve.
        files = set(self.files)
        others = set()
        for nf in set(['NFC', 'NFD', 'NFKC', 'NFKD']):
            others |= set(normalize(nf, file) for file in files)
        others -= files
        for name in others:
            self._apply_failure(open, name)
            self._apply_failure(os.stat, name)
            self._apply_failure(os.chdir, name)
            self._apply_failure(os.rmdir, name)
            self._apply_failure(os.remove, name)
            self._apply_failure(os.listdir, name)

    # Skip the test on darwin, because darwin uses a normalization different
    # than Python NFD normalization: filenames are different even if we use
    # Python NFD normalization.
    @unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')
    def test_listdir(self):
        # bytes and str listings of TESTFN must agree.
        sf0 = set(self.files)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            f1 = os.listdir(support.TESTFN.encode(sys.getfilesystemencoding()))
        f2 = os.listdir(support.TESTFN)
        sf2 = set(os.path.join(support.TESTFN, f) for f in f2)
        self.assertEqual(sf0, sf2, "%a != %a" % (sf0, sf2))
        self.assertEqual(len(f1), len(f2))

    def test_rename(self):
        # Renaming away and back must round-trip for every name.
        for name in self.files:
            os.rename(name, "tmp")
            os.rename("tmp", name)

    def test_directory(self):
        # Create/use/remove a directory and a file whose names are non-ASCII.
        dirname = os.path.join(support.TESTFN, 'Gr\xfc\xdf-\u66e8\u66e9\u66eb')
        filename = '\xdf-\u66e8\u66e9\u66eb'
        oldwd = os.getcwd()
        os.mkdir(dirname)
        os.chdir(dirname)
        try:
            with open(filename, 'wb') as f:
                f.write((filename + '\n').encode("utf-8"))
            os.access(filename,os.R_OK)
            os.remove(filename)
        finally:
            os.chdir(oldwd)
            os.rmdir(dirname)
# Each subclass stores the test files under a different Unicode
# normalization of the same names; the shared tests run for every form.
class UnicodeNFCFileTests(UnicodeFileTests):
    normal_form = 'NFC'

class UnicodeNFDFileTests(UnicodeFileTests):
    normal_form = 'NFD'

class UnicodeNFKCFileTests(UnicodeFileTests):
    normal_form = 'NFKC'

class UnicodeNFKDFileTests(UnicodeFileTests):
    normal_form = 'NFKD'
def test_main():
    # Run the un-normalized base class plus every normalization variant.
    support.run_unittest(
        UnicodeFileTests,
        UnicodeNFCFileTests,
        UnicodeNFDFileTests,
        UnicodeNFKCFileTests,
        UnicodeNFKDFileTests,
    )

if __name__ == "__main__":
    test_main()
| lgpl-3.0 |
tody411/ImageViewerFramework | ivf/core/sfs/detail_layer.py | 1 | 1105 | # -*- coding: utf-8 -*-
## @package ivf.core.sfs.detail_layer
#
# ivf.core.sfs.detail_layer utility package.
# @author tody
# @date 2016/02/10
import cv2
from ivf.cv.image import to8U, to32F
from ivf.core.guided_filter.guided_filter import GuidedFilter
def baseDetailSeparationGaussian(I_32F, sigma=5.0):
    """Split an image into (base, detail) using a Gaussian low-pass filter.

    The base layer is the Gaussian-blurred image; detail is the residual.
    """
    base = cv2.GaussianBlur(I_32F, (0, 0), sigma)
    return base, I_32F - base
def baseDetailSeparationBilateral(I_32F, sigma_space=5.0, sigma_range=0.1):
    """Split an image into (base, detail) with an edge-preserving bilateral filter."""
    base = cv2.bilateralFilter(I_32F, 0, sigma_range, sigma_space)
    return base, I_32F - base
def baseDetailSeprationDOG(I_32F, sigma=5.0, sigma_scale=1.6):
    """Difference-of-Gaussians base/detail separation.

    Detail is the band between the two Gaussian scales; base is the input
    with that band removed.  (Name spelling kept as-is for callers.)
    """
    blur_narrow = cv2.GaussianBlur(I_32F, (0, 0), sigma)
    blur_wide = cv2.GaussianBlur(I_32F, (0, 0), sigma * sigma_scale)
    detail = blur_narrow - blur_wide
    return I_32F - detail, detail
def baseDetailSeparationMedian(I_32F, ksize=5):
    """Split an image into (base, detail) using a median filter.

    cv2.medianBlur requires an odd integer kernel size, so ksize is rounded
    up to the nearest odd value.  Floor division (//) keeps the result an
    int under Python 3 as well; the original "/" yields a float there and
    medianBlur rejects it.
    """
    ksize = 2 * (ksize // 2) + 1
    B = to32F(cv2.medianBlur(to8U(I_32F), ksize))
    D = I_32F - B
    return B, D
sritchie73/liftOverPlink | liftOverPlink.py | 1 | 6309 | #!/usr/bin/python
# This script to be used to run liftOver on genotype data stored in
# the plink format.
# See: http://genome.sph.umich.edu/wiki/LiftOver
# Downloaded from: http://genome.sph.umich.edu/wiki/LiftMap.py
#
# Modified by Scott Ritchie:
# - to work with user specified chain files, rather than
# the original developer's specific chain file.
# - to not rely the original developer's path to liftOver.
# - to provide helpful usage documentation.
# - to clean up the intermediary BED files to avoid confusion.
# - to generally be slightly more PEP compliant.
#
import sys
import os
import argparse
import gzip
from string import Template
def die(msg):
    """Print *msg* and abort the program with exit status 2."""
    # Single-argument print() behaves identically under Python 2 and 3,
    # unlike the original "print msg" statement which is a py3 syntax error.
    print(msg)
    sys.exit(2)
def myopen(fn):
    """Open *fn*, transparently handling gzip-compressed files.

    Probes the file by reading two bytes through the gzip module; when that
    fails the file is treated as plain text.  Returns a fresh file object
    positioned at the start in either case.
    """
    try:
        h = gzip.open(fn)
        try:
            h.read(2)  # raises IOError/OSError when fn is not gzip data
        finally:
            # Always release the probe handle -- the original leaked it
            # when read() raised.
            h.close()
    except IOError:
        # Not gzip-compressed (or gzip.open itself failed): plain open.
        return open(fn)
    return gzip.open(fn)
def map2bed(fin, fout):
    """Convert a plink MAP file *fin* to a UCSC BED file *fout*.

    BED coordinates are 0-based half-open, hence the (pos-1, pos) pair.
    Returns True on success.
    """
    print("Converting MAP file to UCSC BED file...")
    with open(fout, 'w') as fo:  # context manager guarantees the handle closes
        for ln in myopen(fin):
            # MAP columns: chromosome, rs id, genetic distance, position.
            chrom, rs, mdist, pos = ln.split()
            pos = int(pos)
            fo.write('%s\t%d\t%d\t%s\n' % ('chr' + chrom, pos - 1, pos, rs))
    return True
# global var:
# rs ids that liftOver successfully mapped / failed to map.  Populated by
# liftBed() and consumed by liftDat() and liftPed().
LIFTED_SET = set()
UNLIFTED_SET = set()
def liftBed(fin, fout, funlifted, chainFile, liftOverPath):
    """Run the external `liftOver` binary on BED file *fin*.

    Writes the lifted records to *fout* and the failures to
    ``fout + '.unlifted'`` (the *funlifted* argument is unused here --
    NOTE(review): callers pass it but liftOver derives its own name; confirm).
    Side effect: fills the module-level LIFTED_SET / UNLIFTED_SET.
    Returns True unconditionally (os.system's status is not checked).
    """
    print "Lifting BED file..."
    params = dict()
    params['LIFTOVER_BIN'] = liftOverPath
    params['OLD'] = fin
    params['CHAIN'] = chainFile
    params['NEW'] = fout
    params['UNLIFTED'] = fout + '.unlifted'
    cmd = Template('$LIFTOVER_BIN $OLD $CHAIN $NEW $UNLIFTED')
    cmd = cmd.substitute(params)
    # NOTE(review): shell interpolation of caller-supplied paths; assumes
    # trusted command-line arguments.
    os.system(cmd)
    #record lifted/unliftd rs
    for ln in myopen(params['UNLIFTED']):
        if len(ln) == 0 or ln[0] == '#':continue
        UNLIFTED_SET.add(ln.strip().split()[-1])
    for ln in myopen(params['NEW']):
        if len(ln) == 0 or ln[0] == '#':continue
        LIFTED_SET.add(ln.strip().split()[-1])
    return True
def bed2map(fin, fout):
    """Convert a lifted UCSC BED file *fin* back to plink MAP format *fout*.

    The genetic-distance column is written as 0.0 (liftOver does not
    preserve it).  Returns True on success.
    """
    print("Converting lifted BED file back to MAP...")
    with open(fout, 'w') as fo:  # context manager guarantees the handle closes
        for ln in myopen(fin):
            chrom, pos0, pos1, rs = ln.split()
            # Strip the UCSC 'chr' prefix; plink wants the bare chromosome.
            fo.write('%s\t%s\t0.0\t%s\n' % (chrom.replace('chr', ''), rs, pos1))
    return True
def liftDat(fin, fout):
    """Copy DAT file *fin* to *fout*, keeping only markers that lifted.

    Non-marker lines (those not starting with 'M') are copied verbatim;
    marker lines are kept only when their rs id is in LIFTED_SET.
    """
    with open(fout, 'w') as out:
        for line in myopen(fin):
            if len(line) == 0 or line[0] != 'M':
                out.write(line)
                continue
            _kind, rs = line.strip().split()
            if rs in LIFTED_SET:
                out.write(line)
    return True
def liftPed(fin, fout, fOldMap):
    """Rewrite PED file *fin* to *fout*, dropping genotypes of unlifted SNPs.

    *fOldMap* supplies the original marker order so each genotype column can
    be matched against UNLIFTED_SET.  NOTE: Python 2 only -- relies on
    xrange, list-returning map() and integer division (l/2).
    """
    # two ways to do it:
    # 1. write unlifted snp list
    #    use PLINK to do this job using --exclude
    # 2. alternatively, we can write our own method
    # we will use method 2
    marker = [i.strip().split()[1] for i in open(fOldMap)]
    flag = map(lambda x: x not in UNLIFTED_SET, marker)
    # print marker[:10]
    # print flag[:10]
    fo = open(fout, 'w')
    print "Updating PED file..."
    for ln in myopen(fin):
        f = ln.strip().split()
        l = len(f)
        # First 6 columns are sample metadata; collapse each allele pair
        # into a single 'A B' token so one token == one marker.
        f = f[:6] + [ f[i*2] + ' '+f[i*2 +1] for i in xrange(3, l/2 )]
        fo.write('\t'.join(f[:6]))
        fo.write('\t')
        if len(f[6:]) != len(flag):
            die('Inconsistent length of ped and map files')
        newMarker = [m for i, m in enumerate(f[6:]) if flag[i]]
        fo.write('\t'.join(newMarker))
        fo.write('\n')
        #print marker[:10]
        #die('test')
    return True
def makesure(result, succ_msg, fail_msg = "ERROR"):
    """Report a pipeline step: print SUCC/FAIL and exit(2) on failure."""
    if result:
        print 'SUCC: ', succ_msg
    else:
        print 'FAIL: ', fail_msg
        sys.exit(2)
if __name__ == '__main__':
    # Command-line driver: MAP -> BED -> liftOver -> MAP, optionally
    # filtering PED/DAT files of unlifted SNPs, then cleanup.
    parser = argparse.ArgumentParser(
        description="%(prog)s converts genotype data stored in plink's PED+MAP " +
                    "format from one genome build to another, using liftOver."
    )
    parser.add_argument('-m', "--map", dest='mapFile', required = True,
                        help='The plink MAP file to `liftOver`.')
    parser.add_argument('-p', "--ped", dest='pedFile',
                        help='Optionally remove "unlifted SNPs" from the plink ' +
                             'PED file after running `liftOver`.')
    parser.add_argument('-d', "--dat", dest='datFile',
                        help='Optionally remove "unlifted SNPs" from a data ' +
                             'file containing a list of SNPs (e.g. for ' +
                             ' --exclude or --include in `plink`)')
    parser.add_argument('-o', "--out", dest='prefix', required = True,
                        help='The prefix to give to the output files.')
    parser.add_argument('-c', "--chain", dest='chainFile', required = True,
                        help='The location of the chain file to provide to ' +
                             '`liftOver`.')
    parser.add_argument('-e', "--bin", dest='liftOverExecutable',
                        help='The location of the `liftOver` executable.')

    # Show usage message if user hasn't provided any arguments, rather
    # than giving a non-descript error message with the usage()
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit()

    args = parser.parse_args()

    oldBed = args.mapFile + '.bed'
    makesure(map2bed(args.mapFile, oldBed),
             'map->bed succ')

    # If a location is not specified for the liftOver executable.
    # assume it is in the User's $PATH.
    if args.liftOverExecutable:
        liftOverPath = args.liftOverExecutable
    else:
        liftOverPath = "liftOver"

    newBed = args.prefix + '.bed'
    unlifted = args.prefix + '.unlifted'
    makesure(liftBed(oldBed, newBed, unlifted, args.chainFile, liftOverPath),
             'liftBed succ')

    newMap = args.prefix + '.map'
    makesure(bed2map(newBed, newMap),
             'bed->map succ')

    if args.datFile:
        newDat = args.prefix + '.dat'
        makesure(liftDat(args.datFile, newDat),
                 'liftDat succ')

    if args.pedFile:
        newPed = args.prefix + '.ped'
        makesure(liftPed(args.pedFile, newPed, args.mapFile),
                 'liftPed succ')

    # Remove the intermediary BED files to avoid confusion.
    print "cleaning up BED files..."
    os.remove(newBed)
    os.remove(oldBed)
IMCG/RamCloud | bindings/python/test_testutil.py | 20 | 5234 | #!/usr/bin/env python
# Copyright (c) 2010 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Unit tests for utilities for unit tests.
See L{testutil}.
"""
from __future__ import with_statement
import unittest
import cPickle as pickle
import sys
import StringIO
from testutil import BreakException, Opaque, Counter, MockRetry
class TestOpaque(unittest.TestCase):
    """Unit tests for L{Opaque}."""

    def test_equality(self):
        # Opaque instances compare equal only to themselves.
        x = Opaque()
        y = Opaque()
        self.assertEqual(x, x)
        self.assertNotEqual(x, y)
        # assertTrue replaces the deprecated assert_ alias.
        self.assertTrue(x == x)
        self.assertTrue(x != y)

    def test_pickle(self):
        # Equality must survive a pickle round-trip (default and protocol 2).
        x1 = Opaque()
        x2 = pickle.loads(pickle.dumps(x1))
        x3 = pickle.loads(pickle.dumps(x2, protocol=2))
        self.assertEqual(x1, x2)
        self.assertEqual(x1, x3)
        self.assertEqual(x2, x3)
class TestCounter(unittest.TestCase):
    """Unit tests for L{Counter}."""

    def test_normal(self):
        # bump() exactly the declared number of times, then done().
        counter = Counter(self, 3)
        for i in range(3):
            counter.bump()
        counter.done()

    def test_no_steps(self):
        counter = Counter(self, 0)
        counter.done()

    def test_done_too_soon(self):
        counter = Counter(self, 1)
        self.assertRaises(self.failureException, counter.done)

    def test_done_too_late(self):
        counter = Counter(self, 2)
        counter.bump()
        counter.bump()
        self.assertRaises(self.failureException, counter.bump)

    def test_bump_int(self):
        # Explicit step indices: repeating an index must fail.
        counter = Counter(self, 3)
        counter.bump(0)
        self.assertRaises(self.failureException, counter.bump, 0)
        counter.bump(2)
        counter.done()

    def test_bump_list(self):
        counter = Counter(self, 3)
        counter.bump([0, 7])
        self.assertRaises(self.failureException, counter.bump, [0, 2])
        counter.bump([2, 9])
        counter.done()

    def test_with(self):
        with Counter(self, 3) as counter:
            for i in range(3):
                counter.bump(i)

    def test_with_not_done(self):
        def f():
            with Counter(self, 3) as counter:
                pass
        self.assertRaises(self.failureException, f)

    def test_with_not_done_suppressed(self):
        # An exception inside the with-block must propagate; the counter's
        # own __exit__ failure is printed (to the captured stdout), not raised.
        class E(Exception):
            pass
        def f():
            with Counter(self, 3) as counter:
                raise E
        s = StringIO.StringIO()
        saved = sys.stdout
        try:
            sys.stdout = s
            self.assertRaises(E, f)
        finally:
            sys.stdout = saved
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(s.getvalue(),
                         "Suppressed exception from Counter.__exit__()\n")
class TestMockRetry(unittest.TestCase):
    """Unit tests for L{testutil.MockRetry}."""

    def test_next(self):
        # The retry iterator yields itself exactly once.
        retries = MockRetry(self)
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(retries.next(), retries)
        self.assertRaises(StopIteration, retries.next)
        retries.done()

    def test_later(self):
        retries = MockRetry(self, expect_later=True)
        retries.next()
        retries.later()
        self.assertRaises(BreakException, retries.next)
        retries.done()

    def test_later_bad(self):
        # later() without expect_later must fail the test.
        retries = MockRetry(self)
        retries.next()
        self.assertRaises(self.failureException, retries.later)

    def test_immediate(self):
        retries = MockRetry(self, expect_immediate=True)
        retries.next()
        retries.immediate()
        self.assertRaises(BreakException, retries.next)
        retries.done()

    def test_immediate_bad(self):
        retries = MockRetry(self)
        retries.next()
        self.assertRaises(self.failureException, retries.immediate)

    def test_with(self):
        with MockRetry(self) as retries:
            for i, retry in enumerate(retries):
                pass

    def test_with_not_done(self):
        def f():
            with MockRetry(self, expect_later=True) as retries:
                pass
        self.assertRaises(self.failureException, f)

    def test_with_not_done_suppressed(self):
        # An exception inside the with-block propagates; __exit__'s own
        # complaint is printed to the captured stdout instead of raised.
        class E(Exception):
            pass
        def f():
            with MockRetry(self, expect_later=True) as retries:
                raise E
        s = StringIO.StringIO()
        saved = sys.stdout
        try:
            sys.stdout = s
            self.assertRaises(E, f)
        finally:
            sys.stdout = saved
        self.assertEqual(s.getvalue(),
                         "Suppressed exception from MockRetry.__exit__()\n")

if __name__ == '__main__':
    unittest.main()
| isc |
ruimashita/django-haystack | haystack/utils/app_loading.py | 12 | 2913 | # encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from django import VERSION as DJANGO_VERSION
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from haystack.utils import importlib
__all__ = ['haystack_get_models', 'haystack_load_apps']
# Tokens returned by is_app_or_model() (pre-Django-1.7 branch only).
APP = 'app'
MODEL = 'model'

if DJANGO_VERSION >= (1, 7):
    # Django >= 1.7: use the new application registry.
    from django.apps import apps

    def haystack_get_app_modules():
        """Return the Python module for each installed app"""
        return [i.module for i in apps.get_app_configs()]

    def haystack_load_apps():
        """Return a list of app labels for all installed applications which have models"""
        return [i.label for i in apps.get_app_configs() if i.models_module is not None]

    def haystack_get_models(label):
        # Resolve *label* ('app_label' or 'app_label.ModelName') to a list
        # of model classes.
        try:
            app_mod = apps.get_app_config(label)
            return app_mod.get_models()
        except LookupError:
            if '.' not in label:
                raise ImproperlyConfigured('Unknown application label {}'.format(label))
            app_label, model_name = label.rsplit('.', 1)
            return [apps.get_model(app_label, model_name)]
        except ImproperlyConfigured:
            # NOTE(review): swallowing this error makes the function return
            # None implicitly -- callers iterating the result would crash;
            # confirm this fall-through is intentional.
            pass

    def haystack_get_model(app_label, model_name):
        return apps.get_model(app_label, model_name)
else:
    # Django < 1.7: legacy app loading API.
    from django.db.models.loading import get_app, get_model, get_models

    def is_app_or_model(label):
        # Classify *label* as APP ('app_label') or MODEL ('app.Model').
        label_bits = label.split('.')
        if len(label_bits) == 1:
            return APP
        elif len(label_bits) == 2:
            try:
                get_model(*label_bits)
            except LookupError:
                return APP
            return MODEL
        else:
            raise ImproperlyConfigured(
                "'%s' isn't recognized as an app (<app_label>) or model (<app_label>.<model_name>)." % label)

    def haystack_get_app_modules():
        """Return the Python module for each installed app"""
        return [importlib.import_module(i) for i in settings.INSTALLED_APPS]

    def haystack_load_apps():
        # Do all, in an INSTALLED_APPS sorted order.
        items = []
        for app in settings.INSTALLED_APPS:
            app_label = app.split('.')[-1]
            try:
                get_app(app_label)
            except ImproperlyConfigured:
                continue  # Intentionally allow e.g. apps without models.py
            items.append(app_label)
        return items

    def haystack_get_models(label):
        # Resolve *label* to a list of model classes via the legacy API.
        app_or_model = is_app_or_model(label)
        if app_or_model == APP:
            app_mod = get_app(label)
            return get_models(app_mod)
        else:
            app_label, model_name = label.rsplit('.', 1)
            return [get_model(app_label, model_name)]

    def haystack_get_model(app_label, model_name):
        return get_model(app_label, model_name)
| bsd-3-clause |
flavour/cert | modules/pygsm/scanwin32.py | 62 | 8365 | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
import ctypes
import re
def ValidHandle(value):
    """errcheck helper: raise the current Windows error for a NULL handle."""
    if not value:
        raise ctypes.WinError()
    return value
# Win32 / SetupAPI type aliases used by the ctypes prototypes below.
NULL = 0
HDEVINFO = ctypes.c_int
BOOL = ctypes.c_int
CHAR = ctypes.c_char
PCTSTR = ctypes.c_char_p
HWND = ctypes.c_uint
DWORD = ctypes.c_ulong
PDWORD = ctypes.POINTER(DWORD)
ULONG = ctypes.c_ulong
ULONG_PTR = ctypes.POINTER(ULONG)
#~ PBYTE = ctypes.c_char_p
PBYTE = ctypes.c_void_p
class GUID(ctypes.Structure):
    """ctypes mirror of the Win32 GUID structure."""
    _fields_ = [
        ('Data1', ctypes.c_ulong),
        ('Data2', ctypes.c_ushort),
        ('Data3', ctypes.c_ushort),
        ('Data4', ctypes.c_ubyte*8),
    ]

    def __str__(self):
        # Render in registry format: {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}.
        tail = ''.join('%02x' % byte for byte in self.Data4)
        return "{%08x-%04x-%04x-%s-%s}" % (
            self.Data1, self.Data2, self.Data3, tail[:4], tail[4:])
# ctypes mirrors of the SetupAPI structures, the function prototypes this
# module calls, and the Win32 constants they use.
class SP_DEVINFO_DATA(ctypes.Structure):
    _fields_ = [
        ('cbSize', DWORD),
        ('ClassGuid', GUID),
        ('DevInst', DWORD),
        ('Reserved', ULONG_PTR),
    ]
    def __str__(self):
        return "ClassGuid:%s DevInst:%s" % (self.ClassGuid, self.DevInst)
PSP_DEVINFO_DATA = ctypes.POINTER(SP_DEVINFO_DATA)

class SP_DEVICE_INTERFACE_DATA(ctypes.Structure):
    _fields_ = [
        ('cbSize', DWORD),
        ('InterfaceClassGuid', GUID),
        ('Flags', DWORD),
        ('Reserved', ULONG_PTR),
    ]
    def __str__(self):
        return "InterfaceClassGuid:%s Flags:%s" % (self.InterfaceClassGuid, self.Flags)
PSP_DEVICE_INTERFACE_DATA = ctypes.POINTER(SP_DEVICE_INTERFACE_DATA)

PSP_DEVICE_INTERFACE_DETAIL_DATA = ctypes.c_void_p

# Used only to compute the packed header size of
# SP_DEVICE_INTERFACE_DETAIL_DATA_A (a DWORD followed by one CHAR).
class dummy(ctypes.Structure):
    _fields_=[("d1", DWORD), ("d2", CHAR)]
    _pack_ = 1
SIZEOF_SP_DEVICE_INTERFACE_DETAIL_DATA_A = ctypes.sizeof(dummy)

SetupDiDestroyDeviceInfoList = ctypes.windll.setupapi.SetupDiDestroyDeviceInfoList
SetupDiDestroyDeviceInfoList.argtypes = [HDEVINFO]
SetupDiDestroyDeviceInfoList.restype = BOOL

SetupDiGetClassDevs = ctypes.windll.setupapi.SetupDiGetClassDevsA
SetupDiGetClassDevs.argtypes = [ctypes.POINTER(GUID), PCTSTR, HWND, DWORD]
SetupDiGetClassDevs.restype = ValidHandle # HDEVINFO

SetupDiEnumDeviceInterfaces = ctypes.windll.setupapi.SetupDiEnumDeviceInterfaces
SetupDiEnumDeviceInterfaces.argtypes = [HDEVINFO, PSP_DEVINFO_DATA, ctypes.POINTER(GUID), DWORD, PSP_DEVICE_INTERFACE_DATA]
SetupDiEnumDeviceInterfaces.restype = BOOL

SetupDiGetDeviceInterfaceDetail = ctypes.windll.setupapi.SetupDiGetDeviceInterfaceDetailA
SetupDiGetDeviceInterfaceDetail.argtypes = [HDEVINFO, PSP_DEVICE_INTERFACE_DATA, PSP_DEVICE_INTERFACE_DETAIL_DATA, DWORD, PDWORD, PSP_DEVINFO_DATA]
SetupDiGetDeviceInterfaceDetail.restype = BOOL

SetupDiGetDeviceRegistryProperty = ctypes.windll.setupapi.SetupDiGetDeviceRegistryPropertyA
SetupDiGetDeviceRegistryProperty.argtypes = [HDEVINFO, PSP_DEVINFO_DATA, DWORD, PDWORD, PBYTE, DWORD, PDWORD]
SetupDiGetDeviceRegistryProperty.restype = BOOL

# Interface class GUID for COM ports.
GUID_CLASS_COMPORT = GUID(0x86e0d1e0L, 0x8089, 0x11d0,
    (ctypes.c_ubyte*8)(0x9c, 0xe4, 0x08, 0x00, 0x3e, 0x30, 0x1f, 0x73))

DIGCF_PRESENT = 2
DIGCF_DEVICEINTERFACE = 16
INVALID_HANDLE_VALUE = 0
ERROR_INSUFFICIENT_BUFFER = 122
SPDRP_HARDWAREID = 1
SPDRP_FRIENDLYNAME = 12
SPDRP_LOCATION_INFORMATION = 13
ERROR_NO_MORE_ITEMS = 259
def comports(available_only=True):
    """This generator scans the device registry for com ports and yields
    (order, port, desc, hwid). If available_only is true only return currently
    existing ports. Order is a helper to get sorted lists. it can be ignored
    otherwise."""
    flags = DIGCF_DEVICEINTERFACE
    if available_only:
        flags |= DIGCF_PRESENT
    g_hdi = SetupDiGetClassDevs(ctypes.byref(GUID_CLASS_COMPORT), None, NULL, flags);
    #~ for i in range(256):
    for dwIndex in range(256):
        did = SP_DEVICE_INTERFACE_DATA()
        did.cbSize = ctypes.sizeof(did)

        # Enumerate the next COM-port interface; ERROR_NO_MORE_ITEMS ends
        # the scan, anything else is a genuine failure.
        if not SetupDiEnumDeviceInterfaces(
            g_hdi,
            None,
            ctypes.byref(GUID_CLASS_COMPORT),
            dwIndex,
            ctypes.byref(did)
        ):
            if ctypes.GetLastError() != ERROR_NO_MORE_ITEMS:
                raise ctypes.WinError()
            break

        dwNeeded = DWORD()
        # get the size
        if not SetupDiGetDeviceInterfaceDetail(
            g_hdi,
            ctypes.byref(did),
            None, 0, ctypes.byref(dwNeeded),
            None
        ):
            # Ignore ERROR_INSUFFICIENT_BUFFER
            if ctypes.GetLastError() != ERROR_INSUFFICIENT_BUFFER:
                raise ctypes.WinError()
        # allocate buffer
        # Variable-length structure sized from the API's own answer above.
        class SP_DEVICE_INTERFACE_DETAIL_DATA_A(ctypes.Structure):
            _fields_ = [
                ('cbSize', DWORD),
                ('DevicePath', CHAR*(dwNeeded.value - ctypes.sizeof(DWORD))),
            ]
            def __str__(self):
                return "DevicePath:%s" % (self.DevicePath,)
        idd = SP_DEVICE_INTERFACE_DETAIL_DATA_A()
        idd.cbSize = SIZEOF_SP_DEVICE_INTERFACE_DETAIL_DATA_A
        devinfo = SP_DEVINFO_DATA()
        devinfo.cbSize = ctypes.sizeof(devinfo)
        if not SetupDiGetDeviceInterfaceDetail(
            g_hdi,
            ctypes.byref(did),
            ctypes.byref(idd), dwNeeded, None,
            ctypes.byref(devinfo)
        ):
            raise ctypes.WinError()

        # hardware ID
        szHardwareID = ctypes.create_string_buffer(250)
        if not SetupDiGetDeviceRegistryProperty(
            g_hdi,
            ctypes.byref(devinfo),
            SPDRP_HARDWAREID,
            None,
            ctypes.byref(szHardwareID), ctypes.sizeof(szHardwareID) - 1,
            None
        ):
            # Ignore ERROR_INSUFFICIENT_BUFFER
            if ctypes.GetLastError() != ERROR_INSUFFICIENT_BUFFER:
                raise ctypes.WinError()

        # friendly name
        szFriendlyName = ctypes.create_string_buffer(1024)
        if not SetupDiGetDeviceRegistryProperty(
            g_hdi,
            ctypes.byref(devinfo),
            SPDRP_FRIENDLYNAME,
            None,
            ctypes.byref(szFriendlyName), ctypes.sizeof(szFriendlyName) - 1,
            None
        ):
            # Ignore ERROR_INSUFFICIENT_BUFFER
            if ctypes.GetLastError() != ERROR_INSUFFICIENT_BUFFER:
                #~ raise ctypes.WinError()
                # not getting friendly name for com0com devices, try something else
                szFriendlyName = ctypes.create_string_buffer(1024)
                if SetupDiGetDeviceRegistryProperty(
                    g_hdi,
                    ctypes.byref(devinfo),
                    SPDRP_LOCATION_INFORMATION,
                    None,
                    ctypes.byref(szFriendlyName), ctypes.sizeof(szFriendlyName) - 1,
                    None
                ):
                    port_name = "\\\\.\\" + szFriendlyName.value
                    order = None
                else:
                    port_name = szFriendlyName.value
                    order = None
        else:
            # Friendly names look like "Device Name (COM7)"; extract the
            # port name and its number (used as the sort key).
            try:
                m = re.search(r"\((.*?(\d+))\)", szFriendlyName.value)
                #~ print szFriendlyName.value, m.groups()
                port_name = m.group(1)
                order = int(m.group(2))
            except AttributeError, msg:
                port_name = szFriendlyName.value
                order = None
        yield order, port_name, szFriendlyName.value, szHardwareID.value
    SetupDiDestroyDeviceInfoList(g_hdi)
def scan():
    """Scans for available ports on Windows and returns a list of devices"""
    import serial  # imported only to assert pyserial is available
    return [port_name for _order, port_name, _desc, _hwid in sorted(comports())]
if __name__ == '__main__':
    # Manual diagnostic: list registry-known COM ports and probe whether
    # each one can actually be opened with pyserial.
    import serial
    print "-"*78
    print "Serial ports"
    print "-"*78
    for order, port, desc, hwid in sorted(comports()):
        print "%-10s: %s (%s) ->" % (port, desc, hwid),
        try:
            serial.Serial(port) # test open
        except serial.serialutil.SerialException:
            print "can't be openend"
        else:
            print "Ready"
    print
    # list of all ports the system knows
    print "-"*78
    print "All serial ports (registry)"
    print "-"*78
    for order, port, desc, hwid in sorted(comports(False)):
        print "%-10s: %s (%s)" % (port, desc, hwid)
| mit |
sacharya/nova | nova/openstack/common/eventlet_backdoor.py | 25 | 4844 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import errno
import gc
import os
import pprint
import socket
import sys
import traceback
import eventlet
import eventlet.backdoor
import greenlet
from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
# Help text for the backdoor_port option.  Implicit string concatenation
# replaces the original "+ \" chain; note the restored space before
# "if it is in use" -- the original concatenation rendered "backdoorif".
help_for_backdoor_port = (
    'Acceptable values are 0, <port> and <start>:<end>, where 0 results in '
    'listening on a random tcp port number, <port> results in '
    'listening on the specified port number and not enabling backdoor '
    'if it is in use and <start>:<end> results in listening on the '
    'smallest unused port number within the specified range of port '
    'numbers. The chosen port is displayed in the service\'s log file.')
# Configuration option for the backdoor listener; disabled (None) by default.
eventlet_backdoor_opts = [
    cfg.StrOpt('backdoor_port',
               default=None,
               help='Enable eventlet backdoor. %s' % help_for_backdoor_port)
]

CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)
class EventletBackdoorConfigValueError(Exception):
    """Raised when the backdoor_port option cannot be parsed."""

    def __init__(self, port_range, help_msg, ex):
        message = ('Invalid backdoor_port configuration {0}: {1}. '
                   '{2}'.format(port_range, ex, help_msg))
        super(EventletBackdoorConfigValueError, self).__init__(message)
        # Keep the offending value for callers that want to report it.
        self.port_range = port_range
def _dont_use_this():
print("Don't use this, just disconnect instead")
def _find_objects(t):
return filter(lambda o: isinstance(o, t), gc.get_objects())
def _print_greenthreads():
    # Enumerate live greenlets and dump each one's number, repr and stack.
    for index, gthread in enumerate(_find_objects(greenlet.greenlet)):
        print(index, gthread)
        traceback.print_stack(gthread.gr_frame)
        print()
def _print_nativethreads():
for threadId, stack in sys._current_frames().items():
print(threadId)
traceback.print_stack(stack)
print()
def _parse_port_range(port_range):
if ':' not in port_range:
start, end = port_range, port_range
else:
start, end = port_range.split(':', 1)
try:
start, end = int(start), int(end)
if end < start:
raise ValueError
return start, end
except ValueError as ex:
raise EventletBackdoorConfigValueError(port_range, ex,
help_for_backdoor_port)
def _listen(host, start_port, end_port, listen_func):
try_port = start_port
while True:
try:
return listen_func((host, try_port))
except socket.error as exc:
if (exc.errno != errno.EADDRINUSE or
try_port >= end_port):
raise
try_port += 1
def initialize_if_enabled():
    """Start the eventlet backdoor server when backdoor_port is configured.

    Returns the actual TCP port the backdoor listens on, or None when the
    option is unset.
    """
    # Names injected into the backdoor REPL; exit/quit are neutered so an
    # operator can't accidentally kill the whole service.
    backdoor_locals = {
        'exit': _dont_use_this,  # So we don't exit the entire process
        'quit': _dont_use_this,  # So we don't exit the entire process
        'fo': _find_objects,
        'pgt': _print_greenthreads,
        'pnt': _print_nativethreads,
    }

    if CONF.backdoor_port is None:
        return None

    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))

    # NOTE(johannes): The standard sys.displayhook will print the value of
    # the last expression and set it to __builtin__._, which overwrites
    # the __builtin__._ that gettext sets. Let's switch to using pprint
    # since it won't interact poorly with gettext, and it's easier to
    # read the output too.
    def displayhook(val):
        if val is not None:
            pprint.pprint(val)
    sys.displayhook = displayhook

    sock = _listen('localhost', start_port, end_port, eventlet.listen)

    # In the case of backdoor port being zero, a port number is assigned by
    # listen(). In any case, pull the port number out here.
    port = sock.getsockname()[1]
    LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
             {'port': port, 'pid': os.getpid()})
    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                     locals=backdoor_locals)
    return port
| apache-2.0 |
yobennett/kafka | dev-utils/test-patch.py | 73 | 17007 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
# Pre Commit Hook for running tests and updating JIRA
#
# Original version was copied from SQOOP project.
#
import sys, os, re, urllib2, base64, subprocess, tempfile, shutil
import json
import datetime
from optparse import OptionParser
# Temp dir holding output when no usable --output was given (set later).
tmp_dir = None
# Base URL of the Apache JIRA instance all REST/browse requests go to.
BASE_JIRA_URL = 'https://issues.apache.org/jira'
# Branches this script knows how to test; BRANCHES[0] is the default guess.
BRANCHES = ["trunk", "0.7", "0.7.0", "0.7.1", "0.7.2", "0.8", "0.8.1", "0.8.2"]
def write_file(filename, content):
  """Write *content* to *filename*, replacing any existing file."""
  out = open(filename, "w")
  try:
    out.write(content)
  finally:
    out.close()
def kafka_guess_branch(versions):
  """Pick the branch to test against from a JIRA's version names.

  Returns the first listed version that exactly matches a known branch,
  falling back to the default branch (BRANCHES[0], i.e. "trunk") when the
  list is empty or nothing matches.
  """
  for candidate in versions or []:
    if candidate in BRANCHES:
      return candidate
  return BRANCHES[0]
def kafka_verify_branch(branch):
  """Return True when *branch* is one of the supported release branches."""
  return any(branch == known for known in BRANCHES)
def execute(cmd, log=True):
  """Run *cmd* through the shell and return its exit code.

  When *log* is true the command is echoed to stdout first.
  NOTE(review): shell=True with an interpolated command string -- callers
  must only pass trusted input.
  """
  if log:
    print "INFO: Executing %s" % (cmd)
  return subprocess.call(cmd, shell=True)
def jenkins_link_for_jira(name, endpoint):
  """Render *name* as JIRA link markup pointing into the current Jenkins build.

  Falls back to the bare text when not running under Jenkins (no BUILD_URL
  in the environment).
  """
  build_url = os.environ.get("BUILD_URL")
  if build_url is None:
    return name
  return "[%s|%s%s]" % (name, build_url, endpoint)
def jenkins_file_link_for_jira(name, file):
  """Link *name* to an archived patch-process artifact *file* on Jenkins."""
  endpoint = "artifact/patch-process/%s" % (file,)
  return jenkins_link_for_jira(name, endpoint)
def jira_request(result, url, username, password, data, headers):
  """Issue an HTTP request to JIRA and return the open response object.

  When both *username* and *password* are given, a Basic-auth header is
  attached.  *data* of None makes this a GET, otherwise a POST.
  """
  request = urllib2.Request(url, data, headers)
  print "INFO: URL = %s, Username = %s, data = %s, headers = %s" % (url, username, data, str(headers))
  if username and password:
    # encodestring appends a trailing newline that must not end up in the
    # Authorization header value.
    base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
    request.add_header("Authorization", "Basic %s" % base64string)
  return urllib2.urlopen(request)
def jira_get_defect_html(result, defect, username, password):
  """Fetch the human-readable JIRA browse page for *defect* as raw HTML."""
  page_url = "%s/browse/%s" % (BASE_JIRA_URL, defect)
  response = jira_request(result, page_url, username, password, None, {})
  return response.read()
def jira_get_defect(result, defect, username, password):
  """Fetch the REST (JSON) representation of *defect* as a string."""
  api_url = "%s/rest/api/2/issue/%s" % (BASE_JIRA_URL, defect)
  response = jira_request(result, api_url, username, password, None, {})
  return response.read()
def jira_generate_comment(result, branch):
  """Build the JIRA comment body summarizing this test-patch run.

  The returned string joins its lines with literal two-character "\\n"
  sequences (not real newlines) so it can be embedded directly into a
  JSON payload; embedded newlines in messages are escaped the same way.
  """
  elapsed = datetime.datetime.now() - result.start_time
  lines = ["Testing file [%s|%s] against branch %s took %s." % (
      result.attachment.split('/')[-1], result.attachment, branch, elapsed)]
  lines.append("")
  # A fatal error counts as (and leads) the error list.
  if result._fatal:
    result._error = [result._fatal] + result._error
  error_count = len(result._error)
  if error_count == 0:
    lines.append("{color:green}Overall:{color} +1 all checks pass")
  elif error_count == 1:
    lines.append("{color:red}Overall:{color} -1 due to an error")
  else:
    lines.append("{color:red}Overall:{color} -1 due to %d errors" % (error_count))
  lines.append("")
  for message in result._error:
    lines.append("{color:red}ERROR:{color} %s" % (message.replace("\n", "\\n")))
  for message in result._info:
    lines.append("INFO: %s" % (message.replace("\n", "\\n")))
  for message in result._success:
    lines.append("{color:green}SUCCESS:{color} %s" % (message.replace("\n", "\\n")))
  if "BUILD_URL" in os.environ:
    lines.append("")
    lines.append("Console output is available %s." % (jenkins_link_for_jira("here", "console")))
  lines.append("")
  lines.append("This message is automatically generated.")
  return "\\n".join(lines)
def jira_post_comment(result, defect, branch, username, password):
  """Post the generated test summary as a comment on the JIRA *defect*.

  Also writes the human-readable comment text to jira-comment.txt in the
  (module-global) output_dir.  Exits the process when JIRA does not
  answer 201 Created.
  """
  url = "%s/rest/api/2/issue/%s/comment" % (BASE_JIRA_URL, defect)
  # Generate body for the comment and save it to a file
  comment = jira_generate_comment(result, branch)
  write_file("%s/jira-comment.txt" % output_dir, comment.replace("\\n", "\n"))
  # Send the comment to the JIRA
  payload = "{\"body\": \"%s\"}" % comment
  headers = {'Content-Type' : 'application/json'}
  response = jira_request(result, url, username, password, payload, headers)
  response_body = response.read()
  if response.code != 201:
    # BUG FIX: the original formatted this message with an undefined name
    # `comment` (the generated text had been stored in `body` and then
    # overwritten by response.read()), so every non-201 reply raised a
    # NameError instead of printing the diagnostic below.
    msg = """Request for %s failed:
  URL = '%s'
  Code = '%d'
  Comment = '%s'
  Response = '%s'
""" % (defect, url, response.code, comment, response_body)
    print("FATAL: %s" % (msg))
    sys.exit(1)
def jira_get_attachment(result, defect, username, password):
  """Return the URL of the newest patch attachment on *defect*, or None.

  JIRA's REST API does not list attachments (hack inherited from Hadoop's
  precommit script), so the /browse HTML page is scraped with regexes
  instead: a Kafka-specific timestamped naming pattern is preferred, with
  a generic "<DEFECT>*.patch/.txt" pattern as fallback.  "Newest" means
  lexicographically greatest match.
  """
  html = jira_get_defect_html(result, defect, username, password)
  escaped_colon = re.escape("%3A")
  pattern = "(/secure/attachment/[0-9]+/(bug)?%s[0-9\-]*((\.|-)v?[0-9]+)?\.(patch|txt|patch\.txt))" % (re.escape(defect))
  kafka_pattern = "(/secure/attachment/[0-9]+/(bug)?%s_[0-9]+-[0-9]+-[0-9]+_[0-9]+%s[0-9]+%s[0-9]+[0-9\-]*((\.|-)v?[0-9]+)?\.(patch|txt|patch\.txt))" % (re.escape(defect), escaped_colon, escaped_colon)
  hits = [found[0] for found in
          re.findall(kafka_pattern, html, re.IGNORECASE) or re.findall(pattern, html, re.IGNORECASE)]
  if not hits:
    return None
  return "%s%s" % (BASE_JIRA_URL, max(hits))
# Get versions from JIRA JSON object
def json_get_version(json):
  """Collect affected-version and fix-version names from a parsed issue.

  NOTE(review): the parameter shadows the `json` module; harmless here
  because the module itself is not used inside this function.
  """
  versions = []
  # Load affectedVersion field
  for version in json.get("fields").get("versions"):
    versions = versions + [version.get("name").strip()]
  # Load fixVersion field
  for version in json.get("fields").get("fixVersions"):
    versions = versions + [version.get("name").strip()]
  if not versions:
    print "No Affected or Fixed version found in JIRA"
  return versions
def git_cleanup():
  """Best-effort reset of the git working tree.

  Failures are only logged (not raised) so exit paths such as
  Result.exit() never blow up while cleaning.
  """
  rc = execute("git clean -d -f", False)
  if rc != 0:
    print "ERROR: git clean failed"
  rc = execute("git reset --hard HEAD", False)
  if rc != 0:
    print "ERROR: git reset failed"
def git_checkout(result, branch):
  """Switch the working tree to a pristine, up-to-date copy of *branch*.

  Any failure is fatal: result.fatal() reports the problem and terminates
  the process, so statements after a failed step never run.
  """
  if not branch:
    result.fatal("Branch wasn't specified nor was correctly guessed")
    return
  steps = (
    ("git checkout %s" % (branch), "git checkout %s failed" % branch),
    ("git clean -d -f", "git clean failed"),
    ("git reset --hard HEAD", "git reset failed"),
    ("git fetch origin", "git fetch failed"),
    ("git merge --ff-only origin/%s" % (branch), "git merge failed"),
  )
  for command, failure_message in steps:
    if execute(command) != 0:
      result.fatal(failure_message)
def git_apply(result, cmd, patch_file, strip, output_dir):
  """Apply *patch_file* with *cmd* (e.g. `git apply`), logging to apply.txt.

  Success is recorded on *result* (with any captured warnings); a non-zero
  exit code is fatal and includes the captured output.
  """
  output_file = "%s/apply.txt" % (output_dir)
  rc = execute("%s -p%s < %s 1>%s 2>&1" % (cmd, strip, patch_file, output_file))
  captured = ""
  if os.path.exists(output_file):
    with open(output_file) as fh:
      captured = fh.read()
  if rc != 0:
    result.fatal("failed to apply patch (exit code %d):\n{code}%s{code}\n" % (rc, captured))
  elif captured:
    result.success("Patch applied, but there has been warnings:\n{code}%s{code}\n" % (captured))
  else:
    result.success("Patch applied correctly")
def static_test(result, patch_file, output_dir):
  """Check (via grep on the diff headers) that the patch touches a test file."""
  output_file = "%s/static-test.txt" % (output_dir)
  rc = execute("grep '^+++.*/test' %s 1>%s 2>&1" % (patch_file, output_file))
  if rc != 0:
    result.error("Patch does not add/modify any test case")
  else:
    result.success("Patch add/modify test case")
def gradle_bootstrap(result, output_dir):
  """Bootstrap the build with the system `gradle`; failure is fatal."""
  log_name = "bootstrap.txt"
  rc = execute("gradle 1>%s/%s 2>&1" % (output_dir, log_name))
  if rc != 0:
    report = jenkins_file_link_for_jira("report", log_name)
    result.fatal("failed to bootstrap project (exit code %d, %s)" % (rc, report))
  else:
    result.success("Gradle bootstrap was successful")
def gradle_clean(result, output_dir):
  """Run `./gradlew clean`; failure is fatal."""
  log_name = "clean.txt"
  rc = execute("./gradlew clean 1>%s/%s 2>&1" % (output_dir, log_name))
  if rc != 0:
    report = jenkins_file_link_for_jira("report", log_name)
    result.fatal("failed to clean project (exit code %d, %s)" % (rc, report))
  else:
    result.success("Clean was successful")
def gradle_install(result, output_dir):
  """Build all jars (`./gradlew jarAll`) with the patch applied; failure is fatal."""
  log_name = "install.txt"
  rc = execute("./gradlew jarAll 1>%s/%s 2>&1" % (output_dir, log_name))
  if rc != 0:
    report = jenkins_file_link_for_jira("report", log_name)
    result.fatal("failed to build with patch (exit code %d, %s)" % (rc, report))
  else:
    result.success("Patch compiled")
def checkstyleMain(result, output_dir):
  """Run checkstyle over the main sources; failure is fatal."""
  log_name = "checkstyleMain.txt"
  rc = execute("./gradlew checkstyleMain 1>%s/%s 2>&1" % (output_dir, log_name))
  if rc != 0:
    report = jenkins_file_link_for_jira("report", log_name)
    result.fatal("checkstyleMain failed with patch (exit code %d, %s)" % (rc, report))
  else:
    result.success("Checked style for Main")
def checkstyleTest(result, output_dir):
  """Run checkstyle over the test sources; failure is fatal."""
  log_name = "checkstyleTest.txt"
  rc = execute("./gradlew checkstyleTest 1>%s/%s 2>&1" % (output_dir, log_name))
  if rc != 0:
    report = jenkins_file_link_for_jira("report", log_name)
    result.fatal("checkstyleTest failed with patch (exit code %d, %s)" % (rc, report))
  else:
    result.success("Checked style for Test")
def gradle_test(result, output_dir):
  # Run the full unit-test suite (`./gradlew testAll`) and report results.
  run_gradle_test("testAll", "unit", result, output_dir)
def run_gradle_test(command, test_type, result, output_dir):
rc = execute("./gradlew %s 1>%s/test_%s.txt 2>&1" % (command, output_dir, test_type))
if rc == 0:
result.success("All %s tests passed" % test_type)
else:
result.error("Some %s tests failed (%s)" % (test_type, jenkins_file_link_for_jira("report", "test_%s.txt" % test_type)))
failed_tests = []
fd = open("%s/test_%s.txt" % (output_dir, test_type), "r")
for line in fd:
if "FAILED" in line and " > " in line:
failed_tests += [line]
fd.close()
for failed_test in set(failed_tests):
result.error("Failed %s test: {{%s}}" % (test_type, failed_test))
def clean_folder(folder):
  """Delete every regular file directly inside *folder* (non-recursive)."""
  for the_file in os.listdir(folder):
    file_path = os.path.join(folder, the_file)
    try:
      if os.path.isfile(file_path):
        os.unlink(file_path)
    except Exception, e:
      # Best-effort: log the failure and keep deleting the remaining files.
      print e
class Result(object):
  """Accumulates per-check outcomes and owns process shutdown.

  Checks append messages via error()/info()/success(); fatal() records a
  terminal failure and immediately runs the configured exit_handler
  (which reports the results and calls exit()).  exit() relies on the
  module-level globals tmp_dir / copy_output_dir / output_dir.
  """
  def __init__(self):
    self._error = []            # non-fatal problems found by checks
    self._info = []             # informational notes
    self._success = []          # passed checks
    self._fatal = None          # set once by fatal(); aborts the run
    self.exit_handler = None    # callback assigned by the main script
    self.attachment = "Not Found"  # URL of the JIRA patch being tested
    self.start_time = datetime.datetime.now()  # for elapsed-time reporting
  def error(self, msg):
    self._error.append(msg)
  def info(self, msg):
    self._info.append(msg)
  def success(self, msg):
    self._success.append(msg)
  def fatal(self, msg):
    # exit_handler is expected to end the process via exit()/sys.exit();
    # the trailing self.exit() is a safety net in case it returns.
    self._fatal = msg
    self.exit_handler()
    self.exit()
  def exit(self):
    """Clean the git tree, stage output if needed, and terminate (rc 0)."""
    git_cleanup()
    global tmp_dir
    global copy_output_dir
    global output_dir
    if copy_output_dir:
      print "INFO: Moving output to %s" % (copy_output_dir)
      os.renames(output_dir, copy_output_dir)
      # Clear tmp_dir so the "output is located" message below is skipped
      # once results have been moved to their final location.
      tmp_dir = None
    if tmp_dir:
      print "INFO: output is located %s" % (tmp_dir)
    sys.exit(0)
# ---- Command-line definition and validation --------------------------------
usage = "usage: %prog [options]"
parser = OptionParser(usage)
parser.add_option("--branch", dest="branch",
                  help="Local git branch to test against", metavar="trunk")
parser.add_option("--defect", dest="defect",
                  help="Defect name", metavar="KAFKA-1856")
parser.add_option("--file", dest="filename",
                  help="Test patch file", metavar="FILE")
parser.add_option("--run-tests", dest="run_tests",
                  help="Run Tests", action="store_true")
parser.add_option("--username", dest="username",
                  help="JIRA Username", metavar="USERNAME", default="kafkaqa")
parser.add_option("--output", dest="output_dir",
                  help="Directory to write output", metavar="DIRECTORY")
parser.add_option("--post-results", dest="post_results",
                  help="Post results to JIRA (only works in defect mode)", action="store_true")
parser.add_option("--password", dest="password",
                  help="JIRA Password", metavar="PASSWORD")
parser.add_option("--patch-command", dest="patch_cmd", default="git apply",
                  help="Patch command such as `git apply' or `patch'", metavar="COMMAND")
parser.add_option("-p", "--strip", dest="strip", default="1",
                  help="Remove <n> leading slashes from diff paths", metavar="N")
parser.add_option("--get-latest-patch", dest="get_latest_patch",
                  help="Get the latest patch attached to JIRA", action="store_true")
(options, args) = parser.parse_args()
# Exactly one patch source is required: a JIRA defect or a local file.
if not (options.defect or options.filename):
  print "FATAL: Either --defect or --file is required."
  sys.exit(1)
if options.defect and options.filename:
  print "FATAL: Both --defect and --file cannot be specified."
  sys.exit(1)
if options.post_results and not options.password:
  print "FATAL: --post-results requires --password"
  sys.exit(1)
if options.get_latest_patch and not options.defect:
  print "FATAL: --get-latest-patch requires --defect"
  sys.exit(1)
branch = options.branch
# A relative --output path would be wiped by the `git clean`/`git reset`
# steps, so stage results in a temp dir and move them into place on exit
# (see Result.exit).
if options.output_dir and not options.output_dir.startswith('/'):
  print "INFO: A temporary staging dir for output will be used to avoid deletion of output files during 'git reset'"
  copy_output_dir = options.output_dir
  output_dir = None
else:
  output_dir = options.output_dir
  copy_output_dir = None
# Unpack the remaining options into the module-level names used throughout.
defect = options.defect
username = options.username
password = options.password
run_tests = options.run_tests
post_results = options.post_results
strip = options.strip
get_latest_patch = options.get_latest_patch
patch_cmd = options.patch_cmd
result = Result()
# Start from empty output directories so stale artifacts from a previous
# run cannot leak into this run's report.
if output_dir and os.path.isdir(output_dir):
  clean_folder(output_dir)
if copy_output_dir and os.path.isdir(copy_output_dir):
  clean_folder(copy_output_dir)
# Default exit handler in case that we do not want to submit results to JIRA:
# dump the summary to stdout (and to jira-comment.txt for inspection).
def log_and_exit():
  # Write down comment generated for jira (won't be posted)
  write_file("%s/jira-comment.txt" % output_dir, jira_generate_comment(result, branch).replace("\\n", "\n"))
  if result._fatal:
    print "FATAL: %s" % (result._fatal)
  for error in result._error:
    print "ERROR: %s" % (error)
  for info in result._info:
    print "INFO: %s" % (info)
  for success in result._success:
    print "SUCCESS: %s" % (success)
  result.exit()
result.exit_handler = log_and_exit
# With --post-results the summary goes to the JIRA issue instead of stdout.
if post_results:
  def post_jira_comment_and_exit():
    jira_post_comment(result, defect, branch, username, password)
    result.exit()
  result.exit_handler = post_jira_comment_and_exit
# Fall back to a fresh temp dir when no usable --output was given, and
# normalize/create the output directory.
if not output_dir:
  tmp_dir = tempfile.mkdtemp()
  output_dir = tmp_dir
if output_dir.endswith("/"):
  output_dir = output_dir[:-1]
if output_dir and not os.path.isdir(output_dir):
  os.makedirs(output_dir)
def get_latest_patch():
global jira_json, json, versions, branch, attachment, patch_contents, patch_file, fh
print "Defect: %s" % defect
jira_json = jira_get_defect(result, defect, username, password)
json = json.loads(jira_json)
# JIRA must be in Patch Available state
if '"Patch Available"' not in jira_json:
print "ERROR: Defect %s not in patch available state" % (defect)
sys.exit(1)
# If branch is not specified, let's try to guess it from JIRA details
if not branch:
versions = json_get_version(json)
branch = kafka_guess_branch(versions)
if not branch:
print "ERROR: Can't guess branch name from %s" % (versions)
sys.exit(1)
else:
print "INFO: Guessed branch as %s" % (branch)
attachment = jira_get_attachment(result, defect, username, password)
if not attachment:
print "ERROR: No attachments found for %s" % (defect)
sys.exit(1)
result.attachment = attachment
patch_contents = jira_request(result, result.attachment, username, password, None, {}).read()
patch_file = "%s/%s.patch" % (output_dir, defect)
with open(patch_file, 'a') as fh:
fh.write(patch_contents)
if defect:
  # If defect parameter is specified let's download the latest attachment
  get_latest_patch()
  if options.get_latest_patch:
    # --get-latest-patch mode: stop right after saving the attachment.
    print "Saving latest attachment of %s as %s/%s.patch" % (defect, output_dir, defect)
    sys.exit(0)
elif options.filename:
  patch_file = options.filename
else:
  raise Exception("Not reachable")
# Verify that we are on supported branch
if not kafka_verify_branch(branch):
  print "ERROR: Unsupported branch %s" % (branch)
  sys.exit(1)
# Build pipeline: bootstrap+clean on the pristine tree, check out and apply
# the patch, then rebuild, run style checks and (optionally) the tests.
# Any fatal step reports through result.exit_handler and terminates.
gradle_bootstrap(result, output_dir)
gradle_clean(result, output_dir)
git_checkout(result, branch)
git_apply(result, patch_cmd, patch_file, strip, output_dir)
static_test(result, patch_file, output_dir)
gradle_bootstrap(result, output_dir)
gradle_install(result, output_dir)
checkstyleMain(result, output_dir)
checkstyleTest(result, output_dir)
if run_tests:
  gradle_test(result, output_dir)
else:
  result.info("patch applied and built but tests did not execute")
# Invoke whichever exit handler was configured (stdout log or JIRA post).
result.exit_handler()
| apache-2.0 |
chiviak/headphones | lib/requests/packages/chardet/langcyrillicmodel.py | 2762 | 17725 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# KOI8-R language model
# Character Mapping Table:
KOI8R_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90
223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0
27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0
15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0
59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0
35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0
)
win1251_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
)
latin5_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
macCyrillic_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,
)
IBM855_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,
206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,
3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219,
220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229,
230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,
8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248,
43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,
250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,
)
IBM866_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 97.6601%
# next 512 sequences (513-1024): 2.3389%
# rest sequences:  0.1237%
# negative sequences: 0.0009%
RussianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,
1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,
1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,
2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,
1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,
3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,
1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,
2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,
1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,
1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,
1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,
1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,
3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,
1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,
2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,
1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,
2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,
1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,
1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,
1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,
3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,
3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,
1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,
1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,
0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,
1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,
1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,
0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,
1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,
2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,
1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,
1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,
2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,
1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,
1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,
1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,
0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,
0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,
0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
)
# One detection model per supported Cyrillic encoding.  Each pairs a
# byte -> frequency-order map with the shared Russian bigram matrix;
# mTypicalPositiveRatio is the coverage of the first 512 orders (see the
# statistics comment above RussianLangModel).
Koi8rModel = {
  'charToOrderMap': KOI8R_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "KOI8-R"
}

Win1251CyrillicModel = {
  'charToOrderMap': win1251_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "windows-1251"
}

Latin5CyrillicModel = {
  'charToOrderMap': latin5_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "ISO-8859-5"
}

# FIX: dropped the stray trailing ";" the original had after this dict --
# legal Python, but inconsistent with every sibling definition.
MacCyrillicModel = {
  'charToOrderMap': macCyrillic_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "MacCyrillic"
}

Ibm866Model = {
  'charToOrderMap': IBM866_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "IBM866"
}

Ibm855Model = {
  'charToOrderMap': IBM855_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "IBM855"
}
# flake8: noqa
| gpl-3.0 |
halberom/ansible | lib/ansible/modules/files/template.py | 20 | 4250 | # this is a virtual module that is entirely implemented server side
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: template
version_added: historical
short_description: Templates a file out to a remote server.
description:
- Templates are processed by the Jinja2 templating language
(U(http://jinja.pocoo.org/docs/)) - documentation on the template
formatting can be found in the Template Designer Documentation
(U(http://jinja.pocoo.org/docs/templates/)).
- "Six additional variables can be used in templates:
C(ansible_managed) (configurable via the C(defaults) section of C(ansible.cfg)) contains a string which can be used to
describe the template name, host, modification time of the template file and the owner uid.
C(template_host) contains the node name of the template's machine.
C(template_uid) the numeric user id of the owner.
C(template_path) the path of the template.
C(template_fullpath) is the absolute path of the template.
C(template_run_date) is the date that the template was rendered."
options:
src:
description:
- Path of a Jinja2 formatted template on the Ansible controller. This can be a relative or absolute path.
required: true
dest:
description:
- Location to render the template to on the remote machine.
required: true
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
required: false
choices: [ "yes", "no" ]
default: "no"
force:
description:
- the default is C(yes), which will replace the remote file when contents
are different than the source. If C(no), the file will only be transferred
if the destination does not exist.
required: false
choices: [ "yes", "no" ]
default: "yes"
notes:
- Including a string that uses a date in the template will result in the template being marked 'changed' each time
- "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)."
- "Also, you can override jinja2 settings by adding a special header to template file.
i.e. C(#jinja2:variable_start_string:'[%' , variable_end_string:'%]', trim_blocks: False)
which changes the variable interpolation markers to [% var %] instead of {{ var }}.
This is the best way to prevent evaluation of things that look like, but should not be Jinja2.
raw/endraw in Jinja2 will not work as you expect because templates in Ansible are recursively evaluated."
author:
- Ansible Core Team
- Michael DeHaan
extends_documentation_fragment:
- files
- validate
'''
EXAMPLES = '''
# Example from Ansible Playbooks
- template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: 0644
# The same example, but using symbolic modes equivalent to 0644
- template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: "u=rw,g=r,o=r"
# Copy a new "sudoers" file into place, after passing validation with visudo
- template:
src: /mine/sudoers
dest: /etc/sudoers
validate: 'visudo -cf %s'
# Update sshd configuration safely, avoid locking yourself out
- template:
src: etc/ssh/sshd_config.j2
dest: /etc/ssh/sshd_config
owner: root
group: root
mode: '0600'
validate: /usr/sbin/sshd -t -f %s
backup: yes
'''
| gpl-3.0 |
ptitjes/quodlibet | gdist/search_provider.py | 4 | 1904 | # Copyright 2013-2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
from .util import Command
class install_search_provider(Command):
    """distutils command that installs the GNOME Shell search-provider
    definition file into <install_data>/share/gnome-shell/search-providers.
    """

    user_options = []

    def initialize_options(self):
        # Filled in by finalize_options() / the 'install' command.
        self.install_dir = None
        self.search_provider = None
        self.outfiles = []

    def finalize_options(self):
        # Inherit the data-install root from the top-level 'install' command.
        self.set_undefined_options(
            'install', ('install_data', 'install_dir'))
        self.search_provider = self.distribution.search_provider

    def get_outputs(self):
        # Paths created by run(); used by e.g. the 'install --record' option.
        return self.outfiles

    def run(self):
        target_dir = os.path.join(
            self.install_dir, 'share', 'gnome-shell', 'search-providers')
        # mkpath() may return the list of directories it created (or None
        # when nothing new was made); record whatever was created.
        created = self.mkpath(target_dir)
        if created:
            self.outfiles.extend(created)
        copied, _ = self.copy_file(self.search_provider, target_dir)
        self.outfiles.append(copied)
| gpl-2.0 |
chemelnucfin/tensorflow | tensorflow/python/kernel_tests/large_concat_op_test.py | 38 | 1690 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Concat Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class LargeConcatOpTest(test.TestCase):
  """Tests that belong in concat_op_test.py, but run over large tensors."""

  def testConcatLargeTensors(self):
    # CPU-only test, because it fails on GPUs with <= 4GB memory.
    with ops.device("/cpu:0"):
      ones_part = array_ops.ones([2**31 + 6], dtype=dtypes.int8)
      zeros_part = array_ops.zeros([1024], dtype=dtypes.int8)
      onezeros = array_ops.concat([ones_part, zeros_part], 0)
    with self.session(use_gpu=False):
      # TODO(dga): Add more depth to this test to validate correctness,
      # not just non-crashingness, once other large tensor fixes have gone in.
      _ = self.evaluate(onezeros)
if __name__ == "__main__":
test.main()
| apache-2.0 |
jiangzhonghui/viewfinder | backend/www/json_schema.py | 13 | 81009 | # Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""JSON Schemas for request/response pairs used with Viewfinder ops.
All times are expressed in seconds (possibly subsecond floating-point
precision) since the epoch (January 1st, 1970) in UTC.
Every mutating request may be run synchronously by specifying
'synchronous' = True in the request header.
"""
__authors__ = ['spencer@emailscrubbed.com (Spencer Kimball)',
'andy@emailscrubbed.com (Andy Kimball)']
from copy import deepcopy
from viewfinder.backend.db.follower import Follower
from viewfinder.backend.db.contact import Contact
from viewfinder.backend.db.settings import AccountSettings
from viewfinder.backend.db.viewpoint import Viewpoint
##
# HELPER METHODS
##
def _MakeOptional(property_dict, test_key):
"""Iterates through all key/value pairs in the property dictionary. If the "test_key" function
returns True for a particular property key, then makes that property optional. Returns the
updated property dict.
"""
for key, value in property_dict.items():
if test_key(key):
property_dict[key]['required'] = False
return property_dict
def _CopyProperties(target_dict, source_dict):
"""Deep copies properties in source_dict['properties'] to target_dict['properties']. Asserts
if a property of the same name already exists in source_dict['properties'], but has a
different value.
"""
for key, value in source_dict['properties'].items():
assert key not in target_dict['properties'] or target_dict['properties'][key] == value, (source_dict, target_dict)
target_dict['properties'][key] = deepcopy(value)
##
# COMMON DATA STRUCTURES
##
# Headers object that must be at the top-level of every request or response message.
HEADERS = {
'description': 'contains headers used for read-only methods',
'type': 'object',
'properties': {
'version': {
'description': 'version of the message format; this is not necessarily equal '
'to the max version supported by the sender; for example, the server will '
'respond in an older dialect to older clients',
'type': 'integer',
},
'min_required_version': {
'description': 'this field requires that the recipient be able to understand '
'messages of this version or greater; if not, the recipient may report an '
'error (server), or it may try again after upgrade to a later version (client)',
'type': 'integer',
'required': False,
},
'synchronous': {
'description': 'used in tests to wait until a requested operation completes',
'type': 'boolean',
'required': False,
},
},
}
OP_HEADERS = {
'description': 'contains headers used for mutable methods',
'type': 'object',
'properties': {
'op_id': {
'description': 'id of the requested operation; the id is a composite of '
'the device id and a unique operation id generated by that device; the '
'client may provide the op_id, or if it does not, the server will '
'generate one',
'type': 'string',
},
'op_timestamp': {
'description': 'timestamp of this requested operation; the client may '
'provide the op_timestamp, or if it does not, the server will generate one',
'type': 'number',
},
},
}
_CopyProperties(target_dict=OP_HEADERS, source_dict=HEADERS)
# Location in degrees of latitude and longitude and accuracy in meters.
LOCATION = {
'description': 'location in degrees of latitude & longitude and accuracy in meters',
'type': 'object',
'properties': {
'latitude': {'type': 'number'},
'longitude': {'type': 'number'},
'accuracy': {'type': 'number'},
},
}
OPTIONAL_LOCATION = deepcopy(LOCATION)
OPTIONAL_LOCATION['required'] = False
# Hierarchical place names from country to street level.
PLACEMARK = {
'description': 'placemark identifies a place by name from country to street level',
'type': 'object',
'properties': {
'iso_country_code': {'type': 'string', 'blank': True, 'required': False},
'country': {'type': 'string', 'blank': True, 'required': False},
'state': {'type': 'string', 'blank': True, 'required': False},
'locality': {'type': 'string', 'blank': True, 'required': False},
'sublocality': {'type': 'string', 'blank': True, 'required': False},
'thoroughfare': {'type': 'string', 'blank': True, 'required': False},
'subthoroughfare': {'type': 'string', 'blank': True, 'required': False},
},
}
OPTIONAL_PLACEMARK = deepcopy(PLACEMARK)
OPTIONAL_PLACEMARK['required'] = False
# Select a set of assets, with some control over the scope of projection.
VIEWPOINT_SELECTION = {
'description': 'select a set of viewpoints by id; if "get_attributes" is '
'True or not specified, then return all attributes on the viewpoints; '
'"get_followers", "get_activities", "get_episodes", and "get_comments" '
'specify whether to return the corresponding collections associated with '
'the viewpoint; "start_key" fields enable paging of the collections',
'type': 'array',
'items': {
'type': 'object',
'properties': {
'viewpoint_id': {'type': 'string'},
'get_attributes': {'type': 'boolean', 'required': False},
'get_followers': {'type': 'boolean', 'required': False},
'follower_start_key': {'type': 'string', 'required': False},
'get_activities': {'type': 'boolean', 'required': False},
'activity_start_key': {'type': 'string', 'required': False},
'get_episodes': {'type': 'boolean', 'required': False},
'episode_start_key': {'type': 'string', 'required': False},
'get_comments': {'type': 'boolean', 'required': False},
'comment_start_key': {'type': 'string', 'required': False},
},
},
}
EPISODE_SELECTION = {
'description': 'select a set of episodes by id; if "get_attributes" is '
'True or not specified, then return all attributes on the episodes; if '
'"get_photos" is True or not specified, return photos in the episode, '
'starting with "photo_start_key" if it is specified',
'type': 'array',
'items': {
'type': 'object',
'properties': {
'episode_id': {'type': 'string'},
'get_attributes': {'type': 'boolean', 'required': False},
'get_photos': {'type': 'boolean', 'required': False},
'photo_start_key': {'type': 'string', 'required': False},
},
},
}
USER_SELECTION = {
'description': 'select set of users by id',
'type': 'array',
'items': {'type': 'number'},
}
CONTACT_SELECTION = {
'description': 'select set of contacts that have a sort_key greater '
'than start_key',
'type': 'object',
'properties': {
'all': {
'description': 'invalidate all contacts, forcing complete client reload',
'type': 'boolean', 'required': False,
},
'start_key': {'type': 'string'},
},
}
# Cover photo metadata.
COVER_PHOTO_METADATA = {
'description': 'describes cover photo for a shared viewpoint',
'type': 'object',
'required': False,
'properties': {
'episode_id': {
'description': 'episode_id of episode that contains cover photo',
'type': 'string'
},
'photo_id': {
'description': 'photo_id of cover photo',
'type': 'string'
},
},
}
# Activity metadata.
CREATE_ACTIVITY_METADATA = {
'description': 'activity metadata for creation',
'type': 'object',
'properties': {
'activity_id': {'type': 'string'},
'timestamp': {
'description': 'time that activity was created on the client',
'type': 'number',
},
},
}
ACTIVITY_POST_ARRAY = {
'description': 'array of (episode, photo_id) tuples used by share '
'and unshare activities',
'type': 'array',
'items': {
'type': 'object',
'properties': {
'episode_id': {'type': 'string'},
'photo_ids': {
'type': 'array',
'items': {'type': 'string'},
},
},
},
}
ACTIVITY_METADATA = {
'description': 'full activity metadata (includes create metadata)',
'type': 'object',
'properties': {
'viewpoint_id': {'type': 'string'},
'user_id': {
'description': 'id of user that caused the activity to be created',
'type': 'number',
},
'update_seq': {
'description': 'set to the value of the viewpoint\'s update_seq '
'attribute after it is incremented during the creation of the '
'activity',
'type': 'number',
},
'add_followers': {
'description': 'new followers added to this viewpoint',
'required': False,
'type': 'object',
'properties': {
'follower_ids': {
'description': 'user ids of new viewpoint followers',
'type': 'array',
'items': {'type': 'number'},
},
},
},
'merge_accounts': {
'description': 'user accounts merged; target user added to this viewpoint',
'required': False,
'type': 'object',
'properties': {
'target_user_id': {
'description': 'user that receives the assets to be merged; this user remains after '
'the merge is completed',
'type': 'number',
},
'source_user_id': {
'description': 'user that provides the assets to be merged; the account of this user '
'is terminated after the merge is completed',
'type': 'number',
},
},
},
'post_comment': {
'description': 'comment posted to this viewpoint',
'required': False,
'type': 'object',
'properties': {
'comment_id': {'type': 'string'},
},
},
'remove_followers': {
'description': 'followers removed from this viewpoint',
'required': False,
'type': 'object',
'properties': {
'follower_ids': {
'description': 'user ids of removed viewpoint followers',
'type': 'array',
'items': {'type': 'number'},
},
},
},
'save_photos': {
'description': 'photos saved to default viewpoint',
'required': False,
'type': 'object',
'properties': {
'episodes': ACTIVITY_POST_ARRAY,
},
},
'share_existing': {
'description': 'photos shared to an already existing viewpoint',
'required': False,
'type': 'object',
'properties': {
'episodes': ACTIVITY_POST_ARRAY,
},
},
'share_new': {
'description': 'photos shared to a newly created viewpoint',
'required': False,
'type': 'object',
'properties': {
'episodes': ACTIVITY_POST_ARRAY,
'follower_ids': {
'description': 'user ids of new viewpoint followers; excludes '
'the creating user id',
'type': 'array',
'items': {'type': 'number'},
},
},
},
'unshare': {
'description': 'photos unshared from this viewpoint',
'required': False,
'type': 'object',
'properties': {
'episodes': ACTIVITY_POST_ARRAY,
},
},
'update_episode': {
'description': 'episode metadata updated in this viewpoint',
'required': False,
'type': 'object',
'properties': {
'episode_id': {'type': 'string'},
},
},
'update_viewpoint': {
'description': 'viewpoint metadata updated',
'required': False,
'type': 'object',
'properties': {
'viewpoint_id': {'type': 'string'},
'prev_title': {'type': 'string', 'required': False},
'prev_cover_photo': COVER_PHOTO_METADATA,
},
},
'upload_episode': {
'description': 'photos uploaded to an episode in this viewpoint',
'required': False,
'type': 'object',
'properties': {
'episode_id': {'type': 'string'},
'photo_ids': {
'type': 'array',
'items': {'type': 'string'},
},
},
},
},
}
_CopyProperties(target_dict=ACTIVITY_METADATA, source_dict=CREATE_ACTIVITY_METADATA)
# Photo metadata.
PHOTO_URL_METADATA = {
'description': 'metadata for signed S3 URLs that reference photo image data',
'type': 'object',
'properties': {
'tn_get_url': {
'description': 'url for thumbnail resolution image file; '
'URL expires in 24 hours--only returned with photo query responses',
'type': 'string', 'required': False,
},
'med_get_url': {
'description': 'url for medium-screen resolution image file (max 480 pixels); '
'URL expires in 24 hours--only returned with photo query responses',
'type': 'string', 'required': False,
},
'full_get_url': {
'description': 'url for full-screen resolution image file (max 960 pixels); '
'URL expires in 24 hours--only returned with photo query responses',
'type': 'string', 'required': False,
},
'orig_get_url': {
'description': 'url for full-screen resolution image file; '
'URL expires in 24 hours--only returned with photo query responses',
'type': 'string', 'required': False,
},
},
}
USER_PHOTO_METADATA = {
'description': 'per-user photo metadata',
'type': 'object',
'properties': {
'photo_id': {'type': 'string'},
'asset_keys': {
'description': 'identifiers for copies of this photo in the user\'s devices\' native '
'asset library. This field is per-user, and its format is client-specific',
'type': 'array',
'required': False,
'items': {'type': 'string'},
},
},
}
UPDATE_PHOTO_METADATA = {
'description': 'photo metadata for updates',
'type': 'object',
'properties': {
'location': OPTIONAL_LOCATION,
'placemark': OPTIONAL_PLACEMARK,
'caption': {'type': 'string', 'required': False},
'link': {'type': 'string', 'required': False},
},
}
_CopyProperties(target_dict=UPDATE_PHOTO_METADATA, source_dict=USER_PHOTO_METADATA)
UPLOAD_PHOTO_METADATA = {
'description': 'photo metadata for upload (includes update metadata)',
'type': 'object',
'properties': {
'timestamp': {
'description': 'time that photo was created on the client',
'type': 'number',
},
'aspect_ratio': {
'description': 'floating point value: width / height',
'type': 'number',
},
'tn_md5': {
'description': 'thumbnail resolution md5 csum',
'type': 'string',
},
'med_md5': {
'description': 'medium resolution md5 csum (max 480 pixels)',
'type': 'string',
},
'full_md5': {
'description': 'full-screen resolution md5 csum (max 960 pixels)',
'type': 'string',
},
'orig_md5': {
'description': 'original resolution md5 csum',
'type': 'string',
},
'tn_size': {
'description': 'thumbnail resolution size in bytes',
'type': 'integer', 'required': False,
},
'med_size': {
'description': 'medium resolution size in bytes (max 480 pixels)',
'type': 'integer', 'required': False,
},
'full_size': {
'description': 'full-screen resolution size in bytes (max 960 pixels)',
'type': 'integer', 'required': False,
},
'orig_size': {
'description': 'original resolution size in bytes',
'type': 'integer', 'required': False,
},
'content_type': {
'description': 'image file content type (e.g. image/jpeg)',
'type': 'string', 'required': False,
},
'parent_id': {
'description': 'if specified, this photo was derived from another',
'type': 'string', 'required': False,
},
},
}
_CopyProperties(target_dict=UPLOAD_PHOTO_METADATA, source_dict=UPDATE_PHOTO_METADATA)
PHOTO_METADATA = {
'description': 'full photo metadata (includes upload metadata)',
'type': 'object',
'properties': {
'user_id': {
'description': 'id of user that created the photo',
'type': 'number'
},
'episode_id': {
'description': 'episode in which the photo was originally uploaded',
'type': 'string', 'required': False,
},
'labels': {
'description': 'set of boolean modifiers affecting the photo (e.g. "removed")',
'type': 'array', 'required': False, 'items': {'type': 'string'},
},
'sharing_user_id': {
'description': 'user who shared this photo (if applicable)',
'type': 'number', 'required': False,
},
},
}
_CopyProperties(target_dict=PHOTO_METADATA, source_dict=UPLOAD_PHOTO_METADATA)
_CopyProperties(target_dict=PHOTO_METADATA, source_dict=PHOTO_URL_METADATA)
# Older photos may be missing one or more MD5 attributes.
PHOTO_METADATA['properties']['tn_md5']['required'] = False
PHOTO_METADATA['properties']['med_md5']['required'] = False
PHOTO_METADATA['properties']['full_md5']['required'] = False
PHOTO_METADATA['properties']['orig_md5']['required'] = False
POST_PHOTO_METADATA = deepcopy(PHOTO_METADATA)
# Episode metadata.
UPDATE_EPISODE_METADATA = {
'description': 'episode metadata for updates',
'type': 'object',
'properties': {
'episode_id': {'type': 'string'},
'title': {'type': 'string', 'required': False},
'description': {'type': 'string', 'required': False},
'location': OPTIONAL_LOCATION,
'placemark': OPTIONAL_PLACEMARK,
},
}
UPLOAD_EPISODE_METADATA = {
'description': 'episode metadata for upload (includes update metadata)',
'type': 'object',
'properties': {
'timestamp': {
'description': 'timestamp of the newest photo in the episode',
'type': 'number',
},
},
}
_CopyProperties(target_dict=UPLOAD_EPISODE_METADATA, source_dict=UPDATE_EPISODE_METADATA)
EPISODE_METADATA = {
'description': 'full episode metadata (includes upload metadata)',
'type': 'object',
'properties': {
'user_id': {
'description': 'id of user that created the episode',
'type': 'number'
},
'viewpoint_id': {
'description': 'viewpoint to which the episode belongs',
'type': 'string',
},
'publish_timestamp': {
'description': 'time at which the episode was uploaded',
'type': 'number',
},
'sharing_user_id': {
'description': 'user who shared this episode (if applicable)',
'type': 'number', 'required': False,
},
'parent_ep_id': {
'description': 'id of the parent episode, if one exists',
'type': 'string', 'required': False,
},
},
}
_CopyProperties(target_dict=EPISODE_METADATA, source_dict=UPLOAD_EPISODE_METADATA)
# Follower metadata.
UPDATE_FOLLOWER_METADATA = {
'description': 'follower metadata for updates',
'type': 'object',
'properties': {
'viewpoint_id': {'type': 'string'},
'labels': {
'description': 'set of boolean permissions and modifiers affecting '
'the viewpoint (e.g. "personal")',
'type': 'array', 'required': False, 'uniqueItems': True,
'items': {'type': 'string', 'enum': Follower.ALL_LABELS},
},
'viewed_seq': {
'description': 'sequence number of last viewpoint update that the '
'client has viewed on any device; the client and server will always '
'"ratchet up" this value; they will ignore any value that is smaller '
'than a value already received',
'type': 'number', 'required': False,
},
},
}
FRIEND_FOLLOWER_METADATA = {
'description': 'follower metadata that is returned to other followers of same viewpoint when '
'they invoke query_viewpoints',
'type': 'object',
'properties': {
'follower_id': {'type': 'number'},
'labels': {
'description': 'set of boolean permissions and modifiers affecting the follower\'s '
'relationship to the viewpoint',
'type': 'array', 'required': False, 'uniqueItems': True,
'items': {'type': 'string', 'enum': [Follower.REMOVED, Follower.UNREVIVABLE]},
},
'adding_user_id': {
'description': 'user who added this follower to the viewpoint; for older viewpoints, '
'this may not be present; it also is not present for the user that created the viewpoint',
'type': 'number', 'required': False,
},
'follower_timestamp': {
'description': 'timestamp at which follower was added to the viewpoint; if not present, '
'assume follower was added more than 7 days ago',
'type': 'number', 'required': False,
},
},
}
# Viewpoint metadata.
UPDATE_VIEWPOINT_METADATA = {
'description': 'viewpoint metadata for updates',
'type': 'object',
'properties': {
'viewpoint_id': {'type': 'string'},
'title': {'type': 'string', 'required': False},
'description': {'type': 'string', 'required': False},
'name': {'type': 'string', 'required': False},
'cover_photo': COVER_PHOTO_METADATA,
},
}
CREATE_VIEWPOINT_METADATA = {
'description': 'viewpoint metadata for create (includes update metadata)',
'type': 'object',
'properties': {
'type': {
'description': 'kind of viewpoint (only allow event viewpoint to be created by users)',
'type': 'string', 'enum': [Viewpoint.EVENT],
},
},
}
_CopyProperties(target_dict=CREATE_VIEWPOINT_METADATA, source_dict=UPDATE_VIEWPOINT_METADATA)
VIEWPOINT_METADATA = {
'description': 'full viewpoint metadata (includes create metadata)',
'type': 'object',
'properties': {
'follower_id': {
'description': 'id of the calling user who follows this viewpoint',
'type': 'number',
},
'user_id': {
'description': 'id of user that created the viewpoint',
'type': 'number'
},
'timestamp': {
'description': 'timestamp at which viewpoint was created',
'type': 'number',
},
'update_seq': {
'description': 'sequence number of the last add, remove, or update of '
'any assets or metadata within the viewpoint; only updates to shared '
'assets increment this value (i.e. not changes to user-specific '
'tables like Follower or UserPost)',
'type': 'number', 'required': False,
},
'adding_user_id': {
'description': 'user who added this follower to the viewpoint (if applicable)',
'type': 'number', 'required': False,
},
'last_updated': {
'description': 'timestamp of the activity that was last added to '
'the viewpoint',
'type': 'number', 'required': False,
},
},
}
_CopyProperties(target_dict=VIEWPOINT_METADATA, source_dict=CREATE_VIEWPOINT_METADATA)
_CopyProperties(target_dict=VIEWPOINT_METADATA, source_dict=UPDATE_FOLLOWER_METADATA)
_CopyProperties(target_dict=VIEWPOINT_METADATA['properties']['cover_photo'], source_dict=PHOTO_URL_METADATA)
VIEWPOINT_METADATA['properties']['type']['enum'] = Viewpoint.TYPES
# Copying episodes between viewpoints.
COPY_EPISODES_METADATA = {
'description': 'array of episode copy information; each item specifies the existing episode '
'id, the new episode id, and the photo ids to include in the copied episode',
'type': 'array',
'items': {
'type': 'object',
'properties': {
'existing_episode_id': {
'description': 'id of the episode from which the copy originates; '
'this will be the parent_ep_id of the new episode',
'type': 'string',
},
'new_episode_id': {
'description': 'id of the new episode to create',
'type': 'string',
},
'photo_ids': {
'description': 'ids of photos to copy from the existing episode',
'type': 'array',
'items': {'type': 'string'},
},
},
},
}
OPTIONAL_COPY_EPISODES_METADATA = deepcopy(COPY_EPISODES_METADATA)
OPTIONAL_COPY_EPISODES_METADATA['required'] = False
# Device metadata.
DEVICE_METADATA = {
'description': 'full device metadata properties',
'type': 'object',
'properties': {
'device_id': {
'description': 'unique identifier of the device. Generated on the server.',
'type': 'number',
},
'name': {
'description': 'name of device',
'type': 'string', 'blank': True, 'required': False,
},
'version': {
'description': 'version of the Viewfinder mobile application',
'type': 'string', 'blank': True, 'required': False,
},
'platform': {
'description': 'mobile platform (e.g. iPhone 4S, Samsung Galaxy S)',
'type': 'string', 'blank': True, 'required': False,
},
'os': {
'description': 'mobile os (e.g. iOS 5.0.1, Android 4.0)',
'type': 'string', 'blank': True, 'required': False,
},
'push_token': {
'description': 'opaque token for push notifications',
'type': 'string', 'blank': True, 'required': False,
},
'device_uuid': {
'description': 'per-install unique device id. Generated on the device.',
'type': 'string', 'blank': True, 'required': False,
},
'language': {
'description': 'device language code',
'type': 'string', 'blank': True, 'required': False,
},
'country': {
'description': 'device country code',
'type': 'string', 'blank': True, 'required': False,
},
'test_udid': {
'description': 'unique device ID. Only sent by DEV and ADHOC builds. ID matches that found on testflight.',
'type': 'string', 'blank': True, 'required': False,
},
},
}
# Device id is optional when registering a device.
# (deepcopy so the relaxations below do not mutate the shared DEVICE_METADATA schema.)
REGISTER_DEVICE_METADATA = deepcopy(DEVICE_METADATA)
REGISTER_DEVICE_METADATA['required'] = False
REGISTER_DEVICE_METADATA['properties']['device_id']['required'] = False
# Information message sent as part of the ping response.
INFO_MESSAGE = {
  'description': 'an informative message to the client',
  'type': 'object',
  'properties': {
    'title': {'type': 'string'},
    'body': {'type': 'string', 'required': False},
    'link': {'type': 'string', 'required': False},
    'identifier': {
      'description': 'Unique identifier for this message. The client will not re-display a message with the '
                     'same identifier. However, a new identifier, then an old again will work (eg: A -> B -> A)',
      'type': 'string',
    },
    'severity': {
      'description': 'Severity level. One of "SILENT", "INFO", "ATTENTION", "DISABLE_NETWORK"',
      'type': 'string',
    },
  },
}
# A ping response may omit the message entirely, so the schema is optional.
PING_RESPONSE_MESSAGE = deepcopy(INFO_MESSAGE)
PING_RESPONSE_MESSAGE['required'] = False
# Comment metadata.
POST_COMMENT_METADATA = {
  'description': 'comment metadata used when posting a comment',
  'type': 'object',
  'properties': {
    'viewpoint_id': {'type': 'string'},
    'comment_id': {'type': 'string'},
    'asset_id': {
      'description': 'id of the viewpoint asset to which this comment is '
      'attached; this may be a photo, if the comment was about a photo; it '
      'may be another comment, if this comment was a direct response to '
      'that comment',
      'type': 'string',
      'required': False,
    },
    'timestamp': {
      'description': 'timestamp of the new comment; this timestamp MUST '
      'be the same across successive request attempts by the client in '
      'order to guarantee idempotency',
      'type': 'number',
    },
    'message': {
      'description': 'text of the comment',
      'type': 'string',
    },
  },
}
COMMENT_METADATA = {
  'description': 'full comment metadata (includes post metadata)',
  'type': 'object',
  'properties': {
    'user_id': {
      'description': 'id of user that caused the comment to be created',
      'type': 'number',
    },
  },
}
# The full metadata also carries every property accepted at posting time.
_CopyProperties(target_dict=COMMENT_METADATA, source_dict=POST_COMMENT_METADATA)
# Contact metadata.
UPLOAD_CONTACT_METADATA = {
  'description': '(name, given_name, family_name, rank, contact_source, identities) tuple',
  'type': 'object',
  'properties': {
    'contact_source': {
      'description': 'Source of contacts: ip (iPhone), or m (Manual)',
      'type': 'string',
      'enum': Contact.UPLOAD_SOURCES
    },
    'identities': {
      'description': 'Order of this list will be preserved by server for query_contacts responses',
      'type': 'array',
      'maxItems': 50,
      'items': {
        'type': 'object',
        'properties': {
          'identity': {'type': 'string', 'maxLength': 1000},
          'description': {'type': 'string', 'maxLength': 1000, 'required': False},
        },
      },
    },
    'name': {'type': 'string', 'maxLength': 1000, 'required': False},
    'given_name': {'type': 'string', 'maxLength': 1000, 'required': False},
    'family_name': {'type': 'string', 'maxLength': 1000, 'required': False},
    'rank': {'type': 'number', 'required': False},
  },
}
QUERY_CONTACT_METADATA = {
  'description': 'Metadata returned in query_contacts response',
  'type': 'object',
  'properties': {
    'contact_id': {'type': 'string'},
    'labels': {
      'description': 'set of boolean modifiers affecting the contact (e.g. "removed")',
      'type': 'array', 'required': False, 'items': {'type': 'string'},
    },
  },
}
_CopyProperties(target_dict=QUERY_CONTACT_METADATA, source_dict=UPLOAD_CONTACT_METADATA)
# The query form widens contact_source to every known source, adds resolved
# user ids to identities, and makes identities optional.
QUERY_CONTACT_METADATA['properties']['contact_source']['enum'] = Contact.ALL_SOURCES
QUERY_CONTACT_METADATA['properties']['identities']['items']['properties']['user_id'] = \
    {'type': 'number', 'required': False}
QUERY_CONTACT_METADATA['properties']['identities']['required'] = False
FOLLOWER_CONTACTS_METADATA = {
  'description': 'array of contacts to add as followers of a viewpoint',
  'type': 'array',
  'items': {
    'description': 'contacts: identity key and name if available. '
    'user_id is required if known.',
    'type': 'object',
    'properties': {
      'user_id': {'type': 'number', 'required': False},
      'identity': {'type': 'string', 'required': False},
      'name': {'type': 'string', 'required': False},
    },
  },
}
# Invalidate structure.
INVALIDATE = {
  'description': 'each notification can select parts of the asset tree to '
  'invalidate if the operation that triggered the notification modified '
  'the tree',
  'type': 'object',
  'properties': {
    'all': {
      'description': 'invalidate all assets, forcing complete client reload',
      'type': 'boolean',
    },
    'viewpoints': VIEWPOINT_SELECTION,
    'episodes': EPISODE_SELECTION,
    'users': USER_SELECTION,
    'contacts': CONTACT_SELECTION,
  },
}
# Usage information for a given category.
USAGE_CATEGORY_METADATA = {
  'description': 'usage information for a single category',
  'type': 'object',
  'required': False,
  'properties': {
    'num_photos': { 'type': 'number', 'required': False },
    'tn_size': { 'type': 'number', 'required': False },
    'med_size': { 'type': 'number', 'required': False },
    'full_size': { 'type': 'number', 'required': False },
    'orig_size': { 'type': 'number', 'required': False },
  },
}
# Usage information for a single user.
USAGE_METADATA = {
  'description': 'usage information by category',
  'type': 'object',
  'properties': {
    'owned_by': deepcopy(USAGE_CATEGORY_METADATA),
    'shared_by': deepcopy(USAGE_CATEGORY_METADATA),
    'visible_to': deepcopy(USAGE_CATEGORY_METADATA),
  },
}
# The optional variant is used in NOTIFICATION. Currently, the last notification in the response to
# QueryNotifications will have the usage information.
OPTIONAL_USAGE_METADATA = deepcopy(USAGE_METADATA)
OPTIONAL_USAGE_METADATA['required'] = False
# Notification structure.
NOTIFICATION = {
  'description': 'a union of notifications delivered to client asynchronously',
  'type': 'object',
  'properties': {
    'notification_id': {'type': 'number'},
    'name': {'type': 'string'},
    'sender_id': {'type': 'number'},
    'op_id': {
      'description': 'id of the operation that produced this notification; this attribute '
      'will be missing if no operation was involved',
      'type': 'string', 'required': False,
    },
    'timestamp': {'type': 'number'},
    'invalidate': deepcopy(INVALIDATE),
    'inline': {
      'description': 'some common invalidations are in-lined in the notification '
      'in order to avoid extra round-trips',
      'type': 'object', 'required': False,
      'properties': {
        'activity': deepcopy(ACTIVITY_METADATA),
        'viewpoint': {
          'description': 'if this notification updates the value of the update_seq '
          'and / or viewed_seq attributes, then in-line the changed value(s) in '
          'order to reduce round-trips',
          'type': 'object', 'required': False,
          'properties': {
            'viewpoint_id': {'type': 'string'},
            'update_seq': {
              'description': 'value of the viewpoint update_seq attribute after '
              'it was incremented by the operation; the client will "ratchet up" '
              'this value, discarding any that is smaller than a value already '
              'received',
              'type': 'number', 'required': False,
            },
            'viewed_seq': {
              'description': 'value of the follower viewed_seq attribute after '
              'it was incremented for the user who submitted the operation; '
              'the client will "ratchet up" this value, discarding any that '
              'is smaller than a value already received',
              'type': 'number', 'required': False,
            },
          },
        },
        'comment': deepcopy(COMMENT_METADATA),
        'user': {
          'description': 'user information', 'type': 'object', 'required': False,
          # FIX: 'usage' previously sat at the same level as 'type'/'required',
          # where it is not a schema property at all and was silently ignored;
          # nest it under 'properties' so the in-lined usage payload validates.
          # (OPTIONAL_USAGE_METADATA already carries 'required': False.)
          'properties': {
            'usage': deepcopy(OPTIONAL_USAGE_METADATA),
          },
        },
      },
    },
  },
}
# Every invalidation sub-section is optional, as is the invalidate section itself
# and the in-lined activity/comment payloads.
_MakeOptional(NOTIFICATION['properties']['invalidate']['properties'], lambda key: True)
NOTIFICATION['properties']['invalidate']['required'] = False
NOTIFICATION['properties']['inline']['properties']['activity']['required'] = False
NOTIFICATION['properties']['inline']['properties']['comment']['required'] = False
# Error response.
ERROR_RESPONSE = {
  'description': 'on an error, returns code and message for debugging client',
  'type': 'object',
  'properties': {
    'error': {
      'type': 'object',
      'required': False,
      'properties': {
        # NOTE(review): 'method' and 'id' presumably echo the failing request;
        # confirm against the handler that populates this structure.
        'method': {'type': 'string', 'required': False},
        'id': {'type': 'string', 'required': False},
        # 'blank' permits an empty message string.
        'message': {'type': 'string', 'blank': True},
      },
    },
  },
}
# Prospective user invitation.
PROSPECTIVE_USER_INVITATION = {
  'description': 'format of the prospective user invitation query parameter',
  'type': 'object',
  'properties': {
    'timestamp': {
      'description': 'timestamp at which the invitation was issued',
      'type': 'number'
    },
    'identity': {
      'description': 'identity to which this invitation was made; this may '
      'be different than the actual identity of the bearer, as in cases '
      'where the link was forwarded',
      'type': 'string'
    },
    'viewpoint_id': {
      'description': 'viewpoint to which this invitation grants access; the '
      'bearer should not have access to sensitive data in other viewpoints',
      'type': 'string'
    },
    'service_sig': {
      # FIX: removed duplicated word ("attempted attempted").
      'description': 'signature with service-wide secret that is used only to '
      'sign invitations; filters out attempted forgeries at a minimal '
      'cost in terms of server resources; the service_sig attribute is not '
      'included in the signature',
      'type': 'string'},
  }
}
# Subscription metadata.
SUBSCRIPTION_METADATA = {
  'description': 'information about a subscription',
  'type': 'object',
  'properties': {
    'transaction_id': {'type': 'string'},
    'subscription_id': {'type': 'string'},
    'timestamp': {'type': 'number'},
    'expiration_ts': {'type': 'number'},
    'product_type': {'type': 'string'},
    'quantity': {'type': 'number'},
    'payment_type': {'type': 'string'},
    'extra_info': {
      'description': 'additional data about the transaction; format depends on payment_type',
      'type': 'object',
      'required': False,
      # Free-form: any extra keys are accepted.
      'additionalProperties': {},
    },
  },
}
# Friend metadata.
FRIEND_METADATA = {
  'description': 'information stored for per user about friends of that user; only the user '
  'can view and update this information',
  'type': 'object',
  'required': False,
  'properties': {
    'user_id': {'type': 'number'},
    'nickname': {
      # null is allowed so a nickname can be explicitly cleared.
      'type': ['string', 'null'],
      'required': False,
    },
  },
}
# User account settings metadata.
UPDATE_ACCOUNT_SETTINGS_METADATA = {
  'description': 'options and choices affecting the user account that can be updated by the '
  'user; see header comment in the AccountSettings class for details on allowed settings',
  'type': 'object',
  'required': False,
  'properties': {
    'email_alerts': {'type': 'string', 'required': False, 'enum': AccountSettings.ALL_EMAIL_ALERTS},
    'sms_alerts': {'type': 'string', 'required': False, 'enum': AccountSettings.ALL_SMS_ALERTS},
    'push_alerts': {'type': 'string', 'required': False, 'enum': AccountSettings.ALL_PUSH_ALERTS},
    'storage_options': {
      'type': 'array', 'required': False, 'uniqueItems': True,
      'items': {'type': 'string', 'enum': AccountSettings.ALL_STORAGE_OPTIONS},
    },
  },
}
# At this time, all user account settings can be updated by the user.
# (Intentionally an alias, not a copy: the two schemas are identical today.)
ACCOUNT_SETTINGS_METADATA = UPDATE_ACCOUNT_SETTINGS_METADATA
# Identity metadata.
IDENTITY_METADATA = {
  # FIX: typo "list_identifies" -> "list_identities" (matches the
  # /service/list_identities method name).
  'description': 'Identity metadata. Returned in list_identities and query_users on self.',
  'type': 'object',
  'properties': {
    'identity': {
      'description': 'e.g. Email:spencer.kimball.gmail.com | Phone:6464174337 | FacebookGraph:62052443',
      'type': 'string',
    },
    'authority': {
      'description': 'e.g. Google | Facebook | Twitter | Viewfinder | <empty>',
      'type': 'string', 'required': False,
    },
  },
}
# User profile metadata.
UPDATE_USER_PROFILE_METADATA = {
  'description': 'public profile of a user; all properties can be updated by the user',
  'type': 'object',
  'properties': {
    'name': {
      'description': 'full name of the user; if any name part (name, given_name, or family_name) '
      'is given, then all parts are set -- any missing parts are set to None; this helps to '
      'avoid accidental divergence',
      'type': 'string',
      'required': False,
      # 'dependencies' ties the name parts together at validation time.
      'dependencies': 'given_name',
    },
    'given_name': {'type': 'string', 'required': False, 'dependencies': 'name'},
    'family_name': {'type': 'string', 'required': False, 'dependencies': 'name'},
    'picture': {
      'description': 'URL to avatar photo',
      'type': 'string',
      'required': False,
    },
  },
}
USER_PROFILE_METADATA = {
  'description': 'additional user profile properties that cannot be updated by the user, '
  'but are returned by query_users',
  'type': 'object',
  'properties': {
    'email': {'type': 'string', 'required': False},
    'labels': {
      'description': 'set of boolean modifiers affecting the user (e.g. "terminated")',
      'type': 'array', 'required': False, 'items': {'type': 'string'},
    },
    'merged_with': {'type': 'number', 'required': False},
    'private': {
      'description': 'additional fields that are only present when querying for the '
      'authenticated user',
      'type': 'object',
      'required': False,
      'properties': {
        'subscriptions': {
          'description': 'all active subscriptions for this user',
          'type': 'array',
          'items': SUBSCRIPTION_METADATA,
        },
        'account_settings': ACCOUNT_SETTINGS_METADATA,
        'no_password': {
          'description': 'if true, then this user has no password set',
          'type': 'boolean', 'required': False,
        },
        'user_identities': {
          'description': 'all identities for this user',
          'type': 'array',
          'items': IDENTITY_METADATA,
        },
      },
    },
  },
}
# The read-only profile also includes everything the user can update.
_CopyProperties(target_dict=USER_PROFILE_METADATA, source_dict=UPDATE_USER_PROFILE_METADATA)
# Don't require first name to be set on returned users, even if name is set.
del USER_PROFILE_METADATA['properties']['name']['dependencies']
# Add friend attributes.
_CopyProperties(target_dict=USER_PROFILE_METADATA, source_dict=FRIEND_METADATA)
# Confirmed identity.
CONFIRMED_IDENTITY = {
  'description': 'an identity is confirmed when it is paired with an access token, the '
  'possession of which proves control of the identity',
  'type': 'object',
  'properties': {
    'identity': {
      'description': 'identity to verify; make sure to use the full identity scheme '
      '(e.g. Email:foo@example.com, Phone:+16461234567)',
      'type': 'string',
    },
    'access_token': {
      'description': 'N-digit access code sent to email address or SMS phone number',
      'type': 'string',
    },
  },
}
##
# AUTH METHODS
##
# User cookies and identity access tokens.
#
# /link/facebook, /link/google, /link/viewfinder
# /login/facebook, /login/google, /login/viewfinder
# /register/facebook, /register/google, /register/viewfinder
# /login_reset/viewfinder
# /merge_token/viewfinder
AUTH_REQUEST = {
  'description': 'registers new users, logs in existing users, or links identities to existing '
  'users; connects to Facebook or Google to gather information about the user, including his/her '
  'contacts',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'use_session_cookie': {
      'description': 'if true, then the user cookie is set to expire when the user ends the '
      'session (e.g. by closing the browser)',
      'type': 'boolean',
      'required': False
    },
  },
}
AUTH_RESPONSE = {
  'description': 'returns user-id & device-id for registered user',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'user_id': {
      'description': 'id of the user that was registered',
      'type': 'number',
    },
    'device_id': {
      'description': 'id of the device that was registered; this will not be present if the '
      'device section was not present in the original request',
      'type': 'number', 'required': False,
    },
  },
}
# Used by /<auth>/facebook and /<auth>/google.
# Both auth variants may register a device inline with the auth request.
AUTH_FB_GOOGLE_REQUEST = deepcopy(AUTH_REQUEST)
AUTH_FB_GOOGLE_REQUEST['properties']['device'] = REGISTER_DEVICE_METADATA
# Used by /<auth>/viewfinder.
AUTH_VIEWFINDER_REQUEST = deepcopy(AUTH_REQUEST)
AUTH_VIEWFINDER_REQUEST['properties']['device'] = REGISTER_DEVICE_METADATA
AUTH_VIEWFINDER_REQUEST['properties']['auth_info'] = {
  'description': 'identity and optional user registration information used in Viewfinder auth '
  'requests; this is in addition to the inherited AUTH_REQUEST fields',
  'type': 'object',
  'properties': {
    'identity': {
      'description': 'identity to authenticate; make sure to use the full identity scheme '
      '(e.g. Email:foo@example.com, Phone:6464174337)',
      'type': 'string'
    },
  },
}
AUTH_VIEWFINDER_RESPONSE = {
  'description': 'returned by Viewfinder auth methods after an email/SMS message has been sent',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'token_digits': {
      'description': 'number of digits in the access token that was sent',
      'type': 'number',
    },
  },
}
# Used by /login/viewfinder.
LOGIN_VIEWFINDER_PROPERTIES = {
  'description': 'additional auth_info properties used only in user login',
  'type': 'object',
  'properties': {
    'password': {
      'description': 'user\'s password; if not specified then an authentication email/SMS '
      'will be sent; otherwise, the password will be validated and allow the auth process to '
      'be short-circuited',
      'type': 'string',
      'required': False,
    },
  },
}
# Login request = generic Viewfinder auth request + login-only auth_info fields.
LOGIN_VIEWFINDER_REQUEST = deepcopy(AUTH_VIEWFINDER_REQUEST)
_CopyProperties(target_dict=LOGIN_VIEWFINDER_REQUEST['properties']['auth_info'],
                source_dict=LOGIN_VIEWFINDER_PROPERTIES)
# Used by /merge_token/viewfinder
MERGE_TOKEN_REQUEST = {
  'description': 'sends an access token which proves control of an identity, and which will '
  'be passed to /service/merge_accounts',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'identity': {
      'description': 'e.g. Email:spencer.kimball.gmail.com | Phone:+16464174337',
      'type': 'string',
    },
    'error_if_linked': {
      'description': 'reports ALREADY_LINKED error if the identity is already linked to a user '
      'account',
      'type': 'boolean', 'required': False,
    },
  },
}
# Used by /register/viewfinder.
REGISTER_VIEWFINDER_PROPERTIES = {
  'description': 'additional auth_info properties used only in user registration',
  'type': 'object',
  'properties': {
    'password': {
      'description': 'user password to set as part of registration',
      'type': 'string',
      'required': False,
    },
    'name': {
      'description': 'full user name',
      'type': 'string',
      'dependencies': 'given_name',
    },
    'given_name': {
      'description': 'user\'s given name (i.e. first name)',
      'type': 'string',
      'required': False,
      'dependencies': 'name',
    },
    'family_name': {
      'description': 'user\'s family name (i.e. last name)',
      'type': 'string',
      'required': False,
      'dependencies': 'name',
    },
  },
}
# Registration request = generic Viewfinder auth request + registration-only fields.
REGISTER_VIEWFINDER_REQUEST = deepcopy(AUTH_VIEWFINDER_REQUEST)
_CopyProperties(target_dict=REGISTER_VIEWFINDER_REQUEST['properties']['auth_info'],
                source_dict=REGISTER_VIEWFINDER_PROPERTIES)
# Used by /verify/viewfinder.
# Verifies access code and complete the auth operation that was started by a call to /<auth>/viewfinder.
VERIFY_VIEWFINDER_REQUEST = deepcopy(AUTH_REQUEST)
_CopyProperties(target_dict=VERIFY_VIEWFINDER_REQUEST, source_dict=CONFIRMED_IDENTITY)
# Used by auth.html.
CONFIRM_PASSWORD_REQUEST = {
  'description': 'confirm user password before completing user registration',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'password': {
      'description': 'password which will be checked against the password that was supplied '
      'during user registration',
      'type': 'string',
    },
  },
}
CONFIRM_PASSWORD_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}
##
# SERVICE METHODS
##
# Add followers to an existing viewpoint.
#
# /service/add_followers
ADD_FOLLOWERS_REQUEST = {
  'description': 'add resolved contacts as followers of an existing '
  'viewpoint',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'activity': CREATE_ACTIVITY_METADATA,
    'viewpoint_id': {
      'description': 'id of the viewpoint to which to add followers',
      'type': 'string',
    },
    'contacts': FOLLOWER_CONTACTS_METADATA,
  },
}
ADD_FOLLOWERS_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}
# Allocate unique ids for use when uploading photos or creating episodes.
#
# /service/allocate_ids
ALLOCATE_IDS_REQUEST = {
  'description': 'allocate unique ids for the requesting user',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'asset_types': {
      'description': 'An array of single-character prefixes describing the asset ids to be '
      'generated. Assets can be of mixed type - for example, you may request an operation '
      'id and an activity id in a single request by passing the array ["o", "a"].',
      'type': 'array',
      'items': {
        'type' : 'string'
      }
    },
  },
}
ALLOCATE_IDS_RESPONSE = {
  'description': 'returns the first id in a pre-allocated block',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'asset_ids': {
      'description': 'An array of asset ids generated by the server. Ids are returned in the '
      'same order they appeared in the "asset_types" array of the request.',
      'type': 'array',
      'items': {
        'type' : 'string'
      }
    },
    'timestamp': {
      'description': 'The timestamp used by the server to generate ids which require a '
      'timestamp component.',
      'type': 'number',
    }
  },
}
# Build archive of a users photos/conversations/etc.
#
# /service/build_archive
BUILD_ARCHIVE_REQUEST = {
  'description': 'build archive of photos/comments/etc for requesting user',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    # Destination address for the archive notification/delivery.
    'email': {'type': 'string'},
  },
}
BUILD_ARCHIVE_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}
# Fetch calendar(s) for user.
#
# /service/get_calendar
GET_CALENDAR_REQUEST = {
  'description': 'fetch calendar(s) for user; specify "holidays" for locale-specific '
  'holidays. Will default to en_US if the user\'s locale is not known.',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'calendars': {
      'description': 'calendar IDs; specify none for locale-specific holiday calendar',
      'type': 'array',
      'items': {
        'type': 'object',
        'properties': {
          'calendar_id': {'type': 'string'},
          'year': {'type': 'number'},
        },
      },
    },
  },
}
GET_CALENDAR_RESPONSE = {
  'description': 'returns a list of events corresponding to each calendar and year',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'calendars': {
      'type': 'array',
      'items': {
        'type': 'object',
        'properties': {
          'calendar_id': {'type': 'string'},
          'year': {'type': 'number'},
          'events': {
            'description': 'calendar events by name and date; dates are unix time in UTC',
            'type': 'array',
            'items': {
              'type': 'object',
              'properties': {
                'name': {'type': 'string'},
                'dtstart': {'type': 'number'},
                'dtend': {'type': 'number'},
              },
            },
          },
        },
      },
    },
  },
}
# Hide photos from user's personal library and inbox view.
#
# /service/hide_photos
HIDE_PHOTOS_REQUEST = {
  'description': 'hide a list of posts by id from the user\'s personal '
  'library or inbox view by labeling them as hidden for that user',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'episodes': {
      'type': 'array',
      'items': {
        'type': 'object',
        'properties': {
          'episode_id': {
            'description': 'id of the episode containing photos to hide '
            'from the user\'s personal library or inbox view',
            'type': 'string',
          },
          'photo_ids': {
            'description': 'ids of photos to hide from the user\'s '
            'personal library or inbox view',
            'type': 'array',
            'items': {'type': 'string'},
          },
        },
      },
    },
  },
}
HIDE_PHOTOS_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}
# List all user identities.
#
# /service/list_identities
LIST_IDENTITIES_REQUEST = {
  'description': 'list all identities linked to this account',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}
LIST_IDENTITIES_RESPONSE = {
  'description': 'the list of all linked identities',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'identities': {
      'type': 'array',
      'items': IDENTITY_METADATA
    },
  },
}
# Merge one user account with another.
#
# /service/merge_accounts
MERGE_ACCOUNTS_REQUEST = {
  'description': 'merge assets from another user account or identity into the account of the '
  'current user',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'activity': CREATE_ACTIVITY_METADATA,
    'source_user_cookie': {
      'description': 'user cookie for the account from which to merge; this cookie can be '
      'obtained by calling the /login handler and getting the contents of the "user" HTTP '
      'cookie that it returns; this cookie must be confirmed, meaning that it cannot have '
      'been created from a password; either this field or the source_identity need to be '
      'specified',
      'type': 'string', 'required': False,
    },
    'source_identity': {
      'description': 'confirmed identity linked to the account from which to merge; it is also '
      'possible for the identity to not be linked to any account, in which case it is simply '
      'linked to the target account',
      'type': 'object', 'required': False,
      # Filled in below from CONFIRMED_IDENTITY.
      'properties': { },
    },
  },
}
_CopyProperties(target_dict=MERGE_ACCOUNTS_REQUEST['properties']['source_identity'],
                source_dict=CONFIRMED_IDENTITY)
MERGE_ACCOUNTS_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}
# Get a new client log upload URL from the server.
#
# /service/new_client_log_url
NEW_CLIENT_LOG_URL_REQUEST = {
  'description': 'fetches an S3 PUT url for writing client device log '
  'to server for debugging',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'timestamp': {'type': 'number'},
    'client_log_id': {
      'description': 'an arbitrary client identifier for the log; must be '
      'unique across all calls made by the device',
      'type': 'string'
    },
    'content_type': {
      'description': 'optionally specify an alternate content-type for the '
      'client log. By default, uses application/octet to support old clients '
      'which incorrectly specify this. TODO(spencer): change default to octet-stream.',
      'type': 'string',
      'required': False
    },
    'content_md5': {'type': 'string', 'required': False},
    'num_bytes': {'type': 'number', 'required': False},
  },
}
NEW_CLIENT_LOG_URL_RESPONSE = {
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'client_log_put_url': {
      'description': 'pre-authorized url for client log; '
      'URL expires in 24 hours',
      'type': 'string'
    },
  },
}
# Ping request. Unauthenticated request, periodically issued by the client.
# The response may contain an informative message (eg: new version available).
# Since the request does not require the user to be signed in, it is not handled by service.py
#
# /ping
PING_REQUEST = {
  'description': 'device ping',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'device': DEVICE_METADATA,
  },
}
PING_RESPONSE = {
  'description': 'ping response',
  'type': 'object',
  'properties': {
    # Optional (see PING_RESPONSE_MESSAGE above).
    'message': PING_RESPONSE_MESSAGE,
  },
}
# Add new comment to the viewpoint.
#
# /service/post_comment
POST_COMMENT_REQUEST = {
  'description': 'adds a new comment to the viewpoint, optionally attached to '
  'another asset in the same viewpoint (such as a photo or another comment)',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'activity': CREATE_ACTIVITY_METADATA,
  },
}
# The request also accepts all posting-time comment properties.
_CopyProperties(target_dict=POST_COMMENT_REQUEST, source_dict=POST_COMMENT_METADATA)
POST_COMMENT_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  }
}
# Fetch user contact list.
#
# /service/query_contacts
QUERY_CONTACTS_REQUEST = {
  'description': 'fetch (name, identity, rank) contact tuples; specify '
  'start_key to begin querying where a previous invocation left off',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'start_key': {'type': 'string', 'required': False},
    'limit': {'type': 'number', 'required': False},
  },
}
QUERY_CONTACTS_RESPONSE = {
  'description': 'returns a list of contacts',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'num_contacts': {'type': 'number'},
    'contacts': {
      'type': 'array',
      'items': QUERY_CONTACT_METADATA,
    },
    'last_key': {
      'description': 'the last fetched contact sort key; supply this value to '
      'the next invocation of query_contacts to continue scan',
      'type': 'string', 'required': False,
    },
  }
}
# Query photos (posts) from episodes.
#
# /service/query_episodes
QUERY_EPISODES_REQUEST = {
  'description': 'query photo metadata and associated post information '
  'from specified episodes',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'episodes': EPISODE_SELECTION,
    'photo_limit': {
      'description': 'maximum number of photos to query from each episode id',
      'type': 'number', 'required': False,
    },
  },
}
QUERY_EPISODES_RESPONSE = {
  'description': 'a list of photos from each requested episode. The photo metadata '
  'is augmented by associated post information',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'episodes': {
      'description': 'episode query responses',
      'type': 'array',
      'items': {
        'type': 'object',
        'properties': {
          'photos': {
            'description': 'post + photo metadata',
            'type': 'array', 'required': False,
            'items': POST_PHOTO_METADATA,
          },
          'last_key': {
            # FIX: implicit string concatenation was missing a space
            # ("supply withnext invocation").
            'description': 'the last-processed photo in the episode; supply with '
            'next invocation of QUERY_EPISODES to continue',
            'type': 'string', 'required': False,
          },
        },
      },
    },
  },
}
# Each episode item also carries the full episode metadata.
_CopyProperties(target_dict=QUERY_EPISODES_RESPONSE['properties']['episodes']['items'],
                source_dict=EPISODE_METADATA)
# If get_attributes=False, only episode_id will be returned; all other properties are optional.
_MakeOptional(QUERY_EPISODES_RESPONSE['properties']['episodes']['items']['properties'],
              lambda key: key != 'episode_id')
# Query for viewpoints that are followed by the calling user.
#
# /service/query_followed
QUERY_FOLLOWED_REQUEST = {
  'description': 'query metadata of viewpoints that are followed by calling user',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'start_key': {'type': 'string', 'required': False},
    'limit': {'type': 'number', 'required': False},
  },
}
QUERY_FOLLOWED_RESPONSE = {
  'description': 'a list of viewpoints that are followed by calling user',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'viewpoints': {
      'description': 'viewpoint query responses',
      'type': 'array',
      'items': VIEWPOINT_METADATA,
    },
    'last_key': {
      'description': 'the last-processed followed viewpoint; supply with '
      'next invocation of QUERY_FOLLOWED to continue; values can be sorted '
      'lexicographically',
      'type': 'string', 'required': False,
    },
  },
}
# Query notifications.
#
# /service/query_notifications
QUERY_NOTIFICATIONS_REQUEST = {
  'description': 'poll list of pending notifications for the user',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'scan_forward': {
      'description': 'if true or not specified, then notifications are '
      'queried in forward (ascending) order; otherwise, they are queried '
      'in reverse order',
      'type': 'boolean', 'required': False,
    },
    'start_key': {
      'description': 'clients should supply the last_key returned with the '
      'response to a prior invocation to get subsequent notifications',
      'type': 'string', 'required': False,
    },
    'limit': {
      'description': 'maximum number of notifications to return',
      'type': 'number', 'required': False,
    },
    'max_long_poll': {
      'description': 'maximum duration for long-polling requests (in seconds)',
      'type': 'number', 'required': False,
    },
  },
}
QUERY_NOTIFICATIONS_RESPONSE = {
  'description': 'Notifications pending for user',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'notifications': {
      'description': 'an array of notification objects',
      'type': 'array',
      'items': NOTIFICATION,
    },
    'last_key': {
      'description': 'the last-processed notification key; supply this value with the '
      'next invocation of QUERY_NOTIFICATIONS to continue; if not supplied, no '
      'notifications were available; values can be sorted lexicographically',
      'type': 'string', 'required': False
    },
    'retry_after': {
      'description': 'advisory request from the server to wait before issuing another background query_notifications.',
      'type': 'number', 'required': False
    },
  },
}
# Query user metadata by user id.
#
# /service/query_users
QUERY_USERS_REQUEST = {
  'description': 'query user metadata by user ids; only users which consider the caller a '
  'friend will provide profile info; in this case, a "friend" label is returned',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'user_ids': USER_SELECTION,
  },
}
QUERY_USERS_RESPONSE = {
  'description': 'user metadata for each valid, supplied user id; if a user id was supplied '
  'with the request, but not returned with the response, then the user does not exist',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'users': {
      'type': 'array',
      'items': USER_PROFILE_METADATA,
    },
  },
}
# Query episodes in specified viewpoints.
#
# /service/query_viewpoints
QUERY_VIEWPOINTS_REQUEST = {
  'description': 'query viewpoint and episode metadata from specified '
  'viewpoints',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'viewpoints': VIEWPOINT_SELECTION,
    'limit': {
      'description': 'maximum number of items to return in each episode, '
      'follower, or activity collection in the response',
      'type': 'number', 'required': False,
    },
  },
}
QUERY_VIEWPOINTS_RESPONSE = {
  'description': 'a list of episodes from each requested viewpoint. The episode metadata '
  'is augmented by associated member information',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'viewpoints': {
      'description': 'viewpoint query responses',
      'type': 'array',
      'items': {
        'type': 'object',
        'properties': {
          # FIX (applies to all *_last_key descriptions below): implicit string
          # concatenation was missing a space ("supply withnext invocation").
          'followers': {
            'description': 'ids of users following the viewpoint',
            'type': 'array', 'required': False,
            'items': FRIEND_FOLLOWER_METADATA,
          },
          'follower_last_key': {
            'description': 'the last-processed follower in the viewpoint; supply with '
            'next invocation of QUERY_VIEWPOINTS to continue',
            'type': 'string', 'required': False,
          },
          'activities': {
            'description': 'viewpoint activity metadata',
            'type': 'array', 'required': False,
            'items': ACTIVITY_METADATA,
          },
          'activity_last_key': {
            'description': 'the last-processed activity in the viewpoint; supply with '
            'next invocation of QUERY_VIEWPOINTS to continue',
            'type': 'string', 'required': False,
          },
          'episodes': {
            'description': 'episode + member metadata',
            'type': 'array', 'required': False,
            'items': EPISODE_METADATA,
          },
          'episode_last_key': {
            'description': 'the last-processed episode in the viewpoint; supply with '
            'next invocation of QUERY_VIEWPOINTS to continue',
            'type': 'string', 'required': False,
          },
          'comments': {
            'description': 'comment + member metadata',
            'type': 'array', 'required': False,
            'items': COMMENT_METADATA,
          },
          'comment_last_key': {
            'description': 'the last-processed comment in the viewpoint; supply with '
            'next invocation of QUERY_VIEWPOINTS to continue',
            'type': 'string', 'required': False,
          },
        },
      },
    },
  },
}
# Each viewpoint item also carries the full viewpoint metadata.
_CopyProperties(target_dict=QUERY_VIEWPOINTS_RESPONSE['properties']['viewpoints']['items'],
                source_dict=VIEWPOINT_METADATA)
# If get_attributes=False, only viewpoint_id will be returned; all other properties are optional.
_MakeOptional(QUERY_VIEWPOINTS_RESPONSE['properties']['viewpoints']['items']['properties'],
              lambda key: key != 'viewpoint_id')
# Records an external (iTunes in-app purchase) subscription.
#
# /service/record_subscription
RECORD_SUBSCRIPTION_REQUEST = {
  'description': 'records an external subscription',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'receipt_data': {
      # Raw receipt is decoded and verified server-side.
      'description': 'base64-encoded itunes receipt data',
      'type': 'string',
    },
  },
}

RECORD_SUBSCRIPTION_RESPONSE = {
  'description': 'returns information from the decoded subscription',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'subscription': SUBSCRIPTION_METADATA,
  },
}
# Remove contacts from user contacts list.
#
# /service/remove_contacts
# FIX: filled in the previously empty description and corrected the typo
# "though this API" -> "through this API".
REMOVE_CONTACTS_REQUEST = {
  'description': 'remove contacts from the user\'s contacts list',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'contacts': {
      'description': 'list of contact_ids. Only AddressBook and Manual contact_ids may be removed through this API',
      'type': 'array',
      'uniqueItems': True,
      'items': {'type': 'string'},
    },
  },
}

REMOVE_CONTACTS_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}
# Remove followers from an existing viewpoint.
#
# /service/remove_followers
# FIX: the remove_ids description previously concatenated to
# "if a follower does not not exist" (duplicated word).
REMOVE_FOLLOWERS_REQUEST = {
  'description': 'remove followers from an existing viewpoint',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'activity': CREATE_ACTIVITY_METADATA,
    'viewpoint_id': {
      'description': 'id of the viewpoint from which to remove followers',
      'type': 'string',
    },
    'remove_ids': {
      'description': 'ids of followers to remove from the viewpoint; if a follower does not '
                     'exist on the viewpoint, it is ignored',
      'type': 'array',
      'items': {'type': 'integer'},
    },
  },
}

REMOVE_FOLLOWERS_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}
# Remove photos from user's personal collection.
#
# /service/remove_photos
REMOVE_PHOTOS_REQUEST = {
  'description': 'remove a list of photos by id from the user\'s personal '
                 'collection by labeling them as removed for that user',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'episodes': {
      # Photos are addressed as (episode_id, photo_ids) pairs.
      'type': 'array',
      'items': {
        'type': 'object',
        'properties': {
          'episode_id': {
            'description': 'id of the episode containing photos to remove '
                           'from the user\'s personal collection',
            'type': 'string',
          },
          'photo_ids': {
            'description': 'ids of photos to remove from the user\'s '
                           'personal collection',
            'type': 'array',
            'items': {'type': 'string'},
          },
        },
      },
    },
  },
}

REMOVE_PHOTOS_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}
# Remove viewpoint from a user's inbox.
#
# /service/remove_viewpoint
REMOVE_VIEWPOINT_REQUEST = {
  'description': 'remove viewpoint from a user\'s inbox',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'viewpoint_id': {
      'description': 'id of viewpoint to be removed from user\'s inbox',
      'type': 'string',
    },
  },
}

REMOVE_VIEWPOINT_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}
# Resolve contacts to fill in missing information.
#
# Currently only supports retrieving metadata for email-based identities.
# Used to enable sharing to email addresses that are not currently in
# the user's contacts.
#
# /service/resolve_contacts
RESOLVE_CONTACTS_REQUEST = {
  'description': 'resolve identities to contact metadata; each input identity '
                 'should begin with "Email:"',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'identities': {
      'type': 'array',
      'items': {'type': 'string'},
    },
  },
}

RESOLVE_CONTACTS_RESPONSE = {
  'description': 'returns a list of resolved contacts in the same order as the '
                 'request; if the identity matched an existing user the user_id will be '
                 'filled in',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'contacts': {
      'description': ('list of resolved contacts, in the same '
                      'order as the request. Email identities for existing '
                      'users will have user_id and other fields filled in; '
                      'otherwise only the identity field will be present.'),
      'type': 'array',
      'items': {
        'description': 'partial user metadata',
        'type': 'object',
        'properties': {
          # All fields optional: only 'identity' is guaranteed present.
          'user_id': {'type': 'number', 'required': False},
          'identity': {'type': 'string', 'required': False},
          'name': {'type': 'string', 'required': False},
          'given_name': {'type': 'string', 'required': False},
          'family_name': {'type': 'string', 'required': False},
          'labels': {
            'description': ('set of boolean modifiers affecting the user (e.g. "registered"). '
                            'The "friend" label and any data requiring friend status will not be '
                            'returned by this method.'),
            'type': 'array', 'required': False, 'items': {'type': 'string'},
          },
        }
      }
    },
  }
}
# Save photos to default viewpoint.
#
# /service/save_photos
SAVE_PHOTOS_REQUEST = {
  'description': 'save photos from existing episodes to new episodes in the current user\'s '
                 'default viewpoint',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'activity': CREATE_ACTIVITY_METADATA,
    'viewpoint_ids': {
      'description': 'server saves all episodes contained within these viewpoints; this is '
                     'in addition to episodes that may be given in the "episodes" field; if an episode is '
                     'specified in the "episodes" field, it is assumed to be complete and is skipped when '
                     'scanning the viewpoint',
      'type': 'array',
      'required': False,
      'items': {'type': 'string'},
    },
    'episodes': OPTIONAL_COPY_EPISODES_METADATA,
  },
}

SAVE_PHOTOS_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}
# Share photos with the followers of an existing viewpoint.
#
# /service/share_existing
SHARE_EXISTING_REQUEST = {
  'description': 'share episodes with the followers of an existing '
                 'viewpoint',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'activity': CREATE_ACTIVITY_METADATA,
    'viewpoint_id': {'type': 'string'},
    'episodes': COPY_EPISODES_METADATA,
  },
}

SHARE_EXISTING_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}

# Share photos with contacts in a new viewpoint.
#
# /service/share_new
SHARE_NEW_REQUEST = {
  'description': 'share photos from existing episodes into a new viewpoint, '
                 'with the resolved contacts as followers',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'activity': CREATE_ACTIVITY_METADATA,
    'viewpoint': CREATE_VIEWPOINT_METADATA,
    'episodes': COPY_EPISODES_METADATA,
    'contacts': FOLLOWER_CONTACTS_METADATA,
  },
}

SHARE_NEW_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}
# Terminate user account.
#
# /service/terminate_account
TERMINATE_ACCOUNT_REQUEST = {
  'description': 'terminate a user account -- unlink all identities, mute '
                 'all alerts, disable all sharing',
  'type': 'object',
  'properties': {
    # No body beyond the standard operation headers.
    'headers': OP_HEADERS,
  },
}

TERMINATE_ACCOUNT_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}
# Unlink existing identity.
#
# /service/unlink_identity
UNLINK_IDENTITY_REQUEST = {
  'description': 'unlink an identity from an account; succeeds if the specified identity '
                 'is in fact linked and if it is not the last identity authenticated via trusted authority',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'identity': {
      'description': 'e.g. Email:spencer.kimball.gmail.com | Phone:6464174337 | FacebookGraph:62052443',
      'type': 'string',
    },
  },
}

UNLINK_IDENTITY_RESPONSE = {
  'description': 'empty response; on error, the standard error response',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}
# Unshare photos from a viewpoint.
#
# /service/unshare
UNSHARE_REQUEST = {
  'description': 'unshares photos from episodes in a viewpoint; also '
                 'recursively unshares from all derived episodes',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'activity': CREATE_ACTIVITY_METADATA,
    'viewpoint_id': {'type': 'string'},
    'episodes': {
      # Photos are addressed as (episode_id, photo_ids) pairs.
      'type': 'array',
      'items': {
        'type': 'object',
        'properties': {
          'episode_id': {
            'description': 'id of the episode containing photos to unshare '
                           'from the viewpoint',
            'type': 'string',
          },
          'photo_ids': {
            'description': 'ids of photos to unshare from the viewpoint',
            'type': 'array',
            'items': {'type': 'string'},
          },
        },
      },
    },
  },
}

UNSHARE_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}
# Updates device information including last_access time. Returns a
# newly encrypted user cookie. This is necessary in the case of a
# device that isn't used often and has failed multiple push
# notifications and had its push_token reset.
#
# /service/update_device
UPDATE_DEVICE_REQUEST = {
  'description': 'update device information',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'device_dict': DEVICE_METADATA,
  },
}

UPDATE_DEVICE_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  }
}
# Per-user photo metadata update.
#
# /service/update_user_photo
UPDATE_USER_PHOTO_REQUEST = {
  'description': 'updates the per-user metadata of an existing photo',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
  },
}
# NOTE(review): presumably _CopyProperties merges source_dict['properties']
# into target_dict['properties'] -- confirm against the helper's definition.
_CopyProperties(target_dict=UPDATE_USER_PHOTO_REQUEST, source_dict=USER_PHOTO_METADATA)

UPDATE_USER_PHOTO_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}

# Episode metadata update.
#
# /service/update_episode
UPDATE_EPISODE_REQUEST = {
  'description': 'updates the metadata of an existing episode',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'activity': CREATE_ACTIVITY_METADATA,
  },
}
_CopyProperties(target_dict=UPDATE_EPISODE_REQUEST, source_dict=UPDATE_EPISODE_METADATA)

UPDATE_EPISODE_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}
# Follower metadata update.
#
# /service/update_follower
UPDATE_FOLLOWER_REQUEST = {
  'description': 'updates the metadata of an existing follower',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'follower': UPDATE_FOLLOWER_METADATA,
  },
}

UPDATE_FOLLOWER_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}

# Friend metadata update.
#
# /service/update_friend
UPDATE_FRIEND_REQUEST = {
  'description': 'updates the metadata of a friend; updates only affect the view of the calling '
                 'user',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'friend': FRIEND_METADATA,
  },
}

UPDATE_FRIEND_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}
# Photo metadata update.
#
# /service/update_photo
UPDATE_PHOTO_REQUEST = {
  'description': 'updates the metadata of an existing photo',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'activity': CREATE_ACTIVITY_METADATA,
  },
}
_CopyProperties(target_dict=UPDATE_PHOTO_REQUEST, source_dict=UPDATE_PHOTO_METADATA)

UPDATE_PHOTO_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}

# User metadata update.
#
# /service/update_user
UPDATE_USER_REQUEST = {
  'description': 'updates the metadata of an existing user',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'account_settings': UPDATE_ACCOUNT_SETTINGS_METADATA,
    'password': {
      'description': 'new user password; this field can only be set if using a recently '
                     'confirmed user cookie, or if the old_password matches, or if no user password has '
                     'yet been set',
      'type': 'string', 'required': False,
    },
    'old_password': {
      'description': 'if this matches the old password, then a new password can be set '
                     'without needing a confirmed user cookie',
      'type': 'string', 'required': False,
    },
  },
}
_CopyProperties(target_dict=UPDATE_USER_REQUEST, source_dict=UPDATE_USER_PROFILE_METADATA)

UPDATE_USER_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}
# Viewpoint metadata update.
#
# /service/update_viewpoint
UPDATE_VIEWPOINT_REQUEST = {
  'description': 'updates the metadata of an existing viewpoint',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'activity': CREATE_ACTIVITY_METADATA,
  },
}
# Request accepts both viewpoint- and follower-level update fields.
_CopyProperties(target_dict=UPDATE_VIEWPOINT_REQUEST, source_dict=UPDATE_VIEWPOINT_METADATA)
_CopyProperties(target_dict=UPDATE_VIEWPOINT_REQUEST, source_dict=UPDATE_FOLLOWER_METADATA)

UPDATE_VIEWPOINT_RESPONSE = {
  'description': 'returns nothing',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
  },
}
# Upload user contact list.
#
# /service/upload_contacts
UPLOAD_CONTACTS_REQUEST = {
  'description': 'upload contact tuples',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'contacts': {
      'type': 'array',
      'maxItems': 50,
      'items': UPLOAD_CONTACT_METADATA,
    },
  },
}

# FIX: the response description previously read "returns nothing" even though
# the schema defines a contact_ids result field.
UPLOAD_CONTACTS_RESPONSE = {
  'description': 'returns server-computed ids for the uploaded contacts',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'contact_ids': {
      'description': 'list of server computed contact_ids generated from '
                     'the list of contacts in the upload_contacts request',
      'type': 'array',
      'items': {'type': 'string'},
    },
  },
}
# Upload photo and episode metadata to service.
#
# /service/upload_episode
UPLOAD_EPISODE_REQUEST = {
  'description': 'episode id, optional metadata, and list of photos',
  'type': 'object',
  'properties': {
    'headers': OP_HEADERS,
    'activity': CREATE_ACTIVITY_METADATA,
    'episode': UPLOAD_EPISODE_METADATA,
    'photos': {
      'description': 'list of photos in episode',
      'type': 'array',
      'items': UPLOAD_PHOTO_METADATA,
    },
  },
}

UPLOAD_EPISODE_RESPONSE = {
  'description': 'returns episode id and list of photo ids, one per metadata upload',
  'type': 'object',
  'properties': {
    'headers': HEADERS,
    'photos': {
      'description': 'photo info for each metadata upload',
      'type': 'array',
      'items': {
        'type': 'object',
        'properties': {
          'photo_id': {
            'description': 'server-assigned base64hex-encoded photo id',
            'type': 'string'
          },
          # One pre-signed upload URL per image resolution; all expire in 24h.
          'orig_put_url': {
            'description': 'pre-authorized url for original resolution image file upload; '
                           'URL expires in 24 hours',
            'type': 'string'
          },
          'full_put_url': {
            'description': 'pre-authorized url for full-screen resolution image file upload; '
                           'URL expires in 24 hours',
            'type': 'string'
          },
          'med_put_url': {
            'description': 'pre-authorized url for medium resolution image file upload; '
                           'URL expires in 24 hours',
            'type': 'string'
          },
          'tn_put_url': {
            'description': 'pre-authorized url for thumbnail resolution image file upload; '
                           'URL expires in 24 hours',
            'type': 'string'
          },
        },
      },
    },
  },
}
| apache-2.0 |
pchaigno/grr | lib/data_stores/sqlite_data_store_test.py | 1 | 1333 | #!/usr/bin/env python
"""Tests the SQLite data store."""
import shutil
from grr.lib import access_control
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import data_store_test
from grr.lib import flags
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.data_stores import sqlite_data_store
# pylint: mode=test
class SqliteTestMixin(object):
  """Mixin that installs a fresh SQLite data store as the global store.

  Intended to be combined with a data-store test base class; relies on
  self.temp_dir being provided by that base class.
  """

  def InitDatastore(self):
    """Point data_store.DB at a new SQLite store under the test temp dir."""
    self.token = access_control.ACLToken(username="test",
                                         reason="Running tests")

    self.root_path = utils.SmartStr("%s/sqlite_test/" % self.temp_dir)
    config_lib.CONFIG.Set("Datastore.location", self.root_path)

    # Remove any leftovers from a previous run before creating the new store.
    self.DestroyDatastore()

    data_store.DB = sqlite_data_store.SqliteDataStore()
    data_store.DB.security_manager = test_lib.MockSecurityManager()

  def testCorrectDataStore(self):
    """The globally installed data store must be the SQLite implementation."""
    # FIX: assertIsInstance gives a clearer failure message than
    # assertTrue(isinstance(...)).
    self.assertIsInstance(data_store.DB, sqlite_data_store.SqliteDataStore)

  def DestroyDatastore(self):
    """Best-effort removal of the on-disk SQLite store directory."""
    try:
      if self.root_path:
        shutil.rmtree(self.root_path)
    except (OSError, IOError):
      # The directory may not exist yet; cleanup failures are non-fatal.
      pass
class SqliteDataStoreTest(SqliteTestMixin, data_store_test._DataStoreTest):
  """Test the sqlite data store."""


def main(args):
  # Delegate to the GRR test runner.
  test_lib.main(args)


if __name__ == "__main__":
  flags.StartMain(main)
| apache-2.0 |
solidgoldbomb/letsencrypt | letsencrypt/plugins/standalone/tests/authenticator_test.py | 8 | 27875 | """Tests for letsencrypt.plugins.standalone.authenticator."""
import os
import psutil
import signal
import socket
import unittest
import mock
import OpenSSL
from acme import challenges
from acme import jose
from letsencrypt import achallenges
from letsencrypt.tests import acme_util
from letsencrypt.tests import test_util
# Fixed RSA test keys: one for the ACME account, one for the challenge cert.
ACCOUNT_KEY = jose.JWKRSA.load(test_util.load_vector("rsa512_key.pem"))
CHALL_KEY_PEM = test_util.load_vector("rsa512_key_2.pem")
CHALL_KEY = OpenSSL.crypto.load_privatekey(
    OpenSSL.crypto.FILETYPE_PEM, CHALL_KEY_PEM)
# Minimal stand-in for the CLI config; only dvsni_port is read by these tests.
CONFIG = mock.Mock(dvsni_port=5001)
# Classes based on to allow interrupting infinite loop under test
# after one iteration, based on.
# http://igorsobreira.com/2013/03/17/testing-infinite-loops.html
class _SocketAcceptOnlyNTimes(object):
# pylint: disable=too-few-public-methods
"""
Callable that will raise `CallableExhausted`
exception after `limit` calls, modified to also return
a tuple simulating the return values of a socket.accept()
call
"""
def __init__(self, limit):
self.limit = limit
self.calls = 0
def __call__(self):
self.calls += 1
if self.calls > self.limit:
raise CallableExhausted
# Modified here for a single use as socket.accept()
return (mock.MagicMock(), "ignored")
class CallableExhausted(Exception):
    # pylint: disable=too-few-public-methods
    """Exception raised when a method is called more than the
    specified number of times."""
class ChallPrefTest(unittest.TestCase):
    """Tests for chall_pref() method."""

    def setUp(self):
        # Imported locally so collection does not fail if the plugin is broken.
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)

    def test_chall_pref(self):
        # The standalone plugin supports exactly the DVSNI challenge.
        self.assertEqual(self.authenticator.get_chall_pref("example.com"),
                         [challenges.DVSNI])
class SNICallbackTest(unittest.TestCase):
    """Tests for sni_callback() method."""

    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
        # gen_cert_and_response returns a pair; index [1] is the certificate.
        self.cert = achallenges.DVSNI(
            challb=acme_util.DVSNI_P,
            domain="example.com",
            account_key=ACCOUNT_KEY
        ).gen_cert_and_response(key_pem=CHALL_KEY_PEM)[1]
        self.authenticator.private_key = CHALL_KEY
        self.authenticator.sni_names = {"abcdef.acme.invalid": self.cert}
        self.authenticator.child_pid = 12345

    def test_real_servername(self):
        # A configured SNI name should cause exactly one set_context() call
        # with an OpenSSL context.
        connection = mock.MagicMock()
        connection.get_servername.return_value = "abcdef.acme.invalid"
        self.authenticator.sni_callback(connection)
        self.assertEqual(connection.set_context.call_count, 1)
        called_ctx = connection.set_context.call_args[0][0]
        self.assertTrue(isinstance(called_ctx, OpenSSL.SSL.Context))

    def test_fake_servername(self):
        """Test behavior of SNI callback when an unexpected name is received.

        (Currently the expected behavior in this case is to return the
        "first" certificate with which the listener was configured,
        although they are stored in an unordered data structure so
        this might not be the one that was first in the challenge list
        passed to the perform method. In the future, this might result
        in dropping the connection instead.)"""
        connection = mock.MagicMock()
        connection.get_servername.return_value = "example.com"
        self.authenticator.sni_callback(connection)
        self.assertEqual(connection.set_context.call_count, 1)
        called_ctx = connection.set_context.call_args[0][0]
        self.assertTrue(isinstance(called_ctx, OpenSSL.SSL.Context))
class ClientSignalHandlerTest(unittest.TestCase):
    """Tests for client_signal_handler() method."""

    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
        self.authenticator.tasks = {"footoken.acme.invalid": "stuff"}
        self.authenticator.child_pid = 12345

    def test_client_signal_handler(self):
        # Each signal from the child maps to a distinct subprocess state.
        self.assertTrue(self.authenticator.subproc_state is None)
        self.authenticator.client_signal_handler(signal.SIGIO, None)
        self.assertEqual(self.authenticator.subproc_state, "ready")

        self.authenticator.client_signal_handler(signal.SIGUSR1, None)
        self.assertEqual(self.authenticator.subproc_state, "inuse")

        self.authenticator.client_signal_handler(signal.SIGUSR2, None)
        self.assertEqual(self.authenticator.subproc_state, "cantbind")

        # Testing the unreached path for a signal other than these
        # specified (which can't occur in normal use because this
        # function is only set as a signal handler for the above three
        # signals).
        self.assertRaises(
            ValueError, self.authenticator.client_signal_handler,
            signal.SIGPIPE, None)
class SubprocSignalHandlerTest(unittest.TestCase):
    """Tests for subproc_signal_handler() method."""
    # FIX: replaced the deprecated assertEquals alias with assertEqual
    # throughout (assertEquals is removed in modern unittest).

    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
        self.authenticator.tasks = {"footoken.acme.invalid": "stuff"}
        self.authenticator.child_pid = 12345
        self.authenticator.parent_pid = 23456

    @mock.patch("letsencrypt.plugins.standalone.authenticator.os.kill")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.sys.exit")
    def test_subproc_signal_handler(self, mock_exit, mock_kill):
        # Normal shutdown: every connection object is closed once, the
        # parent is notified via SIGUSR1, and the child exits cleanly.
        self.authenticator.ssl_conn = mock.MagicMock()
        self.authenticator.connection = mock.MagicMock()
        self.authenticator.sock = mock.MagicMock()
        self.authenticator.subproc_signal_handler(signal.SIGINT, None)
        self.assertEqual(self.authenticator.ssl_conn.shutdown.call_count, 1)
        self.assertEqual(self.authenticator.ssl_conn.close.call_count, 1)
        self.assertEqual(self.authenticator.connection.close.call_count, 1)
        self.assertEqual(self.authenticator.sock.close.call_count, 1)
        mock_kill.assert_called_once_with(
            self.authenticator.parent_pid, signal.SIGUSR1)
        mock_exit.assert_called_once_with(0)

    @mock.patch("letsencrypt.plugins.standalone.authenticator.os.kill")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.sys.exit")
    def test_subproc_signal_handler_trouble(self, mock_exit, mock_kill):
        """Test attempting to shut down a non-existent connection.

        (This could occur because none was established or active at the
        time the signal handler tried to perform the cleanup)."""
        self.authenticator.ssl_conn = mock.MagicMock()
        self.authenticator.connection = mock.MagicMock()
        self.authenticator.sock = mock.MagicMock()
        # AttributeError simulates the case where one of these properties
        # is None because no connection exists. We raise it for
        # ssl_conn.close() instead of ssl_conn.shutdown() for better code
        # coverage.
        self.authenticator.ssl_conn.close.side_effect = AttributeError("!")
        self.authenticator.connection.close.side_effect = AttributeError("!")
        self.authenticator.sock.close.side_effect = AttributeError("!")
        self.authenticator.subproc_signal_handler(signal.SIGINT, None)
        self.assertEqual(self.authenticator.ssl_conn.shutdown.call_count, 1)
        self.assertEqual(self.authenticator.ssl_conn.close.call_count, 1)
        self.assertEqual(self.authenticator.connection.close.call_count, 1)
        self.assertEqual(self.authenticator.sock.close.call_count, 1)
        mock_kill.assert_called_once_with(
            self.authenticator.parent_pid, signal.SIGUSR1)
        mock_exit.assert_called_once_with(0)
class AlreadyListeningTest(unittest.TestCase):
    """Tests for already_listening() method."""

    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)

    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil."
                "net_connections")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil.Process")
    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_race_condition(self, mock_get_utility, mock_process, mock_net):
        # This tests a race condition, or permission problem, or OS
        # incompatibility in which, for some reason, no process name can be
        # found to match the identified listening PID.
        from psutil._common import sconn
        conns = [
            sconn(fd=-1, family=2, type=1, laddr=("0.0.0.0", 30),
                  raddr=(), status="LISTEN", pid=None),
            sconn(fd=3, family=2, type=1, laddr=("192.168.5.10", 32783),
                  raddr=("20.40.60.80", 22), status="ESTABLISHED", pid=1234),
            sconn(fd=-1, family=10, type=1, laddr=("::1", 54321),
                  raddr=("::1", 111), status="CLOSE_WAIT", pid=None),
            sconn(fd=3, family=2, type=1, laddr=("0.0.0.0", 17),
                  raddr=(), status="LISTEN", pid=4416)]
        mock_net.return_value = conns
        mock_process.side_effect = psutil.NoSuchProcess("No such PID")
        # We simulate being unable to find the process name of PID 4416,
        # which results in returning False.
        self.assertFalse(self.authenticator.already_listening(17))
        # NOTE(review): this asserts on an auto-created attribute of the
        # patched getUtility *function* (always 0); it likely intended
        # mock_get_utility.return_value.generic_notification -- confirm.
        self.assertEqual(mock_get_utility.generic_notification.call_count, 0)
        mock_process.assert_called_once_with(4416)

    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil."
                "net_connections")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil.Process")
    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_not_listening(self, mock_get_utility, mock_process, mock_net):
        # No connection in the fixture listens on port 17.
        from psutil._common import sconn
        conns = [
            sconn(fd=-1, family=2, type=1, laddr=("0.0.0.0", 30),
                  raddr=(), status="LISTEN", pid=None),
            sconn(fd=3, family=2, type=1, laddr=("192.168.5.10", 32783),
                  raddr=("20.40.60.80", 22), status="ESTABLISHED", pid=1234),
            sconn(fd=-1, family=10, type=1, laddr=("::1", 54321),
                  raddr=("::1", 111), status="CLOSE_WAIT", pid=None)]
        mock_net.return_value = conns
        mock_process.name.return_value = "inetd"
        self.assertFalse(self.authenticator.already_listening(17))
        self.assertEqual(mock_get_utility.generic_notification.call_count, 0)
        self.assertEqual(mock_process.call_count, 0)

    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil."
                "net_connections")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil.Process")
    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_listening_ipv4(self, mock_get_utility, mock_process, mock_net):
        # PID 4416 listens on the requested IPv4 port; the user is notified.
        from psutil._common import sconn
        conns = [
            sconn(fd=-1, family=2, type=1, laddr=("0.0.0.0", 30),
                  raddr=(), status="LISTEN", pid=None),
            sconn(fd=3, family=2, type=1, laddr=("192.168.5.10", 32783),
                  raddr=("20.40.60.80", 22), status="ESTABLISHED", pid=1234),
            sconn(fd=-1, family=10, type=1, laddr=("::1", 54321),
                  raddr=("::1", 111), status="CLOSE_WAIT", pid=None),
            sconn(fd=3, family=2, type=1, laddr=("0.0.0.0", 17),
                  raddr=(), status="LISTEN", pid=4416)]
        mock_net.return_value = conns
        mock_process.name.return_value = "inetd"
        result = self.authenticator.already_listening(17)
        self.assertTrue(result)
        self.assertEqual(mock_get_utility.call_count, 1)
        mock_process.assert_called_once_with(4416)

    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil."
                "net_connections")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil.Process")
    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_listening_ipv6(self, mock_get_utility, mock_process, mock_net):
        # PID 4420 listens on the requested port via IPv6 (family=10).
        from psutil._common import sconn
        conns = [
            sconn(fd=-1, family=2, type=1, laddr=("0.0.0.0", 30),
                  raddr=(), status="LISTEN", pid=None),
            sconn(fd=3, family=2, type=1, laddr=("192.168.5.10", 32783),
                  raddr=("20.40.60.80", 22), status="ESTABLISHED", pid=1234),
            sconn(fd=-1, family=10, type=1, laddr=("::1", 54321),
                  raddr=("::1", 111), status="CLOSE_WAIT", pid=None),
            sconn(fd=3, family=10, type=1, laddr=("::", 12345), raddr=(),
                  status="LISTEN", pid=4420),
            sconn(fd=3, family=2, type=1, laddr=("0.0.0.0", 17),
                  raddr=(), status="LISTEN", pid=4416)]
        mock_net.return_value = conns
        mock_process.name.return_value = "inetd"
        result = self.authenticator.already_listening(12345)
        self.assertTrue(result)
        self.assertEqual(mock_get_utility.call_count, 1)
        mock_process.assert_called_once_with(4420)
class PerformTest(unittest.TestCase):
    """Tests for perform() method."""
    # FIX: replaced dict.has_key() (deprecated since Python 2.2, removed in
    # Python 3) with the idiomatic assertIn membership check.

    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)

        self.achall1 = achallenges.DVSNI(
            challb=acme_util.chall_to_challb(
                challenges.DVSNI(token=b"foo"), "pending"),
            domain="foo.example.com", account_key=ACCOUNT_KEY)
        self.achall2 = achallenges.DVSNI(
            challb=acme_util.chall_to_challb(
                challenges.DVSNI(token=b"bar"), "pending"),
            domain="bar.example.com", account_key=ACCOUNT_KEY)
        # Deliberately not a DVSNI challenge: perform() must skip it.
        bad_achall = ("This", "Represents", "A Non-DVSNI", "Challenge")
        self.achalls = [self.achall1, self.achall2, bad_achall]

    def test_perform_when_already_listening(self):
        self.authenticator.already_listening = mock.Mock()
        self.authenticator.already_listening.return_value = True
        result = self.authenticator.perform([self.achall1])
        self.assertEqual(result, [None])

    def test_can_perform(self):
        """What happens if start_listener() returns True."""
        self.authenticator.start_listener = mock.Mock()
        self.authenticator.start_listener.return_value = True
        self.authenticator.already_listening = mock.Mock(return_value=False)
        result = self.authenticator.perform(self.achalls)
        self.assertEqual(len(self.authenticator.tasks), 2)
        self.assertIn(self.achall1.token, self.authenticator.tasks)
        self.assertIn(self.achall2.token, self.authenticator.tasks)
        self.assertTrue(isinstance(result, list))
        self.assertEqual(len(result), 3)
        self.assertTrue(isinstance(result[0], challenges.ChallengeResponse))
        self.assertTrue(isinstance(result[1], challenges.ChallengeResponse))
        # The non-DVSNI challenge yields a falsy placeholder.
        self.assertFalse(result[2])
        self.authenticator.start_listener.assert_called_once_with(
            CONFIG.dvsni_port)

    def test_cannot_perform(self):
        """What happens if start_listener() returns False."""
        self.authenticator.start_listener = mock.Mock()
        self.authenticator.start_listener.return_value = False
        self.authenticator.already_listening = mock.Mock(return_value=False)
        result = self.authenticator.perform(self.achalls)
        self.assertEqual(len(self.authenticator.tasks), 2)
        self.assertIn(self.achall1.token, self.authenticator.tasks)
        self.assertIn(self.achall2.token, self.authenticator.tasks)
        self.assertTrue(isinstance(result, list))
        self.assertEqual(len(result), 3)
        self.assertEqual(result, [None, None, False])
        self.authenticator.start_listener.assert_called_once_with(
            CONFIG.dvsni_port)

    def test_perform_with_pending_tasks(self):
        self.authenticator.tasks = {"footoken.acme.invalid": "cert_data"}
        extra_achall = acme_util.DVSNI_P
        self.assertRaises(
            ValueError, self.authenticator.perform, [extra_achall])

    def test_perform_without_challenge_list(self):
        extra_achall = acme_util.DVSNI_P
        # This is wrong because a challenge must be specified.
        self.assertRaises(ValueError, self.authenticator.perform, [])
        # This is wrong because it must be a list, not a bare challenge.
        self.assertRaises(
            ValueError, self.authenticator.perform, extra_achall)
        # This is wrong because the list must contain at least one challenge.
        self.assertRaises(
            ValueError, self.authenticator.perform, range(20))
class StartListenerTest(unittest.TestCase):
    """Tests for start_listener() method."""

    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)

    @mock.patch("letsencrypt.plugins.standalone.authenticator.os.fork")
    def test_start_listener_fork_parent(self, mock_fork):
        # fork() returning nonzero means we are in the parent process.
        self.authenticator.do_parent_process = mock.Mock()
        self.authenticator.do_parent_process.return_value = True
        mock_fork.return_value = 22222
        result = self.authenticator.start_listener(1717)
        # start_listener is expected to return the True or False return
        # value from do_parent_process.
        self.assertTrue(result)
        self.assertEqual(self.authenticator.child_pid, 22222)
        self.authenticator.do_parent_process.assert_called_once_with(1717)

    @mock.patch("letsencrypt.plugins.standalone.authenticator.os.fork")
    def test_start_listener_fork_child(self, mock_fork):
        # fork() returning 0 means we are in the child process.
        self.authenticator.do_parent_process = mock.Mock()
        self.authenticator.do_child_process = mock.Mock()
        mock_fork.return_value = 0
        self.authenticator.start_listener(1717)
        self.assertEqual(self.authenticator.child_pid, os.getpid())
        self.authenticator.do_child_process.assert_called_once_with(1717)
class DoParentProcessTest(unittest.TestCase):
    """Tests for do_parent_process() method.

    The child reports its status via `subproc_state`; each test seeds a
    different state and checks the parent's boolean result.
    """

    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)

    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_do_parent_process_ok(self, mock_get_utility):
        """A child that reported "ready" makes the parent return True."""
        self.authenticator.subproc_state = "ready"
        result = self.authenticator.do_parent_process(1717)
        self.assertTrue(result)
        # The zope UI utility is fetched exactly once.
        self.assertEqual(mock_get_utility.call_count, 1)

    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_do_parent_process_inuse(self, mock_get_utility):
        """A child that reported "inuse" makes the parent return False."""
        self.authenticator.subproc_state = "inuse"
        result = self.authenticator.do_parent_process(1717)
        self.assertFalse(result)
        self.assertEqual(mock_get_utility.call_count, 1)

    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_do_parent_process_cantbind(self, mock_get_utility):
        """A child that reported "cantbind" makes the parent return False."""
        self.authenticator.subproc_state = "cantbind"
        result = self.authenticator.do_parent_process(1717)
        self.assertFalse(result)
        self.assertEqual(mock_get_utility.call_count, 1)

    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_do_parent_process_timeout(self, mock_get_utility):
        # Normally times out in 5 seconds and returns False. We can
        # now set delay_amount to a lower value so that it times out
        # faster than it would under normal use.
        # (subproc_state is deliberately left unset so no status ever
        # arrives and the wait must actually time out.)
        result = self.authenticator.do_parent_process(1717, delay_amount=1)
        self.assertFalse(result)
        self.assertEqual(mock_get_utility.call_count, 1)
class DoChildProcessTest(unittest.TestCase):
    """Tests for do_child_process() method.

    The child is expected to bind a listening socket and report failures
    back to the parent process via Unix signals (SIGUSR1/SIGUSR2/SIGIO).
    """

    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
        # Build a real DVSNI certificate so the child has a task to serve.
        self.cert = achallenges.DVSNI(
            challb=acme_util.chall_to_challb(
                challenges.DVSNI(token=b"abcdef"), "pending"),
            domain="example.com", account_key=ACCOUNT_KEY).gen_cert_and_response(
                key_pem=CHALL_KEY_PEM)[1]
        self.authenticator.private_key = CHALL_KEY
        self.authenticator.tasks = {"abcdef.acme.invalid": self.cert}
        self.authenticator.parent_pid = 12345

    @mock.patch("letsencrypt.plugins.standalone.authenticator.socket.socket")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.os.kill")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.sys.exit")
    def test_do_child_process_cantbind1(
            self, mock_exit, mock_kill, mock_socket):
        """EACCES on bind(): signal the parent with SIGUSR2 and exit(1)."""
        mock_exit.side_effect = IndentationError("subprocess would exit here")
        eaccess = socket.error(socket.errno.EACCES, "Permission denied")
        sample_socket = mock.MagicMock()
        sample_socket.bind.side_effect = eaccess
        mock_socket.return_value = sample_socket
        # Using the IndentationError as an error that cannot easily be
        # generated at runtime, to indicate the behavior of sys.exit has
        # taken effect without actually causing the test process to exit.
        # (Just replacing it with a no-op causes logic errors because the
        # do_child_process code assumes that calling sys.exit() will
        # cause subsequent code not to be executed.)
        self.assertRaises(
            IndentationError, self.authenticator.do_child_process, 1717)
        mock_exit.assert_called_once_with(1)
        mock_kill.assert_called_once_with(12345, signal.SIGUSR2)

    @mock.patch("letsencrypt.plugins.standalone.authenticator.socket.socket")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.os.kill")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.sys.exit")
    def test_do_child_process_cantbind2(self, mock_exit, mock_kill,
                                        mock_socket):
        """EADDRINUSE on bind(): signal the parent with SIGUSR1 and exit(1)."""
        mock_exit.side_effect = IndentationError("subprocess would exit here")
        eaccess = socket.error(socket.errno.EADDRINUSE, "Port already in use")
        sample_socket = mock.MagicMock()
        sample_socket.bind.side_effect = eaccess
        mock_socket.return_value = sample_socket
        self.assertRaises(
            IndentationError, self.authenticator.do_child_process, 1717)
        mock_exit.assert_called_once_with(1)
        mock_kill.assert_called_once_with(12345, signal.SIGUSR1)

    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "socket.socket")
    def test_do_child_process_cantbind3(self, mock_socket):
        """Test case where attempt to bind socket results in an unhandled
        socket error. (The expected behavior is arguably wrong because it
        will crash the program; the reason for the expected behavior is
        that we don't have a way to report arbitrary socket errors.)"""
        eio = socket.error(socket.errno.EIO, "Imaginary unhandled error")
        sample_socket = mock.MagicMock()
        sample_socket.bind.side_effect = eio
        mock_socket.return_value = sample_socket
        self.assertRaises(
            socket.error, self.authenticator.do_child_process, 1717)

    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "OpenSSL.SSL.Connection")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.socket.socket")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.os.kill")
    def test_do_child_process_success(
            self, mock_kill, mock_socket, mock_connection):
        """Happy path: bind, listen(1), notify the parent with SIGIO and
        accept connections until the instrumented accept() breaks the loop."""
        sample_socket = mock.MagicMock()
        # Allow exactly two accept() calls, then raise CallableExhausted to
        # escape the child's otherwise infinite accept loop.
        sample_socket.accept.side_effect = _SocketAcceptOnlyNTimes(2)
        mock_socket.return_value = sample_socket
        mock_connection.return_value = mock.MagicMock()
        self.assertRaises(
            CallableExhausted, self.authenticator.do_child_process, 1717)
        mock_socket.assert_called_once_with()
        sample_socket.bind.assert_called_once_with(("0.0.0.0", 1717))
        sample_socket.listen.assert_called_once_with(1)
        # Two successful accepts plus the final raising call.
        self.assertEqual(sample_socket.accept.call_count, 3)
        mock_kill.assert_called_once_with(12345, signal.SIGIO)

    # TODO: We could have some tests about the fact that the listener
    #       asks OpenSSL to negotiate a TLS connection (and correctly
    #       sets the SNI callback function).
class CleanupTest(unittest.TestCase):
    """Tests for cleanup() method."""

    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
        self.achall = achallenges.DVSNI(
            challb=acme_util.chall_to_challb(
                challenges.DVSNI(token=b"footoken"), "pending"),
            domain="foo.example.com", account_key="key")
        # Pretend the challenge was performed and a child listener with a
        # known pid is running.
        self.authenticator.tasks = {self.achall.token: "stuff"}
        self.authenticator.child_pid = 12345

    @mock.patch("letsencrypt.plugins.standalone.authenticator.os.kill")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.time.sleep")
    def test_cleanup(self, mock_sleep, mock_kill):
        """cleanup() must stop the child listener with SIGINT and sleep(1)
        to give it time to shut down."""
        mock_sleep.return_value = None
        mock_kill.return_value = None
        self.authenticator.cleanup([self.achall])
        mock_kill.assert_called_once_with(12345, signal.SIGINT)
        mock_sleep.assert_called_once_with(1)

    def test_bad_cleanup(self):
        """cleanup() must reject a challenge that was never performed."""
        self.assertRaises(
            ValueError, self.authenticator.cleanup, [achallenges.DVSNI(
                challb=acme_util.chall_to_challb(
                    challenges.DVSNI(token=b"badtoken"), "pending"),
                domain="bad.example.com", account_key="key")])
class MoreInfoTest(unittest.TestCase):
    """Tests for more_info() method.

    Trivial smoke test: only checks that the call does not raise.
    """

    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import (
            StandaloneAuthenticator)
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)

    def test_more_info(self):
        """Make sure exceptions aren't raised."""
        self.authenticator.more_info()
class InitTest(unittest.TestCase):
    """Tests for the prepare() method.

    NOTE(review): the original docstring said "Tests for more_info()
    method", apparently copy-pasted from MoreInfoTest above -- this class
    exercises prepare().
    """

    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import (
            StandaloneAuthenticator)
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)

    def test_prepare(self):
        """Make sure exceptions aren't raised.

        .. todo:: Add on more once things are setup appropriately.

        """
        self.authenticator.prepare()
if __name__ == "__main__":
unittest.main() # pragma: no cover
| apache-2.0 |
cgstudiomap/cgstudiomap | main/parts/odoo/addons/stock/wizard/__init__.py | 323 | 1149 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_move
import stock_return_picking
import stock_change_product_qty
import make_procurement_product
import orderpoint_procurement
import stock_transfer_details
| agpl-3.0 |
asm666/sympy | sympy/parsing/tests/test_sympy_parser.py | 46 | 3372 | from sympy.core import Symbol, Function, Float, Rational, Integer, I, Mul, Pow
from sympy.functions import exp, factorial, sin
from sympy.logic import And
from sympy.series import Limit
from sympy.utilities.pytest import raises
from sympy.parsing.sympy_parser import (
parse_expr, standard_transformations, rationalize, TokenError,
split_symbols, implicit_multiplication,
)
def test_sympy_parser():
    """parse_expr() on a grab-bag of inputs must equal hand-built objects."""
    x = Symbol('x')
    # Map of input text -> expected SymPy object built explicitly.
    inputs = {
        '2*x': 2 * x,
        '3.00': Float(3),
        '22/7': Rational(22, 7),
        '2+3j': 2 + 3*I,  # Python complex-literal notation
        'exp(x)': exp(x),
        'x!': factorial(x),
        '3.[3]': Rational(10, 3),  # repeating-decimal notation
        '10!': 3628800,
        '-(2)': -Integer(2),
        '[-1, -2, 3]': [Integer(-1), Integer(-2), Integer(3)],
        'Symbol("x").free_symbols': x.free_symbols,
        "S('S(3).n(n=3)')": 3.00,
        # evaluate=False preserves the factor structure of factorint()
        'factorint(12, visual=True)': Mul(
            Pow(2, 2, evaluate=False),
            Pow(3, 1, evaluate=False),
            evaluate=False),
        'Limit(sin(x), x, 0, dir="-")': Limit(sin(x), x, 0, dir='-'),
    }
    for text, result in inputs.items():
        assert parse_expr(text) == result
def test_rationalize():
    """With the rationalize transformation, decimal literals parse into
    exact Rationals instead of Floats."""
    transformations = standard_transformations + (rationalize,)
    expected = {'0.123': Rational(123, 1000)}
    for text, value in expected.items():
        assert parse_expr(text, transformations=transformations) == value
def test_factorial_fail():
    """Malformed factorial/punctuation input must raise TokenError."""
    inputs = ['x!!!', 'x!!!!', '(!)']
    for text in inputs:
        # Use raises() for consistency with test_issue_2515 below.  The
        # previous try/`assert False`/`assert True` pattern expressed the
        # same contract (success -> AssertionError escapes; TokenError ->
        # pass; other exceptions propagate) but far less directly.
        raises(TokenError, lambda text=text: parse_expr(text))
def test_local_dict():
    """Callables supplied via local_dict are used during parsing."""
    plus_two = {'my_function': lambda x: x + 2}
    assert parse_expr('my_function(2)', local_dict=plus_two) == Integer(4)
def test_global_dict():
    """Names resolved while parsing come from global_dict when given."""
    global_dict = {
        'Symbol': Symbol
    }
    # With only Symbol available, Q and S parse as plain symbols and the
    # `&` operator builds a logic And.
    inputs = {
        'Q & S': And(Symbol('Q'), Symbol('S'))
    }
    for text, result in inputs.items():
        assert parse_expr(text, global_dict=global_dict) == result
def test_issue_2515():
    """Unbalanced parentheses/quotes must raise TokenError, not crash."""
    raises(TokenError, lambda: parse_expr('(()'))
    raises(TokenError, lambda: parse_expr('"""'))
def test_issue_7663():
    """evaluate=0 (falsy int) must behave exactly like evaluate=False."""
    x = Symbol('x')
    e = '2*(x+1)'
    assert parse_expr(e, evaluate=0) == parse_expr(e, evaluate=False)
def test_split_symbols():
    """split_symbols turns 'xy' into x*y only when the transformation is on."""
    transformations = standard_transformations + \
        (split_symbols, implicit_multiplication,)
    x = Symbol('x')
    y = Symbol('y')
    xy = Symbol('xy')
    # Without the transformation the two letters remain a single symbol.
    assert parse_expr("xy") == xy
    assert parse_expr("xy", transformations=transformations) == x*y
def test_split_symbols_function():
    """split_symbols must not split a trailing name that local_dict marks
    as a function: 'af(x+1)' becomes a*f(x+1) when f is a Function."""
    transformations = standard_transformations + \
        (split_symbols, implicit_multiplication,)
    x = Symbol('x')
    y = Symbol('y')
    a = Symbol('a')
    f = Function('f')
    # 'ay' splits into a*y, and '(x+1)' multiplies implicitly.
    assert parse_expr("ay(x+1)", transformations=transformations) == a*y*(x+1)
    # With f declared a Function, 'af' splits into a * f(...) call.
    assert parse_expr("af(x+1)", transformations=transformations,
                      local_dict={'f': f}) == a*f(x+1)
def test_match_parentheses_implicit_multiplication():
    """Mismatched brackets must raise TokenError even with implicit
    multiplication enabled."""
    transformations = standard_transformations + \
        (implicit_multiplication,)
    raises(TokenError, lambda: parse_expr('(1,2),(3,4]',transformations=transformations))
| bsd-3-clause |
kromain/chromium-tools | third_party/logilab/common/textutils.py | 16 | 17041 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Some text manipulation utility functions.
:group text formatting: normalize_text, normalize_paragraph, pretty_match,\
unquote, colorize_ansi
:group text manipulation: searchall, splitstrip
:sort: text formatting, text manipulation
:type ANSI_STYLES: dict(str)
:var ANSI_STYLES: dictionary mapping style identifier to ANSI terminal code
:type ANSI_COLORS: dict(str)
:var ANSI_COLORS: dictionary mapping color identifier to ANSI terminal code
:type ANSI_PREFIX: str
:var ANSI_PREFIX:
ANSI terminal code notifying the start of an ANSI escape sequence
:type ANSI_END: str
:var ANSI_END:
ANSI terminal code notifying the end of an ANSI escape sequence
:type ANSI_RESET: str
:var ANSI_RESET:
ANSI terminal code resetting format defined by a previous ANSI escape sequence
"""
__docformat__ = "restructuredtext en"
import sys
import re
import os.path as osp
from warnings import warn
from unicodedata import normalize as _uninormalize
try:
from os import linesep
except ImportError:
linesep = '\n' # gae
from logilab.common.deprecation import deprecated
MANUAL_UNICODE_MAP = {
    u'\xa1': u'!',    # INVERTED EXCLAMATION MARK
    u'\u0142': u'l',  # LATIN SMALL LETTER L WITH STROKE
    u'\u2044': u'/',  # FRACTION SLASH
    u'\xc6': u'AE',   # LATIN CAPITAL LETTER AE
    u'\xa9': u'(c)',  # COPYRIGHT SIGN
    u'\xab': u'"',    # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\xe6': u'ae',   # LATIN SMALL LETTER AE
    u'\xae': u'(r)',  # REGISTERED SIGN
    u'\u0153': u'oe', # LATIN SMALL LIGATURE OE
    u'\u0152': u'OE', # LATIN CAPITAL LIGATURE OE
    u'\xd8': u'O',    # LATIN CAPITAL LETTER O WITH STROKE
    u'\xf8': u'o',    # LATIN SMALL LETTER O WITH STROKE
    u'\xbb': u'"',    # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\xdf': u'ss',   # LATIN SMALL LETTER SHARP S
}


def unormalize(ustring, ignorenonascii=None, substitute=None):
    """replace diacritical characters with their corresponding ascii characters

    Each character is decomposed with the NFKD compatibility normal form
    and only the first resulting character is kept, except for a handful
    of manual multi-character replacements (MANUAL_UNICODE_MAP).

    :type substitute: str
    :param substitute: replacement character used when decomposition still
        yields a non-ascii character; when None a ValueError is raised
        instead

    :raise ValueError: if a character cannot be converted and `substitute`
        is None

    :see: Another project about ASCII transliterations of Unicode text
          http://pypi.python.org/pypi/Unidecode
    """
    if ignorenonascii is not None:
        # backward compatibility: ignorenonascii used to be a boolean
        warn("ignorenonascii is deprecated, use substitute named parameter instead",
             DeprecationWarning, stacklevel=2)
        if ignorenonascii:
            substitute = ''
    chars = []
    for letter in ustring:
        replacement = MANUAL_UNICODE_MAP.get(letter)
        if replacement is None:
            replacement = _uninormalize('NFKD', letter)[0]
            if ord(replacement) >= 2 ** 7:
                # still not ascii after decomposition
                if substitute is None:
                    raise ValueError("can't deal with non-ascii based characters")
                replacement = substitute
        chars.append(replacement)
    return u''.join(chars)
def unquote(string):
    """remove optional quotes (simple or double) from the string

    :type string: str or unicode
    :param string: an optionally quoted string

    :rtype: str or unicode
    :return: the unquoted string (or the input string if it wasn't quoted)
    """
    if not string:
        return string
    if string[0] in '"\'':
        string = string[1:]
    # BUG FIX: re-check for emptiness -- a single-quote-character input
    # (e.g. unquote('"')) became '' after stripping the leading quote and
    # string[-1] then raised IndexError.
    if string and string[-1] in '"\'':
        string = string[:-1]
    return string
_BLANKLINES_RGX = re.compile('\r?\n\r?\n')
_NORM_SPACES_RGX = re.compile('\s+')
def normalize_text(text, line_len=80, indent='', rest=False):
    """normalize a text to display it with a maximum line size and
    optionally arbitrary indentation. Line jumps are normalized but blank
    lines are kept. The indentation string may be used to insert a
    comment (#) or a quoting (>) mark for instance.

    :type text: str or unicode
    :param text: the input text to normalize

    :type line_len: int
    :param line_len: expected maximum line's length, default to 80

    :type indent: str or unicode
    :param indent: optional string to use as indentation

    :type rest: bool
    :param rest: when True, use the ReST-aware paragraph normalizer

    :rtype: str or unicode
    :return: the normalized text
    """
    # Blank lines delimit paragraphs; normalize each paragraph on its own,
    # then glue them back together with an indented blank line between.
    normalizer = normalize_rest_paragraph if rest else normalize_paragraph
    separator = '%s%s%s' % (linesep, indent, linesep)
    return separator.join(normalizer(paragraph, line_len, indent)
                          for paragraph in _BLANKLINES_RGX.split(text))
def normalize_paragraph(text, line_len=80, indent=''):
    """normalize a text to display it with a maximum line size and
    optionally arbitrary indentation. Line jumps are normalized. The
    indentation string may be used to insert a comment mark for
    instance.

    :type text: str or unicode
    :param text: the input text to normalize

    :type line_len: int
    :param line_len: expected maximum line's length, default to 80

    :type indent: str or unicode
    :param indent: optional string to use as indentation

    :rtype: str or unicode
    :return: the reflowed text, every line prefixed by `indent`
    """
    # Collapse all whitespace runs, then greedily cut lines so that each
    # fits in the width left after the indentation prefix.
    remaining = _NORM_SPACES_RGX.sub(' ', text)
    width = line_len - len(indent)
    wrapped = []
    while remaining:
        head, remaining = splittext(remaining.strip(), width)
        wrapped.append(indent + head)
    return linesep.join(wrapped)
def normalize_rest_paragraph(text, line_len=80, indent=''):
    """normalize a ReST text to display it with a maximum line size and
    optionally arbitrary indentation. Line jumps are normalized. The
    indentation string may be used top insert a comment mark for
    instance.

    :type text: str or unicode
    :param text: the input text to normalize

    :type line_len: int
    :param line_len: expected maximum line's length, default to 80

    :type indent: str or unicode
    :param indent: optional string to use as indentation

    :rtype: str or unicode
    :return:
        the input text normalized to fit on lines with a maximized size
        inferior to `line_len`, and optionally prefixed by an
        indentation string
    """
    # Unlike normalize_paragraph, existing line breaks are kept where
    # possible: `toreport` carries the overflow of a too-long line over to
    # the start of the next source line.
    toreport = ''
    lines = []
    line_len = line_len - len(indent)
    for line in text.splitlines():
        line = toreport + _NORM_SPACES_RGX.sub(' ', line.strip())
        toreport = ''
        while len(line) > line_len:
            # too long line, need split
            line, toreport = splittext(line, line_len)
            lines.append(indent + line)
            if toreport:
                # re-seed the working line with the overflowed tail
                line = toreport + ' '
                toreport = ''
            else:
                line = ''
        if line:
            lines.append(indent + line.strip())
    return linesep.join(lines)
def splittext(text, line_len):
    """split the given text on space according to the given max line size

    return a 2-uple:
    * a line <= line_len if possible
    * the rest of the text which has to be reported on another line
    """
    if len(text) <= line_len:
        return text, ''
    # Right-most space at index <= line_len; index 0 is excluded, matching
    # the original backward scan which stopped before reaching it.
    cut = text.rfind(' ', 1, min(len(text) - 1, line_len) + 1)
    if cut == -1:
        # No space within the limit: overflow up to the next space, or to
        # the end of the text when there is none.
        cut = text.find(' ', line_len)
        if cut == -1:
            cut = len(text)
    return text[:cut], text[cut + 1:].strip()
def splitstrip(string, sep=','):
    """return a list of stripped string by splitting the string given as
    argument on `sep` (',' by default). Empty string are discarded.

    >>> splitstrip('a, b, c  ,  4,,')
    ['a', 'b', 'c', '4']
    >>> splitstrip('a')
    ['a']
    >>>

    :type string: str or unicode
    :param string: a csv line

    :type sep: str or unicode
    :param sep: field separator, default to the comma (',')

    :rtype: list of str or unicode
    :return: the stripped, non-empty fields
    """
    stripped = (field.strip() for field in string.split(sep))
    return [field for field in stripped if field]
get_csv = deprecated('get_csv is deprecated, use splitstrip')(splitstrip)
def split_url_or_path(url_or_path):
    """return the latest component of a string containing either an url of
    the form <scheme>://<path> or a local file system path

    Note: URLs yield a 2-item list (str.rsplit), local paths a 2-tuple
    (os.path.split) -- preserved for backward compatibility.
    """
    if '://' not in url_or_path:
        return osp.split(url_or_path.rstrip(osp.sep))
    return url_or_path.rstrip('/').rsplit('/', 1)
def text_to_dict(text):
    """parse multilines text containing simple 'key=value' lines and return a
    dict of {'key': 'value'}. When the same key is encountered multiple time,
    value is turned into a list containing all values.

    >>> text_to_dict('''multiple=1
    ... multiple= 2
    ... single =3
    ... ''')
    {'single': '3', 'multiple': ['1', '2']}
    """
    res = {}
    if not text:
        return res
    for line in text.splitlines():
        line = line.strip()
        # skip blank lines and '#' comments
        if not line or line.startswith('#'):
            continue
        key, value = (part.strip() for part in line.split('=', 1))
        if key not in res:
            res[key] = value
        elif isinstance(res[key], list):
            res[key].append(value)
        else:
            # second occurrence: promote the stored value to a list
            res[key] = [res[key], value]
    return res
_BLANK_URE = r'(\s|,)+'
_BLANK_RE = re.compile(_BLANK_URE)
__VALUE_URE = r'-?(([0-9]+\.[0-9]*)|((0x?)?[0-9]+))'
__UNITS_URE = r'[a-zA-Z]+'
_VALUE_RE = re.compile(r'(?P<value>%s)(?P<unit>%s)?' % (__VALUE_URE, __UNITS_URE))

# multipliers to convert a value expressed in a given unit to base units
BYTE_UNITS = {
    "b": 1,
    "kb": 1024,
    "mb": 1024 ** 2,
    "gb": 1024 ** 3,
    "tb": 1024 ** 4,
}

TIME_UNITS = {
    # BUG FIX: a millisecond is 1/1000 of a second; this entry used to be
    # 0.0001 (1/10000).
    "ms": 0.001,
    "s": 1,
    "min": 60,
    "h": 60 * 60,
    "d": 60 * 60 * 24,
}


def apply_units(string, units, inter=None, final=float, blank_reg=_BLANK_RE,
                value_reg=_VALUE_RE):
    """Parse the string applying the units defined in units
    (e.g.: "1.5h", {'h': 60} -> 90.0).

    :type string: str or unicode
    :param string: the string to parse

    :type units: dict (or any object with __getitem__ using basestring key)
    :param units: a dict mapping a unit string repr to its value

    :type inter: type
    :param inter: used to parse every intermediate value (need __sum__)

    :type final: type
    :param final: used to build the result from the sum of parsed values

    :type blank_reg: regexp
    :param blank_reg: should match every blank char to ignore.

    :type value_reg: regexp with "value" and optional "unit" group
    :param value_reg: match a value and its unit in the string

    :raise KeyError: if a parsed unit is not present in `units`
    """
    if inter is None:
        inter = final
    # BUG FIX: honor the blank_reg parameter -- the module-level _BLANK_RE
    # used to be applied unconditionally, silently ignoring the argument.
    string = blank_reg.sub('', string)
    values = []
    for match in value_reg.finditer(string):
        dic = match.groupdict()
        lit, unit = dic["value"], dic.get("unit")
        value = inter(lit)
        if unit is not None:
            try:
                value *= units[unit.lower()]
            except KeyError:
                raise KeyError('invalid unit %s. valid units are %s' %
                               (unit, units.keys()))
        values.append(value)
    return final(sum(values))
# any flavour of line ending, normalized to `linesep` before slicing
_LINE_RGX = re.compile('\r\n|\r+|\n')

def pretty_match(match, string, underline_char='^'):
    """return a string with the match location underlined:

    >>> import re
    >>> print(pretty_match(re.search('mange', 'il mange du bacon'), 'il mange du bacon'))
    il mange du bacon
       ^^^^^
    >>>

    :type match: _sre.SRE_match
    :param match: object returned by re.match, re.search or re.finditer

    :type string: str or unicode
    :param string:
        the string on which the regular expression has been applied to
        obtain the `match` object

    :type underline_char: str or unicode
    :param underline_char:
        character to use to underline the matched section, default to the
        carret '^'

    :rtype: str or unicode
    :return:
        the original string with an inserted line to underline the match
        location
    """
    start = match.start()
    end = match.end()
    string = _LINE_RGX.sub(linesep, string)
    # find the start of the line containing the match
    start_line_pos = string.rfind(linesep, 0, start)
    if start_line_pos == -1:
        start_line_pos = 0
        result = []
    else:
        # keep everything before the matched line untouched
        result = [string[:start_line_pos]]
        start_line_pos += len(linesep)
    # column of the match within its line -> leading padding of underline
    offset = start - start_line_pos
    underline = ' ' * offset + underline_char * (end - start)
    end_line_pos = string.find(linesep, end)
    if end_line_pos == -1:
        # match ends on the last line: matched line, then underline
        string = string[start_line_pos:]
        result.append(string)
        result.append(underline)
    else:
        # NOTE: `end` is reused here to hold the trailing text, not the
        # match-end index anymore.
        end = string[end_line_pos + len(linesep):]
        string = string[start_line_pos:end_line_pos]
        result.append(string)
        result.append(underline)
        result.append(end)
    return linesep.join(result).rstrip()
# Ansi colorization ###########################################################
ANSI_PREFIX = '\033['
ANSI_END = 'm'
ANSI_RESET = '\033[0m'
ANSI_STYLES = {
'reset': "0",
'bold': "1",
'italic': "3",
'underline': "4",
'blink': "5",
'inverse': "7",
'strike': "9",
}
ANSI_COLORS = {
'reset': "0",
'black': "30",
'red': "31",
'green': "32",
'yellow': "33",
'blue': "34",
'magenta': "35",
'cyan': "36",
'white': "37",
}
def _get_ansi_code(color=None, style=None):
"""return ansi escape code corresponding to color and style
:type color: str or None
:param color:
the color name (see `ANSI_COLORS` for available values)
or the color number when 256 colors are available
:type style: str or None
:param style:
style string (see `ANSI_COLORS` for available values). To get
several style effects at the same time, use a coma as separator.
:raise KeyError: if an unexistent color or style identifier is given
:rtype: str
:return: the built escape code
"""
ansi_code = []
if style:
style_attrs = splitstrip(style)
for effect in style_attrs:
ansi_code.append(ANSI_STYLES[effect])
if color:
if color.isdigit():
ansi_code.extend(['38', '5'])
ansi_code.append(color)
else:
ansi_code.append(ANSI_COLORS[color])
if ansi_code:
return ANSI_PREFIX + ';'.join(ansi_code) + ANSI_END
return ''
def colorize_ansi(msg, color=None, style=None):
    """colorize message by wrapping it with ansi escape codes

    :type msg: str or unicode
    :param msg: the message string to colorize

    :type color: str or None
    :param color:
        the color identifier (see `ANSI_COLORS` for available values)

    :type style: str or None
    :param style:
        style string (see `ANSI_STYLES` for available values). To get
        several style effects at the same time, use a coma as separator.

    :raise KeyError: if an unexistent color or style identifier is given

    :rtype: str or unicode
    :return: the ansi escaped string
    """
    # Nothing requested: leave the text untouched.
    if color is None and style is None:
        return msg
    escape_code = _get_ansi_code(color, style)
    # Empty escape code (unknown combination): don't wrap the message.
    if not escape_code:
        return msg
    return '%s%s%s' % (escape_code, msg, ANSI_RESET)
DIFF_STYLE = {'separator': 'cyan', 'remove': 'red', 'add': 'green'}

def diff_colorize_ansi(lines, out=sys.stdout, style=DIFF_STYLE):
    """Write unified-diff `lines` to `out`, ANSI-colorizing file headers
    ('--- ' / '+++ '), removed ('-') and added ('+') lines.

    :param lines: iterable of diff lines (with their trailing newline)
    :param out: writable stream, defaults to sys.stdout
    :param style: dict mapping 'separator'/'remove'/'add' to color names
    """
    for line in lines:
        # BUG FIX: the original had two extra `elif line[:4] == '--- '` /
        # `'+++ '` branches that were unreachable (already matched by the
        # first test), and used line[0] which raised IndexError on empty
        # lines; startswith() handles both.
        if line.startswith(('--- ', '+++ ')):
            out.write(colorize_ansi(line, style['separator']))
        elif line.startswith('-'):
            out.write(colorize_ansi(line, style['remove']))
        elif line.startswith('+'):
            out.write(colorize_ansi(line, style['add']))
        else:
            out.write(line)
| bsd-3-clause |
maazkhan/kubernetes | Godeps/_workspace/src/github.com/ugorji/go/codec/test.py | 1138 | 3876 | #!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
# Ensure msgpack-python and cbor are installed first, using:
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
    """Return the list of serialization test values: every primitive type,
    a nested copy of the primitive list, and a few composite values."""
    primitives = [
        -8,
        -1616,
        -32323232,
        -6464646464646464,
        192,
        1616,
        32323232,
        6464646464646464,
        192,
        -3232.0,
        -6464646464.0,
        3232.0,
        6464646464.0,
        False,
        True,
        None,
        u"someday",
        u"",
        u"bytestring",
        1328176922000002000,
        -2206187877999998000,
        270,
        -2013855847999995777,
        #-6795364578871345152,
    ]
    composites = [
        {"true": True,
         "false": False},
        {"true": "True",
         "false": False,
         "uint16(1616)": 1616},
        {"list": [1616, 32323232, True, -3232.0, {"TRUE": True, "FALSE": False}, [True, False]],
         "int32": 32323232, "bool": True,
         "LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
         "SHORT STRING": "1234567890"},
        {True: "true", 8: False, "false": 0}
    ]
    data = list(primitives)
    data.append(primitives)
    data.extend(composites)
    return data
def build_test_data(destdir):
    """Serialize every test value to <destdir>/<i>.msgpack.golden and
    <destdir>/<i>.cbor.golden golden files."""
    # enumerate() instead of range(len(...)); context managers so file
    # handles are closed even when serialization raises mid-way.
    for i, value in enumerate(get_test_data_list()):
        with open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb') as f:
            f.write(msgpack.dumps(value))
        with open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb') as f:
            f.write(cbor.dumps(value))
def doRpcServer(port, stopTimeSec):
    """Run a msgpack-rpc echo server on localhost:port.

    When stopTimeSec > 0, a timer thread stops the server after that many
    seconds; otherwise server.start() blocks indefinitely.
    """
    class EchoHandler(object):
        # Handlers exposed over RPC; names are part of the wire protocol.
        def Echo123(self, msg1, msg2, msg3):
            return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
        def EchoStruct(self, msg):
            return ("%s" % msg)

    addr = msgpackrpc.Address('localhost', port)
    server = msgpackrpc.Server(EchoHandler())
    server.listen(addr)
    # run thread to stop it after stopTimeSec seconds if > 0
    if stopTimeSec > 0:
        def myStopRpcServer():
            server.stop()
        t = threading.Timer(stopTimeSec, myStopRpcServer)
        t.start()
    server.start()
def doRpcClientToPythonSvc(port):
    """Call the Python echo service (doRpcServer) and print the replies.

    NOTE: Python 2 print statements -- this script is not Python 3 ready.
    """
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print client.call("Echo123", "A1", "B2", "C3")
    print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
    """Call the Go test RPC service and print the replies.

    The Go side registers its handlers under "TestRpcInt." and takes the
    Echo123 arguments as a single list (unlike the Python service).
    NOTE: Python 2 print statements -- this script is not Python 3 ready.
    """
    # print ">>>> port: ", port, " <<<<<"
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
    print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
    """Dispatch on the first CLI argument; see the usage message below.

    :param args: sys.argv[1:] -- sub-command followed by its arguments
    """
    if len(args) == 2 and args[0] == "testdata":
        # args[1] is the destination directory for the golden files
        build_test_data(args[1])
    elif len(args) == 3 and args[0] == "rpc-server":
        doRpcServer(int(args[1]), int(args[2]))
    elif len(args) == 2 and args[0] == "rpc-client-python-service":
        doRpcClientToPythonSvc(int(args[1]))
    elif len(args) == 2 and args[0] == "rpc-client-go-service":
        doRpcClientToGoSvc(int(args[1]))
    else:
        print("Usage: test.py " +
              "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
| apache-2.0 |
hehongliang/tensorflow | tensorflow/python/layers/core_test.py | 8 | 22555 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class DenseTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testDenseProperties(self):
    """Constructor arguments are exposed as attributes, and layers built
    without an explicit name are auto-named dense_1, dense_2, ..."""
    dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
    self.assertEqual(dense.units, 2)
    self.assertEqual(dense.activation, nn_ops.relu)
    # Regularizers default to None when not supplied.
    self.assertEqual(dense.kernel_regularizer, None)
    self.assertEqual(dense.bias_regularizer, None)
    self.assertEqual(dense.activity_regularizer, None)
    self.assertEqual(dense.use_bias, True)

    # Test auto-naming
    dense = core_layers.Dense(2, activation=nn_ops.relu)
    dense.apply(random_ops.random_uniform((5, 2)))
    self.assertEqual(dense.name, 'dense_1')
    dense = core_layers.Dense(2, activation=nn_ops.relu)
    dense.apply(random_ops.random_uniform((5, 2)))
    self.assertEqual(dense.name, 'dense_2')
def testVariableInput(self):
with self.cached_session():
v = variable_scope.get_variable(
'X', initializer=init_ops.zeros_initializer(), shape=(1, 1))
x = core_layers.Dense(1)(v)
variables.global_variables_initializer().run()
self.assertAllEqual(x.eval(), [[0.0]])
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testCall(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
inputs = random_ops.random_uniform((5, 4), seed=1)
outputs = dense(inputs)
self.assertListEqual([5, 2], outputs.get_shape().as_list())
self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
self.assertListEqual(dense.trainable_variables,
[dense.kernel, dense.bias])
self.assertListEqual(dense.non_trainable_variables, [])
if not context.executing_eagerly():
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2)
self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
self.assertEqual(dense.bias.name, 'my_dense/bias:0')
@test_util.assert_no_new_pyobjects_executing_eagerly
def testNoEagerLeak(self):
# Tests that repeatedly constructing and building a Layer does not leak
# Python objects.
inputs = random_ops.random_uniform((5, 4), seed=1)
core_layers.Dense(5)(inputs)
core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')(inputs)
@test_util.run_in_graph_and_eager_modes
def testCallTensorDot(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
inputs = random_ops.random_uniform((5, 4, 3), seed=1)
outputs = dense(inputs)
self.assertListEqual([5, 4, 2], outputs.get_shape().as_list())
@test_util.run_in_graph_and_eager_modes
def testNoBias(self):
dense = core_layers.Dense(2, use_bias=False, name='my_dense')
inputs = random_ops.random_uniform((5, 2), seed=1)
_ = dense(inputs)
self.assertListEqual(dense.variables, [dense.kernel])
self.assertListEqual(dense.trainable_variables, [dense.kernel])
self.assertListEqual(dense.non_trainable_variables, [])
if not context.executing_eagerly():
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)
self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
self.assertEqual(dense.bias, None)
@test_util.run_in_graph_and_eager_modes
def testNonTrainable(self):
dense = core_layers.Dense(2, trainable=False, name='my_dense')
inputs = random_ops.random_uniform((5, 2), seed=1)
_ = dense(inputs)
self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
self.assertListEqual(dense.non_trainable_variables,
[dense.kernel, dense.bias])
self.assertListEqual(dense.trainable_variables, [])
if not context.executing_eagerly():
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 0)
@test_util.run_in_graph_and_eager_modes
def testOutputShape(self):
dense = core_layers.Dense(7, activation=nn_ops.relu, name='my_dense')
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = dense.apply(inputs)
self.assertEqual(outputs.get_shape().as_list(), [5, 7])
inputs = random_ops.random_uniform((5, 2, 3), seed=1)
outputs = dense(inputs)
self.assertEqual(outputs.get_shape().as_list(), [5, 2, 7])
inputs = random_ops.random_uniform((1, 2, 4, 3), seed=1)
outputs = dense.apply(inputs)
self.assertEqual(outputs.get_shape().as_list(), [1, 2, 4, 7])
def testCallOnPlaceHolder(self):
inputs = array_ops.placeholder(dtype=dtypes.float32)
dense = core_layers.Dense(4, name='my_dense')
with self.assertRaises(ValueError):
dense(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
dense = core_layers.Dense(4, name='my_dense')
with self.assertRaises(ValueError):
dense(inputs)
inputs = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, None, None])
dense = core_layers.Dense(4, name='my_dense')
with self.assertRaises(ValueError):
dense(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 3])
dense = core_layers.Dense(4, name='my_dense')
dense(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None, 3])
dense = core_layers.Dense(4, name='my_dense')
dense(inputs)
@test_util.run_in_graph_and_eager_modes
def testActivation(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = dense(inputs)
if not context.executing_eagerly():
self.assertEqual(outputs.op.name, 'dense1/Relu')
dense = core_layers.Dense(2, name='dense2')
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = dense(inputs)
if not context.executing_eagerly():
self.assertEqual(outputs.op.name, 'dense2/BiasAdd')
def testActivityRegularizer(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
dense = core_layers.Dense(
2, name='my_dense', activity_regularizer=regularizer)
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.assertListEqual(dense.losses, loss_keys)
def testKernelRegularizer(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
dense = core_layers.Dense(
2, name='my_dense', kernel_regularizer=regularizer)
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in dense.variables])
self.assertAllEqual(self.evaluate(dense.losses), self.evaluate(loss_keys))
def testKernelRegularizerWithReuse(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = core_layers.dense(
inputs, 2, name='my_dense', kernel_regularizer=regularizer)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
_ = core_layers.dense(
inputs, 2, name='my_dense', kernel_regularizer=regularizer, reuse=True)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
def testBiasRegularizer(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
dense = core_layers.Dense(2, name='my_dense', bias_regularizer=regularizer)
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in dense.variables])
self.assertAllEqual(self.evaluate(dense.losses), self.evaluate(loss_keys))
def testFunctionalDense(self):
with self.cached_session():
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = core_layers.dense(
inputs, 2, activation=nn_ops.relu, name='my_dense')
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2)
self.assertEqual(outputs.op.name, 'my_dense/Relu')
def testFunctionalDenseTwice(self):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
vars1 = _get_variable_dict_from_varstore().values()
core_layers.dense(inputs, 2)
vars2 = _get_variable_dict_from_varstore().values()
self.assertEqual(len(vars1), 2)
self.assertEqual(len(vars2), 4)
# TODO(alive): get this to work in eager mode.
def testFunctionalDenseTwiceReuse(self):
with self.cached_session():
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name='my_dense')
vars1 = variables.trainable_variables()
core_layers.dense(inputs, 2, name='my_dense', reuse=True)
vars2 = variables.trainable_variables()
self.assertEqual(vars1, vars2)
# TODO(alive): get this to work in eager mode.
def testFunctionalDenseTwiceReuseFromScope(self):
with self.cached_session():
with variable_scope.variable_scope('scope'):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name='my_dense')
vars1 = variables.trainable_variables()
with variable_scope.variable_scope('scope', reuse=True):
core_layers.dense(inputs, 2, name='my_dense')
vars2 = variables.trainable_variables()
self.assertEqual(vars1, vars2)
def testFunctionalDenseInitializerFromScope(self):
with variable_scope.variable_scope(
'scope',
initializer=init_ops.ones_initializer()), self.cached_session():
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
variables.global_variables_initializer().run()
weights = _get_variable_dict_from_varstore()
self.assertEqual(len(weights), 2)
# Check that the matrix weights got initialized to ones (from scope).
self.assertAllClose(weights['scope/dense/kernel'].read_value().eval(),
np.ones((3, 2)))
# Check that the bias still got initialized to zeros.
self.assertAllClose(weights['scope/dense/bias'].read_value().eval(),
np.zeros((2)))
def testEagerExecution(self):
with context.eager_mode():
container = variable_scope.EagerVariableStore()
x = constant_op.constant([[2.0]])
with container.as_default():
y = core_layers.dense(
x, 1, name='my_dense',
kernel_initializer=init_ops.ones_initializer())
self.assertAllEqual(y, [[2.0]])
self.assertEqual(len(container.variables()), 2)
# Recreate the layer to test reuse.
with container.as_default():
core_layers.dense(
x, 1, name='my_dense',
kernel_initializer=init_ops.ones_initializer())
self.assertEqual(len(container.variables()), 2)
def testFunctionalDenseWithCustomGetter(self):
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with variable_scope.variable_scope('test', custom_getter=custom_getter):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
self.assertEqual(called[0], 2)
def testFunctionalDenseInScope(self):
with self.cached_session():
with variable_scope.variable_scope('test'):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name='my_dense')
var_dict = _get_variable_dict_from_varstore()
var_key = 'test/my_dense/kernel'
self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)
with variable_scope.variable_scope('test1') as scope:
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name=scope)
var_dict = _get_variable_dict_from_varstore()
var_key = 'test1/kernel'
self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)
with variable_scope.variable_scope('test2'):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
var_dict = _get_variable_dict_from_varstore()
var_key = 'test2/dense/kernel'
self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)
@test_util.run_in_graph_and_eager_modes
def testComputeOutputShape(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
ts = tensor_shape.TensorShape
# pylint: disable=protected-access
with self.assertRaises(ValueError):
dense.compute_output_shape(ts(None))
with self.assertRaises(ValueError):
dense.compute_output_shape(ts([]))
with self.assertRaises(ValueError):
dense.compute_output_shape(ts([1]))
self.assertEqual(
[None, 2],
dense.compute_output_shape((None, 3)).as_list())
self.assertEqual(
[None, 2],
dense.compute_output_shape(ts([None, 3])).as_list())
self.assertEqual(
[None, 4, 2],
dense.compute_output_shape(ts([None, 4, 3])).as_list())
# pylint: enable=protected-access
@test_util.run_in_graph_and_eager_modes
def testConstraints(self):
k_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
dense = core_layers.Dense(2,
kernel_constraint=k_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3), seed=1)
dense(inputs)
self.assertEqual(dense.kernel_constraint, k_constraint)
self.assertEqual(dense.bias_constraint, b_constraint)
def _get_variable_dict_from_varstore():
  """Return the default variable store's variables, keyed and ordered by name."""
  # pylint: disable=protected-access
  all_vars = variable_scope._get_default_variable_store()._vars
  # pylint: enable=protected-access
  # Variable names are unique dict keys, so sorting the items sorts by name.
  return collections.OrderedDict(sorted(all_vars.items()))
class DropoutTest(test.TestCase):
  """Unit tests for `tf.layers.Dropout` and the functional `core_layers.dropout`."""

  @test_util.run_in_graph_and_eager_modes
  def testDropoutProperties(self):
    dp = core_layers.Dropout(0.5, name='dropout')
    self.assertEqual(dp.rate, 0.5)
    self.assertEqual(dp.noise_shape, None)
    dp.apply(array_ops.ones(()))
    self.assertEqual(dp.name, 'dropout')

  @test_util.run_in_graph_and_eager_modes
  def testBooleanLearningPhase(self):
    # training=True drops units (min becomes 0); training=False is identity.
    dp = core_layers.Dropout(0.5)
    inputs = array_ops.ones((5, 3))
    dropped = dp.apply(inputs, training=True)
    if not context.executing_eagerly():
      self.evaluate(variables.global_variables_initializer())
    np_output = self.evaluate(dropped)
    self.assertAlmostEqual(0., np_output.min())
    dropped = dp.apply(inputs, training=False)
    np_output = self.evaluate(dropped)
    self.assertAllClose(np.ones((5, 3)), np_output)

  def testDynamicLearningPhase(self):
    # The training flag may be a boolean placeholder fed at run time.
    with self.cached_session() as sess:
      dp = core_layers.Dropout(0.5, seed=1)
      inputs = array_ops.ones((5, 5))
      training = array_ops.placeholder(dtype='bool')
      dropped = dp.apply(inputs, training=training)
      self.evaluate(variables.global_variables_initializer())
      np_output = sess.run(dropped, feed_dict={training: True})
      self.assertAlmostEqual(0., np_output.min())
      np_output = sess.run(dropped, feed_dict={training: False})
      self.assertAllClose(np.ones((5, 5)), np_output)

  @test_util.run_in_graph_and_eager_modes
  def testDynamicNoiseShape(self):
    # noise_shape broadcasting along axis 1 makes rows share a dropout mask.
    inputs = array_ops.ones((5, 3, 2))
    noise_shape = [None, 1, None]
    dp = core_layers.Dropout(0.5, noise_shape=noise_shape, seed=1)
    dropped = dp.apply(inputs, training=True)
    self.evaluate(variables.global_variables_initializer())
    np_output = self.evaluate(dropped)
    self.assertAlmostEqual(0., np_output.min())
    self.assertAllClose(np_output[:, 0, :], np_output[:, 1, :])

  def testCustomNoiseShape(self):
    # Same as above but with a fully-specified noise shape.
    inputs = array_ops.ones((5, 3, 2))
    noise_shape = [5, 1, 2]
    dp = core_layers.Dropout(0.5, noise_shape=noise_shape, seed=1)
    dropped = dp.apply(inputs, training=True)
    self.evaluate(variables.global_variables_initializer())
    np_output = self.evaluate(dropped)
    self.assertAlmostEqual(0., np_output.min())
    self.assertAllClose(np_output[:, 0, :], np_output[:, 1, :])

  def testFunctionalDropout(self):
    with self.cached_session():
      inputs = array_ops.ones((5, 5))
      dropped = core_layers.dropout(inputs, 0.5, training=True, seed=1)
      variables.global_variables_initializer().run()
      np_output = self.evaluate(dropped)
      self.assertAlmostEqual(0., np_output.min())
      dropped = core_layers.dropout(inputs, 0.5, training=False, seed=1)
      np_output = self.evaluate(dropped)
      self.assertAllClose(np.ones((5, 5)), np_output)

  def testDynamicRate(self):
    # The dropout rate itself may be a placeholder; rate 0.0 is identity.
    with self.cached_session() as sess:
      rate = array_ops.placeholder(dtype='float32', name='rate')
      dp = core_layers.Dropout(rate, name='dropout')
      inputs = array_ops.ones((5, 5))
      dropped = dp.apply(inputs, training=True)
      sess.run(variables.global_variables_initializer())
      np_output = sess.run(dropped, feed_dict={rate: 0.5})
      self.assertAlmostEqual(0., np_output.min())
      np_output = sess.run(dropped, feed_dict={rate: 0.0})
      self.assertAllClose(np.ones((5, 5)), np_output)
class FlattenTest(test.TestCase):
  """Unit tests for `tf.layers.Flatten` and the functional `core_layers.flatten`."""

  def testCreateFlatten(self):
    with self.cached_session() as sess:
      x = array_ops.placeholder(shape=(None, 2, 3), dtype='float32')
      y = core_layers.Flatten()(x)
      np_output = sess.run(y, feed_dict={x: np.zeros((3, 2, 3))})
      self.assertEqual(list(np_output.shape), [3, 6])
      self.assertEqual(y.get_shape().as_list(), [None, 6])

      x = array_ops.placeholder(shape=(1, 2, 3, 2), dtype='float32')
      y = core_layers.Flatten()(x)
      np_output = sess.run(y, feed_dict={x: np.zeros((1, 2, 3, 2))})
      self.assertEqual(list(np_output.shape), [1, 12])
      self.assertEqual(y.get_shape().as_list(), [1, 12])

  def testComputeShape(self):
    # Known non-batch dims multiply; any unknown one makes the result unknown.
    shape = core_layers.Flatten().compute_output_shape((1, 2, 3, 2))
    self.assertEqual(shape.as_list(), [1, 12])

    shape = core_layers.Flatten().compute_output_shape((None, 3, 2))
    self.assertEqual(shape.as_list(), [None, 6])

    shape = core_layers.Flatten().compute_output_shape((None, 3, None))
    self.assertEqual(shape.as_list(), [None, None])

  def testDataFormat5d(self):
    # channels_first input is the transpose of the channels_last input, and
    # flattening either must produce the same values.
    np_input_channels_last = np.arange(
        120, dtype='float32').reshape([1, 5, 4, 3, 2])

    # FIX: use cached_session() instead of the deprecated test_session(),
    # for consistency with every other test in this file.
    with self.cached_session() as sess:
      x = array_ops.placeholder(shape=(1, 5, 4, 3, 2), dtype='float32')
      y = core_layers.Flatten(data_format='channels_last')(x)
      np_output_cl = sess.run(y, feed_dict={x: np_input_channels_last})

      x = array_ops.placeholder(shape=(1, 2, 5, 4, 3), dtype='float32')
      y = core_layers.Flatten(data_format='channels_first')(x)
      np_input_channels_first = np.transpose(np_input_channels_last,
                                             [0, 4, 1, 2, 3])
      np_output_cf = sess.run(y, feed_dict={x: np_input_channels_first})

      self.assertAllEqual(np_output_cl, np_output_cf)

  def testDataFormat4d(self):
    np_input_channels_last = np.arange(
        24, dtype='float32').reshape([1, 4, 3, 2])

    # FIX: use cached_session() instead of the deprecated test_session(),
    # for consistency with every other test in this file.
    with self.cached_session() as sess:
      x = array_ops.placeholder(shape=(1, 4, 3, 2), dtype='float32')
      y = core_layers.Flatten(data_format='channels_last')(x)
      np_output_cl = sess.run(y, feed_dict={x: np_input_channels_last})

      x = array_ops.placeholder(shape=(1, 2, 4, 3), dtype='float32')
      y = core_layers.Flatten(data_format='channels_first')(x)
      np_input_channels_first = np.transpose(np_input_channels_last,
                                             [0, 3, 1, 2])
      np_output_cf = sess.run(y, feed_dict={x: np_input_channels_first})

      self.assertAllEqual(np_output_cl, np_output_cf)

  def testFunctionalFlatten(self):
    x = array_ops.placeholder(shape=(None, 2, 3), dtype='float32')
    y = core_layers.flatten(x, name='flatten')
    self.assertEqual(y.get_shape().as_list(), [None, 6])

  def testFlattenValueError(self):
    # Rank-1 input cannot be flattened to (batch, features).
    x = array_ops.placeholder(shape=(None,), dtype='float32')
    with self.assertRaises(ValueError):
      core_layers.Flatten()(x)

  def testFlattenUnknownAxes(self):
    with self.cached_session() as sess:
      x = array_ops.placeholder(shape=(5, None, None), dtype='float32')
      y = core_layers.Flatten()(x)
      np_output = sess.run(y, feed_dict={x: np.zeros((5, 2, 3))})
      self.assertEqual(list(np_output.shape), [5, 6])
      self.assertEqual(y.get_shape().as_list(), [5, None])

      x = array_ops.placeholder(shape=(5, None, 2), dtype='float32')
      y = core_layers.Flatten()(x)
      np_output = sess.run(y, feed_dict={x: np.zeros((5, 3, 2))})
      self.assertEqual(list(np_output.shape), [5, 6])
      self.assertEqual(y.get_shape().as_list(), [5, None])
# Run all tests in this module when executed as a script.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations/_bastion_hosts_operations.py | 1 | 23288 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BastionHostsOperations:
"""BastionHostsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
    # Pipeline client, configuration and model (de)serializers are supplied
    # by the generated service client that owns this operation group.
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
async def _delete_initial(
    self,
    resource_group_name: str,
    bastion_host_name: str,
    **kwargs
) -> None:
    """Issue the raw DELETE request for a Bastion Host.

    Returns as soon as the service accepts the request; long-running-operation
    polling is handled by :meth:`begin_delete`.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map HTTP status codes to azure-core exception types; callers may
    # extend/override the mapping via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200, 202 and 204 are the accepted success codes for this operation.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # No body to deserialize; only honor a caller-supplied `cls` hook.
    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}'}  # type: ignore
async def begin_delete(
    self,
    resource_group_name: str,
    bastion_host_name: str,
    **kwargs
) -> AsyncLROPoller[None]:
    """Deletes the specified Bastion Host.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param bastion_host_name: The name of the Bastion Host.
    :type bastion_host_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial DELETE when starting fresh; a continuation token
    # means we are re-attaching to an operation already in flight.
    if cont_token is None:
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            bastion_host_name=bastion_host_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # DELETE yields no body; only honor a caller-supplied `cls` hook.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # Final state for this delete is retrieved via the Location header.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}'}  # type: ignore
async def get(
    self,
    resource_group_name: str,
    bastion_host_name: str,
    **kwargs
) -> "_models.BastionHost":
    """Gets the specified Bastion Host.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param bastion_host_name: The name of the Bastion Host.
    :type bastion_host_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: BastionHost, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2020_03_01.models.BastionHost
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.BastionHost"]
    # Status-code -> exception mapping, extendable via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))

    api_version = "2020-03-01"
    accept = "application/json"

    # Fill the URL template with the escaped path parameters.
    url = self._client.format_url(
        self.get.metadata['url'],  # type: ignore
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        bastionHostName=self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )

    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('BastionHost', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}'}  # type: ignore
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    bastion_host_name: str,
    parameters: "_models.BastionHost",
    **kwargs
) -> "_models.BastionHost":
    """Issue the raw PUT request for a Bastion Host create/update.

    Long-running-operation polling is handled by :meth:`begin_create_or_update`.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.BastionHost"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the BastionHost model as the request body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'BastionHost')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Both 200 and 201 responses carry a BastionHost body.
    if response.status_code == 200:
        deserialized = self._deserialize('BastionHost', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('BastionHost', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}'}  # type: ignore
async def begin_create_or_update(
    self,
    resource_group_name: str,
    bastion_host_name: str,
    parameters: "_models.BastionHost",
    **kwargs
) -> AsyncLROPoller["_models.BastionHost"]:
    """Creates or updates the specified Bastion Host.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param bastion_host_name: The name of the Bastion Host.
    :type bastion_host_name: str
    :param parameters: Parameters supplied to the create or update Bastion Host operation.
    :type parameters: ~azure.mgmt.network.v2020_03_01.models.BastionHost
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either BastionHost or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_03_01.models.BastionHost]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.BastionHost"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial PUT when starting fresh; a continuation token
    # means we are re-attaching to an operation already in flight.
    if cont_token is None:
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            bastion_host_name=bastion_host_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response body into a BastionHost model.
        deserialized = self._deserialize('BastionHost', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # Final state for this operation is tracked via Azure-AsyncOperation.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}'}  # type: ignore
def list(
self,
**kwargs
) -> AsyncIterable["_models.BastionHostListResult"]:
"""Lists all Bastion Hosts in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either BastionHostListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.BastionHostListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BastionHostListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('BastionHostListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/bastionHosts'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.BastionHostListResult"]:
"""Lists all Bastion Hosts in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either BastionHostListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.BastionHostListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BastionHostListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('BastionHostListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts'} # type: ignore
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.