repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
cliffe/SecGen | modules/utilities/unix/audit_tools/ghidra/files/release/Ghidra/Features/Python/data/jython-2.7.1/Lib/distutils/tests/support.py | 81 | 6954 | """Support code for distutils test cases."""
import os
import sys
import shutil
import tempfile
import unittest
import sysconfig
from copy import deepcopy
import warnings
from distutils import log
from distutils.log import DEBUG, INFO, WARN, ERROR, FATAL
from distutils.core import Distribution
def capture_warnings(func):
    """Decorator that silences all warnings emitted while *func* runs.

    Returns a wrapper that executes *func* inside a ``catch_warnings``
    context with an "ignore" filter, so warning state is restored on exit.
    """
    # local import keeps the module's top-level import block untouched
    from functools import wraps

    @wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def _capture_warnings(*args, **kw):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return func(*args, **kw)
    return _capture_warnings
class LoggingSilencer(object):
    """Mix-in that silences distutils log output during a test and records
    every message so assertions can be made about what was logged."""

    def setUp(self):
        super(LoggingSilencer, self).setUp()
        # raise the threshold so nothing is actually printed
        self.threshold = log.set_threshold(log.FATAL)
        # catching warnings
        # when log will be replaced by logging
        # we won't need such monkey-patch anymore
        self._old_log = log.Log._log
        log.Log._log = self._log
        self.logs = []

    def tearDown(self):
        # restore the original threshold and the original _log method
        log.set_threshold(self.threshold)
        log.Log._log = self._old_log
        super(LoggingSilencer, self).tearDown()

    def _log(self, level, msg, args):
        """Replacement for log.Log._log that records instead of printing."""
        if level not in (DEBUG, INFO, WARN, ERROR, FATAL):
            raise ValueError('%s wrong log level' % str(level))
        self.logs.append((level, msg, args))

    def get_logs(self, *levels):
        """Return the formatted messages recorded at the given levels."""
        def _format(msg, args):
            # messages without args must not be %-formatted (msg may contain %)
            if len(args) == 0:
                return msg
            return msg % args
        return [_format(msg, args) for level, msg, args
                in self.logs if level in levels]

    def clear_logs(self):
        # forget everything recorded so far
        self.logs = []
class TempdirManager(object):
    """Mix-in class that handles temporary directories for test cases.

    This is intended to be used with unittest.TestCase.
    """

    def setUp(self):
        super(TempdirManager, self).setUp()
        self.old_cwd = os.getcwd()
        self.tempdirs = []

    def tearDown(self):
        # Restore working dir, for Solaris and derivatives, where rmdir()
        # on the current directory fails.
        os.chdir(self.old_cwd)
        super(TempdirManager, self).tearDown()
        while self.tempdirs:
            d = self.tempdirs.pop()
            # ignore removal errors on Windows/Cygwin, where open handles
            # can block rmtree
            shutil.rmtree(d, os.name in ('nt', 'cygwin'))

    def mkdtemp(self):
        """Create a temporary directory that will be cleaned up.

        Returns the path of the directory.
        """
        d = tempfile.mkdtemp()
        self.tempdirs.append(d)
        return d

    def write_file(self, path, content='xxx'):
        """Write *content* to a file at the given path.

        path can be a string or a sequence of path segments.
        """
        if isinstance(path, (list, tuple)):
            path = os.path.join(*path)
        # context manager guarantees the handle is closed even if write fails
        with open(path, 'w') as f:
            f.write(content)

    def create_dist(self, pkg_name='foo', **kw):
        """Will generate a test environment.

        This function creates:
         - a Distribution instance using keywords
         - a temporary directory with a package structure

        It returns the package directory and the distribution instance.
        """
        tmp_dir = self.mkdtemp()
        pkg_dir = os.path.join(tmp_dir, pkg_name)
        os.mkdir(pkg_dir)
        dist = Distribution(attrs=kw)
        return pkg_dir, dist
class DummyCommand:
    """Class to store options for retrieval via set_undefined_options()."""

    def __init__(self, **kwargs):
        # expose every keyword argument directly as an instance attribute
        self.__dict__.update(kwargs)

    def ensure_finalized(self):
        # command-protocol hook; a dummy has nothing to finalize
        pass
class EnvironGuard(object):
    """Mix-in that snapshots os.environ in setUp and restores it in tearDown."""

    def setUp(self):
        super(EnvironGuard, self).setUp()
        # deepcopy so later mutations of os.environ cannot alter the snapshot
        self.old_environ = deepcopy(os.environ)

    def tearDown(self):
        # put back any value the test changed or removed
        for key, value in self.old_environ.items():
            if os.environ.get(key) != value:
                os.environ[key] = value
        # iterate over a snapshot: deleting from os.environ while iterating
        # its live keys() view raises RuntimeError on Python 3
        for key in list(os.environ.keys()):
            if key not in self.old_environ:
                del os.environ[key]
        super(EnvironGuard, self).tearDown()
def copy_xxmodule_c(directory):
    """Helper for tests that need the xxmodule.c source file.

    Example use:

        def test_compile(self):
            copy_xxmodule_c(self.tmpdir)
            self.assertIn('xxmodule.c', os.listdir(self.tmpdir))

    If the source file can be found, it will be copied to *directory*.  If
    not, the test will be skipped.  Errors during copy are not caught.
    """
    source = _get_xxmodule_path()
    if source is None:
        raise unittest.SkipTest('cannot find xxmodule.c (test must run in '
                                'the python build dir)')
    shutil.copy(source, directory)
def _get_xxmodule_path():
# FIXME when run from regrtest, srcdir seems to be '.', which does not help
# us find the xxmodule.c file
srcdir = sysconfig.get_config_var('srcdir')
candidates = [
# use installed copy if available
os.path.join(os.path.dirname(__file__), 'xxmodule.c'),
# otherwise try using copy from build directory
os.path.join(srcdir, 'Modules', 'xxmodule.c'),
# srcdir mysteriously can be $srcdir/Lib/distutils/tests when
# this file is run from its parent directory, so walk up the
# tree to find the real srcdir
os.path.join(srcdir, '..', '..', '..', 'Modules', 'xxmodule.c'),
]
for path in candidates:
if os.path.exists(path):
return path
def fixup_build_ext(cmd):
    """Adjust a build_ext command so its tests pass on all platforms.

    On Unix builds configured with --enable-shared, -L. is not enough to
    find libpython<blah>.so, because regrtest runs from a tempdir rather
    than the top-level directory where the .so lives; the library search
    path must be fixed up explicitly.  On Windows debug builds, the
    command's debug attribute must be set by hand.  Mac OS X embeds
    absolute library paths into executables, so no fixup is needed there.

    Example use:

        cmd = build_ext(dist)
        support.fixup_build_ext(cmd)
        cmd.ensure_finalized()
    """
    if os.name == 'nt':
        # debug builds of Windows Python run from a *_d.exe interpreter
        cmd.debug = sys.executable.endswith('_d.exe')
    elif sysconfig.get_config_var('Py_ENABLE_SHARED'):
        # To further add to the shared builds fun on Unix, we can't just add
        # library_dirs to the Extension() instance because that doesn't get
        # plumbed through to the final compiler command.
        run_shared = sysconfig.get_config_var('RUNSHARED')
        if run_shared is None:
            cmd.library_dirs = ['.']
        elif sys.platform == 'darwin':
            cmd.library_dirs = []
        else:
            _, _, value = run_shared.partition('=')
            cmd.library_dirs = value.split(os.pathsep)
| gpl-3.0 |
gwulfs/zipline | zipline/finance/performance/position.py | 28 | 8015 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Position Tracking
=================
+-----------------+----------------------------------------------------+
| key | value |
+=================+====================================================+
| sid | the sid for the asset held in this position |
+-----------------+----------------------------------------------------+
| amount | whole number of shares in the position |
+-----------------+----------------------------------------------------+
| last_sale_price | price at last sale of the asset on the exchange |
+-----------------+----------------------------------------------------+
| cost_basis | the volume weighted average price paid per share |
+-----------------+----------------------------------------------------+
"""
from __future__ import division
from math import (
copysign,
floor,
)
from copy import copy
import logbook
import zipline.protocol as zp
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
log = logbook.Logger('Performance')
class Position(object):
    """State of a holding in a single asset (``sid``).

    Tracks the share count, the volume-weighted average cost basis per
    share, and the most recent sale price/date observed for the asset.
    """

    def __init__(self, sid, amount=0, cost_basis=0.0,
                 last_sale_price=0.0, last_sale_date=None):
        self.sid = sid
        self.amount = amount
        self.cost_basis = cost_basis  # per share
        self.last_sale_price = last_sale_price
        self.last_sale_date = last_sale_date

    def earn_dividend(self, dividend):
        """
        Register the number of shares we held at this dividend's ex date so
        that we can pay out the correct amount on the dividend's pay date.
        """
        assert dividend['sid'] == self.sid
        out = {'id': dividend['id']}

        # stock dividend
        if dividend['payment_sid']:
            out['payment_sid'] = dividend['payment_sid']
            # only whole shares are paid out; the fractional part is dropped
            out['share_count'] = floor(self.amount * float(dividend['ratio']))

        # cash dividend; net_amount takes precedence over gross_amount
        if dividend['net_amount']:
            out['cash_amount'] = self.amount * dividend['net_amount']
        elif dividend['gross_amount']:
            out['cash_amount'] = self.amount * dividend['gross_amount']

        payment_owed = zp.dividend_payment(out)
        return payment_owed

    def handle_split(self, split):
        """
        Update the position by the split ratio, and return the resulting
        fractional share that will be converted into cash.

        Returns the unused cash.
        """
        if self.sid != split.sid:
            raise Exception("updating split with the wrong sid!")

        ratio = split.ratio

        log.info("handling split for sid = " + str(split.sid) +
                 ", ratio = " + str(split.ratio))
        log.info("before split: " + str(self))

        # adjust the # of shares by the ratio
        # (if we had 100 shares, and the ratio is 3,
        #  we now have 33 shares)
        # (old_share_count / ratio = new_share_count)
        # (old_price * ratio = new_price)

        # e.g., 33.333
        raw_share_count = self.amount / float(ratio)

        # e.g., 33
        full_share_count = floor(raw_share_count)

        # e.g., 0.333
        fractional_share_count = raw_share_count - full_share_count

        # adjust the cost basis to the nearest cent, e.g., 60.0
        new_cost_basis = round(self.cost_basis * ratio, 2)

        # adjust the last sale price
        new_last_sale_price = round(self.last_sale_price * ratio, 2)

        self.cost_basis = new_cost_basis
        self.last_sale_price = new_last_sale_price
        self.amount = full_share_count

        return_cash = round(float(fractional_share_count * new_cost_basis), 2)

        log.info("after split: " + str(self))
        log.info("returning cash: " + str(return_cash))

        # return the leftover cash, which will be converted into cash
        # (rounded to the nearest cent)
        return return_cash

    def update(self, txn):
        """Apply a transaction, updating share count, cost basis and the
        last sale price/date."""
        if self.sid != txn.sid:
            raise Exception('updating position with txn for a '
                            'different sid')

        total_shares = self.amount + txn.amount

        if total_shares == 0:
            # position fully closed; no basis left to track
            self.cost_basis = 0.0
        else:
            prev_direction = copysign(1, self.amount)
            txn_direction = copysign(1, txn.amount)

            if prev_direction != txn_direction:
                # we're covering a short or closing a position
                if abs(txn.amount) > abs(self.amount):
                    # we've closed the position and gone short
                    # or covered the short position and gone long
                    self.cost_basis = txn.price
            else:
                # same direction: blend the bases, volume-weighted
                prev_cost = self.cost_basis * self.amount
                txn_cost = txn.amount * txn.price
                total_cost = prev_cost + txn_cost
                self.cost_basis = total_cost / total_shares

            # Update the last sale price if txn is
            # best data we have so far
            if self.last_sale_date is None or txn.dt > self.last_sale_date:
                self.last_sale_price = txn.price
                self.last_sale_date = txn.dt

        self.amount = total_shares

    def adjust_commission_cost_basis(self, commission):
        """
        A note about cost-basis in zipline: all positions are considered
        to share a cost basis, even if they were executed in different
        transactions with different commission costs, different prices, etc.

        Due to limitations about how zipline handles positions, zipline will
        currently spread an externally-delivered commission charge across
        all shares in a position.
        """

        if commission.sid != self.sid:
            raise Exception('Updating a commission for a different sid?')
        if commission.cost == 0.0:
            return

        # If we no longer hold this position, there is no cost basis to
        # adjust.
        if self.amount == 0:
            return

        prev_cost = self.cost_basis * self.amount
        new_cost = prev_cost + commission.cost
        self.cost_basis = new_cost / self.amount

    def __repr__(self):
        template = "sid: {sid}, amount: {amount}, cost_basis: {cost_basis}, \
last_sale_price: {last_sale_price}"
        return template.format(
            sid=self.sid,
            amount=self.amount,
            cost_basis=self.cost_basis,
            last_sale_price=self.last_sale_price
        )

    def to_dict(self):
        """
        Creates a dictionary representing the state of this position.
        Returns a dict object of the form:
        """
        return {
            'sid': self.sid,
            'amount': self.amount,
            'cost_basis': self.cost_basis,
            'last_sale_price': self.last_sale_price
        }

    def __getstate__(self):
        # pickle support: snapshot the instance dict plus a version tag
        state_dict = copy(self.__dict__)

        STATE_VERSION = 1
        state_dict[VERSION_LABEL] = STATE_VERSION

        return state_dict

    def __setstate__(self, state):
        # pickle support: restore state, rejecting too-old saved versions
        OLDEST_SUPPORTED_STATE = 1
        version = state.pop(VERSION_LABEL)

        if version < OLDEST_SUPPORTED_STATE:
            # NOTE(review): raising BaseException is questionable -- a
            # ValueError would be conventional; left unchanged since callers
            # may catch BaseException specifically.
            raise BaseException("Position saved state is too old.")

        self.__dict__.update(state)
class positiondict(dict):
    """dict subclass that lazily creates an empty Position for unknown sids."""

    def __missing__(self, key):
        default = Position(key)
        self[key] = default
        return default
| apache-2.0 |
h2oai/h2o-dev | h2o-py/tests/testdir_demos/pyunit_assembly_demo.py | 8 | 1553 | import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.assembly import H2OAssembly
from h2o.transforms.preprocessing import *
from h2o import H2OFrame
def assembly_demo():
    """Demonstrates building, fitting and exporting an H2OAssembly pipeline."""
    # the class column must be parsed as string for the countmatches step below
    fr = h2o.import_file(pyunit_utils.locate("smalldata/iris/iris_wheader.csv"), col_types=["numeric","numeric","numeric","numeric","string"])  # import data
    assembly = H2OAssembly(steps=[("col_select", H2OColSelect(["sepal_len", "petal_len", "class"])),  # col selection
                                  ("cos_sep_len", H2OColOp(op=H2OFrame.cos, col="sepal_len", inplace=True)),  # math operation
                                  ("str_cnt_species", H2OColOp(op=H2OFrame.countmatches, col="class", inplace=False, pattern="s"))])  # string operation
    result = assembly.fit(fr)  # fit the assembly
    result.show()  # show the result of the fit
    assembly.to_pojo("MungingPojoDemo")  # , path="/Users/spencer/Desktop/munging_pojo")  # export POJO

    # java api usage:
    #
    #   String rawRow = framework.nextTuple();
    #   H2OMungingPOJO munger = new GeneratedH2OMungingPojo_001();
    #   EasyPredictModelWrapper model = new EasyPredictModelWrapper(new GeneratedH2OGbmPojo_001());
    #
    #   RowData row = new RowData();
    #   row.fill(rawRow);
    #   row = munger.fit(row);
    #   BinomialModelPrediction pred = model.predictBinomial(row);
    #   // Use prediction!


# run standalone (spins up a local H2O cluster) or as part of the pyunit suite
if __name__ == "__main__":
    pyunit_utils.standalone_test(assembly_demo)
else:
    assembly_demo()
| apache-2.0 |
bxlab/HiFive_Paper | Scripts/HiCLib/bx-python-0.7.1/build/lib.linux-x86_64-2.7/bx/motif/pwm.py | 7 | 5511 | """
Classes for working with position specific matrices.
"""
from numpy import *
from copy import copy
import _pwm
class BaseMatrix(object):
    """
    Base class for position specific matrices.
    """

    def __init__(self, alphabet=None, sorted_alphabet=None,
                 char_to_index=None, values=None):
        self.alphabet = alphabet
        self.sorted_alphabet = sorted_alphabet
        self.char_to_index = char_to_index
        self.values = values

    @classmethod
    def from_rows(Class, alphabet, rows):
        """
        Build a matrix over `alphabet` from `rows`, a list with one entry per
        matrix position, each a list of values ordered like `alphabet`.
        """
        sorted_alphabet = sorted(alphabet)
        # map each character ordinal to its index in the sorted alphabet;
        # characters outside the alphabet stay at -1
        char_to_index = full(256, -1, int16)
        for index, ch in enumerate(sorted_alphabet):
            char_to_index[ord(ch)] = index
        # fill the value array position by position
        values = zeros((len(rows), len(alphabet)), float32)
        for row_index, row in enumerate(rows):
            assert len(row) == len(alphabet)
            for ch, val in zip(alphabet, row):
                values[row_index, char_to_index[ord(ch)]] = val
        matrix = Class()
        matrix.alphabet = alphabet
        matrix.sorted_alphabet = sorted_alphabet
        matrix.char_to_index = char_to_index
        matrix.values = values
        return matrix

    @classmethod
    def create_from_other(Class, other, values=None):
        """
        Clone `other`'s attributes into a new matrix, overriding the value
        array with `values` when one is provided.
        """
        m = Class()
        m.alphabet = other.alphabet
        m.sorted_alphabet = other.sorted_alphabet
        m.char_to_index = other.char_to_index
        m.values = other.values if values is None else values
        return m

    @property
    def width(self):
        """
        Size of this matrix along the sequence axis.
        """
        return self.values.shape[0]

    def reverse_complement(self):
        """
        Return the reverse complement of this matrix.  The result probably
        only makes sense if the alphabet is that of DNA ('A','C','G','T').
        """
        rval = copy(self)
        # reversing both axes swaps A<->T and C<->G while flipping the
        # position order, which is exactly the reverse complement
        rval.values = self.values[::-1, ::-1].copy()
        return rval
class FrequencyMatrix(BaseMatrix):
    """
    A position specific count/frequency matrix.
    """

    DEFAULT_CORRECTION = 0.0000000001
    """
    Default value to use for correcting when dealing with counts of zero,
    chosen to produce scoring matrices that are the same as produced by CREAD.
    """

    def to_logodds_scoring_matrix(self, background=None, correction=DEFAULT_CORRECTION):
        """
        Create a standard logodds scoring matrix.
        """
        alphabet_size = len(self.alphabet)
        if background is None:
            # uniform background over the alphabet
            background = ones(alphabet_size, float32) / alphabet_size
        # row totals as a one-column array so they broadcast per position
        row_totals = self.values.sum(axis=1)[:, newaxis]
        # clamp counts/background away from zero before taking logs
        scores = (log2(maximum(self.values, correction))
                  - log2(row_totals)
                  - log2(maximum(background, correction)))
        return ScoringMatrix.create_from_other(self, scores.astype(float32))

    def to_stormo_scoring_matrix(self, background=None):
        """
        Create a scoring matrix from this count matrix using the method from:

        Hertz, G.Z. and G.D. Stormo (1999). Identifying DNA and protein patterns with statistically
        significant alignments of multiple sequences. Bioinformatics 15(7): 563-577.
        """
        alphabet_size = len(self.alphabet)
        if background is None:
            background = ones(alphabet_size, float32) / alphabet_size
        row_totals = self.values.sum(axis=1)[:, newaxis]
        scores = (log2(self.values + background)
                  - log2(row_totals + 1) - log2(background))
        return ScoringMatrix.create_from_other(self, scores.astype(float32))
class ScoringMatrix( BaseMatrix ):
    """
    A position specific matrix containing values that are suitable for
    scoring a sequence.
    """

    def score_string( self, string ):
        """
        Score each valid position in `string` using this scoring matrix.
        Positions which were not scored are set to nan.
        """
        rval = zeros( len( string ), float32 )
        rval[:] = nan
        # the scoring loop is implemented in the _pwm C extension for speed;
        # it fills rval in place
        _pwm.score_string( self.values, self.char_to_index, string, rval )
        return rval

    def score_string_with_gaps( self, string ):
        """
        Score each valid position in `string` using this scoring matrix.
        Positions which were not scored are set to nan. Gap characters are
        ignored (matrices score across them).
        """
        rval = zeros( len( string ), float32 )
        rval[:] = nan
        _pwm.score_string_with_gaps( self.values, self.char_to_index, string, rval )
        return rval
rlisagor/freshen | freshen/checks.py | 3 | 1024 | #-*- coding: utf8 -*-
from nose.tools import *
import re as _re
import difflib as _difflib
__unittest = 1
def assert_looks_like(first, second, msg=None):
    """ Compare two strings if all contiguous whitespace is coalesced. """
    # raw strings avoid invalid-escape-sequence warnings for \s on modern Pythons
    first = _re.sub(r"\s+", " ", first.strip())
    second = _re.sub(r"\s+", " ", second.strip())
    if first != second:
        raise AssertionError(msg or "%r does not look like %r" % (first, second))
# keep a reference to nose's assert_equal before deliberately shadowing it below
_assert_equal = assert_equal

def assert_equal(first, second, msg=None):
    """Equality assertion that shows a unified diff for multi-line strings.

    Falls back to nose's original assert_equal unless both values are
    strings and at least one of them spans multiple lines.
    """
    doit = all(isinstance(s, basestring) for s in [first, second]) and \
           any("\n" in s for s in [first, second])
    if not doit:
        return _assert_equal(first, second, msg)
    if first != second:
        # build an indented unified diff labelled expected/actual
        diff = _difflib.unified_diff(first.split("\n"), second.split("\n"),
                                     "expected", "actual", lineterm="")
        diff = " " + "\n ".join(diff)
        raise AssertionError(msg or "Strings not equal\n" + diff)

# alias so both spellings work
assert_equals = assert_equal
| gpl-3.0 |
muntasirsyed/intellij-community | python/lib/Lib/site-packages/django/contrib/localflavor/uk/forms.py | 313 | 1943 | """
UK-specific Form helpers
"""
import re
from django.forms.fields import CharField, Select
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
class UKPostcodeField(CharField):
    """
    A form field that validates its input is a UK postcode.

    The regular expression used is sourced from the schema for British Standard
    BS7666 address types: http://www.govtalk.gov.uk/gdsc/schemas/bs7666-v2-0.xsd

    The value is uppercased and a space added in the correct place, if required.
    """
    default_error_messages = {
        'invalid': _(u'Enter a valid postcode.'),
    }
    # outward code (e.g. 'SW1A'); the incode is always digit + two letters
    outcode_pattern = '[A-PR-UWYZ]([0-9]{1,2}|([A-HIK-Y][0-9](|[0-9]|[ABEHMNPRVWXY]))|[0-9][A-HJKSTUW])'
    incode_pattern = '[0-9][ABD-HJLNP-UW-Z]{2}'
    # 'GIR 0AA' is the special-cased Girobank postcode
    postcode_regex = re.compile(r'^(GIR 0AA|%s %s)$' % (outcode_pattern, incode_pattern))
    space_regex = re.compile(r' *(%s)$' % incode_pattern)

    def clean(self, value):
        """Normalise and validate; returns the canonical 'OUTCODE INCODE' form."""
        value = super(UKPostcodeField, self).clean(value)
        if value == u'':
            return value
        postcode = value.upper().strip()
        # Put a single space before the incode (second part).
        postcode = self.space_regex.sub(r' \1', postcode)
        if not self.postcode_regex.search(postcode):
            raise ValidationError(self.error_messages['invalid'])
        return postcode
class UKCountySelect(Select):
    """
    A Select widget that uses a list of UK Counties/Regions as its choices.
    """
    def __init__(self, attrs=None):
        # imported lazily to avoid loading the choices list at module import
        from uk_regions import UK_REGION_CHOICES
        super(UKCountySelect, self).__init__(attrs, choices=UK_REGION_CHOICES)
class UKNationSelect(Select):
    """
    A Select widget that uses a list of UK Nations as its choices.
    """
    def __init__(self, attrs=None):
        # imported lazily to avoid loading the choices list at module import
        from uk_regions import UK_NATIONS_CHOICES
        super(UKNationSelect, self).__init__(attrs, choices=UK_NATIONS_CHOICES)
| apache-2.0 |
8u1a/plaso | tests/parsers/plist_plugins/spotlight_volume.py | 3 | 1837 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Spotlight Volume configuration plist plugin."""
import unittest
# pylint: disable=unused-import
from plaso.formatters import plist as plist_formatter
from plaso.parsers import plist
from plaso.parsers.plist_plugins import spotlight_volume
from tests.parsers.plist_plugins import test_lib
class SpotlightVolumePluginTest(test_lib.PlistPluginTestCase):
  """Tests for the Spotlight Volume configuration plist plugin."""

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._plugin = spotlight_volume.SpotlightVolumePlugin()
    self._parser = plist.PlistParser()

  def testProcess(self):
    """Tests the Process function."""
    plist_name = u'VolumeConfiguration.plist'
    event_queue_consumer = self._ParsePlistFileWithPlugin(
        self._parser, self._plugin, [plist_name], plist_name)
    event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)

    # the fixture plist is expected to yield two volume events
    self.assertEqual(len(event_objects), 2)

    timestamps = []
    for event_object in event_objects:
      timestamps.append(event_object.timestamp)

    # timestamps are expressed in microseconds since the POSIX epoch
    expected_timestamps = frozenset([
        1372139683000000, 1369657656000000])
    self.assertTrue(set(timestamps) == expected_timestamps)

    event_object = event_objects[0]
    self.assertEqual(event_object.key, u'')
    self.assertEqual(event_object.root, u'/Stores')

    expected_desc = (u'Spotlight Volume 4D4BFEB5-7FE6-4033-AAAA-'
                     u'AAAABBBBCCCCDDDD (/.MobileBackups) activated.')
    self.assertEqual(event_object.desc, expected_desc)
    expected_string = u'/Stores/ {0:s}'.format(expected_desc)
    # the short message variant is truncated to 80 characters
    expected_short = expected_string[:77] + u'...'
    self._TestGetMessageStrings(
        event_object, expected_string, expected_short)


if __name__ == '__main__':
  unittest.main()
| apache-2.0 |
donnydevito/py-academicstoday | academicstoday_project/student/tests/test_exam.py | 3 | 8464 | from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.http import QueryDict
from django.test import TestCase
from django.test import Client
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.conf.urls.static import static, settings
import json
from registrar.models import Course
from registrar.models import Teacher
from registrar.models import Student
from registrar.models import Exam
from registrar.models import ExamSubmission
from registrar.models import MultipleChoiceQuestion
from registrar.models import MultipleChoiceSubmission
from student.views import exam
TEST_USER_EMAIL = "ledo@gah.com"
TEST_USER_USERNAME = "Ledo"
TEST_USER_PASSWORD = "ContinentalUnion"
TEST_USER_EMAIL2 = "whalesquid@hideauze.com"
TEST_USER_USERNAME2 = "whalesquid"
TEST_USER_PASSWORD2 = "Evolvers"
class ExamTestCase(TestCase):
    """Integration tests for the student exam views: listing, detail,
    answer submission, exam submission and deletion."""

    def tearDown(self):
        # remove all fixtures so each test starts from a clean database
        courses = Course.objects.all()
        for course in courses:
            course.delete()
        User.objects.all().delete()

    def setUp(self):
        # Create our Trudy student (second user, used for permission tests)
        User.objects.create_user(
            email=TEST_USER_EMAIL2,
            username=TEST_USER_USERNAME2,
            password=TEST_USER_PASSWORD2
        )
        user = User.objects.get(email=TEST_USER_EMAIL2)
        teacher = Teacher.objects.create(user=user)
        Student.objects.create(user=user).save()

        # Create our Student.
        User.objects.create_user(
            email=TEST_USER_EMAIL,
            username=TEST_USER_USERNAME,
            password=TEST_USER_PASSWORD
        )
        user = User.objects.get(email=TEST_USER_EMAIL)
        teacher = Teacher.objects.create(user=user)
        student = Student.objects.create(user=user)

        # Create a test course.
        Course.objects.create(
            id=1,
            title="Comics Book Course",
            sub_title="The definitive course on comics!",
            category="",
            teacher=teacher,
        )
        course = Course.objects.get(id=1)

        # Create our assignment(s)
        Exam.objects.create(
            exam_id=1,
            exam_num=1,
            title="Hideauze",
            description="Anime related assignment.",
            worth=50,
            course=course,
        )
        exam = Exam.objects.get(exam_id=1)

        # Create questions
        MultipleChoiceQuestion.objects.create(
            question_id=2,
            exam=exam,
            title="Hideauze",
            description="Who where the Hideauze?",
            a="Former Humans",
            a_is_correct=True,
            b="Aliens",
            b_is_correct=False,
            c="Magical or Supernatural Creatures",
            c_is_correct=False,
            d="Transhumanists",
            d_is_correct=True,
            e="Heavenly Creatures",
            e_is_correct=True,
        )

    def get_logged_in_client(self):
        # helper: a test client authenticated as the primary student
        client = Client()
        client.login(
            username=TEST_USER_USERNAME,
            password=TEST_USER_PASSWORD
        )
        return client

    def test_url_resolves_to_exams_page_view(self):
        found = resolve('/course/1/exams')
        self.assertEqual(found.func, exam.exams_page)

    def test_exams_page_with_no_submissions(self):
        client = self.get_logged_in_client()
        response = client.post('/course/1/exams')
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'Comics Book Course',response.content)
        self.assertIn(b'view_exam(1);',response.content)

    def test_url_resolves_to_exams_table_view(self):
        found = resolve('/course/1/exams_table')
        self.assertEqual(found.func, exam.exams_table)

    def test_exams_table_returns_with_no_submissions(self):
        client = self.get_logged_in_client()
        response = client.post('/course/1/exams_table')
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'view_exam(1);',response.content)

    def test_url_resolves_to_delete_exam(self):
        found = resolve('/course/1/delete_exam')
        self.assertEqual(found.func, exam.delete_exam)

    def test_delete_exam_with_no_submissions(self):
        # the AJAX header makes the view return a JSON payload
        kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
        client = self.get_logged_in_client()
        response = client.post('/course/1/delete_exam',{
            'exam_id': 1,
        }, **kwargs)
        self.assertEqual(response.status_code, 200)
        json_string = response.content.decode(encoding='UTF-8')
        array = json.loads(json_string)
        self.assertEqual(array['status'], 'failed')
        self.assertEqual(array['message'], 'record does not exist')

    def test_delete_exam_with_submissions_and_correct_user(self):
        kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
        client = self.get_logged_in_client()
        # create a submission first, then delete it as its owner
        response = client.post('/course/1/exam/1/submit_exam',{}, **kwargs)
        self.assertEqual(response.status_code, 200)
        response = client.post('/course/1/delete_exam',{
            'exam_id': 1,
        }, **kwargs)
        self.assertEqual(response.status_code, 200)
        json_string = response.content.decode(encoding='UTF-8')
        array = json.loads(json_string)
        self.assertEqual(array['status'], 'success')
        self.assertEqual(array['message'], 'exam was deleted')

    def test_delete_exam_with_submissions_and_incorrect_user(self):
        kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
        client = self.get_logged_in_client()
        response = client.post('/course/1/exam/1/submit_exam',{}, **kwargs)
        self.assertEqual(response.status_code, 200)
        # switch to the second user, who does not own the submission
        client.logout()
        client.login(
            username=TEST_USER_USERNAME2,
            password=TEST_USER_PASSWORD2
        )
        response = client.post('/course/1/delete_exam',{
            'exam_id': 1,
        }, **kwargs)
        self.assertEqual(response.status_code, 200)
        json_string = response.content.decode(encoding='UTF-8')
        array = json.loads(json_string)
        self.assertEqual(array['status'], 'failed')
        self.assertEqual(array['message'], 'record does not exist')

    def test_url_resolves_to_exam_page_view(self):
        found = resolve('/course/1/exam/1')
        self.assertEqual(found.func, exam.exam_page)

    def test_assignment_page(self):
        client = self.get_logged_in_client()
        response = client.post('/course/1/exam/1')
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'Exam #1',response.content)

    def test_submit_mc_exam_answer_with_submissions(self):
        kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
        client = self.get_logged_in_client()
        response = client.post('/course/1/exam/1/submit_mc_exam_answer',{
            'question_id': 2,
            'answer': 'A',
        }, **kwargs)
        self.assertEqual(response.status_code, 200)
        json_string = response.content.decode(encoding='UTF-8')
        array = json.loads(json_string)
        self.assertEqual(array['status'], 'success')
        self.assertEqual(array['message'], 'submitted')

    def test_submit_exam_without_answering_questions(self):
        kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
        client = self.get_logged_in_client()
        response = client.post('/course/1/exam/1/submit_exam',{}, **kwargs)
        self.assertEqual(response.status_code, 200)
        json_string = response.content.decode(encoding='UTF-8')
        array = json.loads(json_string)
        self.assertEqual(array['message'], 'submitted')
        self.assertEqual(array['status'], 'success')

    def test_submit_quiz_with_answering_questions(self):
        kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
        client = self.get_logged_in_client()
        client.post('/course/1/exam/1/submit_tf_exam_answer',{
            'question_id': 1,
            'answer': 'A',
        }, **kwargs)
        response = client.post('/course/1/exam/1/submit_exam',{}, **kwargs)
        self.assertEqual(response.status_code, 200)
        json_string = response.content.decode(encoding='UTF-8')
        array = json.loads(json_string)
        self.assertEqual(array['message'], 'submitted')
        self.assertEqual(array['status'], 'success')
DailyActie/Surrogate-Model | 01-codes/tensorflow-master/tensorflow/python/framework/importer_test.py | 1 | 27395 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.importer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import device
from tensorflow.python.framework import op_def_registry
# Build and register a catalogue of dummy ops whose names encode their
# signatures: 'O' = output(s), 'I' = input(s), 'i' = int32, 'f' = float,
# 'r' = ref, 'n'/'N' = variadic, 'tl' = type list.  The importer tests
# below construct GraphDefs from these ops, so no kernels are required.
_op_list = op_def_pb2.OpList()
text_format.Merge("""
op {
  name: 'None'
}
op {
  name: 'Oi'
  output_arg { name: 'a' type: DT_INT32 }
}
op {
  name: 'Or'
  output_arg { name: 'a' type: DT_INT32 is_ref: true }
}
op {
  name: 'Of'
  output_arg { name: 'a' type: DT_FLOAT }
}
op {
  name: 'Ii'
  input_arg { name: 'a' type: DT_INT32 }
}
op {
  name: 'If'
  input_arg { name: 'a' type: DT_FLOAT }
}
op {
  name: 'Oii'
  output_arg { name: 'a' type: DT_INT32 }
  output_arg { name: 'b' type: DT_INT32 }
}
op {
  name: 'Oif'
  output_arg { name: 'a' type: DT_INT32 }
  output_arg { name: 'b' type: DT_FLOAT }
}
op {
  name: 'Iii'
  input_arg { name: 'a' type: DT_INT32 }
  input_arg { name: 'b' type: DT_INT32 }
}
op {
  name: 'Iff'
  input_arg { name: 'a' type: DT_FLOAT }
  input_arg { name: 'b' type: DT_FLOAT }
}
op {
  name: 'Iif'
  input_arg { name: 'a' type: DT_INT32 }
  input_arg { name: 'b' type: DT_FLOAT }
}
op {
  name: 'Iri'
  input_arg { name: 'a' type: DT_INT32 is_ref: true }
  input_arg { name: 'b' type: DT_INT32 }
}
op {
  name: 'In'
  input_arg { name: 'a' number_attr: 'N' type_attr: 'T' }
  attr { name: 'N' type: 'int' minimum: 1 }
  attr { name: 'T' type: 'type' }
}
op {
  name: 'Otl'
  output_arg { name: 'a' type_list_attr: 't' }
  attr { name: 'T' type: 'list(type)' minimum: 1 }
}
op {
  name: 'Unary'
  input_arg { name: 'a' type_attr: 'T' }
  output_arg { name: 'b' type_attr: 'T' }
  attr { name: 'T' type: 'type' }
}
op {
  name: 'OpWithDefaultAttr'
  output_arg { name: 'a' type: DT_INT32 }
  attr { name: 'default_float' type: 'float' default_value { f: 123.0 } }
}
""", _op_list)
op_def_registry.register_op_list(_op_list)

# NOTE(mrry): Dummy shape registrations for ops used in the tests.
for op_def in _op_list.op:
    tf.RegisterShape(op_def.name)(None)
class ImportGraphDefTest(tf.test.TestCase):
    """Tests for `tf.import_graph_def`.

    GraphDefs under test are built from the dummy ops registered at module
    level (e.g. 'Oi' = one int32 output, 'Iif' = int32 + float inputs), so
    graphs can be constructed and wired without any real kernels.
    """

    def _MakeGraphDef(self, text, producer=tf.GRAPH_DEF_VERSION,
                      min_consumer=tf.GRAPH_DEF_VERSION_MIN_CONSUMER):
        # Prepend a versions stanza so the importer accepts the graph, then
        # parse the remaining text-format nodes into a GraphDef proto.
        text = "versions: { producer: %d min_consumer: %d };\n%s" % (
            producer, min_consumer, text)
        ret = tf.GraphDef()
        text_format.Merge(text, ret)
        return ret

    def testBasic(self):
        with tf.Graph().as_default():
            a, b, c, d = tf.import_graph_def(
                self._MakeGraphDef("""
                node { name: 'A' op: 'Oif' }
                node { name: 'B' op: 'Otl'
                       attr { key: 't'
                              value { list { type: DT_INT32 type: DT_FLOAT } } } }
                node { name: 'C' op: 'In'
                       attr { key: 'N' value { i: 2 } }
                       attr { key: 'T' value { type: DT_INT32 } }
                       input: 'A:0' input: 'B:0' }
                node { name: 'D' op: 'In'
                       attr { key: 'N' value { i: 2 } }
                       attr { key: 'T' value { type: DT_FLOAT } }
                       input: 'A:1' input: 'B:1' }
                """),
                return_elements=['A', 'B', 'C', 'D'],
                name='import')

            # Assert that the import process creates distinct tensors.
            self.assertNotEqual(a.outputs[0].name, a.outputs[1].name)
            self.assertNotEqual(b.outputs[0].name, b.outputs[1].name)
            self.assertNotEqual(a.outputs[0].name, b.outputs[0].name)
            self.assertNotEqual(a.outputs[0].name, b.outputs[1].name)
            self.assertNotEqual(a.outputs[1].name, b.outputs[0].name)
            self.assertNotEqual(a.outputs[1].name, b.outputs[1].name)

            # Assert that the ops are connected according to the GraphDef topology.
            self.assertEqual(c.inputs[0], a.outputs[0])
            self.assertEqual(c.inputs[1], b.outputs[0])
            self.assertEqual(d.inputs[0], a.outputs[1])
            self.assertEqual(d.inputs[1], b.outputs[1])

            # Check the types of the returned ops and tensors.
            self.assertEqual(a.type, 'Oif')
            self.assertEqual(b.type, 'Otl')
            self.assertEqual(c.type, 'In')
            self.assertEqual(d.type, 'In')
            self.assertEqual(a.outputs[0].dtype, tf.int32)
            self.assertEqual(a.outputs[1].dtype, tf.float32)
            self.assertEqual(b.outputs[0].dtype, tf.int32)
            self.assertEqual(b.outputs[1].dtype, tf.float32)

            # Check the names of the returned ops.
            self.assertEqual(a.name, 'import/A')
            self.assertEqual(b.name, 'import/B')
            self.assertEqual(c.name, 'import/C')
            self.assertEqual(d.name, 'import/D')

    def testInputMap(self):
        # Entries in input_map replace the corresponding graph tensors.
        with tf.Graph().as_default():
            feed_a_0 = tf.constant(0, dtype=tf.int32)
            feed_b_1 = tf.constant(1, dtype=tf.int32)

            a, b, c, d = tf.import_graph_def(
                self._MakeGraphDef("""
                node { name: 'A' op: 'Oii' }
                node { name: 'B' op: 'Oii' }
                node { name: 'C' op: 'In'
                       attr { key: 'N' value { i: 2 } }
                       attr { key: 'T' value { type: DT_INT32 } }
                       input: 'A:0' input: 'B:0' }
                node { name: 'D' op: 'In'
                       attr { key: 'N' value { i: 2 } }
                       attr { key: 'T' value { type: DT_INT32 } }
                       input: 'A:1' input: 'B:1' }
                """),
                input_map={'A:0': feed_a_0, 'B:1': feed_b_1},
                return_elements=['A', 'B', 'C', 'D'])

            self.assertEqual(c.inputs[0], feed_a_0)
            self.assertEqual(c.inputs[1], b.outputs[0])
            self.assertEqual(d.inputs[0], a.outputs[1])
            self.assertEqual(d.inputs[1], feed_b_1)

    def testInputMapBytes(self):
        # Same as testInputMap but with bytes keys / return elements.
        with tf.Graph().as_default():
            feed_a_0 = tf.constant(0, dtype=tf.int32)
            feed_b_1 = tf.constant(1, dtype=tf.int32)

            a, b, c, d = tf.import_graph_def(
                self._MakeGraphDef("""
                node { name: 'A' op: 'Oii' }
                node { name: 'B' op: 'Oii' }
                node { name: 'C' op: 'In'
                       attr { key: 'N' value { i: 2 } }
                       attr { key: 'T' value { type: DT_INT32 } }
                       input: 'A:0' input: 'B:0' }
                node { name: 'D' op: 'In'
                       attr { key: 'N' value { i: 2 } }
                       attr { key: 'T' value { type: DT_INT32 } }
                       input: 'A:1' input: 'B:1' }
                """),
                input_map={b'A:0': feed_a_0, b'B:1': feed_b_1},
                return_elements=[b'A', b'B', b'C', b'D'])

            self.assertEqual(c.inputs[0], feed_a_0)
            self.assertEqual(c.inputs[1], b.outputs[0])
            self.assertEqual(d.inputs[0], a.outputs[1])
            self.assertEqual(d.inputs[1], feed_b_1)

    def testInputMapUnicode(self):
        # Same as testInputMap but with unicode keys / return elements.
        with tf.Graph().as_default():
            feed_a_0 = tf.constant(0, dtype=tf.int32)
            feed_b_1 = tf.constant(1, dtype=tf.int32)

            a, b, c, d = tf.import_graph_def(
                self._MakeGraphDef("""
                node { name: 'A' op: 'Oii' }
                node { name: 'B' op: 'Oii' }
                node { name: 'C' op: 'In'
                       attr { key: 'N' value { i: 2 } }
                       attr { key: 'T' value { type: DT_INT32 } }
                       input: 'A:0' input: 'B:0' }
                node { name: 'D' op: 'In'
                       attr { key: 'N' value { i: 2 } }
                       attr { key: 'T' value { type: DT_INT32 } }
                       input: 'A:1' input: 'B:1' }
                """),
                input_map={u'A:0': feed_a_0, u'B:1': feed_b_1},
                return_elements=[u'A', u'B', u'C', u'D'])

            self.assertEqual(c.inputs[0], feed_a_0)
            self.assertEqual(c.inputs[1], b.outputs[0])
            self.assertEqual(d.inputs[0], a.outputs[1])
            self.assertEqual(d.inputs[1], feed_b_1)

    def testImplicitZerothOutput(self):
        # An input of 'A' (no index) refers to output 0 of op A.
        with tf.Graph().as_default():
            a, b = tf.import_graph_def(
                self._MakeGraphDef("""
                node { name: 'A' op: 'Oii' }
                node { name: 'B' op: 'Ii' input: 'A' }
                """),
                return_elements=['A', 'B'])

            self.assertEqual(b.inputs[0], a.outputs[0])

    def testInputMapImplicitZerothOutput(self):
        # An input_map key of 'A' (no index) maps output 0 of op A.
        with tf.Graph().as_default():
            feed_a_0 = tf.constant(0, dtype=tf.int32)
            b, = tf.import_graph_def(
                self._MakeGraphDef("""
                node { name: 'A' op: 'Oii' }
                node { name: 'B' op: 'Ii' input: 'A:0' }
                """),
                input_map={'A': feed_a_0},
                return_elements=['B'])

            self.assertEqual(b.inputs[0], feed_a_0)

    def testWithControlDependency(self):
        # '^A' in a node's inputs is a control edge, not a data edge.
        with tf.Graph().as_default():
            a, b = tf.import_graph_def(
                self._MakeGraphDef("""
                node { name: 'A' op: 'None' }
                node { name: 'B' op: 'None' input: '^A' }
                """),
                return_elements=['A', 'B'])

            self.assertEqual(b.control_inputs, [a])

    def testWithRefs(self):
        # Ref-typed outputs ('Or') can feed both ref and non-ref inputs.
        with tf.Graph().as_default():
            a, b, c, d = tf.import_graph_def(
                self._MakeGraphDef("""
                node { name: 'A' op: 'Or' }
                node { name: 'B' op: 'Oi' }
                node { name: 'C' op: 'Iii' input: 'A:0' input: 'B:0' }
                node { name: 'D' op: 'Iri' input: 'A:0' input: 'B:0' }
                """),
                return_elements=['A', 'B', 'C', 'D'])

            self.assertEqual(c.inputs[0], a.outputs[0])
            self.assertEqual(c.inputs[1], b.outputs[0])
            self.assertEqual(d.inputs[0], a.outputs[0])
            self.assertEqual(d.inputs[1], b.outputs[0])

            self.assertEqual(a.outputs[0].dtype, tf.int32_ref)
            self.assertEqual(c._input_dtypes, [tf.int32, tf.int32])
            self.assertEqual(c.outputs, [])
            self.assertEqual(d._input_dtypes,
                             [tf.int32_ref, tf.int32])
            self.assertEqual(d.outputs, [])

    def testCyclic(self):
        # The importer supports graphs with data-dependency cycles.
        with tf.Graph().as_default():
            a, b = tf.import_graph_def(
                self._MakeGraphDef("""
                node { name: 'A' op: 'Unary'
                       attr { key: 'T' value { type: DT_INT32 } } input: 'B:0' }
                node { name: 'B' op: 'Unary'
                       attr { key: 'T' value { type: DT_INT32 } } input: 'A:0' }
                """),
                return_elements=['A', 'B'])

            self.assertEqual(a.inputs[0], b.outputs[0])
            self.assertEqual(b.inputs[0], a.outputs[0])

    def testTypeMismatchInGraphDef(self):
        with tf.Graph().as_default():
            with self.assertRaises(ValueError) as e:
                tf.import_graph_def(
                    self._MakeGraphDef("""
                    node { name: 'A' op: 'Oi' }
                    node { name: 'B' op: 'If' input: 'A:0' }
                    """))
            self.assertTrue(
                'Cannot convert a tensor of type int32 to an input of type float' in
                str(e.exception))

    def testInvalidSignatureTooManyInputsInGraphDef(self):
        with tf.Graph().as_default():
            with self.assertRaises(ValueError) as e:
                tf.import_graph_def(
                    self._MakeGraphDef("""
                    node { name: 'A' op: 'Oi' }
                    node { name: 'B' op: 'None' input: 'A:0' }
                    """))
            self.assertTrue('More inputs specified (\'A:0\') than the op expects' in
                            str(e.exception))

    def testInvalidSignatureNotEnoughInputsInGraphDef(self):
        with tf.Graph().as_default():
            with self.assertRaises(ValueError) as e:
                tf.import_graph_def(
                    self._MakeGraphDef("""
                    node { name: 'A' op: 'Oi' }
                    node { name: 'B' op: 'Iif' input: 'A:0' }
                    """))
            self.assertTrue('Input types mismatch (expected \'int32, float32\' but '
                            'got \'int32\')' in str(e.exception))

    def testMissingInputOpInGraphDef(self):
        with tf.Graph().as_default():
            with self.assertRaises(ValueError) as e:
                tf.import_graph_def(
                    self._MakeGraphDef("""
                    node { name: 'B' op: 'If' input: 'A:0' }
                    """))
            self.assertTrue("Input tensor 'A:0' not found" in str(e.exception))

    def testMissingInputOpInGraphDefButAppearsInInputMap(self):
        # A missing producer op is acceptable if input_map supplies the tensor.
        with tf.Graph().as_default():
            feed_a_0 = tf.constant(5.0)
            b, = tf.import_graph_def(
                self._MakeGraphDef("""
                node { name: 'B' op: 'If' input: 'A:0' }
                """),
                input_map={'A:0': feed_a_0},
                return_elements=['B'])
            self.assertEqual(b.inputs[0], feed_a_0)

    def testMissingInputTensorInGraphDef(self):
        with tf.Graph().as_default():
            with self.assertRaises(ValueError) as e:
                tf.import_graph_def(
                    self._MakeGraphDef("""
                    node { name: 'A' op: 'Of' }
                    node { name: 'B' op: 'If' input: 'A:1' }
                    """))
            self.assertTrue("Input tensor 'A:1' not found" in str(e.exception))

    def testMissingControlInputInGraphDef(self):
        with tf.Graph().as_default():
            with self.assertRaises(ValueError) as e:
                tf.import_graph_def(
                    self._MakeGraphDef("""
                    node { name: 'B' op: 'None' input: '^A' }
                    """))
            self.assertTrue("Control input '^A' not found" in str(e.exception))

    def testInvalidTensorNameOutputIndexInGraphDef(self):
        with tf.Graph().as_default():
            with self.assertRaises(ValueError) as e:
                tf.import_graph_def(
                    self._MakeGraphDef("""
                    node { name: 'B' op: 'None' input: 'A:B' }
                    """))
            self.assertEqual("Cannot convert 'A:B' to a tensor name.",
                             str(e.exception))

    def testInvalidTensorNameInGraphDef(self):
        with tf.Graph().as_default():
            with self.assertRaises(ValueError) as e:
                tf.import_graph_def(
                    self._MakeGraphDef("""
                    node { name: 'B' op: 'None' input: 'A:B:0' }
                    """))
            self.assertEqual("Cannot convert 'A:B:0' to a tensor name.",
                             str(e.exception))

    def testMissingReturnOperation(self):
        with tf.Graph().as_default():
            with self.assertRaises(ValueError) as e:
                tf.import_graph_def(
                    self._MakeGraphDef("""
                    node { name: 'A' op: 'None' }
                    """),
                    return_elements=['B'])
            self.assertTrue("return_element 'B' not found in graph_def." in
                            str(e.exception))

    def testMissingReturnTensor(self):
        # Three failure modes: bad output index, unknown op, malformed name.
        with tf.Graph().as_default():
            with self.assertRaises(ValueError) as e:
                tf.import_graph_def(
                    self._MakeGraphDef("""
                    node { name: 'A' op: 'Oi' }
                    """),
                    return_elements=['A:1'])
            self.assertTrue("return_element 'A:1' not found in graph_def." in
                            str(e.exception))

            with self.assertRaises(ValueError) as e:
                tf.import_graph_def(
                    self._MakeGraphDef("""
                    node { name: 'A' op: 'Oi' }
                    """),
                    return_elements=['B:0'])
            self.assertTrue("return_element 'B:0' not found in graph_def." in
                            str(e.exception))

            with self.assertRaises(ValueError) as e:
                tf.import_graph_def(
                    self._MakeGraphDef("""
                    node { name: 'A' op: 'Oi' }
                    """),
                    return_elements=['A:B:0'])
            self.assertTrue("return_element 'A:B:0' not found in graph_def." in
                            str(e.exception))

    def testMissingInputMap(self):
        # Every input_map key must name a tensor that exists in graph_def.
        with tf.Graph().as_default():
            with self.assertRaises(ValueError) as e:
                tf.import_graph_def(
                    self._MakeGraphDef("""
                    node { name: 'A' op: 'None' }
                    """),
                    input_map={'B:0': tf.constant(5.0)})
            self.assertTrue('not found in graph_def: [B:0]' in str(e.exception))

    def testInputMapTypeMismatch(self):
        with tf.Graph().as_default():
            with self.assertRaises(ValueError) as e:
                tf.import_graph_def(
                    self._MakeGraphDef("""
                    node { name: 'A' op: 'Oi' }
                    node { name: 'B' op: 'Ii' input: 'A:0' }
                    """),
                    input_map={'A:0': tf.constant(5.0)})
            self.assertTrue(
                'Cannot convert a tensor of type float32 to an input of type int32.'
                in str(e.exception))

    def testNoReturns(self):
        # Without return_elements the call returns None but still imports.
        with tf.Graph().as_default() as g:
            ret = tf.import_graph_def(
                self._MakeGraphDef("""
                node { name: 'A' op: 'None' }
                """))
            self.assertEqual(ret, None)

            a = g.get_operation_by_name('import/A')
            self.assertEqual(a.type, 'None')

    def testOverrideNamePrefix(self):
        with tf.Graph().as_default():
            a, = tf.import_graph_def(
                self._MakeGraphDef("""
                node { name: 'A' op: 'None' }
                """),
                return_elements=['A'], name='imported_graph')
            self.assertEqual(a.name, 'imported_graph/A')

    def testEmptyGraph(self):
        # Importing an empty GraphDef must not mutate the target graph.
        with tf.Graph().as_default() as g:
            init_version = g.version
            tf.import_graph_def(self._MakeGraphDef(''))
            self.assertEqual(init_version, g.version)

    def testInvalidInputForGraphDef(self):
        with tf.Graph().as_default():
            with self.assertRaises(TypeError) as e:
                tf.import_graph_def('')
            self.assertEqual(
                'graph_def must be a GraphDef proto.', str(e.exception))

    def testInvalidInputForInputMap(self):
        with tf.Graph().as_default():
            with self.assertRaises(TypeError) as e:
                tf.import_graph_def(self._MakeGraphDef(''),
                                    input_map=[tf.constant(5.0)])
            self.assertEqual('input_map must be a dictionary mapping strings to '
                             'Tensor objects.', str(e.exception))

    def testInvalidInputForReturnOperations(self):
        with tf.Graph().as_default():
            with self.assertRaises(TypeError) as e:
                tf.import_graph_def(self._MakeGraphDef(''), return_elements=[7])
            self.assertEqual(
                'return_elements must be a list of strings.', str(e.exception))

    def testWithExtensionAndAttr(self):
        # Round-trips a real graph (constant + pack) through as_graph_def.
        with tf.Graph().as_default() as g:
            c = tf.constant(5.0, dtype=tf.float32, name='c')
            tf.pack([c, c], name='pack')
        gdef = g.as_graph_def()

        with self.test_session():
            pack, = tf.import_graph_def(gdef, return_elements=['pack'])
            self.assertAllEqual(pack.outputs[0].eval(), [5.0, 5.0])

    def testWithDevice(self):
        # Device annotations survive import and merge with the outer scope.
        with tf.Graph().as_default() as g:
            # No device.
            a = tf.constant(3.0, name='a')

            with tf.device('/cpu:0'):
                b = tf.constant(4.0, name='b')
            with tf.device('/job:worker'):
                c = tf.constant(5.0, name='c')

        gdef = g.as_graph_def()

        with tf.Graph().as_default():
            a2, b2, c2 = tf.import_graph_def(
                gdef, return_elements=['a', 'b', 'c'])
            self.assertEqual(a.device, a2.device)
            self.assertEqual(b.device, b2.device)
            self.assertEqual(c.device, c2.device)

        with tf.Graph().as_default():
            with tf.device(device.merge_device('/task:0')):
                a3, b3, c3 = tf.import_graph_def(
                    gdef, return_elements=['a', 'b', 'c'])
                self.assertEqual('/task:0', a3.device)
                self.assertEqual('/task:0/device:CPU:0', b3.device)  # canonicalized.
                self.assertEqual(c.device + '/task:0', c3.device)

        with tf.Graph().as_default():
            with tf.device(device.merge_device('/job:ps')):
                a4, b4, c4 = tf.import_graph_def(
                    gdef, return_elements=['a', 'b', 'c'])
                self.assertEqual('/job:ps', a4.device)
                self.assertEqual('/job:ps/device:CPU:0', b4.device)  # canonicalized.
                self.assertEqual(c.device, c4.device)  # worker overrides ps.

        with tf.Graph().as_default():
            with tf.device(device.merge_device('/gpu:0')):
                a5, b5, c5 = tf.import_graph_def(
                    gdef, return_elements=['a', 'b', 'c'])
                self.assertEqual('/device:GPU:0', a5.device)
                self.assertEqual('/device:CPU:0', b5.device)  # cpu overrides gpu.
                self.assertEqual(c.device + '/device:GPU:0', c5.device)

    def testWithDeviceFunctionDependingOnInputs(self):
        # Device functions see fully-wired ops (inputs available) at import.
        with tf.Graph().as_default() as g:
            with tf.device("/job:ps"):
                v = tf.Variable(1.0)
            unused_assign_op = v.assign(2.0)
            unused_assign_2_op = v.assign(3.0)
            unused_add_t = v + v
        gdef = g.as_graph_def()

        # We'll use the following device function to observe ops with two inputs.
        ops_with_two_inputs = []

        def input_counter(op):
            if any(in_t.dtype.is_ref_dtype for in_t in op.inputs):
                ops_with_two_inputs.append(op)
            return ""

        with tf.Graph().as_default() as g:
            with tf.device(input_counter):
                tf.import_graph_def(gdef)

        # We expect to see the initializer, two assign operations, and the add op.
        self.assertEqual(4, len(ops_with_two_inputs))

    def testGradient(self):
        # Gradients can be taken through imported subgraphs.
        with tf.Graph().as_default() as g:
            inputs = tf.placeholder(tf.float32, shape=[None, 100], name="input")
            weights = tf.placeholder(tf.float32, shape=[100, 10], name="weights")
            biases = tf.placeholder(tf.float32, shape=[10], name="biases")
            activations = tf.nn.relu(tf.matmul(inputs, weights) + biases,
                                     name="activations")
            loss = tf.reduce_mean(activations, name="loss")
        gdef = g.as_graph_def()

        with tf.Graph().as_default() as g:
            input_placeholder = tf.placeholder(tf.float32, shape=[32, 100])
            weights_var = tf.Variable(tf.truncated_normal([100, 10]), name="weights")
            biases_var = tf.Variable(tf.zeros([10]), name="biases")
            activations, loss = tf.import_graph_def(
                gdef,
                input_map={"input:0": input_placeholder,
                           "weights:0": weights_var,
                           "biases:0": biases_var},
                return_elements=["activations:0", "loss:0"])
            self.assertEqual([32, 10], activations.get_shape())
            self.assertEqual([], loss.get_shape())
            weights_grad, biases_grad = tf.gradients(loss, [weights_var, biases_var])
            self.assertEqual([100, 10], weights_grad.get_shape())
            self.assertEqual([10], biases_grad.get_shape())

    def testLargeGraph(self):
        with self.test_session():
            # The default message byte limit is 64M. Ours is 2G with a warning at 512.
            # Adding a 150M entries float32 tensor should blow through the warning,
            # but not the hard limit.
            input_shape = [150, 1024, 1024]
            tensor_input = np.random.rand(*input_shape).astype(np.float32)
            t = tf.constant(tensor_input, shape=input_shape)
            g = tf.identity(t)
            g.eval()

    def testVersion(self):
        # Imported version metadata is recorded on the target graph.
        v0 = tf.GRAPH_DEF_VERSION_MIN_CONSUMER
        v2 = tf.GRAPH_DEF_VERSION
        v1 = (v0 + v2) // 2
        for producer in v0, v1, v2:
            for min_consumer in v0, v1, v2:
                with tf.Graph().as_default():
                    a, = tf.import_graph_def(
                        self._MakeGraphDef("node { name: 'A' op: 'Oii' }",
                                           producer=producer, min_consumer=min_consumer),
                        return_elements=['A'])
                    self.assertEqual(a.graph.graph_def_versions.producer, producer)
                    self.assertEqual(a.graph.graph_def_versions.min_consumer,
                                     min_consumer)

    def testVersionLow(self):
        # The version error surfaces at session run time, not import time.
        with tf.Graph().as_default() as g:
            pat = (r"GraphDef producer version -1 below min producer %d supported "
                   r"by TensorFlow \S+\. Please regenerate your graph.$" %
                   tf.GRAPH_DEF_VERSION_MIN_PRODUCER)
            tf.import_graph_def(self._MakeGraphDef("", producer=-1))
            x = tf.constant(7)  # Need at least one op to get a C++ graph generated
            with self.test_session(graph=g) as sess:
                with self.assertRaisesRegexp(Exception, pat):
                    sess.run(x)

    def testVersionHigh(self):
        # The version error surfaces at session run time, not import time.
        with tf.Graph().as_default() as g:
            pat = (r"GraphDef min consumer version %d above current version %d "
                   r"for TensorFlow \S+\. Please upgrade TensorFlow\.$" %
                   (1 << 30, tf.GRAPH_DEF_VERSION))
            tf.import_graph_def(self._MakeGraphDef("", min_consumer=1 << 30))
            x = tf.constant(7)  # Need at least one op to get a C++ graph generated
            with self.test_session(graph=g) as sess:
                with self.assertRaisesRegexp(Exception, pat):
                    sess.run(x)

    def testDefaultAttrsAdded(self):
        # Attrs absent from the GraphDef get their registered default values.
        with tf.Graph().as_default():
            a = tf.import_graph_def(
                self._MakeGraphDef("""
                node { name: 'A' op: 'OpWithDefaultAttr' }
                """),
                return_elements=['A'])
            self.assertEqual(123.0, a[0].get_attr("default_float"))
# Run the test suite when this module is executed as a script.
if __name__ == '__main__':
    tf.test.main()
| mit |
ryuunosukeyoshi/PartnerPoi-Bot | lib/youtube_dl/extractor/howstuffworks.py | 34 | 4741 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
find_xpath_attr,
int_or_none,
js_to_json,
unescapeHTML,
determine_ext,
)
class HowStuffWorksIE(InfoExtractor):
    """Extractor for video pages on any *.howstuffworks.com subdomain."""

    # The optional leading digits in the slug are a numeric article id; the
    # remainder becomes the display id.
    _VALID_URL = r'https?://[\da-z-]+\.howstuffworks\.com/(?:[^/]+/)*(?:\d+-)?(?P<id>.+?)-video\.htm'
    _TESTS = [
        {
            'url': 'http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm',
            'info_dict': {
                'id': '450221',
                'ext': 'flv',
                'title': 'Cool Jobs - Iditarod Musher',
                'description': 'Cold sleds, freezing temps and warm dog breath... an Iditarod musher\'s dream. Kasey-Dee Gardner jumps on a sled to find out what the big deal is.',
                'display_id': 'cool-jobs-iditarod-musher',
                'thumbnail': r're:^https?://.*\.jpg$',
                'duration': 161,
            },
            'skip': 'Video broken',
        },
        {
            'url': 'http://adventure.howstuffworks.com/7199-survival-zone-food-and-water-in-the-savanna-video.htm',
            'info_dict': {
                'id': '453464',
                'ext': 'mp4',
                'title': 'Survival Zone: Food and Water In the Savanna',
                'description': 'Learn how to find both food and water while trekking in the African savannah. In this video from the Discovery Channel.',
                'display_id': 'survival-zone-food-and-water-in-the-savanna',
                'thumbnail': r're:^https?://.*\.jpg$',
            },
        },
        {
            'url': 'http://entertainment.howstuffworks.com/arts/2706-sword-swallowing-1-by-dan-meyer-video.htm',
            'info_dict': {
                'id': '440011',
                'ext': 'mp4',
                'title': 'Sword Swallowing #1 by Dan Meyer',
                'description': 'Video footage (1 of 3) used by permission of the owner Dan Meyer through Sword Swallowers Association International <www.swordswallow.org>',
                'display_id': 'sword-swallowing-1-by-dan-meyer',
                'thumbnail': r're:^https?://.*\.jpg$',
            },
        },
        {
            'url': 'http://shows.howstuffworks.com/stuff-to-blow-your-mind/optical-illusions-video.htm',
            'only_matching': True,
        }
    ]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # The page embeds clip metadata as a JS object literal; convert it
        # to strict JSON before parsing.
        clip_js = self._search_regex(
            r'(?s)var clip = ({.*?});', webpage, 'clip info')
        clip_info = self._parse_json(
            clip_js, display_id, transform_source=js_to_json)

        video_id = clip_info['content_id']
        formats = []
        # Collect direct media URLs from the clip object: HLS, FLV and MP4.
        m3u8_url = clip_info.get('m3u8')
        if m3u8_url and determine_ext(m3u8_url) == 'm3u8':
            formats.extend(self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', format_id='hls', fatal=True))
        flv_url = clip_info.get('flv_url')
        if flv_url:
            formats.append({
                'url': flv_url,
                'format_id': 'flv',
            })
        for video in clip_info.get('mp4', []):
            formats.append({
                'url': video['src'],
                # e.g. '400k' -> vbr 400
                'format_id': 'mp4-%s' % video['bitrate'],
                'vbr': int_or_none(video['bitrate'].rstrip('k')),
            })

        if not formats:
            # Fall back to the SMIL service when the clip object exposes no
            # direct media URLs.
            smil = self._download_xml(
                'http://services.media.howstuffworks.com/videos/%s/smil-service.smil' % video_id,
                video_id, 'Downloading video SMIL')

            http_base = find_xpath_attr(
                smil,
                './{0}head/{0}meta'.format('{http://www.w3.org/2001/SMIL20/Language}'),
                'name',
                'httpBase').get('content')

            URL_SUFFIX = '?v=2.11.3&fp=LNX 11,2,202,356&r=A&g=A'

            for video in smil.findall(
                    './{0}body/{0}switch/{0}video'.format('{http://www.w3.org/2001/SMIL20/Language}')):
                vbr = int_or_none(video.attrib['system-bitrate'], scale=1000)
                formats.append({
                    'url': '%s/%s%s' % (http_base, video.attrib['src'], URL_SUFFIX),
                    'format_id': '%dk' % vbr,
                    'vbr': vbr,
                })

        self._sort_formats(formats)

        return {
            'id': '%s' % video_id,
            'display_id': display_id,
            'title': unescapeHTML(clip_info['clip_title']),
            'description': unescapeHTML(clip_info.get('caption')),
            'thumbnail': clip_info.get('video_still_url'),
            'duration': int_or_none(clip_info.get('duration')),
            'formats': formats,
        }
| gpl-3.0 |
xneby/tcs-web | tcsweb/settings.py | 1 | 2781 | import os
from django.conf import global_settings
# NOTE(review): development configuration — DEBUG must be False in production.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

# Recipients of error mails (see the LOGGING mail_admins handler below).
ADMINS = (
    ('Karol Farbiś', 'kpchwk@gmail.com'),
)

MANAGERS = ADMINS

# Absolute path of the project checkout (parent of this settings package).
INSTALL_DIR = os.path.split(os.path.realpath(__file__ + '/../'))[0]

# Development database: file-based SQLite inside the checkout.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': INSTALL_DIR + '/database/dev.db',
    }
}

# Empty is only acceptable while DEBUG is True; list real hosts for deploys.
ALLOWED_HOSTS = []

TIME_ZONE = 'Europe/Warsaw'

LANGUAGE_CODE = 'pl'

SITE_ID = 1

USE_I18N = True

USE_L10N = True

USE_TZ = True

# User-uploaded media is not configured.
MEDIA_ROOT = ''

MEDIA_URL = ''

# collectstatic target directory and the URL prefix it is served under.
STATIC_ROOT = INSTALL_DIR + '/static/'

STATIC_URL = '/static/'

STATICFILES_DIRS = (
)

STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# NOTE(review): secret key is checked into source control — rotate it and
# load from the environment before any production deployment.
SECRET_KEY = 'ssq1vd!z)rt%2lxf9+4izyi86)aej4y4ny^+wv25me00*+w)pp'

# Expose `request` in templates in addition to the framework defaults.
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
    'django.core.context_processors.request',
)

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'tcsweb.urls'

WSGI_APPLICATION = 'tcsweb.wsgi.application'

TEMPLATE_DIRS = (
    # INSTALL_DIR + '/tcs/templates/',
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'bootstrap_toolkit',
    'tcs',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)

# Mail unhandled request errors to ADMINS, but only when DEBUG is off
# (enforced by the require_debug_false filter).
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}

# Authentication redirect targets.
LOGIN_URL = '/tcs/login/'
LOGIN_REDIRECT_URL = '/'
| gpl-2.0 |
ltiao/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 30 | 44274 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import ignore_warnings
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
    """SGDClassifier variant that always feeds CSR-sparse input to the
    base estimator, so the shared tests exercise the sparse code paths."""

    def fit(self, X, y, *args, **kw):
        return SGDClassifier.fit(self, sp.csr_matrix(X), y, *args, **kw)

    def partial_fit(self, X, y, *args, **kw):
        return SGDClassifier.partial_fit(self, sp.csr_matrix(X), y, *args, **kw)

    def decision_function(self, X):
        return SGDClassifier.decision_function(self, sp.csr_matrix(X))

    def predict_proba(self, X):
        return SGDClassifier.predict_proba(self, sp.csr_matrix(X))
class SparseSGDRegressor(SGDRegressor):
    """SGDRegressor variant that converts its input to CSR sparse format
    before delegating, so the shared tests exercise the sparse paths."""

    def fit(self, X, y, *args, **kw):
        X_csr = sp.csr_matrix(X)
        return super(SparseSGDRegressor, self).fit(X_csr, y, *args, **kw)

    def partial_fit(self, X, y, *args, **kw):
        X_csr = sp.csr_matrix(X)
        return super(SparseSGDRegressor, self).partial_fit(X_csr, y, *args, **kw)

    def decision_function(self, X, *args, **kw):
        X_csr = sp.csr_matrix(X)
        return super(SparseSGDRegressor, self).decision_function(X_csr, *args, **kw)
# Test Data

# test sample 1: two linearly separable int clusters with labels 1 and 2.
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]

# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
               [1, 1], [0.75, 0.5], [1.5, 1.5],
               [-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]

# test sample 3: one-hot style features, binary labels.
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
               [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
               [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
               [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])

# test sample 4 - two more or less redundent feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
               [1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
               [0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
               [0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])

# NOTE(review): loading iris here is a module-import-time side effect.
iris = datasets.load_iris()

# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]

# Classification Test Case
class CommonTest(object):
    """Tests shared between the dense and sparse SGD test cases.

    Subclasses set ``factory_class`` to the estimator class under test.
    """

    def factory(self, **kwargs):
        # Default to a fixed seed so the tests are reproducible unless a
        # test explicitly requests a different random_state.
        if "random_state" not in kwargs:
            kwargs["random_state"] = 42
        return self.factory_class(**kwargs)

    # a simple implementation of ASGD to use for testing
    # uses squared loss to find the gradient
    def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
        if weight_init is None:
            weights = np.zeros(X.shape[1])
        else:
            weights = weight_init

        average_weights = np.zeros(X.shape[1])
        intercept = intercept_init
        average_intercept = 0.0
        decay = 1.0

        # sparse data has a fixed decay of .01
        if (isinstance(self, SparseSGDClassifierTestCase) or
                isinstance(self, SparseSGDRegressorTestCase)):
            decay = .01

        for i, entry in enumerate(X):
            # One plain SGD step on the squared loss ...
            p = np.dot(entry, weights)
            p += intercept
            gradient = p - y[i]
            weights *= 1.0 - (eta * alpha)
            weights += -(eta * gradient * entry)
            intercept += -(eta * gradient) * decay

            # ... followed by an update of the running averages.
            average_weights *= i
            average_weights += weights
            average_weights /= i + 1.0

            average_intercept *= i
            average_intercept += intercept
            average_intercept /= i + 1.0

        return average_weights, average_intercept

    def _test_warm_start(self, X, Y, lr):
        # Test that explicit warm restart...
        clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
                           learning_rate=lr)
        clf.fit(X, Y)

        clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
                            learning_rate=lr)
        clf2.fit(X, Y,
                 coef_init=clf.coef_.copy(),
                 intercept_init=clf.intercept_.copy())

        # ... and implicit warm restart are equivalent.
        clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
                            warm_start=True, learning_rate=lr)
        clf3.fit(X, Y)

        assert_equal(clf3.t_, clf.t_)
        assert_array_almost_equal(clf3.coef_, clf.coef_)

        clf3.set_params(alpha=0.001)
        clf3.fit(X, Y)

        assert_equal(clf3.t_, clf2.t_)
        assert_array_almost_equal(clf3.coef_, clf2.coef_)

    def test_warm_start_constant(self):
        self._test_warm_start(X, Y, "constant")

    def test_warm_start_invscaling(self):
        self._test_warm_start(X, Y, "invscaling")

    def test_warm_start_optimal(self):
        self._test_warm_start(X, Y, "optimal")

    def test_input_format(self):
        # Input format tests.
        clf = self.factory(alpha=0.01, n_iter=5,
                           shuffle=False)
        clf.fit(X, Y)
        Y_ = np.array(Y)[:, np.newaxis]

        # A two-column target must be rejected by the classifier.
        Y_ = np.c_[Y_, Y_]
        assert_raises(ValueError, clf.fit, X, Y_)

    def test_clone(self):
        # Test whether clone works ok.
        clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
        clf = clone(clf)
        clf.set_params(penalty='l2')
        clf.fit(X, Y)

        clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
        clf2.fit(X, Y)

        assert_array_equal(clf.coef_, clf2.coef_)

    def test_plain_has_no_average_attr(self):
        # average=True exposes the averaging attributes ...
        clf = self.factory(average=True, eta0=.01)
        clf.fit(X, Y)

        assert_true(hasattr(clf, 'average_coef_'))
        assert_true(hasattr(clf, 'average_intercept_'))
        assert_true(hasattr(clf, 'standard_intercept_'))
        assert_true(hasattr(clf, 'standard_coef_'))

        # ... and a plain fit must not.
        clf = self.factory()
        clf.fit(X, Y)

        assert_false(hasattr(clf, 'average_coef_'))
        assert_false(hasattr(clf, 'average_intercept_'))
        assert_false(hasattr(clf, 'standard_intercept_'))
        assert_false(hasattr(clf, 'standard_coef_'))

    def test_late_onset_averaging_not_reached(self):
        # With average=600 (threshold never reached in 100 partial fits),
        # results must match the non-averaging estimator.
        clf1 = self.factory(average=600)
        clf2 = self.factory()
        for _ in range(100):
            if isinstance(clf1, SGDClassifier):
                clf1.partial_fit(X, Y, classes=np.unique(Y))
                clf2.partial_fit(X, Y, classes=np.unique(Y))
            else:
                clf1.partial_fit(X, Y)
                clf2.partial_fit(X, Y)

        assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
        assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)

    def test_late_onset_averaging_reached(self):
        eta0 = .001
        alpha = .0001
        # Recode labels to {-1, +1} for the squared-loss reference.
        Y_encode = np.array(Y)
        Y_encode[Y_encode == 1] = -1.0
        Y_encode[Y_encode == 2] = 1.0

        clf1 = self.factory(average=7, learning_rate="constant",
                            loss='squared_loss', eta0=eta0,
                            alpha=alpha, n_iter=2, shuffle=False)
        clf2 = self.factory(average=0, learning_rate="constant",
                            loss='squared_loss', eta0=eta0,
                            alpha=alpha, n_iter=1, shuffle=False)

        clf1.fit(X, Y_encode)
        clf2.fit(X, Y_encode)

        # Averaging kicks in after clf2's single pass; compare clf1 against
        # the reference ASGD warm-started from clf2's solution.
        average_weights, average_intercept = \
            self.asgd(X, Y_encode, eta0, alpha,
                      weight_init=clf2.coef_.ravel(),
                      intercept_init=clf2.intercept_)

        assert_array_almost_equal(clf1.coef_.ravel(),
                                  average_weights.ravel(),
                                  decimal=16)
        assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)

    @raises(ValueError)
    def test_sgd_bad_alpha_for_optimal_learning_rate(self):
        # Check whether expected ValueError on bad alpha, i.e. 0
        # since alpha is used to compute the optimal learning rate
        self.factory(alpha=0, learning_rate="optimal")
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
    # partial_fit with class_weight='balanced' is not supported and must
    # raise a ValueError pointing the user at compute_class_weight.
    # NOTE(review): the regexp below (including the "you can us a" typo)
    # must match the library's actual error message verbatim -- do not
    # "fix" the wording here unless the library message changes too.
    assert_raises_regexp(ValueError,
                         "class_weight 'balanced' is not supported for "
                         "partial_fit. In order to use 'balanced' weights, "
                         "use compute_class_weight\('balanced', classes, y\). "
                         "In place of y you can us a large enough sample "
                         "of the full training set target to properly "
                         "estimate the class frequency distributions. "
                         "Pass the resulting weights as the class_weight "
                         "parameter.",
                         self.factory(class_weight='balanced').partial_fit,
                         X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
    # Multi-class test case with explicit warm-start coefficients:
    # fitting with coef_init/intercept_init of the right shape must
    # succeed and produce the expected predictions.
    clf = self.factory(alpha=0.01, n_iter=20)
    clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
            intercept_init=np.zeros(3))
    assert_equal(clf.coef_.shape, (3, 2))
    # BUG FIX: this was assert_true(clf.intercept_.shape, (3,)), which
    # always passes because the second argument is only the failure
    # message; use assert_equal to actually compare the shape.
    assert_equal(clf.intercept_.shape, (3,))
    pred = clf.predict(T2)
    assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
    """Check SGDClassifier.predict_proba / predict_log_proba.

    - hinge loss must not expose probability estimates;
    - log and modified_huber losses must expose estimates that are
      consistent with decision_function, both binary and multiclass.
    """
    # Hinge loss does not allow for conditional prob estimate.
    # We cannot use the factory here, because it defines predict_proba
    # anyway.
    clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
    assert_false(hasattr(clf, "predict_proba"))
    assert_false(hasattr(clf, "predict_log_proba"))
    # log and modified_huber losses can output probability estimates
    # binary case
    for loss in ["log", "modified_huber"]:
        # BUG FIX: the loop variable was ignored and "modified_huber"
        # was hard-coded below, so the "log" loss was never exercised.
        clf = self.factory(loss=loss, alpha=0.01, n_iter=10)
        clf.fit(X, Y)
        p = clf.predict_proba([[3, 2]])
        assert_true(p[0, 1] > 0.5)
        p = clf.predict_proba([[-1, -1]])
        assert_true(p[0, 1] < 0.5)
        p = clf.predict_log_proba([[3, 2]])
        assert_true(p[0, 1] > p[0, 0])
        p = clf.predict_log_proba([[-1, -1]])
        assert_true(p[0, 1] < p[0, 0])
    # log loss multiclass probability estimates
    clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
    d = clf.decision_function([[.1, -.1], [.3, .2]])
    p = clf.predict_proba([[.1, -.1], [.3, .2]])
    # argmax of probabilities must agree with argmax of decision values
    assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
    # probabilities must be a proper distribution
    assert_almost_equal(p[0].sum(), 1)
    assert_true(np.all(p[0] >= 0))
    p = clf.predict_proba([[-1, -1]])
    d = clf.decision_function([[-1, -1]])
    assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
    # predict_log_proba must be log of predict_proba
    l = clf.predict_log_proba([[3, 2]])
    p = clf.predict_proba([[3, 2]])
    assert_array_almost_equal(np.log(p), l)
    l = clf.predict_log_proba([[-1, -1]])
    p = clf.predict_proba([[-1, -1]])
    assert_array_almost_equal(np.log(p), l)
    # Modified Huber multiclass probability estimates; requires a separate
    # test because the hard zero/one probabilities may destroy the
    # ordering present in decision_function output.
    clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
    clf.fit(X2, Y2)
    d = clf.decision_function([[3, 2]])
    p = clf.predict_proba([[3, 2]])
    if not isinstance(self, SparseSGDClassifierTestCase):
        assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
    else:  # XXX the sparse test gets a different X2 (?)
        assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
    # the following sample produces decision_function values < -1,
    # which would cause naive normalization to fail (see comment
    # in SGDClassifier.predict_proba)
    x = X.mean(axis=0)
    d = clf.decision_function([x])
    if np.all(d < -1):  # XXX not true in sparse test case (why?)
        p = clf.predict_proba([x])
        # all decisions clipped -> probabilities fall back to uniform
        assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
    # Tests that class_weight and sample_weight are multiplicative:
    # fitting with (class_weight, sample_weight) must equal fitting with
    # their elementwise product as plain sample_weight.
    # FIX: use a seeded RandomState instead of the global np.random so
    # the test is reproducible and independent of global RNG state.
    rng = np.random.RandomState(0)
    class_weights = {1: .6, 2: .3}
    sample_weights = rng.random_sample(Y4.shape[0])
    multiplied_together = np.copy(sample_weights)
    multiplied_together[Y4 == 1] *= class_weights[1]
    multiplied_together[Y4 == 2] *= class_weights[2]
    clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
    clf2 = self.factory(alpha=0.1, n_iter=20)
    clf1.fit(X4, Y4, sample_weight=sample_weights)
    clf2.fit(X4, Y4, sample_weight=multiplied_together)
    assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
# Test class weights for imbalanced data"""
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
# build an very very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
    # Fit a binary problem in two chunks via partial_fit and check the
    # learned shapes, buffer reuse and final predictions.
    third = X.shape[0] // 3
    clf = self.factory(alpha=0.01)
    classes = np.unique(Y)
    clf.partial_fit(X[:third], Y[:third], classes=classes)
    assert_equal(clf.coef_.shape, (1, X.shape[1]))
    assert_equal(clf.intercept_.shape, (1,))
    assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
    # BUG FIX: the original captured id(clf.coef_.data), which ids a
    # temporary buffer object, and then called assert_true(id1, id2) --
    # a no-op since the second argument is only a message.  Compare the
    # actual data pointer of coef_ before and after instead.
    ptr1 = clf.coef_.__array_interface__['data'][0]
    clf.partial_fit(X[third:], Y[third:])
    ptr2 = clf.coef_.__array_interface__['data'][0]
    # check that coef_ hasn't been re-allocated
    assert_equal(ptr1, ptr2)
    y_pred = clf.predict(T)
    assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
    # Fit a 3-class problem in two chunks via partial_fit and check the
    # learned shapes and buffer reuse.
    third = X2.shape[0] // 3
    clf = self.factory(alpha=0.01)
    classes = np.unique(Y2)
    clf.partial_fit(X2[:third], Y2[:third], classes=classes)
    assert_equal(clf.coef_.shape, (3, X2.shape[1]))
    assert_equal(clf.intercept_.shape, (3,))
    assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
    # BUG FIX: the original captured id(clf.coef_.data), which ids a
    # temporary buffer object, and then called assert_true(id1, id2) --
    # a no-op since the second argument is only a message.  Compare the
    # actual data pointer of coef_ before and after instead.
    ptr1 = clf.coef_.__array_interface__['data'][0]
    clf.partial_fit(X2[third:], Y2[third:])
    ptr2 = clf.coef_.__array_interface__['data'][0]
    # check that coef_ hasn't been re-allocated
    assert_equal(ptr1, ptr2)
def test_partial_fit_multiclass_average(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01, average=X2.shape[0])
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
clf.partial_fit(X2[third:], Y2[third:])
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
    # Epsilon-insensitive regression on a simple linear target,
    # without and with noise.
    xmin, xmax = -5, 5
    n_samples = 100
    # FIX: use a seeded RandomState for the noise; the original drew
    # from the unseeded global np.random, making the test flaky.  The
    # sibling tests (least_squares, huber) already seed RandomState(0).
    rng = np.random.RandomState(0)
    X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
    # simple linear function without noise
    y = 0.5 * X.ravel()
    clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
                       alpha=0.1, n_iter=20,
                       fit_intercept=False)
    clf.fit(X, y)
    score = clf.score(X, y)
    # consistency: use assert_greater like the sibling fit tests
    assert_greater(score, 0.99)
    # simple linear function with noise
    y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
    clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
                       alpha=0.1, n_iter=20,
                       fit_intercept=False)
    clf.fit(X, y)
    score = clf.score(X, y)
    assert_greater(score, 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
    # Check that the SGD output is consistent with coordinate descent
    # for several alpha / l1_ratio combinations.
    n_samples, n_features = 1000, 5
    rng = np.random.RandomState(0)
    # BUG FIX: X was drawn from the unseeded global np.random even
    # though a seeded RandomState was created right above -- use rng so
    # the test is deterministic.
    X = rng.randn(n_samples, n_features)
    # ground_truth linear model that generate y from X and to which the
    # models should converge if the regularizer would be set to 0.0
    ground_truth_coef = rng.randn(n_features)
    y = np.dot(X, ground_truth_coef)
    # XXX: alpha = 0.1 seems to cause convergence problems
    for alpha in [0.01, 0.001]:
        for l1_ratio in [0.5, 0.8, 1.0]:
            cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
                                         fit_intercept=False)
            cd.fit(X, y)
            sgd = self.factory(penalty='elasticnet', n_iter=50,
                               alpha=alpha, l1_ratio=l1_ratio,
                               fit_intercept=False)
            sgd.fit(X, y)
            err_msg = ("cd and sgd did not converge to comparable "
                       "results for alpha=%f and l1_ratio=%f"
                       % (alpha, l1_ratio))
            assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
                                err_msg=err_msg)
@ignore_warnings
def test_partial_fit(self):
    # Regressor partial_fit in two chunks: check shapes and buffer reuse.
    third = X.shape[0] // 3
    clf = self.factory(alpha=0.01)
    clf.partial_fit(X[:third], Y[:third])
    assert_equal(clf.coef_.shape, (X.shape[1], ))
    assert_equal(clf.intercept_.shape, (1,))
    assert_equal(clf.predict([[0, 0]]).shape, (1, ))
    # BUG FIX: the original captured id(clf.coef_.data), which ids a
    # temporary buffer object, and then called assert_true(id1, id2) --
    # a no-op since the second argument is only a message.  Compare the
    # actual data pointer of coef_ before and after instead.
    ptr1 = clf.coef_.__array_interface__['data'][0]
    clf.partial_fit(X[third:], Y[third:])
    ptr2 = clf.coef_.__array_interface__['data'][0]
    # check that coef_ hasn't been re-allocated
    assert_equal(ptr1, ptr2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
    """Check that l1_ratio extremes of elasticnet match pure L1 / L2."""
    X, y = datasets.make_classification(n_samples=1000,
                                        n_features=100, n_informative=20,
                                        random_state=1234)
    # elasticnet with l1_ratio ~ 1 should behave like a pure l1 penalty
    clf_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
                           l1_ratio=0.9999999999, random_state=42)
    clf_en.fit(X, y)
    clf_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42)
    clf_l1.fit(X, y)
    assert_array_almost_equal(clf_en.coef_, clf_l1.coef_)
    # elasticnet with l1_ratio ~ 0 should behave like a pure l2 penalty
    clf_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
                           l1_ratio=0.0000000001, random_state=42)
    clf_en.fit(X, y)
    clf_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42)
    clf_l2.fit(X, y)
    assert_array_almost_equal(clf_en.coef_, clf_l2.coef_)
def test_underflow_or_overlow():
    # NOTE(review): "overlow" is a typo for "overflow"; the name is kept
    # unchanged because test runners discover tests by name.
    with np.errstate(all='raise'):
        # Generate some weird data with hugely unscaled features
        rng = np.random.RandomState(0)
        n_samples = 100
        n_features = 10
        X = rng.normal(size=(n_samples, n_features))
        X[:, :2] *= 1e300
        assert_true(np.isfinite(X).all())
        # Use MinMaxScaler to scale the data without introducing a numerical
        # instability (computing the standard deviation naively is not possible
        # on this data)
        X_scaled = MinMaxScaler().fit_transform(X)
        assert_true(np.isfinite(X_scaled).all())
        # Define a ground truth on the scaled data
        ground_truth = rng.normal(size=n_features)
        y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
        assert_array_equal(np.unique(y), [0, 1])
        model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
        # smoke test: model is stable on scaled data
        model.fit(X_scaled, y)
        assert_true(np.isfinite(model.coef_).all())
        # model is numerically unstable on unscaled data and must raise
        # a ValueError with an actionable message
        msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
                     " Scaling input data with StandardScaler or MinMaxScaler"
                     " might help.")
        assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
asadziach/tensorflow | tensorflow/contrib/keras/api/keras/losses/__init__.py | 46 | 2097 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Loss functions.
from tensorflow.contrib.keras.python.keras.losses import binary_crossentropy
from tensorflow.contrib.keras.python.keras.losses import categorical_crossentropy
from tensorflow.contrib.keras.python.keras.losses import cosine_proximity
from tensorflow.contrib.keras.python.keras.losses import hinge
from tensorflow.contrib.keras.python.keras.losses import kullback_leibler_divergence
from tensorflow.contrib.keras.python.keras.losses import mean_absolute_error
from tensorflow.contrib.keras.python.keras.losses import mean_absolute_percentage_error
from tensorflow.contrib.keras.python.keras.losses import mean_squared_error
from tensorflow.contrib.keras.python.keras.losses import mean_squared_logarithmic_error
from tensorflow.contrib.keras.python.keras.losses import poisson
from tensorflow.contrib.keras.python.keras.losses import sparse_categorical_crossentropy
from tensorflow.contrib.keras.python.keras.losses import squared_hinge
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.contrib.keras.python.keras.losses import deserialize
from tensorflow.contrib.keras.python.keras.losses import serialize
from tensorflow.contrib.keras.python.keras.losses import get
del absolute_import
del division
del print_function
| apache-2.0 |
zstackio/zstack-woodpecker | integrationtest/vm/mini/poweroff/test_poweroff_host.py | 1 | 3324 | '''
Integration test for testing power off mini hosts.
#1.operations & power off random hosts
#2.start hosts
#3.duplicated operation
@author: zhaohao.chen
'''
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import zstackwoodpecker.zstack_test.zstack_test_volume as test_volume_header
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.operations.host_operations as host_ops
import zstackwoodpecker.operations.scenario_operations as sce_ops
import time
import os
import random
import threading
import hashlib
import random
MN_IP = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
admin_password = hashlib.sha512('password').hexdigest()
test_obj_dict = test_state.TestStateDict()
zstack_management_ip = os.environ.get('zstackManagementIp')
def recover_hosts(host_uuids, host_ips, wait_time):
    """Power the hosts back on and reconnect them to the management node.

    In this nested test environment every host is itself a VM; for each
    host IP, check the backing VM really reached 'Stopped', boot it,
    wait for the management node, then reconnect the host.
    """
    for host_ip in host_ips:
        ip_cond = res_ops.gen_query_conditions('vmNics.ip', '=', host_ip)
        host_vm = sce_ops.query_resource(zstack_management_ip,
                                         res_ops.VM_INSTANCE,
                                         ip_cond).inventories[0]
        if host_vm.state != 'Stopped':
            test_util.test_fail("Fail to power off host:{}".format(host_vm.uuid))
        sce_ops.start_vm(zstack_management_ip, host_vm.uuid)
    # Give the management node time to come back before reconnecting hosts.
    time.sleep(wait_time)
    for host_uuid in host_uuids:
        host_ops.reconnect_host(host_uuid)
def operations_shutdown(shutdown_thread, host_uuids, host_ips, wait_time, operation_thread=None):
    """Run the shutdown thread (optionally alongside an operation thread),
    wait for it to finish, then recover the powered-off hosts."""
    if operation_thread is not None:
        operation_thread.start()
    shutdown_thread.start()
    shutdown_thread.join()
    # Let the power-off fully settle before starting recovery.
    time.sleep(180)
    recover_hosts(host_uuids, host_ips, wait_time)
def test():
    """Randomly power off all hosts of a cluster and verify recovery.

    Each round picks a random cluster, powers off all of its hosts in a
    background thread (passing a flag when the management node itself is
    among them, which needs a much longer recovery wait), then restarts
    and reconnects the hosts.
    """
    global test_obj_dict
    wait_time = 120
    # Renamed from 'round' to stop shadowing the builtin round().
    rounds = 2
    test_util.test_logger("@@:mnip:{}".format(zstack_management_ip))
    cond = res_ops.gen_query_conditions('managementIp', '=', MN_IP)
    MN_HOST = res_ops.query_resource(res_ops.HOST, cond)[0]
    cluster_list = res_ops.get_resource(res_ops.CLUSTER)
    for i in range(rounds):
        host_uuids = []
        host_ips = []
        mn_flag = None  # set if candidate hosts include the MN node
        # operations & power off random hosts
        test_util.test_logger("round {}".format(i))
        cluster_uuid = random.choice(cluster_list).uuid
        cond = res_ops.gen_query_conditions('cluster.uuid', '=', cluster_uuid)
        cluster_hosts = res_ops.query_resource(res_ops.HOST, cond)
        for host in cluster_hosts:
            if host.uuid == MN_HOST.uuid:
                mn_flag = 1
                wait_time = 900  # wait for the MN to come back up
            host_uuids.append(host.uuid)
            host_ips.append(host.managementIp)
        power_off_thread = threading.Thread(target=host_ops.poweroff_host,
                                            args=(host_uuids, admin_password, mn_flag))
        operations_shutdown(power_off_thread, host_uuids, host_ips, wait_time)
    test_util.test_pass("pass")
def error_cleanup():
    """Clean up test resources after a test failure."""
    global test_obj_dict
    test_lib.lib_error_cleanup(test_obj_dict)


def env_recover():
    # NOTE(review): identical to error_cleanup(); presumably recovery of
    # this environment needs nothing beyond resource cleanup — confirm.
    global test_obj_dict
    test_lib.lib_error_cleanup(test_obj_dict)
| apache-2.0 |
Russell-IO/ansible | lib/ansible/modules/network/illumos/flowadm.py | 43 | 14799 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Adam Števko <adam.stevko@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: flowadm
short_description: Manage bandwidth resource control and priority for protocols, services and zones on Solaris/illumos systems
description:
- Create/modify/remove networking bandwidth and associated resources for a type of traffic on a particular link.
version_added: "2.2"
author: Adam Števko (@xen0l)
options:
name:
description: >
- A flow is defined as a set of attributes based on Layer 3 and Layer 4
headers, which can be used to identify a protocol, service, or a zone.
required: true
aliases: [ 'flow' ]
link:
description:
- Specifies a link to configure a flow on.
required: false
local_ip:
description:
- Identifies a network flow by the local IP address.
required: false
remote_ip:
description:
- Identifies a network flow by the remote IP address.
required: false
transport:
description: >
- Specifies a Layer 4 protocol to be used. It is typically used in combination with I(local_port) to
identify the service that needs special attention.
required: false
local_port:
description:
- Identifies a service specified by the local port.
required: false
dsfield:
description: >
- Identifies the 8-bit differentiated services field (as defined in
RFC 2474). The optional dsfield_mask is used to state the bits of interest in
the differentiated services field when comparing with the dsfield
value. Both values must be in hexadecimal.
required: false
maxbw:
description: >
- Sets the full duplex bandwidth for the flow. The bandwidth is
specified as an integer with one of the scale suffixes(K, M, or G
for Kbps, Mbps, and Gbps). If no units are specified, the input
value will be read as Mbps.
required: false
priority:
description:
- Sets the relative priority for the flow.
required: false
default: 'medium'
choices: [ 'low', 'medium', 'high' ]
temporary:
description:
- Specifies that the configured flow is temporary. Temporary
flows do not persist across reboots.
required: false
default: false
type: bool
state:
description:
- Create/delete/enable/disable an IP address on the network interface.
required: false
default: present
choices: [ 'absent', 'present', 'resetted' ]
'''
EXAMPLES = '''
# Limit SSH traffic to 100M via vnic0 interface
- flowadm:
link: vnic0
flow: ssh_out
transport: tcp
local_port: 22
maxbw: 100M
state: present
# Reset flow properties
- flowadm:
name: dns
state: resetted
# Configure policy for EF PHB (DSCP value of 101110 from RFC 2598) with a bandwidth of 500 Mbps and a high priority.
- flowadm:
link: bge0
dsfield: '0x2e:0xfc'
maxbw: 500M
priority: high
flow: efphb-flow
state: present
'''
RETURN = '''
name:
description: flow name
returned: always
type: string
sample: "http_drop"
link:
description: flow's link
returned: if link is defined
type: string
sample: "vnic0"
state:
description: state of the target
returned: always
type: string
sample: "present"
temporary:
description: flow's persistence
returned: always
type: boolean
sample: "True"
priority:
description: flow's priority
returned: if priority is defined
type: string
sample: "low"
transport:
description: flow's transport
returned: if transport is defined
type: string
sample: "tcp"
maxbw:
description: flow's maximum bandwidth
returned: if maxbw is defined
type: string
sample: "100M"
local_ip:
description: flow's local IP address
returned: if local_ip is defined
type: string
sample: "10.0.0.42"
local_port:
description: flow's local port
returned: if local_port is defined
type: int
sample: 1337
remote_ip:
description: flow's remote IP address
returned: if remote_ip is defined
type: string
sample: "10.0.0.42"
dsfield:
description: flow's differentiated services value
returned: if dsfield is defined
type: string
sample: "0x2e:0xfc"
'''
import socket
from ansible.module_utils.basic import AnsibleModule
SUPPORTED_TRANSPORTS = ['tcp', 'udp', 'sctp', 'icmp', 'icmpv6']
SUPPORTED_PRIORITIES = ['low', 'medium', 'high']
SUPPORTED_ATTRIBUTES = ['local_ip', 'remote_ip', 'transport', 'local_port', 'dsfield']
SUPPORTPED_PROPERTIES = ['maxbw', 'priority']
class Flow(object):
    """Wrapper around the illumos flowadm(1M) command.

    Holds the desired flow configuration taken from the Ansible module
    parameters and provides helpers to validate attributes and to query,
    create, update, delete and reset a flow.
    """

    def __init__(self, module):
        self.module = module

        self.name = module.params['name']
        self.link = module.params['link']
        self.local_ip = module.params['local_ip']
        self.remote_ip = module.params['remote_ip']
        self.transport = module.params['transport']
        self.local_port = module.params['local_port']
        self.dsfield = module.params['dsfield']
        self.maxbw = module.params['maxbw']
        self.priority = module.params['priority']
        self.temporary = module.params['temporary']
        self.state = module.params['state']

        # Tracks which flow properties differ from the live configuration;
        # filled in by flow_needs_udpating(), consumed by update_flow().
        self._needs_updating = {
            'maxbw': False,
            'priority': False,
        }

    @classmethod
    def is_valid_port(cls, port):
        """Return True if *port* is a valid TCP/UDP port number (1-65535)."""
        return 1 <= int(port) <= 65535

    @classmethod
    def is_valid_address(cls, ip):
        """Return True if *ip* is a valid IPv4/IPv6 address, optionally
        followed by a /prefix length."""
        netmask = None
        if ip.count('/') == 1:
            ip_address, netmask = ip.split('/')
        else:
            ip_address = ip

        if len(ip_address.split('.')) == 4:
            try:
                socket.inet_pton(socket.AF_INET, ip_address)
            except socket.error:
                return False
            # Bug fix: the original referenced 'netmask' even when no
            # prefix was supplied (NameError) and compared the raw string
            # against ints (TypeError on Python 3).
            if netmask is not None:
                try:
                    prefix = int(netmask)
                except ValueError:
                    return False
                if not 0 <= prefix <= 32:
                    return False
        else:
            try:
                socket.inet_pton(socket.AF_INET6, ip_address)
            except socket.error:
                return False
            if netmask is not None:
                try:
                    prefix = int(netmask)
                except ValueError:
                    return False
                if not 0 <= prefix <= 128:
                    return False

        return True

    @classmethod
    def is_hex(cls, number):
        """Return True if *number* parses as a base-16 integer."""
        try:
            int(number, 16)
        except ValueError:
            return False

        return True

    @classmethod
    def is_valid_dsfield(cls, dsfield):
        """Return True if *dsfield* is a valid 'value[:mask]' pair of
        8-bit hexadecimal numbers (RFC 2474 differentiated services)."""
        dsmask = None

        # Bug fix: the original branches were swapped, so 'value:mask'
        # input silently dropped the mask while a plain 'value' raised
        # ValueError on unpacking.
        if dsfield.count(':') == 1:
            dsval, dsmask = dsfield.split(':')
        else:
            dsval = dsfield

        if not cls.is_hex(dsval) or not 0x01 <= int(dsval, 16) <= 0xff:
            return False
        if dsmask is not None and (
                not cls.is_hex(dsmask) or not 0x01 <= int(dsmask, 16) <= 0xff):
            return False
        return True

    def flow_exists(self):
        """Return True if a flow with this name is already configured."""
        cmd = [self.module.get_bin_path('flowadm')]

        cmd.append('show-flow')
        cmd.append(self.name)

        (rc, _, _) = self.module.run_command(cmd)
        return rc == 0

    def delete_flow(self):
        """Run 'flowadm remove-flow' for this flow."""
        cmd = [self.module.get_bin_path('flowadm')]

        cmd.append('remove-flow')

        if self.temporary:
            cmd.append('-t')
        cmd.append(self.name)

        return self.module.run_command(cmd)

    def create_flow(self):
        """Run 'flowadm add-flow' with the configured attributes (-a)
        and properties (-p)."""
        cmd = [self.module.get_bin_path('flowadm')]

        cmd.append('add-flow')
        cmd.append('-l')
        cmd.append(self.link)

        if self.local_ip:
            cmd.append('-a')
            cmd.append('local_ip=' + self.local_ip)

        if self.remote_ip:
            cmd.append('-a')
            cmd.append('remote_ip=' + self.remote_ip)

        if self.transport:
            cmd.append('-a')
            cmd.append('transport=' + self.transport)

        if self.local_port:
            cmd.append('-a')
            cmd.append('local_port=' + self.local_port)

        if self.dsfield:
            cmd.append('-a')
            cmd.append('dsfield=' + self.dsfield)

        if self.maxbw:
            cmd.append('-p')
            cmd.append('maxbw=' + self.maxbw)

        if self.priority:
            cmd.append('-p')
            cmd.append('priority=' + self.priority)

        if self.temporary:
            cmd.append('-t')
        cmd.append(self.name)

        return self.module.run_command(cmd)

    def _query_flow_props(self):
        """Return (rc, out, err) of a machine-parsable property listing."""
        cmd = [self.module.get_bin_path('flowadm')]

        cmd.append('show-flowprop')
        cmd.append('-c')
        cmd.append('-o')
        cmd.append('property,possible')
        cmd.append(self.name)

        return self.module.run_command(cmd)

    def flow_needs_udpating(self):
        """Compare desired maxbw/priority against the live flow and
        record mismatches in self._needs_updating.

        (Name keeps the historical typo for API compatibility.)
        """
        (rc, out, err) = self._query_flow_props()

        NEEDS_UPDATING = False

        if rc == 0:
            properties = (line.split(':') for line in out.rstrip().split('\n'))
            for prop, value in properties:
                if prop == 'maxbw' and self.maxbw != value:
                    self._needs_updating.update({prop: True})
                    NEEDS_UPDATING = True

                elif prop == 'priority' and self.priority != value:
                    self._needs_updating.update({prop: True})
                    NEEDS_UPDATING = True

            return NEEDS_UPDATING
        else:
            self.module.fail_json(msg='Error while checking flow properties: %s' % err,
                                  stderr=err,
                                  rc=rc)

    def update_flow(self):
        """Run 'flowadm set-flowprop' for the properties flagged by
        flow_needs_udpating()."""
        cmd = [self.module.get_bin_path('flowadm')]

        cmd.append('set-flowprop')

        if self.maxbw and self._needs_updating['maxbw']:
            cmd.append('-p')
            cmd.append('maxbw=' + self.maxbw)

        if self.priority and self._needs_updating['priority']:
            cmd.append('-p')
            cmd.append('priority=' + self.priority)

        if self.temporary:
            cmd.append('-t')
        cmd.append(self.name)

        return self.module.run_command(cmd)

    def reset_flow(self):
        """Run 'flowadm reset-flowprop' to restore default properties.

        Bug fix: main() calls reset_flow() for state=resetted, but the
        method was missing, so that path always raised AttributeError.
        """
        cmd = [self.module.get_bin_path('flowadm')]

        cmd.append('reset-flowprop')

        if self.temporary:
            cmd.append('-t')
        cmd.append(self.name)

        return self.module.run_command(cmd)
def main():
    """Entry point: parse module parameters, validate them, and converge
    the flow to the requested state (absent/present/resetted)."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, aliases=['flow']),
            link=dict(required=False),
            local_ip=dict(required=False),
            remote_ip=dict(required=False),
            transport=dict(required=False, choices=SUPPORTED_TRANSPORTS),
            local_port=dict(required=False),
            dsfield=dict(required=False),
            maxbw=dict(required=False),
            priority=dict(required=False,
                          default='medium',
                          choices=SUPPORTED_PRIORITIES),
            temporary=dict(default=False, type='bool'),
            state=dict(required=False,
                       default='present',
                       choices=['absent', 'present', 'resetted']),
        ),
        # flowadm accepts exactly one flow attribute per flow.
        mutually_exclusive=[
            ('local_ip', 'remote_ip'),
            ('local_ip', 'transport'),
            ('local_ip', 'local_port'),
            ('local_ip', 'dsfield'),
            ('remote_ip', 'transport'),
            ('remote_ip', 'local_port'),
            ('remote_ip', 'dsfield'),
            ('transport', 'dsfield'),
            ('local_port', 'dsfield'),
        ],
        supports_check_mode=True
    )

    flow = Flow(module)

    rc = None
    out = ''
    err = ''
    result = {}
    result['name'] = flow.name
    result['state'] = flow.state
    result['temporary'] = flow.temporary

    if flow.link:
        result['link'] = flow.link

    if flow.maxbw:
        result['maxbw'] = flow.maxbw

    if flow.priority:
        result['priority'] = flow.priority

    if flow.local_ip:
        if flow.is_valid_address(flow.local_ip):
            result['local_ip'] = flow.local_ip
        else:
            # Bug fix: invalid addresses used to be silently dropped while
            # invalid ports/dsfields aborted the module; fail consistently.
            module.fail_json(msg='Invalid local_ip: %s' % flow.local_ip,
                             rc=1)

    if flow.remote_ip:
        if flow.is_valid_address(flow.remote_ip):
            result['remote_ip'] = flow.remote_ip
        else:
            module.fail_json(msg='Invalid remote_ip: %s' % flow.remote_ip,
                             rc=1)

    if flow.transport:
        result['transport'] = flow.transport

    if flow.local_port:
        if flow.is_valid_port(flow.local_port):
            result['local_port'] = flow.local_port
        else:
            module.fail_json(msg='Invalid port: %s' % flow.local_port,
                             rc=1)

    if flow.dsfield:
        if flow.is_valid_dsfield(flow.dsfield):
            result['dsfield'] = flow.dsfield
        else:
            module.fail_json(msg='Invalid dsfield: %s' % flow.dsfield,
                             rc=1)

    if flow.state == 'absent':
        if flow.flow_exists():
            if module.check_mode:
                module.exit_json(changed=True)

            (rc, out, err) = flow.delete_flow()
            if rc != 0:
                module.fail_json(msg='Error while deleting flow: "%s"' % err,
                                 name=flow.name,
                                 stderr=err,
                                 rc=rc)

    elif flow.state == 'present':
        if not flow.flow_exists():
            if module.check_mode:
                module.exit_json(changed=True)

            (rc, out, err) = flow.create_flow()
            if rc != 0:
                module.fail_json(msg='Error while creating flow: "%s"' % err,
                                 name=flow.name,
                                 stderr=err,
                                 rc=rc)
        else:
            # Flow exists: only touch properties that actually differ.
            if flow.flow_needs_udpating():
                (rc, out, err) = flow.update_flow()
                if rc != 0:
                    module.fail_json(msg='Error while updating flow: "%s"' % err,
                                     name=flow.name,
                                     stderr=err,
                                     rc=rc)

    elif flow.state == 'resetted':
        if flow.flow_exists():
            if module.check_mode:
                module.exit_json(changed=True)

            (rc, out, err) = flow.reset_flow()
            if rc != 0:
                module.fail_json(msg='Error while resetting flow: "%s"' % err,
                                 name=flow.name,
                                 stderr=err,
                                 rc=rc)

    # rc stays None when no command was run, i.e. nothing changed.
    if rc is None:
        result['changed'] = False
    else:
        result['changed'] = True

    if out:
        result['stdout'] = out

    if err:
        result['stderr'] = err

    module.exit_json(**result)
| gpl-3.0 |
aleksandra-tarkowska/django | django/contrib/auth/forms.py | 1 | 15080 | from __future__ import unicode_literals
from collections import OrderedDict
from django import forms
from django.core.mail import EmailMultiAlternatives
from django.forms.utils import flatatt
from django.template import loader
from django.utils.encoding import force_bytes
from django.utils.html import format_html, format_html_join
from django.utils.http import urlsafe_base64_encode
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib.auth import authenticate, get_user_model
from django.contrib.auth.models import User
from django.contrib.auth.hashers import UNUSABLE_PASSWORD_PREFIX, identify_hasher
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
# Number of leading password characters left visible by mask_password().
UNMASKED_DIGITS_TO_SHOW = 6


def mask_password(password):
    """Return *password* with everything after the first few characters
    replaced by asterisks."""
    visible = password[:UNMASKED_DIGITS_TO_SHOW]
    hidden_count = max(len(password) - UNMASKED_DIGITS_TO_SHOW, 0)
    return visible + "*" * hidden_count
class ReadOnlyPasswordHashWidget(forms.Widget):
    # Renders a read-only summary of a stored password hash instead of
    # an <input> element; used on the admin user-change form.

    def render(self, name, value, attrs):
        """Return an HTML summary of the hash (algorithm, salt, etc.)."""
        encoded = value
        final_attrs = self.build_attrs(attrs)

        if not encoded or encoded.startswith(UNUSABLE_PASSWORD_PREFIX):
            # No usable password is stored for this user.
            summary = mark_safe("<strong>%s</strong>" % ugettext("No password set."))
        else:
            try:
                hasher = identify_hasher(encoded)
            except ValueError:
                # Hash produced by an unknown or removed hasher.
                summary = mark_safe("<strong>%s</strong>" % ugettext(
                    "Invalid password format or unknown hashing algorithm."))
            else:
                # safe_summary() exposes only non-sensitive parts of the
                # hash (algorithm name, iterations, truncated salt/hash).
                summary = format_html_join('',
                                           "<strong>{0}</strong>: {1} ",
                                           ((ugettext(key), value)
                                            for key, value in hasher.safe_summary(encoded).items())
                                           )

        return format_html("<div{0}>{1}</div>", flatatt(final_attrs), summary)
class ReadOnlyPasswordHashField(forms.Field):
    # Display-only field: shows the hash summary and never accepts input.
    widget = ReadOnlyPasswordHashWidget

    def __init__(self, *args, **kwargs):
        # A field that renders no input element can never be required.
        kwargs.setdefault("required", False)
        super(ReadOnlyPasswordHashField, self).__init__(*args, **kwargs)

    def bound_data(self, data, initial):
        # Always return initial because the widget doesn't
        # render an input field.
        return initial

    def _has_changed(self, initial, data):
        # Read-only, so the value can never change.
        return False
class UserCreationForm(forms.ModelForm):
    """
    A form that creates a user, with no privileges, from the given username and
    password.
    """
    error_messages = {
        'duplicate_username': _("A user with that username already exists."),
        'password_mismatch': _("The two password fields didn't match."),
    }
    username = forms.RegexField(label=_("Username"), max_length=30,
        regex=r'^[\w.@+-]+$',
        help_text=_("Required. 30 characters or fewer. Letters, digits and "
                    "@/./+/-/_ only."),
        error_messages={
            'invalid': _("This value may contain only letters, numbers and "
                         "@/./+/-/_ characters.")})
    password1 = forms.CharField(label=_("Password"),
        widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password confirmation"),
        widget=forms.PasswordInput,
        help_text=_("Enter the same password as above, for verification."))

    class Meta:
        model = User
        fields = ("username",)

    def clean_username(self):
        # Since User.username is unique, this check is redundant,
        # but it sets a nicer error message than the ORM. See #13147.
        username = self.cleaned_data["username"]
        try:
            User._default_manager.get(username=username)
        except User.DoesNotExist:
            # No clash: the username is available.
            return username
        raise forms.ValidationError(
            self.error_messages['duplicate_username'],
            code='duplicate_username',
        )

    def clean_password2(self):
        """Validate that the two password entries match."""
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'],
                code='password_mismatch',
            )
        return password2

    def save(self, commit=True):
        """Save the user, hashing password1 via set_password()."""
        user = super(UserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
    # Admin "change user" form: the username is editable while the
    # password appears only as a read-only hash summary.
    username = forms.RegexField(
        label=_("Username"), max_length=30, regex=r"^[\w.@+-]+$",
        help_text=_("Required. 30 characters or fewer. Letters, digits and "
                    "@/./+/-/_ only."),
        error_messages={
            'invalid': _("This value may contain only letters, numbers and "
                         "@/./+/-/_ characters.")})
    password = ReadOnlyPasswordHashField(label=_("Password"),
        help_text=_("Raw passwords are not stored, so there is no way to see "
                    "this user's password, but you can change the password "
                    "using <a href=\"password/\">this form</a>."))

    class Meta:
        model = User
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        super(UserChangeForm, self).__init__(*args, **kwargs)
        f = self.fields.get('user_permissions', None)
        if f is not None:
            # Avoid a query per permission when rendering the widget.
            f.queryset = f.queryset.select_related('content_type')

    def clean_password(self):
        # Regardless of what the user provides, return the initial value.
        # This is done here, rather than on the field, because the
        # field does not have access to the initial value
        return self.initial["password"]
class AuthenticationForm(forms.Form):
    """
    Base class for authenticating users. Extend this to get a form that accepts
    username/password logins.
    """
    username = forms.CharField(max_length=254)
    password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)

    error_messages = {
        'invalid_login': _("Please enter a correct %(username)s and password. "
                           "Note that both fields may be case-sensitive."),
        'inactive': _("This account is inactive."),
    }

    def __init__(self, request=None, *args, **kwargs):
        """
        The 'request' parameter is set for custom auth use by subclasses.
        The form data comes in via the standard 'data' kwarg.
        """
        self.request = request
        self.user_cache = None
        super(AuthenticationForm, self).__init__(*args, **kwargs)

        # Set the label for the "username" field.
        UserModel = get_user_model()
        self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
        if self.fields['username'].label is None:
            self.fields['username'].label = capfirst(self.username_field.verbose_name)

    def clean(self):
        """Authenticate against the configured backends and cache the
        resulting user; raise 'invalid_login' on failure."""
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')

        if username and password:
            self.user_cache = authenticate(username=username,
                                           password=password)
            if self.user_cache is None:
                raise forms.ValidationError(
                    self.error_messages['invalid_login'],
                    code='invalid_login',
                    params={'username': self.username_field.verbose_name},
                )
            else:
                # Credentials are valid; apply the login policy check.
                self.confirm_login_allowed(self.user_cache)

        return self.cleaned_data

    def confirm_login_allowed(self, user):
        """
        Controls whether the given User may log in. This is a policy setting,
        independent of end-user authentication. This default behavior is to
        allow login by active users, and reject login by inactive users.

        If the given user cannot log in, this method should raise a
        ``forms.ValidationError``.

        If the given user may log in, this method should return None.
        """
        if not user.is_active:
            raise forms.ValidationError(
                self.error_messages['inactive'],
                code='inactive',
            )

    def get_user_id(self):
        # Primary key of the authenticated user, or None before clean().
        if self.user_cache:
            return self.user_cache.id
        return None

    def get_user(self):
        # The authenticated user instance, or None before clean().
        return self.user_cache
class PasswordResetForm(forms.Form):
    # Collects an email address and sends a one-use password-reset link
    # to every matching active user.
    email = forms.EmailField(label=_("Email"), max_length=254)

    def send_mail(self, subject_template_name, email_template_name,
                  context, from_email, to_email, html_email_template_name=None):
        """
        Sends a django.core.mail.EmailMultiAlternatives to `to_email`.
        """
        subject = loader.render_to_string(subject_template_name, context)
        # Email subject *must not* contain newlines
        subject = ''.join(subject.splitlines())
        body = loader.render_to_string(email_template_name, context)

        email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
        if html_email_template_name is not None:
            # Attach an HTML alternative alongside the plain-text body.
            html_email = loader.render_to_string(html_email_template_name, context)
            email_message.attach_alternative(html_email, 'text/html')

        email_message.send()

    def save(self, domain_override=None,
             subject_template_name='registration/password_reset_subject.txt',
             email_template_name='registration/password_reset_email.html',
             use_https=False, token_generator=default_token_generator,
             from_email=None, request=None, html_email_template_name=None):
        """
        Generates a one-use only link for resetting password and sends to the
        user.
        """
        UserModel = get_user_model()
        email = self.cleaned_data["email"]
        active_users = UserModel._default_manager.filter(
            email__iexact=email, is_active=True)
        for user in active_users:
            # Make sure that no email is sent to a user that actually has
            # a password marked as unusable
            if not user.has_usable_password():
                continue
            if not domain_override:
                current_site = get_current_site(request)
                site_name = current_site.name
                domain = current_site.domain
            else:
                site_name = domain = domain_override
            # Template context for both the subject and body templates.
            context = {
                'email': user.email,
                'domain': domain,
                'site_name': site_name,
                'uid': urlsafe_base64_encode(force_bytes(user.pk)),
                'user': user,
                'token': token_generator.make_token(user),
                'protocol': 'https' if use_https else 'http',
            }
            self.send_mail(subject_template_name, email_template_name,
                           context, from_email, user.email,
                           html_email_template_name=html_email_template_name)
class SetPasswordForm(forms.Form):
    """
    A form that lets a user set his/her password without entering the
    old password
    """
    error_messages = {
        'password_mismatch': _("The two password fields didn't match."),
    }
    new_password1 = forms.CharField(label=_("New password"),
                                    widget=forms.PasswordInput)
    new_password2 = forms.CharField(label=_("New password confirmation"),
                                    widget=forms.PasswordInput)

    def __init__(self, user, *args, **kwargs):
        # The user whose password will be set; required positionally.
        self.user = user
        super(SetPasswordForm, self).__init__(*args, **kwargs)

    def clean_new_password2(self):
        """Validate that both new password entries match."""
        password1 = self.cleaned_data.get('new_password1')
        password2 = self.cleaned_data.get('new_password2')
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(
                    self.error_messages['password_mismatch'],
                    code='password_mismatch',
                )
        return password2

    def save(self, commit=True):
        """Hash and store the new password on self.user."""
        self.user.set_password(self.cleaned_data['new_password1'])
        if commit:
            self.user.save()
        return self.user
class PasswordChangeForm(SetPasswordForm):
    """
    A form that lets a user change his/her password by entering
    their old password.
    """
    error_messages = dict(SetPasswordForm.error_messages, **{
        'password_incorrect': _("Your old password was entered incorrectly. "
                                "Please enter it again."),
    })
    old_password = forms.CharField(label=_("Old password"),
                                   widget=forms.PasswordInput)

    def clean_old_password(self):
        """
        Validates that the old_password field is correct.
        """
        old_password = self.cleaned_data["old_password"]
        if not self.user.check_password(old_password):
            raise forms.ValidationError(
                self.error_messages['password_incorrect'],
                code='password_incorrect',
            )
        return old_password

# Reorder so the old password is asked for (and rendered) first, even
# though the field was declared after the inherited new-password fields.
PasswordChangeForm.base_fields = OrderedDict(
    (k, PasswordChangeForm.base_fields[k])
    for k in ['old_password', 'new_password1', 'new_password2']
)
class AdminPasswordChangeForm(forms.Form):
    """
    A form used to change the password of a user in the admin interface.
    """
    error_messages = {
        'password_mismatch': _("The two password fields didn't match."),
    }
    required_css_class = 'required'
    password1 = forms.CharField(label=_("Password"),
                                widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password (again)"),
                                widget=forms.PasswordInput)

    def __init__(self, user, *args, **kwargs):
        # The user whose password an admin is changing.
        self.user = user
        super(AdminPasswordChangeForm, self).__init__(*args, **kwargs)

    def clean_password2(self):
        """Validate that both password entries match."""
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(
                    self.error_messages['password_mismatch'],
                    code='password_mismatch',
                )
        return password2

    def save(self, commit=True):
        """
        Saves the new password.
        """
        self.user.set_password(self.cleaned_data["password1"])
        if commit:
            self.user.save()
        return self.user

    def _get_changed_data(self):
        data = super(AdminPasswordChangeForm, self).changed_data
        # The form only ever changes the password: report ['password']
        # when every field was filled in, and nothing otherwise.
        for name in self.fields.keys():
            if name not in data:
                return []
        return ['password']
    changed_data = property(_get_changed_data)
| bsd-3-clause |
chidea/GoPythonDLLWrapper | bin/lib/test/test_modulefinder.py | 79 | 8949 | import os
import errno
import importlib.machinery
import py_compile
import shutil
import unittest
import tempfile
from test import support
import modulefinder
TEST_DIR = tempfile.mkdtemp()
TEST_PATH = [TEST_DIR, os.path.dirname(tempfile.__file__)]
# Each test description is a list of 5 items:
#
# 1. a module name that will be imported by modulefinder
# 2. a list of module names that modulefinder is required to find
# 3. a list of module names that modulefinder should complain
# about because they are not found
# 4. a list of module names that modulefinder should complain
# about because they MAY be not found
# 5. a string specifying packages to create; the format is obvious imo.
#
# Each package will be created in TEST_DIR, and TEST_DIR will be
# removed after the tests again.
# Modulefinder searches in a path that contains TEST_DIR, plus
# the standard Lib directory.
maybe_test = [
"a.module",
["a", "a.module", "sys",
"b"],
["c"], ["b.something"],
"""\
a/__init__.py
a/module.py
from b import something
from c import something
b/__init__.py
from sys import *
"""]
maybe_test_new = [
"a.module",
["a", "a.module", "sys",
"b", "__future__"],
["c"], ["b.something"],
"""\
a/__init__.py
a/module.py
from b import something
from c import something
b/__init__.py
from __future__ import absolute_import
from sys import *
"""]
package_test = [
"a.module",
["a", "a.b", "a.c", "a.module", "mymodule", "sys"],
["blahblah", "c"], [],
"""\
mymodule.py
a/__init__.py
import blahblah
from a import b
import c
a/module.py
import sys
from a import b as x
from a.c import sillyname
a/b.py
a/c.py
from a.module import x
import mymodule as sillyname
from sys import version_info
"""]
absolute_import_test = [
"a.module",
["a", "a.module",
"b", "b.x", "b.y", "b.z",
"__future__", "sys", "gc"],
["blahblah", "z"], [],
"""\
mymodule.py
a/__init__.py
a/module.py
from __future__ import absolute_import
import sys # sys
import blahblah # fails
import gc # gc
import b.x # b.x
from b import y # b.y
from b.z import * # b.z.*
a/gc.py
a/sys.py
import mymodule
a/b/__init__.py
a/b/x.py
a/b/y.py
a/b/z.py
b/__init__.py
import z
b/unused.py
b/x.py
b/y.py
b/z.py
"""]
relative_import_test = [
"a.module",
["__future__",
"a", "a.module",
"a.b", "a.b.y", "a.b.z",
"a.b.c", "a.b.c.moduleC",
"a.b.c.d", "a.b.c.e",
"a.b.x",
"gc"],
[], [],
"""\
mymodule.py
a/__init__.py
from .b import y, z # a.b.y, a.b.z
a/module.py
from __future__ import absolute_import # __future__
import gc # gc
a/gc.py
a/sys.py
a/b/__init__.py
from ..b import x # a.b.x
#from a.b.c import moduleC
from .c import moduleC # a.b.moduleC
a/b/x.py
a/b/y.py
a/b/z.py
a/b/g.py
a/b/c/__init__.py
from ..c import e # a.b.c.e
a/b/c/moduleC.py
from ..c import d # a.b.c.d
a/b/c/d.py
a/b/c/e.py
a/b/c/x.py
"""]
relative_import_test_2 = [
"a.module",
["a", "a.module",
"a.sys",
"a.b", "a.b.y", "a.b.z",
"a.b.c", "a.b.c.d",
"a.b.c.e",
"a.b.c.moduleC",
"a.b.c.f",
"a.b.x",
"a.another"],
[], [],
"""\
mymodule.py
a/__init__.py
from . import sys # a.sys
a/another.py
a/module.py
from .b import y, z # a.b.y, a.b.z
a/gc.py
a/sys.py
a/b/__init__.py
from .c import moduleC # a.b.c.moduleC
from .c import d # a.b.c.d
a/b/x.py
a/b/y.py
a/b/z.py
a/b/c/__init__.py
from . import e # a.b.c.e
a/b/c/moduleC.py
#
from . import f # a.b.c.f
from .. import x # a.b.x
from ... import another # a.another
a/b/c/d.py
a/b/c/e.py
a/b/c/f.py
"""]
relative_import_test_3 = [
"a.module",
["a", "a.module"],
["a.bar"],
[],
"""\
a/__init__.py
def foo(): pass
a/module.py
from . import foo
from . import bar
"""]
relative_import_test_4 = [
"a.module",
["a", "a.module"],
[],
[],
"""\
a/__init__.py
def foo(): pass
a/module.py
from . import *
"""]
bytecode_test = [
"a",
["a"],
[],
[],
""
]
def open_file(path):
    """Open *path* for writing, creating parent directories as needed."""
    dirname = os.path.dirname(path)
    # exist_ok replaces the manual errno.EEXIST check that was needed on
    # Python 2; any other OSError still propagates.
    os.makedirs(dirname, exist_ok=True)
    return open(path, "w")
def create_package(source):
    """Materialize the mini package description *source* under TEST_DIR.

    Unindented lines name files to create; indented lines are written
    (dedented) as the contents of the most recently named file.
    """
    current = None
    try:
        for raw in source.splitlines():
            if raw.startswith((" ", "\t")):
                # Content line: append to the currently open file.
                current.write(raw.strip() + "\n")
                continue
            # Header line: close the previous file, start the named one.
            if current:
                current.close()
            current = open_file(os.path.join(TEST_DIR, raw.strip()))
    finally:
        if current:
            current.close()
class ModuleFinderTest(unittest.TestCase):
    # Driver: each test feeds one of the 5-item package descriptions
    # above to ModuleFinder and compares the discovered, missing and
    # maybe-missing module sets against the expectations.

    def _do_test(self, info, report=False, debug=0, replace_paths=None):
        """Create the described package, run ModuleFinder on it, and
        compare the result against the expected module lists."""
        # Bug fix: the default was a shared mutable list ([]); use None
        # and create a fresh list per call.
        if replace_paths is None:
            replace_paths = []
        import_this, modules, missing, maybe_missing, source = info
        create_package(source)
        try:
            mf = modulefinder.ModuleFinder(path=TEST_PATH, debug=debug,
                                           replace_paths=replace_paths)
            mf.import_hook(import_this)
            if report:
                mf.report()
##            # This wouldn't work in general when executed several times:
##            opath = sys.path[:]
##            sys.path = TEST_PATH
##            try:
##                __import__(import_this)
##            except:
##                import traceback; traceback.print_exc()
##            sys.path = opath
##            return
            modules = sorted(set(modules))
            found = sorted(mf.modules)
            # check if we found what we expected, not more, not less
            self.assertEqual(found, modules)

            # check for missing and maybe missing modules
            bad, maybe = mf.any_missing_maybe()
            self.assertEqual(bad, missing)
            self.assertEqual(maybe, maybe_missing)
        finally:
            shutil.rmtree(TEST_DIR)

    def test_package(self):
        self._do_test(package_test)

    def test_maybe(self):
        self._do_test(maybe_test)

    def test_maybe_new(self):
        self._do_test(maybe_test_new)

    def test_absolute_imports(self):
        self._do_test(absolute_import_test)

    def test_relative_imports(self):
        self._do_test(relative_import_test)

    def test_relative_imports_2(self):
        self._do_test(relative_import_test_2)

    def test_relative_imports_3(self):
        self._do_test(relative_import_test_3)

    def test_relative_imports_4(self):
        self._do_test(relative_import_test_4)

    def test_bytecode(self):
        # Compile a module to bytecode only (source removed) and check
        # that ModuleFinder still finds it.
        base_path = os.path.join(TEST_DIR, 'a')
        source_path = base_path + importlib.machinery.SOURCE_SUFFIXES[0]
        bytecode_path = base_path + importlib.machinery.BYTECODE_SUFFIXES[0]
        with open_file(source_path) as file:
            file.write('testing_modulefinder = True\n')
        py_compile.compile(source_path, cfile=bytecode_path)
        os.remove(source_path)
        self._do_test(bytecode_test)

    def test_replace_paths(self):
        # replace_paths should rewrite co_filename and report the change
        # in the debug output.
        old_path = os.path.join(TEST_DIR, 'a', 'module.py')
        new_path = os.path.join(TEST_DIR, 'a', 'spam.py')
        with support.captured_stdout() as output:
            self._do_test(maybe_test, debug=2,
                          replace_paths=[(old_path, new_path)])
        output = output.getvalue()
        expected = "co_filename %r changed to %r" % (old_path, new_path)
        self.assertIn(expected, output)
# Allow running this test module directly with the stdlib unittest runner.
if __name__ == "__main__":
    unittest.main()
| mit |
nelson-liu/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
    """Draw the decision boundary w0*x + w1*y + b = 0 of a linear classifier
    on the current matplotlib axes, labelled for the legend."""
    coef = clf.coef_[0]
    slope = -coef[0] / coef[1]
    offset = clf.intercept_[0] / coef[1]
    # Extend a bit past [min_x, max_x] so the segment spans the subplot.
    xs = np.linspace(min_x - 5, max_x + 5)
    ys = slope * xs - offset
    plt.plot(xs, ys, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
    """Project X to 2D and plot the two one-vs-rest decision boundaries.

    Parameters: ``X``/``Y`` are the multilabel data and label-indicator
    matrix, ``subplot`` selects the cell in the 2x2 grid, ``transform`` is
    ``"pca"`` (unsupervised) or ``"cca"`` (supervised).
    Raises ValueError for any other ``transform`` value.
    """
    if transform == "pca":
        X = PCA(n_components=2).fit_transform(X)
    elif transform == "cca":
        X = CCA(n_components=2).fit(X, Y).transform(X)
    else:
        # Fix: previously raised a bare ValueError with no message.
        raise ValueError("transform must be 'pca' or 'cca', got %r"
                         % (transform,))
    min_x = np.min(X[:, 0])
    max_x = np.max(X[:, 0])
    min_y = np.min(X[:, 1])
    max_y = np.max(X[:, 1])
    # One linear SVC per label via one-vs-rest.
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    classif.fit(X, Y)
    plt.subplot(2, 2, subplot)
    plt.title(title)
    # Samples carrying label 0 / label 1 (a sample may carry both or none).
    zero_class = np.where(Y[:, 0])
    one_class = np.where(Y[:, 1])
    plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
    plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
                facecolors='none', linewidths=2, label='Class 1')
    plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
                facecolors='none', linewidths=2, label='Class 2')
    plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
                    'Boundary\nfor class 1')
    plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
                    'Boundary\nfor class 2')
    plt.xticks(())
    plt.yticks(())
    plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
    plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
    # Only the second subplot gets axis labels and the legend.
    if subplot == 2:
        plt.xlabel('First principal component')
        plt.ylabel('Second principal component')
        plt.legend(loc="upper left")
# Build a 2x2 figure: CCA vs PCA projections, with and without
# unlabeled samples (allow_unlabeled toggles samples that carry no label).
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=True,
                                      random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=False,
                                      random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
# Tighten margins/spacing before showing the figure.
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
ClearCorp-dev/odoo-clearcorp | TODO-7.0/account_multicompany_relation/account_multicompany_relation.py | 4 | 15994 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Author: Mag Guevara. Copyright ClearCorp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import orm, osv, fields
from copy import copy
from tools.translate import _
class AccountMulticompanyRelation(orm.Model):
    """Mapping from an origin (account, journal, analytic account) triple to
    a target triple, possibly in another company.

    ``account.move`` consults these records when posting: a move line that
    matches an origin triple gets a mirror move created on the target side.
    """
    _name = "account.multicompany.relation"
    _description = "Account multicompany relation"
    _columns = {
        'name':fields.char('Name',size=64,required=True,help='Name for the mirror move relation'),
        'origin_account':fields.many2one('account.account','Origin Account',required=True,help='Indicate the origin move line account where the transaction is taking place.'),
        'targ_account':fields.many2one('account.account','Target Account',required=True,help='Indicate the target move line account that the mirror move will affect, this account can be in another company.'),
        'origin_journal':fields.many2one('account.journal','Original Journal',required=True,help='Indicate the origin journal where the transaction is taking place.'),
        'targ_journal':fields.many2one('account.journal','Target Journal',required=True,help='Indicate the target journal where the mirror move will be created, this journal can be in another company.'),
        'origin_analytic_account':fields.many2one('account.analytic.account','Origin Analytic Account',required=False,help='Indicate the origin analytic account where the transaction is taking place. Optional.'),
        'targ_analytic_account':fields.many2one('account.analytic.account','Target Analytic Account',required=False,help='Indicate the target analytic account that the mirror move line will have, this analytic account can be in another company.'),
        'mirror_move_prefix':fields.char('Move prefix',size=32,required=True,help='Prefix for the mirror move name.'),
        'inverse_debit_credit':fields.boolean('Inverse debit/credit',help='If set, the debit/credit from the origin move line will be inverted in the target move line. For example, a debit line affecting the origin account in the origin move, will result in a target move with a credit line affecting the target account on the target move.'),
        'notes':fields.text('Notes'),
    }
    # Database-level uniqueness on the record name.
    _sql_constraints = [
        (
            'unique_name',
            'unique(name)',
            'The name must be unique'
        ),
    ]
    def _check_unique_mirror_relation(self, cr, uid, ids, context=None):
        """Constraint helper: returns False when another relation already
        covers the same origin (account, journal, analytic account) triple.
        """
        for relation in self.browse(cr, uid, ids, context=context):
            relation_ids = self.search(cr, uid, [('origin_account','=',relation.origin_account.id),
                                                 ('origin_journal','=',relation.origin_journal.id),
                                                 ('origin_analytic_account','=',relation.origin_analytic_account.id)], context=context)
            # Two or more matches, or one match that is a different record,
            # means the origin triple is duplicated.
            if len(relation_ids) >= 2:
                return False
            elif len(relation_ids) == 1 and not relation_ids[0] == relation.id:
                return False
        return True
    _constraints = [
        (
            _check_unique_mirror_relation,
            'The same relation already exists', ['origin_account','origin_journal','origin_analytic_account']
        )
    ]
class AccountMoveLine(orm.Model):
    """account.move.line extension: links a line to the mirror move it
    spawned in the target company."""
    _inherit = 'account.move.line'
    _columns = {
        'move_mirror_rel_id':fields.many2one('account.move','Move Multicompany Relation'),
    }
    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate a move line without carrying over its mirror-move link.

        Bug fixes vs. the previous version: the mutable ``{}`` default was
        shared across calls (any caller-visible mutation leaked), and the
        method never delegated to ``super`` nor returned, so copying a move
        line silently produced nothing (``None``).
        """
        if default is None:
            default = {}
        default.update({
            'move_mirror_rel_id': False,
        })
        return super(AccountMoveLine, self).copy(
            cr, uid, id, default=default, context=context)
class AccountMove(orm.Model):
    """account.move extension that keeps cross-company "mirror" moves in
    sync: posting an original move creates a mirror move in the target
    company (driven by account.multicompany.relation), and cancelling or
    deleting the original tears the mirror down first.
    """
    _inherit = 'account.move'
    def button_cancel(self, cr, uid, ids, context=None):
        """Cancel the moves in ``ids`` and their mirror moves.

        Mirror moves (reached through ``move_mirror_rel_id`` on the lines)
        are un-reconciled, forced back to draft, cancelled and deleted
        before the standard cancellation runs on the originals.
        """
        self.pool.get('account.move.reconcile')
        for move in self.browse(cr, uid, ids, context=context):
            if not move.journal_id.update_posted:
                raise osv.except_osv(_('Error !'), _('You can not modify a posted entry of this journal !\nYou should set the journal to allow cancelling entries if you want to do that.'))
        #Set user administrator to run this portion of code
        # NOTE(review): from here on ``uid`` is forced to 1 (admin) so the
        # mirror move in the other company can be touched; also ``move``
        # below is whatever the loop above left bound (the last record),
        # which looks correct only when ``ids`` holds a single id -- confirm
        # with callers.
        uid = 1
        for line in move.line_id:
            if line.move_mirror_rel_id:
                move_mirror = self.browse(cr, uid, line.move_mirror_rel_id.id, context=context)
                if not move_mirror.journal_id.update_posted:
                    raise osv.except_osv(_('Error !'), _('You can not modify a posted multicompany mirror entry of this journal !\nYou should set the journal to allow cancelling entries if you want to do that.'))
        move_reconcile_obj = self.pool.get('account.move.reconcile')
        for line in move.line_id:
            if line.move_mirror_rel_id:
                move_mirror = self.browse(cr, uid, line.move_mirror_rel_id.id, context=context)
                # Undo any full or partial reconciliation on the mirror's
                # lines so the mirror move can be cancelled and removed.
                for line_mirror in move_mirror.line_id:
                    if line_mirror.reconcile_id:
                        reconcile = line_mirror.reconcile_id
                        if len(reconcile.line_id) > 2:
                            # Other lines stay partially reconciled; only
                            # this mirror line is detached completely.
                            self.pool.get('account.move.line').write(cr,uid,reconcile.line_id,{'reconcile_id': False, 'reconcile_partial_id':reconcile.id})
                            self.pool.get('account.move.line').write(cr,uid,line_mirror.id,{'reconcile_partial_id': False})
                        else:
                            move_reconcile_obj.unlink(cr,uid,[reconcile.id],context=context)
                    elif line_mirror.reconcile_partial_id:
                        reconcile = line_mirror.reconcile_partial_id
                        if len(reconcile.line_partial_ids) > 2:
                            self.pool.get('account.move.line').write(cr,uid,line_mirror.id,{'reconcile_partial_id': False })
                        else:
                            move_reconcile_obj.unlink(cr,uid,[reconcile.id],context=context)
                # Raw SQL: force the mirror back to draft so it can be
                # cancelled through the normal workflow below.
                cr.execute('UPDATE account_move '\
                    'SET state=%s '\
                    'WHERE id IN %s', ('draft', tuple([move_mirror.id]),))
                self.button_cancel(cr,uid,[move_mirror.id],context=context)
                self.unlink(cr,uid,[move_mirror.id],context=context)
        # NOTE(review): the super() result is discarded and True is returned
        # unconditionally -- kept as-is.
        result = super(AccountMove, self).button_cancel(cr, uid, ids, context=context)
        return True
    def post(self, cr, uid, ids, context=None):
        """Post the moves, then create a mirror move in the target company
        for every line matching an account.multicompany.relation.

        The mirror contains two lines: one on the target account (debit and
        credit optionally swapped) and a balancing line on the target
        journal's default debit/credit account. All mirror-side operations
        run as admin (uid 1) for cross-company access.
        """
        result = super(AccountMove, self).post(cr, uid, ids, context=context)
        for move_id_original in ids:
            account_move_obj = self.pool.get('account.move')
            account_move_line_obj = self.pool.get('account.move.line')
            account_multicompany_relation_obj = self.pool.get('account.multicompany.relation')
            #Continue if this is a reversion move
            if account_move_obj.search(cr, 1, [('move_reverse_id', '=', move_id_original)], context=context):
                continue
            original_move = account_move_obj.browse(cr, 1, move_id_original, context=context)
            if original_move.line_id:
                mirror_selected = False
                for line in original_move.line_id:
                    #Test if the line already has a mirror move associated
                    if line.move_mirror_rel_id:
                        #Reverse the mirror move if the original move is reversed
                        if original_move.move_reverse_id:
                            account_move_obj.reverse(cr, 1, [line.move_mirror_rel_id.id], context={})
                        continue
                    #Get parent accounts for line account
                    parent_account_ids = []
                    parent_account = line.account_id
                    while parent_account:
                        parent_account_ids.append(parent_account.id)
                        parent_account = parent_account.parent_id
                    analytic_account_id = line.analytic_account_id and line.analytic_account_id.id or False
                    # A relation matches if its origin account is the line's
                    # account or any ancestor of it.
                    mirror_selected_list_ids = account_multicompany_relation_obj.search(cr, 1, [('origin_account', 'in', parent_account_ids), ('origin_journal', '=', line.journal_id.id), ('origin_analytic_account', '=', analytic_account_id)], context=context)
                    move_id = False
                    if len(mirror_selected_list_ids) > 0:
                        mirror_selected_list = account_multicompany_relation_obj.browse(cr, 1, mirror_selected_list_ids, context=context)
                        mirror_selected = False
                        if len(mirror_selected_list) == 1:
                            mirror_selected = mirror_selected_list[0]
                        else:
                            # Several candidates: keep the one whose origin
                            # account is closest to the line's account in the
                            # account hierarchy.
                            mirror_index = -1
                            for mirror in mirror_selected_list:
                                if mirror_index < 0 or parent_account_ids.index(mirror.origin_account.id) < mirror_index:
                                    mirror_index = parent_account_ids.index(mirror.origin_account.id)
                                    mirror_selected = mirror
                        if mirror_selected:
                            origin_journal = mirror_selected.origin_journal
                            origin_account = mirror_selected.origin_account
                            targ_journal = mirror_selected.targ_journal
                            targ_account = mirror_selected.targ_account
                            inverse_debit_credit = mirror_selected.inverse_debit_credit
                            mirror_move_prefix = mirror_selected.mirror_move_prefix
                        else:
                            continue
                        #Set period for target move with the correct company
                        if context == None:
                            context_copy = {'company_id': targ_account.company_id.id}
                        else:
                            context_copy = copy(context)
                            context_copy.update({'company_id': targ_account.company_id.id})
                        periods = self.pool.get('account.period').find(cr, 1, dt=original_move.date, context=context_copy)
                        if periods:
                            move_period = periods[0]
                        # NOTE(review): if no period is found, ``move_period``
                        # may be unbound (or stale from a previous line) --
                        # confirm find() always returns a period here.
                        move = {
                            'name':mirror_move_prefix + original_move.name,
                            'ref':original_move.ref,
                            'journal_id':targ_journal.id,
                            'period_id':move_period or False,
                            'to_check':False,
                            'partner_id':original_move.partner_id.id,
                            'date':original_move.date,
                            'narration':original_move.narration,
                            'company_id':targ_account.company_id.id,
                        }
                        move_id = account_move_obj.create(cr, 1, move)
                        # Remember the mirror on the originating line.
                        self.pool.get('account.move.line').write(cr, 1, [line.id], {'move_mirror_rel_id' : move_id})
                        analytic_account_id = ''
                        if line.analytic_account_id and line.analytic_account_id == mirror_selected.origin_analytic_account:
                            analytic_account_id = mirror_selected.targ_analytic_account.id
                        if inverse_debit_credit:
                            line_debit = line.credit
                            line_credit = line.debit
                        else:
                            line_debit = line.debit
                            line_credit = line.credit
                        # First mirror line: hits the target account.
                        move_line_one = {
                            'name':line.name,
                            'debit':line_debit,
                            'credit':line_credit,
                            'account_id':targ_account.id,
                            'move_id': move_id,
                            'amount_currency':line.amount_currency * -1,
                            'period_id':move_period or False,
                            'journal_id':targ_journal.id,
                            'partner_id':line.partner_id.id,
                            'currency_id':line.currency_id.id,
                            'date_maturity':line.date_maturity,
                            'date':line.date,
                            'date_created':line.date_created,
                            'state':'valid',
                            'analytic_account_id':analytic_account_id,
                            'company_id':targ_account.company_id.id,
                        }
                        account_move_line_obj.create(cr, 1, move_line_one)
                        if line.debit != 0.0:
                            move_line_two_account_id = targ_journal.default_credit_account_id
                        else:
                            move_line_two_account_id = targ_journal.default_debit_account_id
                        # Second mirror line: balancing counterpart on the
                        # target journal's default account.
                        move_line_two = {
                            'name':line.name,
                            'debit':line_credit,
                            'credit':line_debit,
                            'account_id':move_line_two_account_id.id,
                            'move_id': move_id,
                            'amount_currency':line.amount_currency,
                            'journal_id':targ_journal.id,
                            'period_id':move_period or False,
                            'partner_id':line.partner_id.id,
                            'currency_id':line.currency_id.id,
                            'date_maturity':line.date_maturity,
                            'date':line.date,
                            'date_created':line.date_created,
                            'state':'valid',
                            'analytic_account_id':False,
                            'company_id':targ_account.company_id.id,
                        }
                        account_move_line_obj.create(cr, 1, move_line_two)
                        #Posted mirror
                        account_move_obj.post(cr, 1, [move_id], context={})
                    if move_id and original_move.move_reverse_id:
                        account_move_obj.reverse(cr, 1, [move_id], context={})
        return result
    def unlink(self, cr, uid, ids, context=None, check=True):
        """Delete moves, first cancelling and deleting their mirror moves.

        NOTE(review): all ORM calls here run with uid 1 (admin) for
        cross-company access, regardless of the caller's uid.
        """
        for move in self.browse(cr, 1, ids, context=context):
            for line in move.line_id:
                if line.move_mirror_rel_id:
                    self.pool.get('account.move').button_cancel(cr, 1, [line.move_mirror_rel_id.id])
                    result = super(AccountMove, self).unlink(cr, 1, [line.move_mirror_rel_id.id], context=context, check=check)
        result = super(AccountMove, self).unlink(cr, 1, ids, context=context, check=check)
        return result
| agpl-3.0 |
takeshineshiro/cinder | cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py | 6 | 3978 | # Copyright (c) - 2015, Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
GET_OPERATIONAL_NETWORK_INTERFACE_ADDRESSES_RESPONSE = etree.XML("""
<results status="passed">
<num-records>2</num-records>
<attributes-list>
<net-interface-info>
<address>%(address1)s</address>
</net-interface-info>
<net-interface-info>
<address>%(address2)s</address>
</net-interface-info>
</attributes-list>
</results>
""" % {"address1": "1.2.3.4", "address2": "99.98.97.96"})
VOLUME_LIST_INFO_RESPONSE = etree.XML("""
<results status="passed">
<volumes>
<volume-info>
<name>vol0</name>
<block-type>64_bit</block-type>
<state>online</state>
<size-total>1441193750528</size-total>
<size-used>3161096192</size-used>
<size-available>1438032654336</size-available>
<percentage-used>0</percentage-used>
<owning-vfiler>vfiler0</owning-vfiler>
<containing-aggregate>aggr0</containing-aggregate>
<space-reserve>volume</space-reserve>
<space-reserve-enabled>true</space-reserve-enabled>
<is-inconsistent>false</is-inconsistent>
<is-unrecoverable>false</is-unrecoverable>
<is-invalid>false</is-invalid>
</volume-info>
<volume-info>
<name>vol1</name>
<block-type>64_bit</block-type>
<state>online</state>
<size-total>1441193750528</size-total>
<size-used>3161096192</size-used>
<size-available>1438032654336</size-available>
<percentage-used>0</percentage-used>
<owning-vfiler>vfiler0</owning-vfiler>
<containing-aggregate>aggr0</containing-aggregate>
<space-reserve>volume</space-reserve>
<space-reserve-enabled>true</space-reserve-enabled>
<is-inconsistent>false</is-inconsistent>
<is-unrecoverable>false</is-unrecoverable>
<is-invalid>false</is-invalid>
</volume-info>
<volume-info>
<name>vol2</name>
<block-type>64_bit</block-type>
<state>offline</state>
<size-total>1441193750528</size-total>
<size-used>3161096192</size-used>
<size-available>1438032654336</size-available>
<percentage-used>0</percentage-used>
<owning-vfiler>vfiler0</owning-vfiler>
<containing-aggregate>aggr0</containing-aggregate>
<space-reserve>volume</space-reserve>
<space-reserve-enabled>true</space-reserve-enabled>
<is-inconsistent>false</is-inconsistent>
<is-unrecoverable>false</is-unrecoverable>
<is-invalid>false</is-invalid>
</volume-info>
<volume-info>
<name>vol3</name>
<block-type>64_bit</block-type>
<state>online</state>
<size-total>1441193750528</size-total>
<size-used>3161096192</size-used>
<size-available>1438032654336</size-available>
<percentage-used>0</percentage-used>
<owning-vfiler>vfiler0</owning-vfiler>
<containing-aggregate>aggr0</containing-aggregate>
<space-reserve>volume</space-reserve>
<space-reserve-enabled>true</space-reserve-enabled>
<is-inconsistent>false</is-inconsistent>
<is-unrecoverable>false</is-unrecoverable>
<is-invalid>false</is-invalid>
</volume-info>
</volumes>
</results>
""")
| apache-2.0 |
siripuramrk/namebench | tools/add_linkcount_and_version_to_csv.py | 174 | 1351 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add link count and version to csv"""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import csv
import check_nameserver_popularity
import sys
reader = csv.reader(open(sys.argv[1]))
writer = csv.writer(open('output.csv', 'w'))
sys.path.append('..')
#sys.path.append('/Users/tstromberg/namebench')
import third_party
from libnamebench import addr_util
from libnamebench import nameserver
for row in reader:
ip = row[0]
ns = nameserver.NameServer(ip)
ns.timeout = 0.5
ns.health_timeout = 0.5
try:
link_count = len(check_nameserver_popularity.GetUrls(ip))
except:
link_count = ''
row.insert(-1, link_count)
row.append(ns.version or '')
print "%s: %s" % (ip, ns.version)
writer.writerow(row)
| apache-2.0 |
googleapis/python-tpu | google/cloud/tpu_v1/types/cloud_tpu.py | 1 | 18294 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
# Manifest of every message type this module defines, registered by
# proto-plus under the google.cloud.tpu.v1 proto package.
__protobuf__ = proto.module(
    package="google.cloud.tpu.v1",
    manifest={
        "SchedulingConfig",
        "NetworkEndpoint",
        "Node",
        "ListNodesRequest",
        "ListNodesResponse",
        "GetNodeRequest",
        "CreateNodeRequest",
        "DeleteNodeRequest",
        "ReimageNodeRequest",
        "StopNodeRequest",
        "StartNodeRequest",
        "TensorFlowVersion",
        "GetTensorFlowVersionRequest",
        "ListTensorFlowVersionsRequest",
        "ListTensorFlowVersionsResponse",
        "AcceleratorType",
        "GetAcceleratorTypeRequest",
        "ListAcceleratorTypesRequest",
        "ListAcceleratorTypesResponse",
        "OperationMetadata",
        "Symptom",
    },
)
class SchedulingConfig(proto.Message):
    r"""Sets the scheduling options for this node.
    Attributes:
        preemptible (bool):
            Defines whether the node is preemptible.
        reserved (bool):
            Whether the node is created under a
            reservation.
    """
    # Generated proto-plus message: field numbers are part of the wire
    # format and must never be changed.
    preemptible = proto.Field(proto.BOOL, number=1,)
    reserved = proto.Field(proto.BOOL, number=2,)
class NetworkEndpoint(proto.Message):
    r"""A network endpoint over which a TPU worker can be reached.
    Attributes:
        ip_address (str):
            The IP address of this network endpoint.
        port (int):
            The port of this network endpoint.
    """
    # Generated proto-plus message: field numbers mirror cloud_tpu.proto.
    ip_address = proto.Field(proto.STRING, number=1,)
    port = proto.Field(proto.INT32, number=2,)
class Node(proto.Message):
    r"""A TPU instance.
    Attributes:
        name (str):
            Output only. Immutable. The name of the TPU
        description (str):
            The user-supplied description of the TPU.
            Maximum of 512 characters.
        accelerator_type (str):
            Required. The type of hardware accelerators
            associated with this node.
        ip_address (str):
            Output only. DEPRECATED! Use network_endpoints instead. The
            network address for the TPU Node as visible to Compute
            Engine instances.
        port (str):
            Output only. DEPRECATED! Use network_endpoints instead. The
            network port for the TPU Node as visible to Compute Engine
            instances.
        state (google.cloud.tpu_v1.types.Node.State):
            Output only. The current state for the TPU
            Node.
        health_description (str):
            Output only. If this field is populated, it
            contains a description of why the TPU Node is
            unhealthy.
        tensorflow_version (str):
            Required. The version of Tensorflow running
            in the Node.
        network (str):
            The name of a network they wish to peer the
            TPU node to. It must be a preexisting Compute
            Engine network inside of the project on which
            this API has been activated. If none is
            provided, "default" will be used.
        cidr_block (str):
            The CIDR block that the TPU node will use
            when selecting an IP address. This CIDR block
            must be a /29 block; the Compute Engine networks
            API forbids a smaller block, and using a larger
            block would be wasteful (a node can only consume
            one IP address). Errors will occur if the CIDR
            block has already been used for a currently
            existing TPU node, the CIDR block conflicts with
            any subnetworks in the user's provided network,
            or the provided network is peered with another
            network that is using that CIDR block.
        service_account (str):
            Output only. The service account used to run
            the tensor flow services within the node. To
            share resources, including Google Cloud Storage
            data, with the Tensorflow job running in the
            Node, this account must have permissions to that
            data.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time when the node was
            created.
        scheduling_config (google.cloud.tpu_v1.types.SchedulingConfig):
            The scheduling options for this node.
        network_endpoints (Sequence[google.cloud.tpu_v1.types.NetworkEndpoint]):
            Output only. The network endpoints where TPU
            workers can be accessed and sent work. It is
            recommended that Tensorflow clients of the node
            reach out to the 0th entry in this map first.
        health (google.cloud.tpu_v1.types.Node.Health):
            The health status of the TPU node.
        labels (Sequence[google.cloud.tpu_v1.types.Node.LabelsEntry]):
            Resource labels to represent user-provided
            metadata.
        use_service_networking (bool):
            Whether the VPC peering for the node is set up through
            Service Networking API. The VPC Peering should be set up
            before provisioning the node. If this field is set,
            cidr_block field should not be specified. If the network,
            that you want to peer the TPU Node to, is Shared VPC
            networks, the node must be created with this this field
            enabled.
        api_version (google.cloud.tpu_v1.types.Node.ApiVersion):
            Output only. The API version that created
            this Node.
        symptoms (Sequence[google.cloud.tpu_v1.types.Symptom]):
            Output only. The Symptoms that have occurred
            to the TPU Node.
    """
    class State(proto.Enum):
        r"""Represents the different states of a TPU node during its
        lifecycle.
        """
        STATE_UNSPECIFIED = 0
        CREATING = 1
        READY = 2
        RESTARTING = 3
        REIMAGING = 4
        DELETING = 5
        REPAIRING = 6
        STOPPED = 8
        STOPPING = 9
        STARTING = 10
        PREEMPTED = 11
        TERMINATED = 12
        HIDING = 13
        HIDDEN = 14
        UNHIDING = 15
    class Health(proto.Enum):
        r"""Health defines the status of a TPU node as reported by
        Health Monitor.
        """
        HEALTH_UNSPECIFIED = 0
        HEALTHY = 1
        DEPRECATED_UNHEALTHY = 2
        TIMEOUT = 3
        UNHEALTHY_TENSORFLOW = 4
        UNHEALTHY_MAINTENANCE = 5
    class ApiVersion(proto.Enum):
        r"""TPU API Version."""
        API_VERSION_UNSPECIFIED = 0
        V1_ALPHA1 = 1
        V1 = 2
        V2_ALPHA1 = 3
    # Generated proto-plus fields: numbers mirror cloud_tpu.proto and are
    # part of the wire format; gaps in the numbering are intentional.
    name = proto.Field(proto.STRING, number=1,)
    description = proto.Field(proto.STRING, number=3,)
    accelerator_type = proto.Field(proto.STRING, number=5,)
    ip_address = proto.Field(proto.STRING, number=8,)
    port = proto.Field(proto.STRING, number=14,)
    state = proto.Field(proto.ENUM, number=9, enum=State,)
    health_description = proto.Field(proto.STRING, number=10,)
    tensorflow_version = proto.Field(proto.STRING, number=11,)
    network = proto.Field(proto.STRING, number=12,)
    cidr_block = proto.Field(proto.STRING, number=13,)
    service_account = proto.Field(proto.STRING, number=15,)
    create_time = proto.Field(
        proto.MESSAGE, number=16, message=timestamp_pb2.Timestamp,
    )
    scheduling_config = proto.Field(
        proto.MESSAGE, number=17, message="SchedulingConfig",
    )
    network_endpoints = proto.RepeatedField(
        proto.MESSAGE, number=21, message="NetworkEndpoint",
    )
    health = proto.Field(proto.ENUM, number=22, enum=Health,)
    labels = proto.MapField(proto.STRING, proto.STRING, number=24,)
    use_service_networking = proto.Field(proto.BOOL, number=27,)
    api_version = proto.Field(proto.ENUM, number=38, enum=ApiVersion,)
    symptoms = proto.RepeatedField(proto.MESSAGE, number=39, message="Symptom",)
class ListNodesRequest(proto.Message):
    r"""Request for [ListNodes][google.cloud.tpu.v1.Tpu.ListNodes].
    Attributes:
        parent (str):
            Required. The parent resource name.
        page_size (int):
            The maximum number of items to return.
        page_token (str):
            The next_page_token value returned from a previous List
            request, if any.
    """
    # Generated proto-plus message: field numbers mirror cloud_tpu.proto.
    parent = proto.Field(proto.STRING, number=1,)
    page_size = proto.Field(proto.INT32, number=2,)
    page_token = proto.Field(proto.STRING, number=3,)
class ListNodesResponse(proto.Message):
    r"""Response for [ListNodes][google.cloud.tpu.v1.Tpu.ListNodes].
    Attributes:
        nodes (Sequence[google.cloud.tpu_v1.types.Node]):
            The listed nodes.
        next_page_token (str):
            The next page token or empty if none.
        unreachable (Sequence[str]):
            Locations that could not be reached.
    """
    @property
    def raw_page(self):
        # Pager protocol hook used by the client library's iterators.
        return self
    nodes = proto.RepeatedField(proto.MESSAGE, number=1, message="Node",)
    next_page_token = proto.Field(proto.STRING, number=2,)
    unreachable = proto.RepeatedField(proto.STRING, number=3,)
class GetNodeRequest(proto.Message):
    r"""Request for [GetNode][google.cloud.tpu.v1.Tpu.GetNode].
    Attributes:
        name (str):
            Required. The resource name.
    """
    # Generated proto-plus message: field number mirrors cloud_tpu.proto.
    name = proto.Field(proto.STRING, number=1,)
class CreateNodeRequest(proto.Message):
    r"""Request for [CreateNode][google.cloud.tpu.v1.Tpu.CreateNode].
    Attributes:
        parent (str):
            Required. The parent resource name.
        node_id (str):
            The unqualified resource name.
        node (google.cloud.tpu_v1.types.Node):
            Required. The node.
    """
    # Generated proto-plus message: field numbers mirror cloud_tpu.proto.
    parent = proto.Field(proto.STRING, number=1,)
    node_id = proto.Field(proto.STRING, number=2,)
    node = proto.Field(proto.MESSAGE, number=3, message="Node",)
class DeleteNodeRequest(proto.Message):
    r"""Request for [DeleteNode][google.cloud.tpu.v1.Tpu.DeleteNode].
    Attributes:
        name (str):
            Required. The resource name.
    """
    # Generated proto-plus message: field number mirrors cloud_tpu.proto.
    name = proto.Field(proto.STRING, number=1,)
class ReimageNodeRequest(proto.Message):
    r"""Request for [ReimageNode][google.cloud.tpu.v1.Tpu.ReimageNode].
    Attributes:
        name (str):
            The resource name.
        tensorflow_version (str):
            The version for reimage to create.
    """
    # Generated proto-plus message: field numbers mirror cloud_tpu.proto.
    name = proto.Field(proto.STRING, number=1,)
    tensorflow_version = proto.Field(proto.STRING, number=2,)
class StopNodeRequest(proto.Message):
    r"""Request for [StopNode][google.cloud.tpu.v1.Tpu.StopNode].
    Attributes:
        name (str):
            The resource name.
    """
    # Generated proto-plus message: field number mirrors cloud_tpu.proto.
    name = proto.Field(proto.STRING, number=1,)
class StartNodeRequest(proto.Message):
    r"""Request for [StartNode][google.cloud.tpu.v1.Tpu.StartNode].
    Attributes:
        name (str):
            The resource name.
    """
    # Generated proto-plus message: field number mirrors cloud_tpu.proto.
    name = proto.Field(proto.STRING, number=1,)
class TensorFlowVersion(proto.Message):
    r"""A tensorflow version that a Node can be configured with.
    Attributes:
        name (str):
            The resource name.
        version (str):
            the tensorflow version.
    """
    # Generated proto-plus message: field numbers mirror cloud_tpu.proto.
    name = proto.Field(proto.STRING, number=1,)
    version = proto.Field(proto.STRING, number=2,)
class GetTensorFlowVersionRequest(proto.Message):
    r"""Request for
    [GetTensorFlowVersion][google.cloud.tpu.v1.Tpu.GetTensorFlowVersion].
    Attributes:
        name (str):
            Required. The resource name.
    """
    # Generated proto-plus message: field number mirrors cloud_tpu.proto.
    name = proto.Field(proto.STRING, number=1,)
class ListTensorFlowVersionsRequest(proto.Message):
    r"""Request for
    [ListTensorFlowVersions][google.cloud.tpu.v1.Tpu.ListTensorFlowVersions].
    Attributes:
        parent (str):
            Required. The parent resource name.
        page_size (int):
            The maximum number of items to return.
        page_token (str):
            The next_page_token value returned from a previous List
            request, if any.
        filter (str):
            List filter.
        order_by (str):
            Sort results.
    """
    # Generated proto-plus message: field numbers mirror cloud_tpu.proto
    # (number 4 is intentionally unused).
    parent = proto.Field(proto.STRING, number=1,)
    page_size = proto.Field(proto.INT32, number=2,)
    page_token = proto.Field(proto.STRING, number=3,)
    filter = proto.Field(proto.STRING, number=5,)
    order_by = proto.Field(proto.STRING, number=6,)
class ListTensorFlowVersionsResponse(proto.Message):
    r"""Response for
    [ListTensorFlowVersions][google.cloud.tpu.v1.Tpu.ListTensorFlowVersions].
    Attributes:
        tensorflow_versions (Sequence[google.cloud.tpu_v1.types.TensorFlowVersion]):
            The listed nodes.
        next_page_token (str):
            The next page token or empty if none.
        unreachable (Sequence[str]):
            Locations that could not be reached.
    """
    @property
    def raw_page(self):
        # Pager protocol hook used by the client library's iterators.
        return self
    tensorflow_versions = proto.RepeatedField(
        proto.MESSAGE, number=1, message="TensorFlowVersion",
    )
    next_page_token = proto.Field(proto.STRING, number=2,)
    unreachable = proto.RepeatedField(proto.STRING, number=3,)
class AcceleratorType(proto.Message):
    r"""A accelerator type that a Node can be configured with.
    Attributes:
        name (str):
            The resource name.
        type_ (str):
            the accelerator type.
    """
    # Generated proto-plus message; trailing underscore avoids shadowing
    # the builtin ``type`` while keeping the proto field name ``type``.
    name = proto.Field(proto.STRING, number=1,)
    type_ = proto.Field(proto.STRING, number=2,)
class GetAcceleratorTypeRequest(proto.Message):
    r"""Request for
    [GetAcceleratorType][google.cloud.tpu.v1.Tpu.GetAcceleratorType].

    Attributes:
        name (str):
            Required. The resource name.
    """

    name = proto.Field(proto.STRING, number=1,)
class ListAcceleratorTypesRequest(proto.Message):
    r"""Request for
    [ListAcceleratorTypes][google.cloud.tpu.v1.Tpu.ListAcceleratorTypes].

    Attributes:
        parent (str):
            Required. The parent resource name.
        page_size (int):
            The maximum number of items to return.
        page_token (str):
            The next_page_token value returned from a previous List
            request, if any.
        filter (str):
            List filter.
        order_by (str):
            Sort results.
    """

    # Field numbers are part of the wire format. Number 4 is skipped --
    # presumably reserved/removed upstream; do not reuse it.
    parent = proto.Field(proto.STRING, number=1,)
    page_size = proto.Field(proto.INT32, number=2,)
    page_token = proto.Field(proto.STRING, number=3,)
    filter = proto.Field(proto.STRING, number=5,)
    order_by = proto.Field(proto.STRING, number=6,)
class ListAcceleratorTypesResponse(proto.Message):
    r"""Response for
    [ListAcceleratorTypes][google.cloud.tpu.v1.Tpu.ListAcceleratorTypes].

    Attributes:
        accelerator_types (Sequence[google.cloud.tpu_v1.types.AcceleratorType]):
            The listed nodes.
        next_page_token (str):
            The next page token or empty if none.
        unreachable (Sequence[str]):
            Locations that could not be reached.
    """

    @property
    def raw_page(self):
        # The client library's paging machinery expects ``raw_page``;
        # this message is its own raw page.
        return self

    accelerator_types = proto.RepeatedField(
        proto.MESSAGE, number=1, message="AcceleratorType",
    )
    next_page_token = proto.Field(proto.STRING, number=2,)
    unreachable = proto.RepeatedField(proto.STRING, number=3,)
class OperationMetadata(proto.Message):
    r"""Metadata describing an [Operation][google.longrunning.Operation]

    Attributes:
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            The time the operation was created.
        end_time (google.protobuf.timestamp_pb2.Timestamp):
            The time the operation finished running.
        target (str):
            Target of the operation - for example
            projects/project-1/connectivityTests/test-1
        verb (str):
            Name of the verb executed by the operation.
        status_detail (str):
            Human-readable status of the operation, if
            any.
        cancel_requested (bool):
            Specifies if cancellation was requested for
            the operation.
        api_version (str):
            API version.
    """

    # Field numbers are part of the protobuf wire format; never renumber.
    create_time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,)
    end_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
    target = proto.Field(proto.STRING, number=3,)
    verb = proto.Field(proto.STRING, number=4,)
    status_detail = proto.Field(proto.STRING, number=5,)
    cancel_requested = proto.Field(proto.BOOL, number=6,)
    api_version = proto.Field(proto.STRING, number=7,)
class Symptom(proto.Message):
    r"""A Symptom instance.

    Attributes:
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Timestamp when the Symptom is created.
        symptom_type (google.cloud.tpu_v1.types.Symptom.SymptomType):
            Type of the Symptom.
        details (str):
            Detailed information of the current Symptom.
        worker_id (str):
            A string used to uniquely distinguish a
            worker within a TPU node.
    """

    class SymptomType(proto.Enum):
        r"""SymptomType represents the different types of Symptoms that a
        TPU can be at.
        """
        # Enum values are part of the wire format; never renumber.
        SYMPTOM_TYPE_UNSPECIFIED = 0
        LOW_MEMORY = 1
        OUT_OF_MEMORY = 2
        EXECUTE_TIMED_OUT = 3
        MESH_BUILD_FAIL = 4
        HBM_OUT_OF_MEMORY = 5
        PROJECT_ABUSE = 6

    create_time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,)
    symptom_type = proto.Field(proto.ENUM, number=2, enum=SymptomType,)
    details = proto.Field(proto.STRING, number=3,)
    worker_id = proto.Field(proto.STRING, number=4,)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
lnielsen/invenio-metrics | invenio_metrics/admin.py | 1 | 1755 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Admin interface to metrics."""
from __future__ import absolute_import, print_function
from flask_admin.contrib.sqla import ModelView
from .models import ResourceUsage
def _(x):
    """Return ``x`` unchanged.

    Placeholder used solely so translation tooling can extract the
    wrapped string literals; nothing is translated at runtime.
    """
    return x
class ResourceUsageModelView(ModelView):
    """Resource usage admin interface."""

    # The view is strictly read-only: creation, editing and deletion of
    # usage records through the admin are all disabled.
    can_create = False
    can_edit = False
    can_delete = False
    column_list = (
        'object_type', 'object_id', 'metric', 'value', 'modified',
    )
    column_filters = ['object_type', 'metric']
    column_searchable_list = ['object_type', 'object_id', 'metric']
    # Most recently modified records first.
    column_default_sort = ('modified', True)
# Registration entry for the admin loader -- presumably discovered by
# Invenio's admin registry; wires the model and view into the
# "Metrics" category.
resourceusage_adminview = dict(
    modelview=ResourceUsageModelView,
    model=ResourceUsage,
    category=_('Metrics'))
| gpl-2.0 |
hmrs-cr/PyArcPics | main_flickruploader.py | 1 | 2463 | #!/usr/bin/python
# coding=UTF8
import argparse
import os
import sys
import utils
import flickruploader
def main():
    """Parse the command line and upload pictures to Flickr (Python 2)."""

    def scan(fup):
        # Dry-run helper: report what would be uploaded without sending it.
        print "Scanning folder", options.folder, "..."
        pc, npc, pcs, npcs = fup.scan_directory(options.folder)
        print pc, "total pictures found. (" + utils.sizeof_fmt(pcs) + ")"
        print npc, "other files found. (" + utils.sizeof_fmt(npcs) + ")"

    parser = argparse.ArgumentParser(description='Upload to Flickr all JPEG pictures in the given folder recursively')
    parser.add_argument('folder', help='The folder to search for pictures')
    parser.add_argument('-u', dest="user_name", help='Flickr user name', default="")
    parser.add_argument('-s', dest='scan_only', action="store_true", help="Scan folder but don't upload pictures")
    parser.add_argument('-n', dest='no_chk_remote_chksum', action="store_true", help="Do not check remote checksum")
    parser.add_argument('-a', dest='auth_only', action="store_true", help="Authenticate to Flickr service")
    options = parser.parse_args()

    if not options.folder:
        parser.print_help()
        exit()

    # API credentials live in the user's config file, never in source.
    config_file_name = "~/.hmsoft/flickr.json"
    api_keys = utils.get_api_keys_from_config(config_file_name)
    api_key, api_secret = api_keys
    if api_key is None or api_secret is None:
        sys.stderr.write("Please add flickr API access keys to config file: " + config_file_name + "\n")
        exit()

    fup = flickruploader.FlickrUploader(api_key, api_secret, options.user_name)

    if options.auth_only:
        # -a: only perform the authentication handshake, then quit.
        if fup.authenticate():
            print "User " + str(fup.user_name) + " authenticated successfully"
        else:
            print "Authentication failed."
        exit()

    if options.scan_only:
        # -s: dry run; no authentication required.
        scan(fup)
        exit()

    print "Authenticating..."
    if not fup.authenticate():
        sys.stderr.write("Flickr authentication error\n")
        exit()

    print "Starting upload as user " + str(fup.user_name)
    # Command-line arguments arrive as byte strings under Python 2.
    options.folder = unicode(options.folder, "UTF-8")

    if options.no_chk_remote_chksum:
        fup.check_remote_chksum = False

    if os.path.isfile(options.folder):
        # A single file was given instead of a folder.
        fup.upload_file(options.folder)
        print "Done."
    else:
        scan(fup)
        t = fup.upload_directory(options.folder)
        print "Done in " + utils.format_time(t)


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Allow Ctrl-C to abort a long upload without a traceback.
        print "Terminated by user."
zedr/django | django/contrib/staticfiles/utils.py | 114 | 1976 | import os
import fnmatch
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def matches_patterns(path, patterns=None):
    """Return True if ``path`` matches any pattern in ``patterns``.

    Matching is case-sensitive (``fnmatch.fnmatchcase``). A ``None`` or
    empty ``patterns`` never matches. The original docstring described a
    nonexistent ``ignore_patterns`` argument; callers use this as a
    generic glob test.
    """
    # any() with a generator short-circuits on the first match.
    return any(fnmatch.fnmatchcase(path, pattern)
               for pattern in (patterns or []))
def get_files(storage, ignore_patterns=None, location=''):
    """
    Recursively walk the storage directories yielding the paths
    of all files that should be copied.
    """
    patterns = [] if ignore_patterns is None else ignore_patterns
    directories, files = storage.listdir(location)

    # Files at this level first, skipping anything ignored.
    for filename in files:
        if matches_patterns(filename, patterns):
            continue
        yield os.path.join(location, filename) if location else filename

    # Then descend into each non-ignored subdirectory.
    for directory in directories:
        if matches_patterns(directory, patterns):
            continue
        subdir = os.path.join(location, directory) if location else directory
        for found in get_files(storage, patterns, subdir):
            yield found
def check_settings(base_url=None):
    """
    Checks if the staticfiles settings have sane values.
    Raises ImproperlyConfigured on the first problem found.
    """
    url = settings.STATIC_URL if base_url is None else base_url

    if not url:
        raise ImproperlyConfigured(
            "You're using the staticfiles app "
            "without having set the required STATIC_URL setting.")

    if settings.MEDIA_URL == url:
        raise ImproperlyConfigured(
            "The MEDIA_URL and STATIC_URL "
            "settings must have different values")

    roots_set = settings.MEDIA_ROOT and settings.STATIC_ROOT
    if roots_set and settings.MEDIA_ROOT == settings.STATIC_ROOT:
        raise ImproperlyConfigured(
            "The MEDIA_ROOT and STATIC_ROOT "
            "settings must have different values")
EvanK/ansible | lib/ansible/modules/cloud/vmware/vmware_about_facts.py | 31 | 3659 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_about_facts
short_description: Provides information about VMware server to which user is connecting to
description:
- This module can be used to gather information about VMware server to which user is trying to connect.
version_added: 2.7
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Provide information about vCenter
vmware_about_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
delegate_to: localhost
register: vcenter_about_info
- name: Provide information about a standalone ESXi server
vmware_about_facts:
hostname: '{{ esxi_hostname }}'
username: '{{ esxi_username }}'
password: '{{ esxi_password }}'
delegate_to: localhost
register: esxi_about_info
'''
RETURN = r'''
about_facts:
description:
- dict about VMware server
returned: success
type: str
sample:
{
"api_type": "VirtualCenter",
"api_version": "6.5",
"build": "5973321",
"instance_uuid": "dbed6e0c-bd88-4ef6-b594-21283e1c677f",
"license_product_name": "VMware VirtualCenter Server",
"license_product_version": "6.0",
"locale_build": "000",
"locale_version": "INTL",
"os_type": "darwin-amd64",
"product_full_name": "VMware vCenter Server 6.5.0 build-5973321",
"product_line_id": "vpx",
"product_name": "VMware vCenter Server (govmomi simulator)",
"vendor": "VMware, Inc.",
"version": "6.5.0"
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
class VmwareAboutManager(PyVmomi):
    """Gathers the vSphere/ESXi ``ServiceContent.about`` information."""

    def __init__(self, module):
        # PyVmomi's constructor connects using the module's standard
        # hostname/username/password parameters.
        super(VmwareAboutManager, self).__init__(module)

    def gather_about_facts(self):
        """Collect the About info and exit the module with the result."""
        if not self.content:
            # No service content available: report empty facts rather
            # than failing the task.
            self.module.exit_json(changed=False, about_facts=dict())

        about = self.content.about

        # Pure read-only gathering, hence changed=False.
        self.module.exit_json(
            changed=False,
            about_facts=dict(
                product_name=about.name,
                product_full_name=about.fullName,
                vendor=about.vendor,
                version=about.version,
                build=about.build,
                locale_version=about.localeVersion,
                locale_build=about.localeBuild,
                os_type=about.osType,
                product_line_id=about.productLineId,
                api_type=about.apiType,
                api_version=about.apiVersion,
                instance_uuid=about.instanceUuid,
                license_product_name=about.licenseProductName,
                license_product_version=about.licenseProductVersion,
            )
        )
def main():
    """Ansible module entry point."""
    argument_spec = vmware_argument_spec()
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,  # read-only module, safe in check mode
    )

    vmware_about_facts_mgr = VmwareAboutManager(module)
    vmware_about_facts_mgr.gather_about_facts()


if __name__ == "__main__":
    main()
| gpl-3.0 |
henryfjordan/django | django/contrib/contenttypes/admin.py | 191 | 5385 | from __future__ import unicode_literals
from functools import partial
from django.contrib.admin.checks import InlineModelAdminChecks
from django.contrib.admin.options import InlineModelAdmin, flatten_fieldsets
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.forms import (
BaseGenericInlineFormSet, generic_inlineformset_factory,
)
from django.core import checks
from django.core.exceptions import FieldDoesNotExist
from django.forms import ALL_FIELDS
from django.forms.models import modelform_defines_fields
class GenericInlineModelAdminChecks(InlineModelAdminChecks):
    """System checks for inlines related to their parent through a
    GenericForeignKey instead of a concrete ForeignKey."""

    def _check_exclude_of_parent_model(self, obj, parent_model):
        # There's no FK to exclude, so no exclusion checks are required.
        return []

    def _check_relation(self, obj, parent_model):
        # There's no FK, but we do need to confirm that the ct_field and ct_fk_field are valid,
        # and that they are part of a GenericForeignKey.

        gfks = [
            f for f in obj.model._meta.virtual_fields
            if isinstance(f, GenericForeignKey)
        ]
        if len(gfks) == 0:
            # admin.E301: the inline model defines no GenericForeignKey at all.
            return [
                checks.Error(
                    "'%s.%s' has no GenericForeignKey." % (
                        obj.model._meta.app_label, obj.model._meta.object_name
                    ),
                    hint=None,
                    obj=obj.__class__,
                    id='admin.E301'
                )
            ]
        else:
            # Check that the ct_field and ct_fk_fields exist
            try:
                obj.model._meta.get_field(obj.ct_field)
            except FieldDoesNotExist:
                # admin.E302: ct_field names a nonexistent model field.
                return [
                    checks.Error(
                        "'ct_field' references '%s', which is not a field on '%s.%s'." % (
                            obj.ct_field, obj.model._meta.app_label, obj.model._meta.object_name
                        ),
                        hint=None,
                        obj=obj.__class__,
                        id='admin.E302'
                    )
                ]

            try:
                obj.model._meta.get_field(obj.ct_fk_field)
            except FieldDoesNotExist:
                # admin.E303: ct_fk_field names a nonexistent model field.
                return [
                    checks.Error(
                        "'ct_fk_field' references '%s', which is not a field on '%s.%s'." % (
                            obj.ct_fk_field, obj.model._meta.app_label, obj.model._meta.object_name
                        ),
                        hint=None,
                        obj=obj.__class__,
                        id='admin.E303'
                    )
                ]

            # There's one or more GenericForeignKeys; make sure that one of them
            # uses the right ct_field and ct_fk_field.
            for gfk in gfks:
                if gfk.ct_field == obj.ct_field and gfk.fk_field == obj.ct_fk_field:
                    return []

            # admin.E304: fields exist, but no GFK is built from this pair.
            return [
                checks.Error(
                    "'%s.%s' has no GenericForeignKey using content type field '%s' and object ID field '%s'." % (
                        obj.model._meta.app_label, obj.model._meta.object_name, obj.ct_field, obj.ct_fk_field
                    ),
                    hint=None,
                    obj=obj.__class__,
                    id='admin.E304'
                )
            ]
class GenericInlineModelAdmin(InlineModelAdmin):
    """Base class for admin inlines attached through a GenericForeignKey."""

    # Names of the content-type and object-id fields backing the GFK.
    ct_field = "content_type"
    ct_fk_field = "object_id"
    formset = BaseGenericInlineFormSet

    checks_class = GenericInlineModelAdminChecks

    def get_formset(self, request, obj=None, **kwargs):
        """Build and return the generic inline formset class."""
        if 'fields' in kwargs:
            fields = kwargs.pop('fields')
        else:
            fields = flatten_fieldsets(self.get_fieldsets(request, obj))
        if self.exclude is None:
            exclude = []
        else:
            exclude = list(self.exclude)
        # Read-only fields can never be edited, so always exclude them.
        exclude.extend(self.get_readonly_fields(request, obj))
        if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
            # Take the custom ModelForm's Meta.exclude into account only if the
            # GenericInlineModelAdmin doesn't define its own.
            exclude.extend(self.form._meta.exclude)
        exclude = exclude or None
        can_delete = self.can_delete and self.has_delete_permission(request, obj)
        defaults = {
            "ct_field": self.ct_field,
            "fk_field": self.ct_fk_field,
            "form": self.form,
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
            "formset": self.formset,
            "extra": self.get_extra(request, obj),
            "can_delete": can_delete,
            "can_order": False,
            "fields": fields,
            "min_num": self.get_min_num(request, obj),
            "max_num": self.get_max_num(request, obj),
            "exclude": exclude
        }
        # Caller-supplied kwargs win over the computed defaults.
        defaults.update(kwargs)

        if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
            # No explicit field list anywhere: let the form include all fields.
            defaults['fields'] = ALL_FIELDS

        return generic_inlineformset_factory(self.model, **defaults)
class GenericStackedInline(GenericInlineModelAdmin):
    # Stacked (one fieldset per object) rendering of a generic inline.
    template = 'admin/edit_inline/stacked.html'
class GenericTabularInline(GenericInlineModelAdmin):
    # Tabular (one row per object) rendering of a generic inline.
    template = 'admin/edit_inline/tabular.html'
| bsd-3-clause |
charmoniumQ/Hallways | python_client/hallways/gui/main.py | 2 | 1792 | from __future__ import print_function
import sys
import numpy as np
from PyQt5.QtWidgets import QApplication
from .skeleton import Skeleton
from ..wifi import WiFiScanner
from ..location import Location
from ..storage import FileStorageServer
# TODO: GUI for these settings
interface = 'wlp3s0'
delay = 5
mock_network = True
mock_wifi = False
#network_names = ['CometNet']
network_names = None
class Main(Skeleton):
    """GUI controller wiring the Skeleton window to the Wi-Fi scanner."""

    def __init__(self):
        Skeleton.__init__(self)
        # core components
        self._scanner = WiFiScanner(interface, delay, network_names=network_names, mock=mock_wifi)
        # clear=True wipes any data left over from a previous session.
        self._server = FileStorageServer("data.json", clear=True)

    def start_recording_with_location(self, x, y):
        """Begin scanning, tagging samples with the known (x, y) position."""
        self.log('Start collecting data from ({x:d}, {y:d})'.format(**locals()))
        self._scanner.start_scanning(Location(x, y, 0))

    def start_recording_without_location(self):
        # Position unknown: record against a placeholder origin.
        self.log('Start collecting data without location')
        self._scanner.start_scanning(Location(0, 0, 0))

    def stop_recording_with_location(self):
        """Stop scanning and upload the collected samples to storage."""
        self.log('Stopping scanning')
        data = self._scanner.stop_scanning()
        self.log('Uploading {n} data points of {j} networks'.format(n=data.n, j=len(data.networks)))
        self._server.upload(data)

    def stop_recording_without_location(self):
        # NOTE(review): only logs the collected data -- the actual
        # self-localisation step appears to be unimplemented here.
        self.log('Stopping scanning')
        data = self._scanner.stop_scanning()
        self.log('Locating self with {n} data points of {j} networks'.format(n=data.n, j=len(data.networks)))

    def join(self):
        """Stop the scanner and wait for its background work to finish."""
        self._scanner.stop_scanning()
        self._scanner.join()
__all__ = ['Main']
if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = Main()
    # Non-zero default so a crash in the event loop still exits non-zero.
    res = 1
    try:
        res = app.exec_()
    finally:
        # Always stop the scanner thread, on success or failure. The
        # original used a bare ``except:`` that called ``self.join()``
        # -- ``self`` is undefined at module level (NameError) and
        # ``res`` was unbound at ``sys.exit(res)`` after an exception.
        ex.join()
    sys.exit(res)
gameduell/duell | bin/mac/python2.7.9/lib/python2.7/trace.py | 153 | 29890 | #!/usr/bin/env python
# portions copyright 2001, Autonomous Zones Industries, Inc., all rights...
# err... reserved and offered to the public under the terms of the
# Python 2.2 license.
# Author: Zooko O'Whielacronx
# http://zooko.com/
# mailto:zooko@zooko.com
#
# Copyright 2000, Mojam Media, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1999, Bioreason, Inc., all rights reserved.
# Author: Andrew Dalke
#
# Copyright 1995-1997, Automatrix, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved.
#
#
# Permission to use, copy, modify, and distribute this Python software and
# its associated documentation for any purpose without fee is hereby
# granted, provided that the above copyright notice appears in all copies,
# and that both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of neither Automatrix,
# Bioreason or Mojam Media be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior permission.
#
"""program/module to trace Python program or function execution
Sample use, command line:
trace.py -c -f counts --ignore-dir '$prefix' spam.py eggs
trace.py -t --ignore-dir '$prefix' spam.py eggs
trace.py --trackcalls spam.py eggs
Sample use, programmatically
import sys
# create a Trace object, telling it what to ignore, and whether to
# do tracing or line-counting or both.
tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix,], trace=0,
count=1)
# run the new command using the given tracer
tracer.run('main()')
# make a report, placing output in /tmp
r = tracer.results()
r.write_results(show_missing=True, coverdir="/tmp")
"""
import linecache
import os
import re
import sys
import time
import token
import tokenize
import inspect
import gc
import dis
try:
import cPickle
pickle = cPickle
except ImportError:
import pickle
try:
    import threading
except ImportError:
    # No threading support: install/remove the trace function on the
    # main interpreter thread only.
    _settrace = sys.settrace

    def _unsettrace():
        sys.settrace(None)
else:
    # With threading available the trace function must be registered both
    # for the current thread and for any threads started afterwards.
    def _settrace(func):
        threading.settrace(func)
        sys.settrace(func)

    def _unsettrace():
        sys.settrace(None)
        threading.settrace(None)
def usage(outfile):
    """Write the command-line help text for this program to *outfile*."""
    outfile.write("""Usage: %s [OPTIONS] <file> [ARGS]
Meta-options:
--help Display this help then exit.
--version Output version information then exit.
Otherwise, exactly one of the following three options must be given:
-t, --trace Print each line to sys.stdout before it is executed.
-c, --count Count the number of times each line is executed
and write the counts to <module>.cover for each
module executed, in the module's directory.
See also `--coverdir', `--file', `--no-report' below.
-l, --listfuncs Keep track of which functions are executed at least
once and write the results to sys.stdout after the
program exits.
-T, --trackcalls Keep track of caller/called pairs and write the
results to sys.stdout after the program exits.
-r, --report Generate a report from a counts file; do not execute
any code. `--file' must specify the results file to
read, which must have been created in a previous run
with `--count --file=FILE'.
Modifiers:
-f, --file=<file> File to accumulate counts over several runs.
-R, --no-report Do not generate the coverage report files.
Useful if you want to accumulate over several runs.
-C, --coverdir=<dir> Directory where the report files. The coverage
report for <package>.<module> is written to file
<dir>/<package>/<module>.cover.
-m, --missing Annotate executable lines that were not executed
with '>>>>>> '.
-s, --summary Write a brief summary on stdout for each file.
(Can only be used with --count or --report.)
-g, --timing Prefix each line with the time since the program started.
Only used while tracing.
Filters, may be repeated multiple times:
--ignore-module=<mod> Ignore the given module(s) and its submodules
(if it is a package). Accepts comma separated
list of module names
--ignore-dir=<dir> Ignore files in the given directory (multiple
directories can be joined by os.pathsep).
""" % sys.argv[0])
PRAGMA_NOCOVER = "#pragma NO COVER"
# Simple rx to find lines with no code.
rx_blank = re.compile(r'^\s*(#.*)?$')
class Ignore:
    """Decides which (filename, modulename) pairs the tracer must skip."""

    def __init__(self, modules = None, dirs = None):
        self._mods = modules or []
        self._dirs = dirs or []
        self._dirs = map(os.path.normpath, self._dirs)
        # Per-module memo of previous decisions; code compiled from a
        # string ('<string>') is always ignored.
        self._ignore = { '<string>': 1 }

    def names(self, filename, modulename):
        """Return 1 if the module should be ignored, else 0 (memoized)."""
        if modulename in self._ignore:
            return self._ignore[modulename]

        # haven't seen this one before, so see if the module name is
        # on the ignore list. Need to take some care since ignoring
        # "cmp" mustn't mean ignoring "cmpcache" but ignoring
        # "Spam" must also mean ignoring "Spam.Eggs".
        for mod in self._mods:
            if mod == modulename: # Identical names, so ignore
                self._ignore[modulename] = 1
                return 1
            # check if the module is a proper submodule of something on
            # the ignore list
            n = len(mod)
            # (will not overflow since if the first n characters are the
            # same and the name has not already occurred, then the size
            # of "name" is greater than that of "mod")
            if mod == modulename[:n] and modulename[n] == '.':
                self._ignore[modulename] = 1
                return 1

        # Now check that __file__ isn't in one of the directories
        if filename is None:
            # must be a built-in, so we must ignore
            self._ignore[modulename] = 1
            return 1

        # Ignore a file when it contains one of the ignorable paths
        for d in self._dirs:
            # The '+ os.sep' is to ensure that d is a parent directory,
            # as compared to cases like:
            # d = "/usr/local"
            # filename = "/usr/local.py"
            # or
            # d = "/usr/local.py"
            # filename = "/usr/local.py"
            if filename.startswith(d + os.sep):
                self._ignore[modulename] = 1
                return 1

        # Tried the different ways, so we don't ignore this module
        self._ignore[modulename] = 0
        return 0
def modname(path):
    """Return a plausible module name for the path (basename, no extension)."""
    stem, _unused_ext = os.path.splitext(os.path.basename(path))
    return stem
def fullmodname(path):
    """Return a plausible dotted module name (e.g. ``pkg.sub.mod``) for the path."""
    # If the file 'path' is part of a package, then the filename isn't
    # enough to uniquely identify it. Try to do the right thing by
    # looking in sys.path for the longest matching prefix. We'll
    # assume that the rest is the package name.

    comparepath = os.path.normcase(path)
    longest = ""
    for dir in sys.path:
        dir = os.path.normcase(dir)
        # Require a path-separator boundary so "/usr/lib2" never matches
        # a sys.path entry of "/usr/lib".
        if comparepath.startswith(dir) and comparepath[len(dir)] == os.sep:
            if len(dir) > len(longest):
                longest = dir

    if longest:
        base = path[len(longest) + 1:]
    else:
        base = path
    # the drive letter is never part of the module name
    drive, base = os.path.splitdrive(base)
    base = base.replace(os.sep, ".")
    if os.altsep:
        base = base.replace(os.altsep, ".")
    filename, ext = os.path.splitext(base)
    return filename.lstrip(".")
class CoverageResults:
    """Accumulates line counts, called functions and caller/callee pairs,
    and writes the ``.cover`` report files (Python 2 code)."""

    def __init__(self, counts=None, calledfuncs=None, infile=None,
                 callers=None, outfile=None):
        self.counts = counts
        if self.counts is None:
            self.counts = {}
        self.counter = self.counts.copy() # map (filename, lineno) to count
        self.calledfuncs = calledfuncs
        if self.calledfuncs is None:
            self.calledfuncs = {}
        self.calledfuncs = self.calledfuncs.copy()
        self.callers = callers
        if self.callers is None:
            self.callers = {}
        self.callers = self.callers.copy()
        self.infile = infile
        self.outfile = outfile
        if self.infile:
            # Try to merge existing counts file.
            try:
                counts, calledfuncs, callers = \
                        pickle.load(open(self.infile, 'rb'))
                self.update(self.__class__(counts, calledfuncs, callers))
            except (IOError, EOFError, ValueError), err:
                # A missing/corrupt counts file is not fatal; start fresh.
                print >> sys.stderr, ("Skipping counts file %r: %s"
                                      % (self.infile, err))

    def update(self, other):
        """Merge in the data from another CoverageResults"""
        counts = self.counts
        calledfuncs = self.calledfuncs
        callers = self.callers
        other_counts = other.counts
        other_calledfuncs = other.calledfuncs
        other_callers = other.callers

        # Line counts add up; the func/caller maps are just sets (value 1).
        for key in other_counts.keys():
            counts[key] = counts.get(key, 0) + other_counts[key]

        for key in other_calledfuncs.keys():
            calledfuncs[key] = 1

        for key in other_callers.keys():
            callers[key] = 1

    def write_results(self, show_missing=True, summary=False, coverdir=None):
        """Write the coverage report(s).

        @param show_missing  annotate executable-but-unexecuted lines
        @param summary       print a per-module summary to stdout
        @param coverdir      directory for the ``.cover`` files; default
                             is next to each source file
        """
        if self.calledfuncs:
            print
            print "functions called:"
            calls = self.calledfuncs.keys()
            calls.sort()
            for filename, modulename, funcname in calls:
                print ("filename: %s, modulename: %s, funcname: %s"
                       % (filename, modulename, funcname))

        if self.callers:
            print
            print "calling relationships:"
            calls = self.callers.keys()
            calls.sort()
            lastfile = lastcfile = ""
            for ((pfile, pmod, pfunc), (cfile, cmod, cfunc)) in calls:
                if pfile != lastfile:
                    print
                    print "***", pfile, "***"
                    lastfile = pfile
                    lastcfile = ""
                if cfile != pfile and lastcfile != cfile:
                    print " -->", cfile
                    lastcfile = cfile
                print " %s.%s -> %s.%s" % (pmod, pfunc, cmod, cfunc)

        # turn the counts data ("(filename, lineno) = count") into something
        # accessible on a per-file basis
        per_file = {}
        for filename, lineno in self.counts.keys():
            lines_hit = per_file[filename] = per_file.get(filename, {})
            lines_hit[lineno] = self.counts[(filename, lineno)]

        # accumulate summary info, if needed
        sums = {}

        for filename, count in per_file.iteritems():
            # skip some "files" we don't care about...
            if filename == "<string>":
                continue
            if filename.startswith("<doctest "):
                continue

            # Report against the source, not the compiled file.
            if filename.endswith((".pyc", ".pyo")):
                filename = filename[:-1]

            if coverdir is None:
                dir = os.path.dirname(os.path.abspath(filename))
                modulename = modname(filename)
            else:
                dir = coverdir
                if not os.path.exists(dir):
                    os.makedirs(dir)
                modulename = fullmodname(filename)

            # If desired, get a list of the line numbers which represent
            # executable content (returned as a dict for better lookup speed)
            if show_missing:
                lnotab = find_executable_linenos(filename)
            else:
                lnotab = {}

            source = linecache.getlines(filename)
            coverpath = os.path.join(dir, modulename + ".cover")
            n_hits, n_lines = self.write_results_file(coverpath, source,
                                                      lnotab, count)

            if summary and n_lines:
                percent = 100 * n_hits // n_lines
                sums[modulename] = n_lines, percent, modulename, filename

        if summary and sums:
            mods = sums.keys()
            mods.sort()
            print "lines cov% module (path)"
            for m in mods:
                n_lines, percent, modulename, filename = sums[m]
                print "%5d %3d%% %s (%s)" % sums[m]

        if self.outfile:
            # try and store counts and module info into self.outfile
            try:
                pickle.dump((self.counts, self.calledfuncs, self.callers),
                            open(self.outfile, 'wb'), 1)
            except IOError, err:
                print >> sys.stderr, "Can't save counts files because %s" % err

    def write_results_file(self, path, lines, lnotab, lines_hit):
        """Return a coverage results file in path.

        Returns (n_hits, n_lines): executed lines and executable lines.
        """
        try:
            outfile = open(path, "w")
        except IOError, err:
            print >> sys.stderr, ("trace: Could not open %r for writing: %s"
                                  "- skipping" % (path, err))
            return 0, 0

        n_lines = 0
        n_hits = 0
        for i, line in enumerate(lines):
            lineno = i + 1
            # do the blank/comment match to try to mark more lines
            # (help the reader find stuff that hasn't been covered)
            if lineno in lines_hit:
                outfile.write("%5d: " % lines_hit[lineno])
                n_hits += 1
                n_lines += 1
            elif rx_blank.match(line):
                outfile.write(" ")
            else:
                # lines preceded by no marks weren't hit
                # Highlight them if so indicated, unless the line contains
                # #pragma: NO COVER
                if lineno in lnotab and not PRAGMA_NOCOVER in lines[i]:
                    outfile.write(">>>>>> ")
                    n_lines += 1
                else:
                    outfile.write(" ")
            outfile.write(lines[i].expandtabs(8))
        outfile.close()

        return n_hits, n_lines
def find_lines_from_code(code, strs):
    """Return dict where keys are lines in the line number table.

    Lines listed in *strs* (docstring positions) are excluded.
    """
    return {lineno: 1
            for _offset, lineno in dis.findlinestarts(code)
            if lineno not in strs}
def find_lines(code, strs):
    """Return lineno dict for all code objects reachable from code."""
    # Line info for this scope itself...
    result = find_lines_from_code(code, strs)
    # ...plus, recursively, every nested code object in the constants
    # (function bodies, class bodies, lambdas, comprehensions).
    nested = (find_lines(const, strs)
              for const in code.co_consts
              if inspect.iscode(const))
    for sub in nested:
        result.update(sub)
    return result
def find_strings(filename):
"""Return a dict of possible docstring positions.
The dict maps line numbers to strings. There is an entry for
line that contains only a string or a part of a triple-quoted
string.
"""
d = {}
# If the first token is a string, then it's the module docstring.
# Add this special case so that the test in the loop passes.
prev_ttype = token.INDENT
f = open(filename)
for ttype, tstr, start, end, line in tokenize.generate_tokens(f.readline):
if ttype == token.STRING:
if prev_ttype == token.INDENT:
sline, scol = start
eline, ecol = end
for i in range(sline, eline + 1):
d[i] = 1
prev_ttype = ttype
f.close()
return d
def find_executable_linenos(filename):
    """Return dict where keys are line numbers in the line number table.

    Docstring-only lines are excluded; an unreadable file yields {}.
    """
    try:
        prog = open(filename, "rU").read()
    except IOError, err:
        # Unreadable source: warn and report no executable lines.
        print >> sys.stderr, ("Not printing coverage data for %r: %s"
                              % (filename, err))
        return {}
    code = compile(prog, filename, "exec")
    strs = find_strings(filename)
    return find_lines(code, strs)
class Trace:
    """Trace and/or count execution of Python code.

    Depending on the constructor flags, one of several global/local
    trace functions is installed (via _settrace/sys.settrace) while
    code runs under run()/runctx()/runfunc(); the collected data is
    exposed through results().
    """

    def __init__(self, count=1, trace=1, countfuncs=0, countcallers=0,
                 ignoremods=(), ignoredirs=(), infile=None, outfile=None,
                 timing=False):
        """
        @param count true iff it should count number of times each
                     line is executed
        @param trace true iff it should print out each line that is
                     being counted
        @param countfuncs true iff it should just output a list of
                     (filename, modulename, funcname,) for functions
                     that were called at least once;  This overrides
                     `count' and `trace'
        @param ignoremods a list of the names of modules to ignore
        @param ignoredirs a list of the names of directories to ignore
                     all of the (recursive) contents of
        @param infile file from which to read stored counts to be
                     added into the results
        @param outfile file in which to write the results
        @param timing true iff timing information be displayed
        """
        self.infile = infile
        self.outfile = outfile
        # Module/directory ignore filter, built once up front.
        self.ignore = Ignore(ignoremods, ignoredirs)
        self.counts = {}   # keys are (filename, linenumber)
        self.blabbed = {} # for debugging
        self.pathtobasename = {} # for memoizing os.path.basename
        self.donothing = 0
        self.trace = trace
        self._calledfuncs = {}
        self._callers = {}
        self._caller_cache = {}
        self.start_time = None
        if timing:
            self.start_time = time.time()
        # Pick the trace-function pair matching the requested mode;
        # countcallers/countfuncs override count and trace.
        if countcallers:
            self.globaltrace = self.globaltrace_trackcallers
        elif countfuncs:
            self.globaltrace = self.globaltrace_countfuncs
        elif trace and count:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_trace_and_count
        elif trace:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_trace
        elif count:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_count
        else:
            # Ahem -- do nothing?  Okay.
            self.donothing = 1

    def run(self, cmd):
        """Exec `cmd' in __main__'s namespace under tracing."""
        import __main__
        dict = __main__.__dict__
        self.runctx(cmd, dict, dict)

    def runctx(self, cmd, globals=None, locals=None):
        """Exec `cmd' in the given namespaces with tracing installed."""
        if globals is None: globals = {}
        if locals is None: locals = {}
        if not self.donothing:
            _settrace(self.globaltrace)
        try:
            exec cmd in globals, locals
        finally:
            # Always uninstall the trace hook, even if cmd raised.
            if not self.donothing:
                _unsettrace()

    def runfunc(self, func, *args, **kw):
        """Call func(*args, **kw) under tracing and return its result."""
        result = None
        if not self.donothing:
            sys.settrace(self.globaltrace)
        try:
            result = func(*args, **kw)
        finally:
            if not self.donothing:
                sys.settrace(None)
        return result

    def file_module_function_of(self, frame):
        """Return (filename, modulename, funcname) for a frame.

        funcname is qualified with a class name when one can be
        recovered via gc.get_referrers(); results are memoized in
        self._caller_cache keyed by code object.
        """
        code = frame.f_code
        filename = code.co_filename
        if filename:
            modulename = modname(filename)
        else:
            modulename = None

        funcname = code.co_name
        clsname = None
        if code in self._caller_cache:
            if self._caller_cache[code] is not None:
                clsname = self._caller_cache[code]
        else:
            self._caller_cache[code] = None
            ## use of gc.get_referrers() was suggested by Michael Hudson
            # all functions which refer to this code object
            funcs = [f for f in gc.get_referrers(code)
                         if inspect.isfunction(f)]
            # require len(func) == 1 to avoid ambiguity caused by calls to
            # new.function(): "In the face of ambiguity, refuse the
            # temptation to guess."
            if len(funcs) == 1:
                dicts = [d for d in gc.get_referrers(funcs[0])
                             if isinstance(d, dict)]
                if len(dicts) == 1:
                    classes = [c for c in gc.get_referrers(dicts[0])
                                   if hasattr(c, "__bases__")]
                    if len(classes) == 1:
                        # ditto for new.classobj()
                        clsname = classes[0].__name__
                        # cache the result - assumption is that new.* is
                        # not called later to disturb this relationship
                        # _caller_cache could be flushed if functions in
                        # the new module get called.
                        self._caller_cache[code] = clsname
        if clsname is not None:
            funcname = "%s.%s" % (clsname, funcname)

        return filename, modulename, funcname

    def globaltrace_trackcallers(self, frame, why, arg):
        """Handler for call events.

        Adds information about who called who to the self._callers dict.
        """
        if why == 'call':
            # XXX Should do a better job of identifying methods
            this_func = self.file_module_function_of(frame)
            parent_func = self.file_module_function_of(frame.f_back)
            self._callers[(parent_func, this_func)] = 1

    def globaltrace_countfuncs(self, frame, why, arg):
        """Handler for call events.

        Adds (filename, modulename, funcname) to the self._calledfuncs dict.
        """
        if why == 'call':
            this_func = self.file_module_function_of(frame)
            self._calledfuncs[this_func] = 1

    def globaltrace_lt(self, frame, why, arg):
        """Handler for call events.

        If the code block being entered is to be ignored, returns `None',
        else returns self.localtrace.
        """
        if why == 'call':
            code = frame.f_code
            filename = frame.f_globals.get('__file__', None)
            if filename:
                # XXX modname() doesn't work right for packages, so
                # the ignore support won't work right for packages
                modulename = modname(filename)
                if modulename is not None:
                    ignore_it = self.ignore.names(filename, modulename)
                    if not ignore_it:
                        if self.trace:
                            print (" --- modulename: %s, funcname: %s"
                                   % (modulename, code.co_name))
                        return self.localtrace
            else:
                # Frames without __file__ (e.g. exec'd strings) are
                # not traced.
                return None

    def localtrace_trace_and_count(self, frame, why, arg):
        """Line handler: print the line and bump its execution count."""
        if why == "line":
            # record the file name and line number of every trace
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            key = filename, lineno
            self.counts[key] = self.counts.get(key, 0) + 1

            if self.start_time:
                print '%.2f' % (time.time() - self.start_time),
            bname = os.path.basename(filename)
            print "%s(%d): %s" % (bname, lineno,
                                  linecache.getline(filename, lineno)),
        return self.localtrace

    def localtrace_trace(self, frame, why, arg):
        """Line handler: print each executed line (no counting)."""
        if why == "line":
            # record the file name and line number of every trace
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno

            if self.start_time:
                print '%.2f' % (time.time() - self.start_time),
            bname = os.path.basename(filename)
            print "%s(%d): %s" % (bname, lineno,
                                  linecache.getline(filename, lineno)),
        return self.localtrace

    def localtrace_count(self, frame, why, arg):
        """Line handler: bump the (filename, lineno) execution count."""
        if why == "line":
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            key = filename, lineno
            self.counts[key] = self.counts.get(key, 0) + 1
        return self.localtrace

    def results(self):
        """Package the gathered data as a CoverageResults object."""
        return CoverageResults(self.counts, infile=self.infile,
                               outfile=self.outfile,
                               calledfuncs=self._calledfuncs,
                               callers=self._callers)
def _err_exit(msg):
    """Print "<prog>: <msg>" on stderr and abort with exit status 1."""
    prog = sys.argv[0]
    sys.stderr.write(prog + ": " + msg + "\n")
    sys.exit(1)
def main(argv=None):
    """Command-line entry point: parse options, then run/report coverage."""
    import getopt

    if argv is None:
        argv = sys.argv
    try:
        opts, prog_argv = getopt.getopt(argv[1:], "tcrRf:d:msC:lTg",
                                        ["help", "version", "trace", "count",
                                         "report", "no-report", "summary",
                                         "file=", "missing",
                                         "ignore-module=", "ignore-dir=",
                                         "coverdir=", "listfuncs",
                                         "trackcalls", "timing"])

    except getopt.error, msg:
        sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
        sys.stderr.write("Try `%s --help' for more information\n"
                         % sys.argv[0])
        sys.exit(1)

    # Option defaults.
    trace = 0
    count = 0
    report = 0
    no_report = 0
    counts_file = None
    missing = 0
    ignore_modules = []
    ignore_dirs = []
    coverdir = None
    summary = 0
    listfuncs = False
    countcallers = False
    timing = False

    for opt, val in opts:
        if opt == "--help":
            usage(sys.stdout)
            sys.exit(0)

        if opt == "--version":
            sys.stdout.write("trace 2.0\n")
            sys.exit(0)

        if opt == "-T" or opt == "--trackcalls":
            countcallers = True
            continue

        if opt == "-l" or opt == "--listfuncs":
            listfuncs = True
            continue

        if opt == "-g" or opt == "--timing":
            timing = True
            continue

        if opt == "-t" or opt == "--trace":
            trace = 1
            continue

        if opt == "-c" or opt == "--count":
            count = 1
            continue

        if opt == "-r" or opt == "--report":
            report = 1
            continue

        if opt == "-R" or opt == "--no-report":
            no_report = 1
            continue

        if opt == "-f" or opt == "--file":
            counts_file = val
            continue

        if opt == "-m" or opt == "--missing":
            missing = 1
            continue

        if opt == "-C" or opt == "--coverdir":
            coverdir = val
            continue

        if opt == "-s" or opt == "--summary":
            summary = 1
            continue

        if opt == "--ignore-module":
            # Comma-separated list of module names.
            for mod in val.split(","):
                ignore_modules.append(mod.strip())
            continue

        if opt == "--ignore-dir":
            # os.pathsep-separated list; expand env vars plus the
            # special $prefix/$exec_prefix placeholders.
            for s in val.split(os.pathsep):
                s = os.path.expandvars(s)
                # should I also call expanduser? (after all, could use $HOME)
                s = s.replace("$prefix",
                              os.path.join(sys.prefix, "lib",
                                           "python" + sys.version[:3]))
                s = s.replace("$exec_prefix",
                              os.path.join(sys.exec_prefix, "lib",
                                           "python" + sys.version[:3]))
                s = os.path.normpath(s)
                ignore_dirs.append(s)
            continue

        assert 0, "Should never get here"

    # Validate option combinations.
    if listfuncs and (count or trace):
        _err_exit("cannot specify both --listfuncs and (--trace or --count)")

    if not (count or trace or report or listfuncs or countcallers):
        _err_exit("must specify one of --trace, --count, --report, "
                  "--listfuncs, or --trackcalls")

    if report and no_report:
        _err_exit("cannot specify both --report and --no-report")

    if report and not counts_file:
        _err_exit("--report requires a --file")

    if no_report and len(prog_argv) == 0:
        _err_exit("missing name of file to run")

    # everything is ready
    if report:
        # Report-only mode: read previously stored counts, write report.
        results = CoverageResults(infile=counts_file, outfile=counts_file)
        results.write_results(missing, summary=summary, coverdir=coverdir)
    else:
        # Run the target program under tracing, emulating how it would
        # see sys.argv/sys.path if run directly.
        sys.argv = prog_argv
        progname = prog_argv[0]
        sys.path[0] = os.path.split(progname)[0]

        t = Trace(count, trace, countfuncs=listfuncs,
                  countcallers=countcallers, ignoremods=ignore_modules,
                  ignoredirs=ignore_dirs, infile=counts_file,
                  outfile=counts_file, timing=timing)
        try:
            with open(progname) as fp:
                code = compile(fp.read(), progname, 'exec')
            # try to emulate __main__ namespace as much as possible
            globs = {
                '__file__': progname,
                '__name__': '__main__',
                '__package__': None,
                '__cached__': None,
            }
            t.runctx(code, globs, globs)
        except IOError, err:
            _err_exit("Cannot run file %r because: %s" % (sys.argv[0], err))
        except SystemExit:
            # The traced program calling sys.exit() is normal.
            pass

        results = t.results()

        if not no_report:
            results.write_results(missing, summary=summary, coverdir=coverdir)
# Allow running the module directly as a script.
if __name__=='__main__':
    main()
| bsd-2-clause |
prune998/ansible | lib/ansible/plugins/lookup/env.py | 251 | 1071 | # (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):

    def run(self, terms, variables, **kwargs):
        """Look up environment variables.

        The first whitespace-separated word of each term is used as the
        variable name; unset variables yield the empty string.
        """
        return [os.getenv(term.split()[0], '') for term in terms]
| gpl-3.0 |
hongyan0118/uiautotestmacaca | THhealth/ForTest/run.py | 1 | 2307 | #-*- coding: utf-8 -*-
import os
import sys
from macaca import WebDriver
sys.path.append(os.path.split(os.path.split(os.path.abspath(''))[0])[0])
from Public.Log import Log
from Public.ReportPath import ReportPath
from Public.BasePage import BasePage
from THhealth.PageObject.THhealthHomePage import THhealthHomePage
from THhealth.PageObject.WizardPage import skip_wizard_to_home
from THhealth.PageObject.PermissionRequestPopupPage import PermissionRequestPopupPage
from THhealth.PageObject.HealthNewsPage import HealthNewsPage
from THhealth.PageObject.THhealthDiscoverPage import THhealthDiscoverPage
from THhealth.PageObject.THhealthConsulationPage import THhealthConsulationPage
from THhealth.PageObject.THhealthMyPage import THhealthMyPage
from THhealth.PageObject.THhealthRecordPage import THhealthRecordPage
class Run(BasePage):

    def run(self):
        """Swipe the banner area three times, saving a screenshot each pass."""
        width = self.driver.get_window_size()['width']
        height = self.driver.get_window_size()['height']

        for shot in range(1, 4):
            # Drag right-to-left across the upper part of the screen.
            self.driver.touch('drag', {
                'fromX': width * 0.8,
                'fromY': height * 0.2,
                'toX': width * 0.05,
                'toY': height * 0.2,
                'steps': 5
            })
            self.driver.save_screenshot('banner' + str(shot) + '.jpg')
def init():
    """Wire up logging, report path and the (attached) Macaca driver.

    Command line: run.py <server_port> <udid> <report_path> <session_id>
    """
    port = int(sys.argv[1])
    udid = sys.argv[2]
    report_path = str(sys.argv[3])
    session = sys.argv[4]

    server_url = {
        'hostname': '127.0.0.1',
        'port': port,
    }

    log = Log()
    log.set_logger(udid, report_path + '\\' + 'client.log')

    # Attach to an already-running session instead of starting a new one.
    driver = WebDriver('', server_url)
    driver.attach(session)

    # set cls.path, it must be called before operating on any page
    path = ReportPath()
    path.set_path(report_path)

    # set cls.driver, it must be called before operating on any page
    base_page = BasePage()
    base_page.set_driver(driver)
# Entry point when launched by the test driver (see init() for argv layout).
if __name__ == '__main__':
    init()
    Run().run()
| gpl-3.0 |
slarosa/QGIS | python/plugins/sextante/script/WrongScriptException.py | 5 | 1230 | # -*- coding: utf-8 -*-
"""
***************************************************************************
WrongScriptException.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
class WrongScriptException(Exception):
    """Exception carrying a human-readable message in its `msg` attribute."""

    def __init__(self, msg):
        super(WrongScriptException, self).__init__()
        self.msg = msg
rodsol/opencog | opencog/python/conceptnet/writer.py | 16 | 4006 | __author__ = 'DongMin Kim'
class ConceptNetWriter:
    """Serializes atoms to a file, either as Scheme (repr) or as Python
    `a.add_node`/`a.add_link` calls wrapped in a load_concept_net()
    function.

    `a` is the atomspace used to resolve link outgoing sets; `out_type`
    is "scm" or "py"; `out_name` is the output file path.
    """
    def __init__(self, a, out_type, out_name):
        self.a = a
        self.output_file_type = out_type
        self.output_file_name = out_name

        # Open an output file
        self.output_file = open(self.output_file_name, 'w')

        # Prepare the output file: Python output needs imports and a
        # function header before any atoms are written.
        if self.output_file_type == "py":
            self.output_file.write(
                'import opencog.atomspace\n' +
                'from opencog.atomspace import TruthValue\n' +
                '\n'
                'def load_concept_net(a, types):\n'
            )

    def __write_space(self, pre_space):
        # Emit `pre_space` spaces of indentation into the output file.
        self.output_file.write(
            ' ' * pre_space
        )

    def __write_scm_file(self, output_atom):
        # Scheme output: one atom repr followed by a blank line.
        self.output_file.write(repr(output_atom) + '\n' * 2)

    def __write_py_file(self, output_atom, pre_space):
        # Recursively emit `a.add_link(...)` / `a.add_node(...)` calls,
        # indenting nested atoms by 4 extra spaces per level (8 for
        # outgoing-set members).
        if output_atom.is_link():
            outgoing = self.a.get_outgoing(output_atom.h)

            self.__write_space(pre_space)
            self.output_file.write(
                'a.add_link(\n'
            )
            self.__write_space(pre_space+4)
            self.output_file.write(
                'types.%s,\n' % output_atom.type_name
            )
            self.__write_space(pre_space+4)
            self.output_file.write(
                '[\n'
            )
            for node in outgoing:
                self.__write_py_file(node, pre_space + 8)
                self.output_file.write(',\n')
            self.__write_space(pre_space + 4)
            self.output_file.write(
                '],\n'
            )
            self.__write_space(pre_space + 4)
            self.output_file.write(
                'TruthValue(%f, %f)\n' % (
                    output_atom.tv.mean,
                    output_atom.tv.confidence
                )
            )
            self.__write_space(pre_space)
            # Top-level links (pre_space == 4 inside the generated
            # function body) are followed by a blank line.
            if pre_space == 4:
                self.output_file.write(')\n\n')
            else:
                self.output_file.write(')')
        else:
            self.__write_space(pre_space)
            self.output_file.write(
                'a.add_node(types.%s, "%s", TruthValue(%f, %f))' % (
                    output_atom.type_name,
                    output_atom.name,
                    output_atom.tv.mean,
                    output_atom.tv.confidence
                )
            )

    """
    # This is more beautiful than above, but can't load in python file...
    def __write_py_beautiful_file(self, output_atom, pre_space):
        if output_atom.is_link():
            outgoing = self.a.get_outgoing(output_atom.h)

            self.__write_space(pre_space)
            self.output_file.write(
                '%s(\n' % output_atom.type_name
            )
            for node in outgoing:
                self.__write_py_beautiful_file(node, pre_space + 4)
                self.output_file.write(',\n')
            self.__write_space(pre_space + 4)
            self.output_file.write(
                'TruthValue(%f, %f)\n' % (
                    output_atom.tv.mean,
                    output_atom.tv.confidence
                )
            )
            self.__write_space(pre_space)
            if pre_space == 0:
                self.output_file.write(')\n\n')
            else:
                self.output_file.write(')')
        else:
            self.__write_space(pre_space)
            self.output_file.write(
                '%s("%s", TruthValue(%f, %f))' % (
                    output_atom.type_name,
                    output_atom.name,
                    output_atom.tv.mean,
                    output_atom.tv.confidence
                )
            )
    """

    def write_to_file(self, output_atom, pre_space=0):
        """Write one atom in the format chosen at construction time.

        Raises ValueError for an unknown output_file_type.
        """
        if self.output_file_type == "scm":
            self.__write_scm_file(output_atom)
        elif self.output_file_type == "py":
            self.__write_py_file(output_atom, pre_space+4)
        else:
            raise ValueError()
| agpl-3.0 |
xrg/django-static-gitified | tests/modeltests/proxy_models/tests.py | 33 | 12189 | from __future__ import absolute_import
from django.contrib.contenttypes.models import ContentType
from django.core import management
from django.core.exceptions import FieldError
from django.db import models, DEFAULT_DB_ALIAS
from django.db.models import signals
from django.test import TestCase
from .models import (MyPerson, Person, StatusPerson, LowerStatusPerson,
MyPersonProxy, Abstract, OtherPerson, User, UserProxy, UserProxyProxy,
Country, State, StateProxy, TrackerUser, BaseUser, Bug, ProxyTrackerUser,
Improvement, ProxyProxyBug, ProxyBug, ProxyImprovement)
class ProxyModelTests(TestCase):
    """Tests for Django proxy models: query equivalence, managers,
    inheritance constraints, signals, content types and fixtures."""

    def test_same_manager_queries(self):
        """
        The MyPerson model should be generating the same database queries as
        the Person model (when the same manager is used in each case).
        """
        my_person_sql = MyPerson.other.all().query.get_compiler(
            DEFAULT_DB_ALIAS).as_sql()
        person_sql = Person.objects.order_by("name").query.get_compiler(
            DEFAULT_DB_ALIAS).as_sql()
        self.assertEqual(my_person_sql, person_sql)

    def test_inheretance_new_table(self):
        """
        The StatusPerson models should have its own table (it's using ORM-level
        inheritance).
        """
        sp_sql = StatusPerson.objects.all().query.get_compiler(
            DEFAULT_DB_ALIAS).as_sql()
        p_sql = Person.objects.all().query.get_compiler(
            DEFAULT_DB_ALIAS).as_sql()
        self.assertNotEqual(sp_sql, p_sql)

    def test_basic_proxy(self):
        """
        Creating a Person makes them accessible through the MyPerson proxy.
        """
        person = Person.objects.create(name="Foo McBar")
        self.assertEqual(len(Person.objects.all()), 1)
        self.assertEqual(len(MyPerson.objects.all()), 1)
        self.assertEqual(MyPerson.objects.get(name="Foo McBar").id, person.id)
        self.assertFalse(MyPerson.objects.get(id=person.id).has_special_name())

    def test_no_proxy(self):
        """
        Person is not proxied by StatusPerson subclass.
        """
        Person.objects.create(name="Foo McBar")
        self.assertEqual(list(StatusPerson.objects.all()), [])

    def test_basic_proxy_reverse(self):
        """
        A new MyPerson also shows up as a standard Person.
        """
        MyPerson.objects.create(name="Bazza del Frob")
        self.assertEqual(len(MyPerson.objects.all()), 1)
        self.assertEqual(len(Person.objects.all()), 1)

        LowerStatusPerson.objects.create(status="low", name="homer")
        lsps = [lsp.name for lsp in LowerStatusPerson.objects.all()]
        self.assertEqual(lsps, ["homer"])

    def test_correct_type_proxy_of_proxy(self):
        """
        Correct type when querying a proxy of proxy
        """
        Person.objects.create(name="Foo McBar")
        MyPerson.objects.create(name="Bazza del Frob")
        LowerStatusPerson.objects.create(status="low", name="homer")
        pp = sorted([mpp.name for mpp in MyPersonProxy.objects.all()])
        self.assertEqual(pp, ['Bazza del Frob', 'Foo McBar', 'homer'])

    def test_proxy_included_in_ancestors(self):
        """
        Proxy models are included in the ancestors for a model's DoesNotExist
        and MultipleObjectsReturned
        """
        Person.objects.create(name="Foo McBar")
        MyPerson.objects.create(name="Bazza del Frob")
        LowerStatusPerson.objects.create(status="low", name="homer")
        max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']

        self.assertRaises(Person.DoesNotExist,
            MyPersonProxy.objects.get,
            name='Zathras'
        )
        self.assertRaises(Person.MultipleObjectsReturned,
            MyPersonProxy.objects.get,
            id__lt=max_id+1
        )
        self.assertRaises(Person.DoesNotExist,
            StatusPerson.objects.get,
            name='Zathras'
        )

        sp1 = StatusPerson.objects.create(name='Bazza Jr.')
        sp2 = StatusPerson.objects.create(name='Foo Jr.')
        max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']

        self.assertRaises(Person.MultipleObjectsReturned,
            StatusPerson.objects.get,
            id__lt=max_id+1
        )

    def test_abc(self):
        """
        All base classes must be non-abstract
        """
        def build_abc():
            class NoAbstract(Abstract):
                class Meta:
                    proxy = True
        self.assertRaises(TypeError, build_abc)

    def test_no_cbc(self):
        """
        The proxy must actually have one concrete base class
        """
        def build_no_cbc():
            class TooManyBases(Person, Abstract):
                class Meta:
                    proxy = True
        self.assertRaises(TypeError, build_no_cbc)

    def test_no_base_classes(self):
        # A proxy with no base classes at all is rejected.
        def build_no_base_classes():
            class NoBaseClasses(models.Model):
                class Meta:
                    proxy = True
        self.assertRaises(TypeError, build_no_base_classes)

    def test_new_fields(self):
        # Proxy models may not declare new fields.
        def build_new_fields():
            class NoNewFields(Person):
                newfield = models.BooleanField()
                class Meta:
                    proxy = True
        self.assertRaises(FieldError, build_new_fields)

    def test_myperson_manager(self):
        # MyPerson's default manager filters out some rows
        # (here, "wilma" is excluded).
        Person.objects.create(name="fred")
        Person.objects.create(name="wilma")
        Person.objects.create(name="barney")

        resp = [p.name for p in MyPerson.objects.all()]
        self.assertEqual(resp, ['barney', 'fred'])

        resp = [p.name for p in MyPerson._default_manager.all()]
        self.assertEqual(resp, ['barney', 'fred'])

    def test_otherperson_manager(self):
        # OtherPerson exposes two managers with different filtering;
        # `objects` is the default one.
        Person.objects.create(name="fred")
        Person.objects.create(name="wilma")
        Person.objects.create(name="barney")

        resp = [p.name for p in OtherPerson.objects.all()]
        self.assertEqual(resp, ['barney', 'wilma'])

        resp = [p.name for p in OtherPerson.excluder.all()]
        self.assertEqual(resp, ['barney', 'fred'])

        resp = [p.name for p in OtherPerson._default_manager.all()]
        self.assertEqual(resp, ['barney', 'wilma'])

    def test_permissions_created(self):
        # Proxy model Meta permissions must be created like any other.
        from django.contrib.auth.models import Permission
        try:
            Permission.objects.get(name="May display users information")
        except Permission.DoesNotExist:
            self.fail("The permission 'May display users information' has not been created")

    def test_proxy_model_signals(self):
        """
        Test save signals for proxy models
        """
        output = []

        def make_handler(model, event):
            def _handler(*args, **kwargs):
                output.append('%s %s save' % (model, event))
            return _handler

        h1 = make_handler('MyPerson', 'pre')
        h2 = make_handler('MyPerson', 'post')
        h3 = make_handler('Person', 'pre')
        h4 = make_handler('Person', 'post')

        signals.pre_save.connect(h1, sender=MyPerson)
        signals.post_save.connect(h2, sender=MyPerson)
        signals.pre_save.connect(h3, sender=Person)
        signals.post_save.connect(h4, sender=Person)

        dino = MyPerson.objects.create(name=u"dino")
        self.assertEqual(output, [
            'MyPerson pre save',
            'MyPerson post save'
        ])

        output = []

        h5 = make_handler('MyPersonProxy', 'pre')
        h6 = make_handler('MyPersonProxy', 'post')

        signals.pre_save.connect(h5, sender=MyPersonProxy)
        signals.post_save.connect(h6, sender=MyPersonProxy)

        dino = MyPersonProxy.objects.create(name=u"pebbles")
        self.assertEqual(output, [
            'MyPersonProxy pre save',
            'MyPersonProxy post save'
        ])

        # Disconnect everything so later tests see no stray handlers.
        signals.pre_save.disconnect(h1, sender=MyPerson)
        signals.post_save.disconnect(h2, sender=MyPerson)
        signals.pre_save.disconnect(h3, sender=Person)
        signals.post_save.disconnect(h4, sender=Person)
        signals.pre_save.disconnect(h5, sender=MyPersonProxy)
        signals.post_save.disconnect(h6, sender=MyPersonProxy)

    def test_content_type(self):
        # A proxy shares its concrete model's ContentType.
        ctype = ContentType.objects.get_for_model
        self.assertTrue(ctype(Person) is ctype(OtherPerson))

    def test_user_userproxy_userproxyproxy(self):
        # Rows are visible through the model, its proxy, and a proxy
        # of the proxy.
        User.objects.create(name='Bruce')

        resp = [u.name for u in User.objects.all()]
        self.assertEqual(resp, ['Bruce'])

        resp = [u.name for u in UserProxy.objects.all()]
        self.assertEqual(resp, ['Bruce'])

        resp = [u.name for u in UserProxyProxy.objects.all()]
        self.assertEqual(resp, ['Bruce'])

    def test_proxy_for_model(self):
        # proxy_for_model points to the immediate proxied model.
        self.assertEqual(UserProxy, UserProxyProxy._meta.proxy_for_model)

    def test_concrete_model(self):
        # concrete_model resolves through the whole proxy chain.
        self.assertEqual(User, UserProxyProxy._meta.concrete_model)

    def test_proxy_delete(self):
        """
        Proxy objects can be deleted
        """
        User.objects.create(name='Bruce')
        u2 = UserProxy.objects.create(name='George')

        resp = [u.name for u in UserProxy.objects.all()]
        self.assertEqual(resp, ['Bruce', 'George'])

        u2.delete()

        resp = [u.name for u in UserProxy.objects.all()]
        self.assertEqual(resp, ['Bruce'])

    def test_select_related(self):
        """
        We can still use `select_related()` to include related models in our
        querysets.
        """
        country = Country.objects.create(name='Australia')
        state = State.objects.create(name='New South Wales', country=country)

        resp = [s.name for s in State.objects.select_related()]
        self.assertEqual(resp, ['New South Wales'])

        resp = [s.name for s in StateProxy.objects.select_related()]
        self.assertEqual(resp, ['New South Wales'])

        self.assertEqual(StateProxy.objects.get(name='New South Wales').name,
            'New South Wales')

        resp = StateProxy.objects.select_related().get(name='New South Wales')
        self.assertEqual(resp.name, 'New South Wales')

    def test_proxy_bug(self):
        # Filtering and select_related through proxies, proxies of
        # proxies, and related fields that are proxies.
        contributor = TrackerUser.objects.create(name='Contributor',
            status='contrib')
        someone = BaseUser.objects.create(name='Someone')
        Bug.objects.create(summary='fix this', version='1.1beta',
            assignee=contributor, reporter=someone)
        pcontributor = ProxyTrackerUser.objects.create(name='OtherContributor',
            status='proxy')
        Improvement.objects.create(summary='improve that', version='1.1beta',
            assignee=contributor, reporter=pcontributor,
            associated_bug=ProxyProxyBug.objects.all()[0])

        # Related field filter on proxy
        resp = ProxyBug.objects.get(version__icontains='beta')
        self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')

        # Select related + filter on proxy
        resp = ProxyBug.objects.select_related().get(version__icontains='beta')
        self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')

        # Proxy of proxy, select_related + filter
        resp = ProxyProxyBug.objects.select_related().get(
            version__icontains='beta'
        )
        self.assertEqual(repr(resp), '<ProxyProxyBug: ProxyProxyBug:fix this>')

        # Select related + filter on a related proxy field
        resp = ProxyImprovement.objects.select_related().get(
            reporter__name__icontains='butor'
        )
        self.assertEqual(repr(resp),
            '<ProxyImprovement: ProxyImprovement:improve that>'
        )

        # Select related + filter on a related proxy of proxy field
        resp = ProxyImprovement.objects.select_related().get(
            associated_bug__summary__icontains='fix'
        )
        self.assertEqual(repr(resp),
            '<ProxyImprovement: ProxyImprovement:improve that>'
        )

    def test_proxy_load_from_fixture(self):
        # Fixture rows for the concrete model load through the proxy.
        management.call_command('loaddata', 'mypeople.json', verbosity=0, commit=False)
        p = MyPerson.objects.get(pk=100)
        self.assertEqual(p.name, 'Elvis Presley')
| bsd-3-clause |
qtumproject/qtum | test/functional/test_framework/segwit_addr.py | 88 | 3478 | #!/usr/bin/env python3
# Copyright (c) 2017 Pieter Wuille
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Reference implementation for Bech32 and segwit addresses."""
CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
def bech32_polymod(values):
    """Internal function that computes the Bech32 checksum."""
    # Generator coefficients of the BCH code used by Bech32.
    GEN = (0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3)
    chk = 1
    for value in values:
        top = chk >> 25
        chk = ((chk & 0x1ffffff) << 5) ^ value
        for i, coeff in enumerate(GEN):
            if (top >> i) & 1:
                chk ^= coeff
    return chk
def bech32_hrp_expand(hrp):
    """Expand the HRP into values for checksum computation."""
    # High 3 bits of each char, a zero separator, then the low 5 bits.
    high = [ord(ch) >> 5 for ch in hrp]
    low = [ord(ch) & 31 for ch in hrp]
    return high + [0] + low
def bech32_verify_checksum(hrp, data):
    """Verify a checksum given HRP and converted data characters."""
    values = bech32_hrp_expand(hrp) + data
    return bech32_polymod(values) == 1
def bech32_create_checksum(hrp, data):
    """Compute the checksum values given HRP and data."""
    # Six zero placeholders stand in for the checksum positions.
    polymod = bech32_polymod(bech32_hrp_expand(hrp) + data + [0] * 6) ^ 1
    return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]
def bech32_encode(hrp, data):
    """Compute a Bech32 string given HRP and data values."""
    checksum = bech32_create_checksum(hrp, data)
    body = ''.join(CHARSET[d] for d in data + checksum)
    return hrp + '1' + body
def bech32_decode(bech):
    """Validate a Bech32 string, and determine HRP and data."""
    # Reject out-of-range characters and mixed-case strings.
    if any(ord(ch) < 33 or ord(ch) > 126 for ch in bech):
        return (None, None)
    if bech.lower() != bech and bech.upper() != bech:
        return (None, None)
    bech = bech.lower()
    pos = bech.rfind('1')
    # HRP must be non-empty; 6 checksum chars; total length <= 90.
    if pos < 1 or pos + 7 > len(bech) or len(bech) > 90:
        return (None, None)
    if not all(ch in CHARSET for ch in bech[pos+1:]):
        return (None, None)
    hrp = bech[:pos]
    data = [CHARSET.find(ch) for ch in bech[pos+1:]]
    if not bech32_verify_checksum(hrp, data):
        return (None, None)
    # Strip the 6 checksum values from the returned data.
    return (hrp, data[:-6])
def convertbits(data, frombits, tobits, pad=True):
    """General power-of-2 base conversion."""
    out = []
    accumulator = 0
    nbits = 0
    out_mask = (1 << tobits) - 1
    acc_mask = (1 << (frombits + tobits - 1)) - 1
    for value in data:
        # Every input group must be a non-negative `frombits`-bit value.
        if value < 0 or (value >> frombits):
            return None
        accumulator = ((accumulator << frombits) | value) & acc_mask
        nbits += frombits
        while nbits >= tobits:
            nbits -= tobits
            out.append((accumulator >> nbits) & out_mask)
    if pad:
        if nbits:
            # Left-justify the leftover bits into one final group.
            out.append((accumulator << (tobits - nbits)) & out_mask)
        return out
    # Without padding, leftovers must be fewer than frombits bits, all zero.
    if nbits >= frombits or ((accumulator << (tobits - nbits)) & out_mask):
        return None
    return out
def decode(hrp, addr):
    """Decode a segwit address."""
    got_hrp, data = bech32_decode(addr)
    if got_hrp != hrp:
        return (None, None)
    # First data value is the witness version; the rest is the program.
    program = convertbits(data[1:], 5, 8, False)
    if program is None or not 2 <= len(program) <= 40:
        return (None, None)
    witver = data[0]
    if witver > 16:
        return (None, None)
    # Version 0 programs must be exactly 20 or 32 bytes.
    if witver == 0 and len(program) not in (20, 32):
        return (None, None)
    return (witver, program)
def encode(hrp, witver, witprog):
    """Encode a segwit address."""
    addr = bech32_encode(hrp, [witver] + convertbits(witprog, 8, 5))
    # Round-trip through decode() as a sanity check on the inputs.
    if decode(hrp, addr) == (None, None):
        return None
    return addr
ahmedRguei/job | typo3conf/ext/extension_builder/Resources/Public/jsDomainModeling/node_modules/node-gyp/gyp/pylib/gyp/MSVSVersion.py | 1509 | 17165 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
class VisualStudioVersion(object):
  """Information regarding a single Visual Studio release."""

  def __init__(self, short_name, description,
               solution_version, project_version, flat_sln, uses_vcxproj,
               path, sdk_based, default_toolset=None):
    self.short_name = short_name
    self.description = description
    self.solution_version = solution_version
    self.project_version = project_version
    self.flat_sln = flat_sln
    self.uses_vcxproj = uses_vcxproj
    self.path = path
    self.sdk_based = sdk_based
    self.default_toolset = default_toolset

  def ShortName(self):
    """Short version name, e.g. '2013' ('e' suffix marks Express)."""
    return self.short_name

  def Description(self):
    """Full description of this version."""
    return self.description

  def SolutionVersion(self):
    """Version number written into .sln files."""
    return self.solution_version

  def ProjectVersion(self):
    """Version number written into vcproj/vcxproj files."""
    return self.project_version

  def FlatSolution(self):
    """Whether solutions are generated flat (no folders)."""
    return self.flat_sln

  def UsesVcxproj(self):
    """True if this version uses a vcxproj file."""
    return self.uses_vcxproj

  def ProjectExtension(self):
    """File extension for project files ('.vcxproj' or '.vcproj')."""
    return '.vcxproj' if self.uses_vcxproj else '.vcproj'

  def Path(self):
    """Path to the Visual Studio installation."""
    return self.path

  def ToolPath(self, tool):
    """Path to a given compiler tool under VC/bin."""
    return os.path.normpath(os.path.join(self.path, "VC/bin", tool))

  def DefaultToolset(self):
    """MSBuild toolset used when the user does not override it."""
    return self.default_toolset

  def SetupScript(self, target_arch):
    """Returns a command (with arguments) that sets up the build
    environment for |target_arch| ('x86' or 'x64')."""
    assert target_arch in ('x86', 'x64')
    sdk_dir = os.environ.get('WindowsSDKDir')
    if self.sdk_based and sdk_dir:
      # Running inside the SDK command-line environment: use the SDK's
      # own setup script.
      return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
              '/' + target_arch]
    host_is_64bit = (os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
                     os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64')
    is_express = self.short_name[-1] == 'e'
    if target_arch == 'x86':
      if self.short_name >= '2013' and not is_express and host_is_64bit:
        # VS2013 and later, non-Express, on a 64-bit host: prefer the
        # x64->x86 cross compiler.
        return [os.path.normpath(
            os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
      # Standard x86 compiler.  vsvars32.bat is used instead of
      # vcvarsall.bat because the latter needs VS??COMNTOOLS, which
      # isn't always set.
      return [os.path.normpath(
          os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
    # x64: use the native 64-bit compiler when available (non-Express on
    # a 64-bit OS), otherwise the x86->x64 cross compiler.
    arg = 'amd64' if (not is_express and host_is_64bit) else 'x86_amd64'
    return [os.path.normpath(
        os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
def _RegistryQueryBase(sysdir, key, value):
    """Use reg.exe to read a particular key.

    While ideally we might use the win32 module, we would like gyp to be
    python neutral, so for instance cygwin python lacks this module.

    Arguments:
      sysdir: The system subdirectory to attempt to launch reg.exe from.
      key: The registry key to read from.
      value: The particular value to read.
    Return:
      stdout from reg.exe, or None for failure.
    """
    # Registry queries only make sense on Windows (or cygwin).
    if sys.platform not in ('win32', 'cygwin'):
        return None
    # Build the reg.exe command line; '/v value' is only appended when a
    # specific value (rather than the whole key) was requested.
    cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
           'query', key]
    if value:
        cmd += ['/v', value]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    # Read to the end so proc.returncode is valid; note that the error text
    # may be on stderr in some cases.
    stdout_text = proc.communicate()[0]
    # reg.exe officially reports 0 == success and 1 == error.
    return None if proc.returncode else stdout_text
def _RegistryQuery(key, value=None):
    r"""Use reg.exe to read a particular key through _RegistryQueryBase.

    First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
    that fails, it falls back to System32. Sysnative is available on Vista and
    up and available on Windows Server 2003 and XP through KB patch 942589. Note
    that Sysnative will always fail if using 64-bit python due to it being a
    virtual directory and System32 will work correctly in the first place.
    KB 942589 - http://support.microsoft.com/kb/942589/en-us.

    Arguments:
      key: The registry key.
      value: The particular registry value to read (optional).
    Return:
      stdout from reg.exe, or None for failure.
    """
    text = None
    try:
        text = _RegistryQueryBase('Sysnative', key, value)
    except OSError as e:
        # Fixed: `except OSError as e` replaces the legacy comma form
        # (`except OSError, e`), which is a hard syntax error on Python 3
        # while `as` is valid on Python 2.6+ as well.
        if e.errno == errno.ENOENT:
            # Sysnative doesn't exist (e.g. 32-bit OS): fall back to System32.
            text = _RegistryQueryBase('System32', key, value)
        else:
            raise
    return text
def _RegistryGetValueUsingWinReg(key, value):
    """Use the _winreg module to obtain the value of a registry key.

    Args:
      key: The registry key.
      value: The particular registry value to read.
    Return:
      contents of the registry key's value, or None on failure.  Throws
      ImportError if _winreg is unavailable.
    """
    # Imported lazily (Python 2, Windows only) so that callers on platforms
    # without _winreg get an ImportError and can fall back to reg.exe.
    import _winreg
    try:
        root, subkey = key.split('\\', 1)
        assert root == 'HKLM'  # Only need HKLM for now.
        with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
            # QueryValueEx returns (data, type); only the data is wanted.
            return _winreg.QueryValueEx(hkey, value)[0]
    except WindowsError:
        # Missing key/value (or access denied) is treated as "not found".
        return None
def _RegistryGetValue(key, value):
    """Use _winreg or reg.exe to obtain the value of a registry key.

    Using _winreg is preferable because it solves an issue on some corporate
    environments where access to reg.exe is locked down. However, we still need
    to fallback to reg.exe for the case where the _winreg module is not available
    (for example in cygwin python).

    Args:
      key: The registry key.
      value: The particular registry value to read.
    Return:
      contents of the registry key's value, or None on failure.
    """
    try:
        return _RegistryGetValueUsingWinReg(key, value)
    except ImportError:
        # _winreg unavailable (e.g. cygwin python): fall back to reg.exe.
        pass
    output = _RegistryQuery(key, value)
    if not output:
        return None
    # reg.exe output looks like "    <name>    REG_SZ    <data>\r\n";
    # extract the data portion.
    found = re.search(r'REG_\w+\s+([^\r]+)\r\n', output)
    return found.group(1) if found else None
def _CreateVersion(name, path, sdk_based=False):
    """Sets up MSVS project generation.

    Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
    autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
    passed in that doesn't match a value in versions python will throw a error.
    """
    if path:
        path = os.path.normpath(path)
    # Table of every supported MSVS flavour keyed by short name; a trailing
    # 'e' denotes an Express edition (which requires flat solution files).
    versions = {
        '2015': VisualStudioVersion('2015',
                                    'Visual Studio 2015',
                                    solution_version='12.00',
                                    project_version='14.0',
                                    flat_sln=False,
                                    uses_vcxproj=True,
                                    path=path,
                                    sdk_based=sdk_based,
                                    default_toolset='v140'),
        '2013': VisualStudioVersion('2013',
                                    'Visual Studio 2013',
                                    solution_version='13.00',
                                    project_version='12.0',
                                    flat_sln=False,
                                    uses_vcxproj=True,
                                    path=path,
                                    sdk_based=sdk_based,
                                    default_toolset='v120'),
        '2013e': VisualStudioVersion('2013e',
                                     'Visual Studio 2013',
                                     solution_version='13.00',
                                     project_version='12.0',
                                     flat_sln=True,
                                     uses_vcxproj=True,
                                     path=path,
                                     sdk_based=sdk_based,
                                     default_toolset='v120'),
        '2012': VisualStudioVersion('2012',
                                    'Visual Studio 2012',
                                    solution_version='12.00',
                                    project_version='4.0',
                                    flat_sln=False,
                                    uses_vcxproj=True,
                                    path=path,
                                    sdk_based=sdk_based,
                                    default_toolset='v110'),
        '2012e': VisualStudioVersion('2012e',
                                     'Visual Studio 2012',
                                     solution_version='12.00',
                                     project_version='4.0',
                                     flat_sln=True,
                                     uses_vcxproj=True,
                                     path=path,
                                     sdk_based=sdk_based,
                                     default_toolset='v110'),
        # 2010 and earlier have no default_toolset.
        '2010': VisualStudioVersion('2010',
                                    'Visual Studio 2010',
                                    solution_version='11.00',
                                    project_version='4.0',
                                    flat_sln=False,
                                    uses_vcxproj=True,
                                    path=path,
                                    sdk_based=sdk_based),
        '2010e': VisualStudioVersion('2010e',
                                     'Visual C++ Express 2010',
                                     solution_version='11.00',
                                     project_version='4.0',
                                     flat_sln=True,
                                     uses_vcxproj=True,
                                     path=path,
                                     sdk_based=sdk_based),
        # Pre-2010 versions still use the old .vcproj format.
        '2008': VisualStudioVersion('2008',
                                    'Visual Studio 2008',
                                    solution_version='10.00',
                                    project_version='9.00',
                                    flat_sln=False,
                                    uses_vcxproj=False,
                                    path=path,
                                    sdk_based=sdk_based),
        '2008e': VisualStudioVersion('2008e',
                                     'Visual Studio 2008',
                                     solution_version='10.00',
                                     project_version='9.00',
                                     flat_sln=True,
                                     uses_vcxproj=False,
                                     path=path,
                                     sdk_based=sdk_based),
        '2005': VisualStudioVersion('2005',
                                    'Visual Studio 2005',
                                    solution_version='9.00',
                                    project_version='8.00',
                                    flat_sln=False,
                                    uses_vcxproj=False,
                                    path=path,
                                    sdk_based=sdk_based),
        '2005e': VisualStudioVersion('2005e',
                                     'Visual Studio 2005',
                                     solution_version='9.00',
                                     project_version='8.00',
                                     flat_sln=True,
                                     uses_vcxproj=False,
                                     path=path,
                                     sdk_based=sdk_based),
    }
    # KeyError here means the caller asked for an unsupported version.
    return versions[str(name)]
def _ConvertToCygpath(path):
    """Convert to cygwin path if we are using cygwin."""
    if sys.platform != 'cygwin':
        return path
    # Ask the cygpath utility for the POSIX-style equivalent of |path|.
    proc = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
    return proc.communicate()[0].strip()
def _DetectVisualStudioVersions(versions_to_check, force_express):
    """Collect the list of installed visual studio versions.

    Returns:
      A list of visual studio versions installed in descending order of
      usage preference.
      Base this on the registry and a quick check if devenv.exe exists.
      Only versions 8-10 are considered.
      Possibilities are:
        2005(e) - Visual Studio 2005 (8)
        2008(e) - Visual Studio 2008 (9)
        2010(e) - Visual Studio 2010 (10)
        2012(e) - Visual Studio 2012 (11)
        2013(e) - Visual Studio 2013 (12)
        2015    - Visual Studio 2015 (14)
      Where (e) is e for express editions of MSVS and blank otherwise.
    """
    # Maps internal registry version numbers to user-facing year names.
    version_to_year = {
        '8.0': '2005',
        '9.0': '2008',
        '10.0': '2010',
        '11.0': '2012',
        '12.0': '2013',
        '14.0': '2015',
    }
    versions = []
    for version in versions_to_check:
        # Old method of searching for which VS version is installed
        # We don't use the 2010-encouraged-way because we also want to get the
        # path to the binaries, which it doesn't offer.
        keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
                r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
                r'HKLM\Software\Microsoft\VCExpress\%s' % version,
                r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
        for index in range(len(keys)):
            path = _RegistryGetValue(keys[index], 'InstallDir')
            if not path:
                continue
            path = _ConvertToCygpath(path)
            # Check for full.
            # NOTE(review): InstallDir presumably points at Common7/IDE,
            # hence the double '..' below to reach the install root — verify.
            full_path = os.path.join(path, 'devenv.exe')
            express_path = os.path.join(path, '*express.exe')
            if not force_express and os.path.exists(full_path):
                # Add this one.
                versions.append(_CreateVersion(version_to_year[version],
                    os.path.join(path, '..', '..')))
            # Check for express.
            elif glob.glob(express_path):
                # Add this one.
                versions.append(_CreateVersion(version_to_year[version] + 'e',
                    os.path.join(path, '..', '..')))
        # The old method above does not work when only SDK is installed.
        keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
                r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
        for index in range(len(keys)):
            path = _RegistryGetValue(keys[index], version)
            if not path:
                continue
            path = _ConvertToCygpath(path)
            if version != '14.0':  # There is no Express edition for 2015.
                versions.append(_CreateVersion(version_to_year[version] + 'e',
                    os.path.join(path, '..'), sdk_based=True))
    return versions
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
    """Select which version of Visual Studio projects to generate.

    Arguments:
      version: Hook to allow caller to force a particular version (vs auto).
    Returns:
      An object representing a visual studio project format version.
    """
    # In auto mode, check environment variable for override.
    if version == 'auto':
        version = os.environ.get('GYP_MSVS_VERSION', 'auto')
    # Registry version numbers to probe for each short name; the 'auto'
    # tuple is the probe order (11.0/VS2012 is probed last).
    version_map = {
        'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
        '2005': ('8.0',),
        '2005e': ('8.0',),
        '2008': ('9.0',),
        '2008e': ('9.0',),
        '2010': ('10.0',),
        '2010e': ('10.0',),
        '2012': ('11.0',),
        '2012e': ('11.0',),
        '2013': ('12.0',),
        '2013e': ('12.0',),
        '2015': ('14.0',),
    }
    # An explicit override path skips detection entirely, but requires the
    # user to also say which version lives there.
    override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
    if override_path:
        msvs_version = os.environ.get('GYP_MSVS_VERSION')
        if not msvs_version:
            raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
                             'set to a particular version (e.g. 2010e).')
        return _CreateVersion(msvs_version, override_path, sdk_based=True)
    version = str(version)
    # A trailing 'e' in the requested short name restricts detection to
    # Express editions.
    versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
    if not versions:
        if not allow_fallback:
            raise ValueError('Could not locate Visual Studio installation.')
        if version == 'auto':
            # Default to 2005 if we couldn't find anything
            return _CreateVersion('2005', None)
        else:
            return _CreateVersion(version, None)
    return versions[0]
| gpl-2.0 |
elite-lang/eobject | conanfile.py | 1 | 1107 | from conans import ConanFile, CMake, tools
class eobjectConan(ConanFile):
    """Conan package recipe for the eobject library."""
    name = "eobject"
    version = "0.1.2"
    license = "MIT"
    url = "https://github.com/elite-lang/eobject"
    settings = "os", "compiler", "build_type", "arch"
    options = {"shared": [True, False]}
    default_options = "shared=False", "gtest:shared=False"
    generators = "cmake"
    build_policy = "missing"
    requires = 'gtest/1.8.0@lasote/stable'
    exports = "*"

    def build(self):
        # Propagate the `shared` option into the CMake configure step.
        cmake = CMake(self)
        cmake.configure(defs={"BUILD_SHARED_LIBS": self.options.shared})
        cmake.build()

    def package(self):
        # Headers first, then every flavour of built artifact plus the
        # license, in the same order as before.
        self.copy("*.h", dst="include", src="include")
        for pattern, dest in (("*.lib", "lib"), ("*.dll", "bin"),
                              ("*.so", "lib"), ("*.dylib", "lib"),
                              ("*.a", "lib"), ("LICENSE", ".")):
            self.copy(pattern, dst=dest, keep_path=False)

    def package_info(self):
        # Consumers link against a single library named after the package.
        self.cpp_info.libs = [self.name]
| mit |
thnee/ansible | lib/ansible/modules/packaging/os/yum.py | 7 | 66737 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Red Hat, Inc
# Written by Seth Vidal <skvidal at fedoraproject.org>
# Copyright: (c) 2014, Epic Games, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make Python 2 behave like Python 3 for imports, division and print.
from __future__ import absolute_import, division, print_function
# New-style classes everywhere on Python 2.
__metaclass__ = type

# Plugin metadata consumed by ansible-doc and the plugin loader.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}
DOCUMENTATION = '''
---
module: yum
version_added: historical
short_description: Manages packages with the I(yum) package manager
description:
- Installs, upgrade, downgrades, removes, and lists packages and groups with the I(yum) package manager.
- This module only works on Python 2. If you require Python 3 support see the M(dnf) module.
options:
use_backend:
description:
- This module supports C(yum) (as it always has), this is known as C(yum3)/C(YUM3)/C(yum-deprecated) by
upstream yum developers. As of Ansible 2.7+, this module also supports C(YUM4), which is the
"new yum" and it has an C(dnf) backend.
- By default, this module will select the backend based on the C(ansible_pkg_mgr) fact.
default: "auto"
choices: [ auto, yum, yum4, dnf ]
version_added: "2.7"
name:
description:
- A package name or package specifier with version, like C(name-1.0).
- If a previous version is specified, the task also needs to turn C(allow_downgrade) on.
See the C(allow_downgrade) documentation for caveats with downgrading packages.
- When using state=latest, this can be C('*') which means run C(yum -y update).
- You can also pass a url or a local path to a rpm file (using state=present).
To operate on several packages this can accept a comma separated string of packages or (as of 2.0) a list of packages.
aliases: [ pkg ]
exclude:
description:
- Package name(s) to exclude when state=present, or latest
version_added: "2.0"
list:
description:
- "Package name to run the equivalent of yum list --show-duplicates <package> against. In addition to listing packages,
you can also list the following: C(installed), C(updates), C(available) and C(repos)."
- This parameter is mutually exclusive with C(name).
state:
description:
- Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package.
- C(present) and C(installed) will simply ensure that a desired package is installed.
- C(latest) will update the specified package if it's not of the latest available version.
- C(absent) and C(removed) will remove the specified package.
- Default is C(None), however in effect the default action is C(present) unless the C(autoremove) option is
enabled for this module, then C(absent) is inferred.
choices: [ absent, installed, latest, present, removed ]
enablerepo:
description:
- I(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a C(",").
- As of Ansible 2.7, this can alternatively be a list instead of C(",")
separated string
version_added: "0.9"
disablerepo:
description:
- I(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a C(",").
- As of Ansible 2.7, this can alternatively be a list instead of C(",")
separated string
version_added: "0.9"
conf_file:
description:
- The remote yum configuration file to use for the transaction.
version_added: "0.6"
disable_gpg_check:
description:
- Whether to disable the GPG checking of signatures of packages being
installed. Has an effect only if state is I(present) or I(latest).
type: bool
default: "no"
version_added: "1.2"
skip_broken:
description:
- Skip packages with broken dependencies (depsolve) that are causing problems.
type: bool
default: "no"
version_added: "2.3"
update_cache:
description:
- Force yum to check if cache is out of date and redownload if needed.
Has an effect only if state is I(present) or I(latest).
type: bool
default: "no"
aliases: [ expire-cache ]
version_added: "1.9"
validate_certs:
description:
- This only applies if using a https url as the source of the rpm. e.g. for localinstall. If set to C(no), the SSL certificates will not be validated.
- This should only be set to C(no) on personally controlled sites using self-signed certificates, as it avoids verifying the source site.
- Prior to 2.1 the code worked as if this was set to C(yes).
type: bool
default: "yes"
version_added: "2.1"
update_only:
description:
- When using latest, only update installed packages. Do not install packages.
- Has an effect only if state is I(latest)
default: "no"
type: bool
version_added: "2.5"
installroot:
description:
- Specifies an alternative installroot, relative to which all packages
will be installed.
default: "/"
version_added: "2.3"
security:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked security related.
type: bool
default: "no"
version_added: "2.4"
bugfix:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked bugfix related.
default: "no"
version_added: "2.6"
allow_downgrade:
description:
- Specify if the named package and version is allowed to downgrade
a maybe already installed higher version of that package.
Note that setting allow_downgrade=True can make this module
behave in a non-idempotent way. The task could end up with a set
of packages that does not match the complete list of specified
packages to install (because dependencies between the downgraded
package and others can cause changes to the packages which were
in the earlier transaction).
type: bool
default: "no"
version_added: "2.4"
enable_plugin:
description:
- I(Plugin) name to enable for the install/update operation.
The enabled plugin will not persist beyond the transaction.
version_added: "2.5"
disable_plugin:
description:
- I(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyond the transaction.
version_added: "2.5"
releasever:
description:
- Specifies an alternative release from which all packages will be
installed.
version_added: "2.7"
autoremove:
description:
- If C(yes), removes all "leaf" packages from the system that were originally
installed as dependencies of user-installed packages but which are no longer
required by any such package. Should be used alone or when state is I(absent)
- "NOTE: This feature requires yum >= 3.4.3 (RHEL/CentOS 7+)"
type: bool
default: "no"
version_added: "2.7"
disable_excludes:
description:
- Disable the excludes defined in YUM config files.
- If set to C(all), disables all excludes.
- If set to C(main), disable excludes defined in [main] in yum.conf.
- If set to C(repoid), disable excludes defined for given repo id.
version_added: "2.7"
download_only:
description:
- Only download the packages, do not install them.
default: "no"
type: bool
version_added: "2.7"
lock_timeout:
description:
- Amount of time to wait for the yum lockfile to be freed.
required: false
default: 30
type: int
version_added: "2.8"
install_weak_deps:
description:
- Will also install all packages linked by a weak dependency relation.
- "NOTE: This feature requires yum >= 4 (RHEL/CentOS 8+)"
type: bool
default: "yes"
version_added: "2.8"
download_dir:
description:
- Specifies an alternate directory to store packages.
- Has an effect only if I(download_only) is specified.
type: str
version_added: "2.8"
notes:
- When used with a `loop:` each package will be processed individually,
it is much more efficient to pass the list directly to the `name` option.
- In versions prior to 1.9.2 this module installed and removed each package
given to the yum module separately. This caused problems when packages
specified by filename or url had to be installed or removed together. In
1.9.2 this was fixed so that packages are installed in one yum
transaction. However, if one of the packages adds a new yum repository
that the other packages come from (such as epel-release) then that package
needs to be installed in a separate task. This mimics yum's command line
behaviour.
- 'Yum itself has two types of groups. "Package groups" are specified in the
rpm itself while "environment groups" are specified in a separate file
(usually by the distribution). Unfortunately, this division becomes
apparent to ansible users because ansible needs to operate on the group
of packages in a single transaction and yum requires groups to be specified
in different ways when used in that way. Package groups are specified as
"@development-tools" and environment groups are "@^gnome-desktop-environment".
Use the "yum group list hidden ids" command to see which category of group the group
you want to install falls into.'
- 'The yum module does not support clearing yum cache in an idempotent way, so it
was decided not to implement it, the only method is to use command and call the yum
command directly, namely "command: yum clean all"
https://github.com/ansible/ansible/pull/31450#issuecomment-352889579'
# informational: requirements for nodes
requirements:
- yum
author:
- Ansible Core Team
- Seth Vidal (@skvidal)
- Eduard Snesarev (@verm666)
- Berend De Schouwer (@berenddeschouwer)
- Abhijeet Kasurde (@Akasurde)
- Adam Miller (@maxamillion)
'''
EXAMPLES = '''
- name: install the latest version of Apache
yum:
name: httpd
state: latest
- name: ensure a list of packages installed
yum:
name: "{{ packages }}"
vars:
packages:
- httpd
- httpd-tools
- name: remove the Apache package
yum:
name: httpd
state: absent
- name: install the latest version of Apache from the testing repo
yum:
name: httpd
enablerepo: testing
state: present
- name: install one specific version of Apache
yum:
name: httpd-2.2.29-1.4.amzn1
state: present
- name: upgrade all packages
yum:
name: '*'
state: latest
- name: upgrade all packages, excluding kernel & foo related packages
yum:
name: '*'
state: latest
exclude: kernel*,foo*
- name: install the nginx rpm from a remote repo
yum:
name: http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm
state: present
- name: install nginx rpm from a local file
yum:
name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
state: present
- name: install the 'Development tools' package group
yum:
name: "@Development tools"
state: present
- name: install the 'Gnome desktop' environment group
yum:
name: "@^gnome-desktop-environment"
state: present
- name: List ansible packages and register result to print with debug later.
yum:
list: ansible
register: result
- name: Install package with multiple repos enabled
yum:
name: sos
enablerepo: "epel,ol7_latest"
- name: Install package with multiple repos disabled
yum:
name: sos
disablerepo: "epel,ol7_latest"
- name: Install a list of packages
yum:
name:
- nginx
- postgresql
- postgresql-server
state: present
- name: Download the nginx package but do not install it
yum:
name:
- nginx
state: latest
download_only: true
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec

import errno
import os
import re
import tempfile

# The rpm and yum python bindings only exist on RPM-based systems; record
# their availability so the module can produce a helpful failure later.
try:
    import rpm
    HAS_RPM_PYTHON = True
except ImportError:
    HAS_RPM_PYTHON = False

try:
    import yum
    HAS_YUM_PYTHON = True
except ImportError:
    HAS_YUM_PYTHON = False

# Optional helpers for inspecting unfinished yum transactions.
try:
    from yum.misc import find_unfinished_transactions, find_ts_remaining
    from rpmUtils.miscutils import splitFilename, compareEVR
    transaction_helpers = True
except ImportError:
    transaction_helpers = False

from contextlib import contextmanager
from ansible.module_utils.urls import fetch_file

# Default rpm query format: epoch:name-version-release.arch
def_qf = "%{epoch}:%{name}-%{version}-%{release}.%{arch}"
# Lazily-resolved path to the rpm binary (filled in by is_installed).
rpmbin = None
class YumModule(YumDnf):
    """
    Yum Ansible module back-end implementation.

    Extends the shared YumDnf base with yum-specific package querying,
    lockfile handling, and repository configuration.
    """
def __init__(self, module):

    # state=installed name=pkgspec
    # state=removed name=pkgspec
    # state=latest name=pkgspec
    #
    # informational commands:
    # list=installed
    # list=updates
    # list=available
    # list=repos
    # list=pkgspec

    # This populates instance vars for all argument spec params
    super(YumModule, self).__init__(module)

    self.pkg_mgr_name = "yum"
    # PID file yum uses to serialize transactions; checked via
    # is_lockfile_pid_valid before operating.
    self.lockfile = '/var/run/yum.pid'
    # Cached YumBase instance, built lazily by the yum_base property.
    self._yum_base = None
def _enablerepos_with_error_checking(self, yumbase):
    """Enable every repo in self.enablerepo on the given YumBase.

    A missing repo is fatal when exactly one repo was requested, but only
    a warning when several were.
    """
    # NOTE: This seems unintuitive, but it mirrors yum's CLI behavior
    fail_on_missing = len(self.enablerepo) == 1
    for rid in self.enablerepo:
        try:
            yumbase.repos.enableRepo(rid)
        except yum.Errors.YumBaseError as e:
            if u'repository not found' in to_text(e):
                if fail_on_missing:
                    self.module.fail_json(msg="Repository %s not found." % rid)
                else:
                    self.module.warn("Repository %s not found." % rid)
            else:
                raise e
def is_lockfile_pid_valid(self):
    """Return True if the yum lockfile is held by a live process.

    Unlinks the lockfile when it is stale: unparsable, owned by this very
    process, or pointing at a dead or zombie PID.
    """
    try:
        try:
            with open(self.lockfile, 'r') as f:
                oldpid = int(f.readline())
        except ValueError:
            # invalid data
            os.unlink(self.lockfile)
            return False

        if oldpid == os.getpid():
            # that's us?
            os.unlink(self.lockfile)
            return False

        try:
            with open("/proc/%d/stat" % oldpid, 'r') as f:
                stat = f.readline()

            if stat.split()[2] == 'Z':
                # Zombie
                os.unlink(self.lockfile)
                return False
        except IOError:
            # either /proc is not mounted or the process is already dead
            try:
                # check the state of the process
                os.kill(oldpid, 0)
            except OSError as e:
                if e.errno == errno.ESRCH:
                    # No such process
                    os.unlink(self.lockfile)
                    return False

                self.module.fail_json(msg="Unable to check PID %s in %s: %s" % (oldpid, self.lockfile, to_native(e)))
    except (IOError, OSError) as e:
        # lockfile disappeared?
        return False

    # another copy seems to be running
    return True
@property
def yum_base(self):
    """Lazily-constructed, cached yum.YumBase configured from module params.

    Applies plugin enable/disable lists, releasever, installroot, an
    alternate conf file, a per-user cache dir, exclude overrides, and the
    enable/disable repo lists.  Fails the module on any yum error.
    """
    if self._yum_base:
        return self._yum_base
    else:
        # Only init once
        self._yum_base = yum.YumBase()
        self._yum_base.preconf.debuglevel = 0
        self._yum_base.preconf.errorlevel = 0
        self._yum_base.preconf.plugins = True
        self._yum_base.preconf.enabled_plugins = self.enable_plugin
        self._yum_base.preconf.disabled_plugins = self.disable_plugin
        if self.releasever:
            self._yum_base.preconf.releasever = self.releasever
        if self.installroot != '/':
            # do not setup installroot by default, because of error
            # CRITICAL:yum.cli:Config Error: Error accessing file for config file:////etc/yum.conf
            # in old yum version (like in CentOS 6.6)
            self._yum_base.preconf.root = self.installroot
            self._yum_base.conf.installroot = self.installroot
        if self.conf_file and os.path.exists(self.conf_file):
            self._yum_base.preconf.fn = self.conf_file
        if os.geteuid() != 0:
            # Unprivileged users need a private cache directory.
            if hasattr(self._yum_base, 'setCacheDir'):
                self._yum_base.setCacheDir()
            else:
                cachedir = yum.misc.getCacheDir()
                self._yum_base.repos.setCacheDir(cachedir)
                self._yum_base.conf.cache = 0
        if self.disable_excludes:
            self._yum_base.conf.disable_excludes = self.disable_excludes

        # A sideeffect of accessing conf is that the configuration is
        # loaded and plugins are discovered
        # (re-enters this property; safe because _yum_base is already set).
        self.yum_base.conf

        try:
            self._enablerepos_with_error_checking(self._yum_base)

            for rid in self.disablerepo:
                self.yum_base.repos.disableRepo(rid)
        except Exception as e:
            self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))

    return self._yum_base
def po_to_envra(self, po):
    """Return the 'epoch:name-version-release.arch' string for a package
    object, preferring the object's own ui_envra when it has one."""
    return po.ui_envra if hasattr(po, 'ui_envra') else (
        '%s:%s-%s-%s.%s' % (po.epoch, po.name, po.version, po.release, po.arch))
def is_group_env_installed(self, name):
    """Return True if the named yum group or environment group is installed.

    Comparison uses endswith(), presumably to tolerate '@'/'@^' prefixes
    on the requested name — verify against callers.
    """
    wanted = name.lower()

    # doGroupLists only reports environment groups on yum >= 3.4.
    supports_env_groups = yum.__version_info__ >= (3, 4)
    if supports_env_groups:
        groups_list = self.yum_base.doGroupLists(return_evgrps=True)
    else:
        groups_list = self.yum_base.doGroupLists()

    # Installed package groups are at index 0 of the result.
    for grp in groups_list[0]:
        if wanted.endswith(grp.name.lower()) or wanted.endswith(grp.groupid.lower()):
            return True

    if supports_env_groups:
        # Installed environment groups are at index 2 of the result.
        for env in groups_list[2]:
            if wanted.endswith(env.name.lower()) or wanted.endswith(env.environmentid.lower()):
                return True

    return False
def is_installed(self, repoq, pkgspec, qf=None, is_pkg=False):
    """Return the list of installed envra strings matching pkgspec.

    Uses the yum python API when repoq is falsy; otherwise shells out to
    rpm, with an rpm --whatprovides fallback.

    :param repoq: repoquery command prefix (list) or None for the yum API.
    :param pkgspec: package name/spec to look up.
    :param qf: rpm query format; defaults to envra plus a trailing newline.
    :param is_pkg: when True, skip the provides-based fallback lookup.
    """
    if qf is None:
        qf = "%{epoch}:%{name}-%{version}-%{release}.%{arch}\n"

    if not repoq:
        pkgs = []
        try:
            e, m, _ = self.yum_base.rpmdb.matchPackageNames([pkgspec])
            pkgs = e + m
            if not pkgs and not is_pkg:
                # Not a package name: maybe pkgspec is a capability/dep.
                pkgs.extend(self.yum_base.returnInstalledPackagesByDep(pkgspec))
        except Exception as e:
            self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))

        return [self.po_to_envra(p) for p in pkgs]

    else:
        global rpmbin
        if not rpmbin:
            rpmbin = self.module.get_bin_path('rpm', required=True)

        cmd = [rpmbin, '-q', '--qf', qf, pkgspec]
        if self.installroot != '/':
            cmd.extend(['--root', self.installroot])
        # rpm localizes messages and we're screen scraping so make sure we use
        # the C locale
        lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
        rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
        if rc != 0 and 'is not installed' not in out:
            self.module.fail_json(msg='Error from rpm: %s: %s' % (cmd, err))
        if 'is not installed' in out:
            out = ''

        # Normalize '(none)' epochs to '0' so version comparisons work.
        pkgs = [p for p in out.replace('(none)', '0').split('\n') if p.strip()]
        if not pkgs and not is_pkg:
            # Fall back to a provides lookup.
            cmd = [rpmbin, '-q', '--qf', qf, '--whatprovides', pkgspec]
            if self.installroot != '/':
                cmd.extend(['--root', self.installroot])
            rc2, out2, err2 = self.module.run_command(cmd, environ_update=lang_env)
        else:
            rc2, out2, err2 = (0, '', '')

        if rc2 != 0 and 'no package provides' not in out2:
            self.module.fail_json(msg='Error from rpm: %s: %s' % (cmd, err + err2))

        if 'no package provides' in out2:
            out2 = ''
        pkgs += [p for p in out2.replace('(none)', '0').split('\n') if p.strip()]
        return pkgs

    # Defensive fallback; both branches above normally return first.
    return []
def is_available(self, repoq, pkgspec, qf=def_qf):
    """Return the list of available envra strings matching pkgspec.

    Uses the yum python API when repoq is falsy; otherwise runs repoquery
    honoring the module's enable/disable repo lists.
    """
    if not repoq:

        pkgs = []
        try:
            e, m, _ = self.yum_base.pkgSack.matchPackageNames([pkgspec])
            pkgs = e + m
            if not pkgs:
                # Not a package name: try resolving as a capability/dep.
                pkgs.extend(self.yum_base.returnPackagesByDep(pkgspec))
        except Exception as e:
            self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))

        return [self.po_to_envra(p) for p in pkgs]

    else:
        myrepoq = list(repoq)

        r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
        myrepoq.extend(r_cmd)

        r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
        myrepoq.extend(r_cmd)

        cmd = myrepoq + ["--qf", qf, pkgspec]
        rc, out, err = self.module.run_command(cmd)
        if rc == 0:
            return [p for p in out.split('\n') if p.strip()]
        else:
            self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))

    # Reached only when fail_json does not exit (e.g. mocked in tests).
    return []
def is_update(self, repoq, pkgspec, qf=def_qf):
    """Return the set of envra strings for which pkgspec has an update.

    Uses the yum python API when repoq is falsy; otherwise runs repoquery
    with --pkgnarrow=updates.
    """
    if not repoq:

        pkgs = []
        updates = []

        try:
            pkgs = self.yum_base.returnPackagesByDep(pkgspec) + \
                self.yum_base.returnInstalledPackagesByDep(pkgspec)
            if not pkgs:
                e, m, _ = self.yum_base.pkgSack.matchPackageNames([pkgspec])
                pkgs = e + m
            updates = self.yum_base.doPackageLists(pkgnarrow='updates').updates
        except Exception as e:
            self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))

        # Keep only the matched packages that actually appear in updates.
        retpkgs = (pkg for pkg in pkgs if pkg in updates)

        return set(self.po_to_envra(p) for p in retpkgs)

    else:
        myrepoq = list(repoq)

        r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
        myrepoq.extend(r_cmd)

        r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
        myrepoq.extend(r_cmd)

        cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec]
        rc, out, err = self.module.run_command(cmd)

        if rc == 0:
            return set(p for p in out.split('\n') if p.strip())
        else:
            self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))

    # Reached only when fail_json does not exit (e.g. mocked in tests).
    return set()
def what_provides(self, repoq, req_spec, qf=def_qf):
    """Return the set of envra strings for packages providing req_spec.

    Falls back from provides lookups to plain name matching, and retries
    once after `yum makecache` when repo GPG metadata was never accepted.
    """
    if not repoq:

        pkgs = []
        try:
            try:
                pkgs = self.yum_base.returnPackagesByDep(req_spec) + \
                    self.yum_base.returnInstalledPackagesByDep(req_spec)
            except Exception as e:
                # If a repo with `repo_gpgcheck=1` is added and the repo GPG
                # key was never accepted, querying this repo will throw an
                # error: 'repomd.xml signature could not be verified'. In that
                # situation we need to run `yum -y makecache` which will accept
                # the key and try again.
                if 'repomd.xml signature could not be verified' in to_native(e):
                    self.module.run_command(self.yum_basecmd + ['makecache'])
                    pkgs = self.yum_base.returnPackagesByDep(req_spec) + \
                        self.yum_base.returnInstalledPackagesByDep(req_spec)
                else:
                    raise
            if not pkgs:
                # No provider found: match the spec as a plain package name
                # against both the available sack and the installed rpmdb.
                e, m, _ = self.yum_base.pkgSack.matchPackageNames([req_spec])
                pkgs.extend(e)
                pkgs.extend(m)
                e, m, _ = self.yum_base.rpmdb.matchPackageNames([req_spec])
                pkgs.extend(e)
                pkgs.extend(m)
        except Exception as e:
            self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))

        return set(self.po_to_envra(p) for p in pkgs)

    else:
        myrepoq = list(repoq)
        r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
        myrepoq.extend(r_cmd)

        r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
        myrepoq.extend(r_cmd)

        cmd = myrepoq + ["--qf", qf, "--whatprovides", req_spec]
        rc, out, err = self.module.run_command(cmd)
        cmd = myrepoq + ["--qf", qf, req_spec]
        rc2, out2, err2 = self.module.run_command(cmd)
        if rc == 0 and rc2 == 0:
            out += out2
            pkgs = set([p for p in out.split('\n') if p.strip()])
            if not pkgs:
                # Nothing available provides it; maybe something installed does.
                pkgs = self.is_installed(repoq, req_spec, qf=qf)
            return pkgs
        else:
            self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2))

    # Reached only when fail_json does not exit (e.g. mocked in tests).
    return set()
def transaction_exists(self, pkglist):
    """
    checks the package list to see if any packages are
    involved in an incomplete transaction

    :param pkglist: iterable of package specs requested for install/update
    :return: list of "name-arch" labels that clash with an unfinished yum
             transaction; empty when yum-utils' transaction helpers are
             unavailable
    """

    conflicts = []
    if not transaction_helpers:
        return conflicts

    # first, we create a list of the package 'nvreas'
    # so we can compare the pieces later more easily
    # BUGFIX: this must be a list, not a generator expression. It is
    # iterated once per remaining transaction step below; a generator is
    # exhausted after the first step, silently missing later conflicts.
    pkglist_nvreas = [splitFilename(pkg) for pkg in pkglist]

    # next, we build the list of packages that are
    # contained within an unfinished transaction
    unfinished_transactions = find_unfinished_transactions()
    for trans in unfinished_transactions:
        steps = find_ts_remaining(trans)
        for step in steps:
            # the action is install/erase/etc., but we only
            # care about the package spec contained in the step
            (action, step_spec) = step
            (n, v, r, e, a) = splitFilename(step_spec)
            # and see if that spec is in the list of packages
            # requested for installation/updating
            for pkg in pkglist_nvreas:
                # if the name and arch match, we're going to assume
                # this package is part of a pending transaction
                # the label is just for display purposes
                label = "%s-%s" % (n, a)
                if n == pkg[0] and a == pkg[4]:
                    if label not in conflicts:
                        conflicts.append("%s-%s" % (n, a))
                    break
    return conflicts
def local_envra(self, path):
    """return envra of a local rpm passed in"""
    ts = rpm.TransactionSet()
    # Skip signature verification so unsigned RPMs (or RPMs signed with
    # an unimported key) can still have their header read.
    ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
    fd = os.open(path, os.O_RDONLY)
    try:
        header = ts.hdrFromFdno(fd)
    except rpm.error as e:
        # Unreadable/corrupt RPM header; callers treat None as failure.
        return None
    finally:
        os.close(fd)

    # Epoch defaults to '0' when the header carries none (header value
    # is None in that case).
    return '%s:%s-%s-%s.%s' % (
        header[rpm.RPMTAG_EPOCH] or '0',
        header[rpm.RPMTAG_NAME],
        header[rpm.RPMTAG_VERSION],
        header[rpm.RPMTAG_RELEASE],
        header[rpm.RPMTAG_ARCH]
    )
@contextmanager
def set_env_proxy(self):
    """Temporarily export http(s)_proxy from yum's configuration.

    Used around URL downloads so they honour the proxy (and proxy
    credentials) configured in yum.conf; the previous environment is
    restored on exit, even when the body raises.
    """
    # setting system proxy environment and saving old, if exists
    namepass = ""
    scheme = ["http", "https"]
    old_proxy_env = [os.getenv("http_proxy"), os.getenv("https_proxy")]
    try:
        # "_none_" is a special value to disable proxy in yum.conf/*.repo
        if self.yum_base.conf.proxy and self.yum_base.conf.proxy not in ("_none_",):
            if self.yum_base.conf.proxy_username:
                namepass = namepass + self.yum_base.conf.proxy_username
                proxy_url = self.yum_base.conf.proxy
                if self.yum_base.conf.proxy_password:
                    namepass = namepass + ":" + self.yum_base.conf.proxy_password
            elif '@' in self.yum_base.conf.proxy:
                # Credentials already embedded in the proxy URL: split them
                # out so they can be re-injected below.
                namepass = self.yum_base.conf.proxy.split('@')[0].split('//')[-1]
                proxy_url = self.yum_base.conf.proxy.replace("{0}@".format(namepass), "")

            if namepass:
                namepass = namepass + '@'
                for item in scheme:
                    os.environ[item + "_proxy"] = re.sub(
                        r"(http://)",
                        r"\g<1>" + namepass, proxy_url
                    )
            else:
                for item in scheme:
                    os.environ[item + "_proxy"] = self.yum_base.conf.proxy
        yield
    except yum.Errors.YumBaseError:
        raise
    finally:
        # revert back to previously system configuration
        for item in scheme:
            if os.getenv("{0}_proxy".format(item)):
                del os.environ["{0}_proxy".format(item)]
        if old_proxy_env[0]:
            os.environ["http_proxy"] = old_proxy_env[0]
        if old_proxy_env[1]:
            os.environ["https_proxy"] = old_proxy_env[1]
def pkg_to_dict(self, pkgstr):
    """Convert one pipe-delimited repoquery/rpm line into a package dict.

    Expected field order is name|epoch|version|release|arch|repo; a blank
    line yields an ``error_parsing`` marker dict instead.
    """
    if not pkgstr.strip():
        return {'error_parsing': pkgstr}

    name, epoch, version, release, arch, repo = pkgstr.split('|')
    pkg_info = {
        'name': name,
        'arch': arch,
        'epoch': epoch,
        'release': release,
        'version': version,
        'repo': repo,
        'envra': '%s:%s-%s-%s.%s' % (epoch, name, version, release, arch),
    }
    # Installed packages are tagged with the literal repo name 'installed'.
    pkg_info['yumstate'] = 'installed' if repo == 'installed' else 'available'
    return pkg_info
def repolist(self, repoq, qf="%{repoid}"):
    """Return the set of repo ids known to repoquery.

    NOTE: on a non-zero exit an empty *list* (not a set) is returned,
    preserving the historical return type for callers.
    """
    rc, out, _ = self.module.run_command(repoq + ["--qf", qf, "-a"])
    if rc != 0:
        return []
    return set(line for line in out.split('\n') if line.strip())
def list_stuff(self, repoquerybin, stuff):
    """Implement ``list=``: return package/repo dicts for the requested view.

    *stuff* is one of 'installed', 'updates', 'available', 'repos', or an
    arbitrary package spec (matched against installed + available).
    """

    qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}"
    # is_installed goes through rpm instead of repoquery so it needs a slightly different format
    is_installed_qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|installed\n"
    repoq = [repoquerybin, '--show-duplicates', '--plugins', '--quiet']
    if self.disablerepo:
        repoq.extend(['--disablerepo', ','.join(self.disablerepo)])
    if self.enablerepo:
        repoq.extend(['--enablerepo', ','.join(self.enablerepo)])
    if self.installroot != '/':
        repoq.extend(['--installroot', self.installroot])
    if self.conf_file and os.path.exists(self.conf_file):
        repoq += ['-c', self.conf_file]

    if stuff == 'installed':
        return [self.pkg_to_dict(p) for p in sorted(self.is_installed(repoq, '-a', qf=is_installed_qf)) if p.strip()]

    if stuff == 'updates':
        return [self.pkg_to_dict(p) for p in sorted(self.is_update(repoq, '-a', qf=qf)) if p.strip()]

    if stuff == 'available':
        return [self.pkg_to_dict(p) for p in sorted(self.is_available(repoq, '-a', qf=qf)) if p.strip()]

    if stuff == 'repos':
        return [dict(repoid=name, state='enabled') for name in sorted(self.repolist(repoq)) if name.strip()]

    # Fallback: *stuff* is a concrete package spec; report both installed
    # and available matches.
    return [
        self.pkg_to_dict(p) for p in
        sorted(self.is_installed(repoq, stuff, qf=is_installed_qf) + self.is_available(repoq, stuff, qf=qf))
        if p.strip()
    ]
def exec_install(self, items, action, pkgs, res):
    """Run one yum transaction for *pkgs* and fold the outcome into *res*.

    :param items: the original user-requested specs (used to detect
                  failed URL installs in yum's output)
    :param action: yum subcommand, 'install' or 'downgrade'
    :param pkgs: resolved package specs passed to yum
    :param res: running result dict; mutated and returned
    """
    cmd = self.yum_basecmd + [action] + pkgs

    if self.module.check_mode:
        self.module.exit_json(changed=True, results=res['results'], changes=dict(installed=pkgs))
    else:
        res['changes'] = dict(installed=pkgs)

    # Force English output so the string matching below is reliable.
    lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
    rc, out, err = self.module.run_command(cmd, environ_update=lang_env)

    if rc == 1:
        for spec in items:
            # Fail on invalid urls:
            if ('://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)):
                err = 'Package at %s could not be installed' % spec
                self.module.fail_json(changed=False, msg=err, rc=rc)

    res['rc'] = rc
    res['results'].append(out)
    res['msg'] += err
    res['changed'] = True

    if ('Nothing to do' in out and rc == 0) or ('does not have any packages' in err):
        res['changed'] = False

    if rc != 0:
        res['changed'] = False
        self.module.fail_json(**res)

    # Fail if yum prints 'No space left on device' because that means some
    # packages failed executing their post install scripts because of lack of
    # free space (e.g. kernel package couldn't generate initramfs). Note that
    # yum can still exit with rc=0 even if some post scripts didn't execute
    # correctly.
    # BUGFIX: the previous check `in (out or err)` only examined stderr when
    # stdout was empty; inspect both streams unconditionally.
    if 'No space left on device' in out or 'No space left on device' in err:
        res['changed'] = False
        res['msg'] = 'No space left on device'
        self.module.fail_json(**res)

    # FIXME - if we did an install - go and check the rpmdb to see if it actually installed
    # look for each pkg in rpmdb
    # look for each pkg via obsoletes

    return res
def install(self, items, repoq):
    """Ensure each spec in *items* (local .rpm, URL, @group, or package
    spec) is installed, using ``yum install`` / ``yum downgrade`` as
    appropriate; returns the accumulated result dict."""
    pkgs = []
    downgrade_pkgs = []
    res = {}
    res['results'] = []
    res['msg'] = ''
    res['rc'] = 0
    res['changed'] = False

    for spec in items:
        pkg = None
        downgrade_candidate = False

        # check if pkgspec is installed (if possible for idempotence)
        if spec.endswith('.rpm') or '://' in spec:
            if '://' not in spec and not os.path.exists(spec):
                res['msg'] += "No RPM file matching '%s' found on system" % spec
                res['results'].append("No RPM file matching '%s' found on system" % spec)
                res['rc'] = 127  # Ensure the task fails in with-loop
                self.module.fail_json(**res)

            if '://' in spec:
                with self.set_env_proxy():
                    package = fetch_file(self.module, spec)
                if not package.endswith('.rpm'):
                    # yum requires a local file to have the extension of .rpm and we
                    # can not guarantee that from an URL (redirects, proxies, etc)
                    new_package_path = '%s.rpm' % package
                    os.rename(package, new_package_path)
                    package = new_package_path
            else:
                package = spec

            # most common case is the pkg is already installed
            envra = self.local_envra(package)
            if envra is None:
                self.module.fail_json(msg="Failed to get nevra information from RPM package: %s" % spec)
            installed_pkgs = self.is_installed(repoq, envra)
            if installed_pkgs:
                res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], package))
                continue

            (name, ver, rel, epoch, arch) = splitFilename(envra)
            installed_pkgs = self.is_installed(repoq, name)

            # case for two same envr but different archs like x86_64 and i686
            if len(installed_pkgs) == 2:
                (cur_name0, cur_ver0, cur_rel0, cur_epoch0, cur_arch0) = splitFilename(installed_pkgs[0])
                (cur_name1, cur_ver1, cur_rel1, cur_epoch1, cur_arch1) = splitFilename(installed_pkgs[1])
                cur_epoch0 = cur_epoch0 or '0'
                cur_epoch1 = cur_epoch1 or '0'
                compare = compareEVR((cur_epoch0, cur_ver0, cur_rel0), (cur_epoch1, cur_ver1, cur_rel1))
                if compare == 0 and cur_arch0 != cur_arch1:
                    # Same EVR on two arches: only compare against the one
                    # matching the arch of the RPM being installed.
                    for installed_pkg in installed_pkgs:
                        if installed_pkg.endswith(arch):
                            installed_pkgs = [installed_pkg]

            if len(installed_pkgs) == 1:
                installed_pkg = installed_pkgs[0]
                (cur_name, cur_ver, cur_rel, cur_epoch, cur_arch) = splitFilename(installed_pkg)
                cur_epoch = cur_epoch or '0'
                compare = compareEVR((cur_epoch, cur_ver, cur_rel), (epoch, ver, rel))

                # compare > 0 -> higher version is installed
                # compare == 0 -> exact version is installed
                # compare < 0 -> lower version is installed
                if compare > 0 and self.allow_downgrade:
                    downgrade_candidate = True
                elif compare >= 0:
                    continue

            # else: if there are more installed packages with the same name, that would mean
            # kernel, gpg-pubkey or like, so just let yum deal with it and try to install it

            pkg = package

        # groups
        elif spec.startswith('@'):
            if self.is_group_env_installed(spec):
                continue

            pkg = spec

        # range requires or file-requires or pkgname :(
        else:
            # most common case is the pkg is already installed and done
            # short circuit all the bs - and search for it as a pkg in is_installed
            # if you find it then we're done
            if not set(['*', '?']).intersection(set(spec)):
                installed_pkgs = self.is_installed(repoq, spec, is_pkg=True)
                if installed_pkgs:
                    res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], spec))
                    continue

            # look up what pkgs provide this
            pkglist = self.what_provides(repoq, spec)
            if not pkglist:
                res['msg'] += "No package matching '%s' found available, installed or updated" % spec
                res['results'].append("No package matching '%s' found available, installed or updated" % spec)
                res['rc'] = 126  # Ensure the task fails in with-loop
                self.module.fail_json(**res)

            # if any of the packages are involved in a transaction, fail now
            # so that we don't hang on the yum operation later
            conflicts = self.transaction_exists(pkglist)
            if conflicts:
                res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
                res['rc'] = 125  # Ensure the task fails in with-loop
                self.module.fail_json(**res)

            # if any of them are installed
            # then nothing to do
            found = False
            for this in pkglist:
                if self.is_installed(repoq, this, is_pkg=True):
                    found = True
                    res['results'].append('%s providing %s is already installed' % (this, spec))
                    break

            # if the version of the pkg you have installed is not in ANY repo, but there are
            # other versions in the repos (both higher and lower) then the previous checks won't work.
            # so we check one more time. This really only works for pkgname - not for file provides or virt provides
            # but virt provides should be all caught in what_provides on its own.
            # highly irritating
            if not found:
                if self.is_installed(repoq, spec):
                    found = True
                    res['results'].append('package providing %s is already installed' % (spec))

            if found:
                continue

            # Downgrade - The yum install command will only install or upgrade to a spec version, it will
            # not install an older version of an RPM even if specified by the install spec. So we need to
            # determine if this is a downgrade, and then use the yum downgrade command to install the RPM.
            if self.allow_downgrade:
                for package in pkglist:
                    # Get the NEVRA of the requested package using pkglist instead of spec because pkglist
                    # contains consistently-formatted package names returned by yum, rather than user input
                    # that is often not parsed correctly by splitFilename().
                    (name, ver, rel, epoch, arch) = splitFilename(package)

                    # Check if any version of the requested package is installed
                    inst_pkgs = self.is_installed(repoq, name, is_pkg=True)
                    if inst_pkgs:
                        (cur_name, cur_ver, cur_rel, cur_epoch, cur_arch) = splitFilename(inst_pkgs[0])
                        compare = compareEVR((cur_epoch, cur_ver, cur_rel), (epoch, ver, rel))
                        if compare > 0:
                            downgrade_candidate = True
                        else:
                            downgrade_candidate = False
                            break

            # If package needs to be installed/upgraded/downgraded, then pass in the spec
            # we could get here if nothing provides it but that's not
            # the error we're catching here
            pkg = spec

        if downgrade_candidate and self.allow_downgrade:
            downgrade_pkgs.append(pkg)
        else:
            pkgs.append(pkg)

    if downgrade_pkgs:
        res = self.exec_install(items, 'downgrade', downgrade_pkgs, res)

    if pkgs:
        res = self.exec_install(items, 'install', pkgs, res)

    return res
def remove(self, items, repoq):
    """Remove (or autoremove) the given packages/groups.

    Only specs that are actually installed are passed to yum; after the
    transaction, each one is re-checked and the module fails if any
    package survived the removal.
    """
    pkgs = []
    res = {}
    res['results'] = []
    res['msg'] = ''
    res['changed'] = False
    res['rc'] = 0

    for pkg in items:
        if pkg.startswith('@'):
            installed = self.is_group_env_installed(pkg)
        else:
            installed = self.is_installed(repoq, pkg)

        if installed:
            pkgs.append(pkg)
        else:
            res['results'].append('%s is not installed' % pkg)

    if pkgs:
        if self.module.check_mode:
            self.module.exit_json(changed=True, results=res['results'], changes=dict(removed=pkgs))
        else:
            res['changes'] = dict(removed=pkgs)

        # run an actual yum transaction
        if self.autoremove:
            cmd = self.yum_basecmd + ["autoremove"] + pkgs
        else:
            cmd = self.yum_basecmd + ["remove"] + pkgs
        rc, out, err = self.module.run_command(cmd)

        res['rc'] = rc
        res['results'].append(out)
        res['msg'] = err

        if rc != 0:
            if self.autoremove and 'No such command' in out:
                self.module.fail_json(msg='Version of YUM too old for autoremove: Requires yum 3.4.3 (RHEL/CentOS 7+)')
            else:
                self.module.fail_json(**res)

        # compile the results into one batch. If anything is changed
        # then mark changed
        # at the end - if we've end up failed then fail out of the rest
        # of the process

        # at this point we check to see if the pkg is no longer present
        self._yum_base = None  # previous YumBase package index is now invalid
        for pkg in pkgs:
            if pkg.startswith('@'):
                installed = self.is_group_env_installed(pkg)
            else:
                installed = self.is_installed(repoq, pkg)

            if installed:
                # Return a message so it's obvious to the user why yum failed
                # and which package couldn't be removed. More details:
                # https://github.com/ansible/ansible/issues/35672
                res['msg'] = "Package '%s' couldn't be removed!" % pkg
                self.module.fail_json(**res)

        res['changed'] = True

    return res
def run_check_update(self):
    """Run ``yum check-update`` and return its (rc, stdout, stderr).

    rc is 0 when nothing is pending, 100 when updates are available.
    """
    cmd = self.yum_basecmd + ['check-update']
    return self.module.run_command(cmd)
@staticmethod
def parse_check_update(check_update_output):
updates = {}
obsoletes = {}
# remove incorrect new lines in longer columns in output from yum check-update
# yum line wrapping can move the repo to the next line
#
# Meant to filter out sets of lines like:
# some_looooooooooooooooooooooooooooooooooooong_package_name 1:1.2.3-1.el7
# some-repo-label
#
# But it also needs to avoid catching lines like:
# Loading mirror speeds from cached hostfile
#
# ceph.x86_64 1:11.2.0-0.el7 ceph
# preprocess string and filter out empty lines so the regex below works
out = re.sub(r'\n[^\w]\W+(.*)', r' \1', check_update_output)
available_updates = out.split('\n')
# build update dictionary
for line in available_updates:
line = line.split()
# ignore irrelevant lines
# '*' in line matches lines like mirror lists:
# * base: mirror.corbina.net
# len(line) != 3 or 6 could be junk or a continuation
# len(line) = 6 is package obsoletes
#
# FIXME: what is the '.' not in line conditional for?
if '*' in line or len(line) not in [3, 6] or '.' not in line[0]:
continue
else:
pkg, version, repo = line[0], line[1], line[2]
name, dist = pkg.rsplit('.', 1)
updates.update({name: {'version': version, 'dist': dist, 'repo': repo}})
if len(line) == 6:
obsolete_pkg, obsolete_version, obsolete_repo = line[3], line[4], line[5]
obsolete_name, obsolete_dist = obsolete_pkg.rsplit('.', 1)
obsoletes.update({obsolete_name: {'version': obsolete_version, 'dist': obsolete_dist, 'repo': obsolete_repo}})
return updates, obsoletes
def latest(self, items, repoq):
    """Implement ``state=latest`` (and update_only): update every spec in
    *items* to the newest available version, or update everything when
    '*' is requested; returns the accumulated result dict."""
    res = {}
    res['results'] = []
    res['msg'] = ''
    res['changed'] = False
    res['rc'] = 0
    pkgs = {}
    pkgs['update'] = []
    pkgs['install'] = []
    updates = {}
    obsoletes = {}
    update_all = False
    cmd = None

    # determine if we're doing an update all
    if '*' in items:
        update_all = True

    rc, out, err = self.run_check_update()

    if rc == 0 and update_all:
        res['results'].append('Nothing to do here, all packages are up to date')
        return res
    elif rc == 100:
        # rc 100 from check-update means updates are available
        updates, obsoletes = self.parse_check_update(out)
    elif rc == 1:
        res['msg'] = err
        res['rc'] = rc
        self.module.fail_json(**res)

    if update_all:
        cmd = self.yum_basecmd + ['update']
        will_update = set(updates.keys())
        will_update_from_other_package = dict()
    else:
        will_update = set()
        will_update_from_other_package = dict()
        for spec in items:
            # some guess work involved with groups. update @<group> will install the group if missing
            if spec.startswith('@'):
                pkgs['update'].append(spec)
                will_update.add(spec)
                continue

            # check if pkgspec is installed (if possible for idempotence)
            # localpkg
            elif spec.endswith('.rpm') and '://' not in spec:
                if not os.path.exists(spec):
                    res['msg'] += "No RPM file matching '%s' found on system" % spec
                    res['results'].append("No RPM file matching '%s' found on system" % spec)
                    res['rc'] = 127  # Ensure the task fails in with-loop
                    self.module.fail_json(**res)

                # get the pkg e:name-v-r.arch
                envra = self.local_envra(spec)
                if envra is None:
                    self.module.fail_json(msg="Failed to get nevra information from RPM package: %s" % spec)

                # local rpm files can't be updated
                if self.is_installed(repoq, envra):
                    pkgs['update'].append(spec)
                else:
                    pkgs['install'].append(spec)
                continue

            # URL
            elif '://' in spec:
                # download package so that we can check if it's already installed
                with self.set_env_proxy():
                    package = fetch_file(self.module, spec)
                envra = self.local_envra(package)
                if envra is None:
                    self.module.fail_json(msg="Failed to get nevra information from RPM package: %s" % spec)

                # local rpm files can't be updated
                if self.is_installed(repoq, envra):
                    pkgs['update'].append(spec)
                else:
                    pkgs['install'].append(spec)
                continue

            # dep/pkgname - find it
            else:
                if self.is_installed(repoq, spec):
                    pkgs['update'].append(spec)
                else:
                    pkgs['install'].append(spec)
            pkglist = self.what_provides(repoq, spec)
            # FIXME..? may not be desirable to throw an exception here if a single package is missing
            if not pkglist:
                res['msg'] += "No package matching '%s' found available, installed or updated" % spec
                res['results'].append("No package matching '%s' found available, installed or updated" % spec)
                res['rc'] = 126  # Ensure the task fails in with-loop
                self.module.fail_json(**res)

            nothing_to_do = True
            for pkg in pkglist:
                if spec in pkgs['install'] and self.is_available(repoq, pkg):
                    nothing_to_do = False
                    break

                # this contains the full NVR and spec could contain wildcards
                # or virtual provides (like "python-*" or "smtp-daemon") while
                # updates contains name only.
                pkgname, _, _, _, _ = splitFilename(pkg)
                if spec in pkgs['update'] and pkgname in updates:
                    nothing_to_do = False
                    will_update.add(spec)
                    # Massage the updates list
                    if spec != pkgname:
                        # For reporting what packages would be updated more
                        # succinctly
                        will_update_from_other_package[spec] = pkgname
                    break

            if not self.is_installed(repoq, spec) and self.update_only:
                res['results'].append("Packages providing %s not installed due to update_only specified" % spec)
                continue
            if nothing_to_do:
                res['results'].append("All packages providing %s are up to date" % spec)
                continue

            # if any of the packages are involved in a transaction, fail now
            # so that we don't hang on the yum operation later
            conflicts = self.transaction_exists(pkglist)
            if conflicts:
                res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
                res['results'].append("The following packages have pending transactions: %s" % ", ".join(conflicts))
                res['rc'] = 128  # Ensure the task fails in with-loop
                self.module.fail_json(**res)

    # check_mode output
    to_update = []
    for w in will_update:
        if w.startswith('@'):
            # groups carry no version info
            to_update.append((w, None))
        elif w not in updates:
            # spec matched via a differently-named providing package
            other_pkg = will_update_from_other_package[w]
            to_update.append(
                (
                    w,
                    'because of (at least) %s-%s.%s from %s' % (
                        other_pkg,
                        updates[other_pkg]['version'],
                        updates[other_pkg]['dist'],
                        updates[other_pkg]['repo']
                    )
                )
            )
        else:
            to_update.append((w, '%s.%s from %s' % (updates[w]['version'], updates[w]['dist'], updates[w]['repo'])))

    if self.update_only:
        res['changes'] = dict(installed=[], updated=to_update)
    else:
        res['changes'] = dict(installed=pkgs['install'], updated=to_update)

    if obsoletes:
        res['obsoletes'] = obsoletes

    # return results before we actually execute stuff
    if self.module.check_mode:
        if will_update or pkgs['install']:
            res['changed'] = True
        return res

    # run commands
    if cmd:     # update all
        rc, out, err = self.module.run_command(cmd)
        res['changed'] = True
    elif self.update_only:
        if pkgs['update']:
            cmd = self.yum_basecmd + ['update'] + pkgs['update']
            # Force English output so the suffix matching below works.
            lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
            rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
            out_lower = out.strip().lower()
            if not out_lower.endswith("no packages marked for update") and \
                    not out_lower.endswith("nothing to do"):
                res['changed'] = True
        else:
            rc, out, err = [0, '', '']
    elif pkgs['install'] or will_update and not self.update_only:
        cmd = self.yum_basecmd + ['install'] + pkgs['install'] + pkgs['update']
        lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
        rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
        out_lower = out.strip().lower()
        if not out_lower.endswith("no packages marked for update") and \
                not out_lower.endswith("nothing to do"):
            res['changed'] = True
    else:
        rc, out, err = [0, '', '']

    res['rc'] = rc
    res['msg'] += err
    res['results'].append(out)

    if rc:
        res['failed'] = True

    return res
def ensure(self, repoq):
    """Build the final yum command line from the module options and
    dispatch to install()/latest()/remove() according to ``state``."""
    pkgs = self.names

    # autoremove was provided without `name`
    if not self.names and self.autoremove:
        pkgs = []
        self.state = 'absent'

    if self.conf_file and os.path.exists(self.conf_file):
        self.yum_basecmd += ['-c', self.conf_file]

        if repoq:
            repoq += ['-c', self.conf_file]

    if self.skip_broken:
        self.yum_basecmd.extend(['--skip-broken'])

    if self.disablerepo:
        self.yum_basecmd.extend(['--disablerepo=%s' % ','.join(self.disablerepo)])

    if self.enablerepo:
        self.yum_basecmd.extend(['--enablerepo=%s' % ','.join(self.enablerepo)])

    if self.enable_plugin:
        self.yum_basecmd.extend(['--enableplugin', ','.join(self.enable_plugin)])

    if self.disable_plugin:
        self.yum_basecmd.extend(['--disableplugin', ','.join(self.disable_plugin)])

    if self.exclude:
        e_cmd = ['--exclude=%s' % ','.join(self.exclude)]
        self.yum_basecmd.extend(e_cmd)

    if self.disable_excludes:
        self.yum_basecmd.extend(['--disableexcludes=%s' % self.disable_excludes])

    if self.download_only:
        self.yum_basecmd.extend(['--downloadonly'])

        # download_dir is only meaningful together with download_only
        if self.download_dir:
            self.yum_basecmd.extend(['--downloaddir=%s' % self.download_dir])

    if self.installroot != '/':
        # do not setup installroot by default, because of error
        # CRITICAL:yum.cli:Config Error: Error accessing file for config file:////etc/yum.conf
        # in old yum version (like in CentOS 6.6)
        e_cmd = ['--installroot=%s' % self.installroot]
        self.yum_basecmd.extend(e_cmd)

    if self.state in ('installed', 'present', 'latest'):
        """ The need of this entire if conditional has to be changed
            this function is the ensure function that is called
            in the main section.

            This conditional tends to disable/enable repo for
            install present latest action, same actually
            can be done for remove and absent action

            As solution I would advice to cal
            try: self.yum_base.repos.disableRepo(disablerepo)
            and
            try: self.yum_base.repos.enableRepo(enablerepo)
            right before any yum_cmd is actually called regardless
            of yum action.

            Please note that enable/disablerepo options are general
            options, this means that we can call those with any action
            option.  https://linux.die.net/man/8/yum

            This docstring will be removed together when issue: #21619
            will be solved.

            This has been triggered by: #19587
        """

        if self.update_cache:
            self.module.run_command(self.yum_basecmd + ['clean', 'expire-cache'])

        try:
            current_repos = self.yum_base.repos.repos.keys()

            if self.enablerepo:
                try:
                    # Touch every newly appearing repo so that access errors
                    # (bad baseurl, missing metadata, ...) surface here.
                    new_repos = self.yum_base.repos.repos.keys()
                    for i in new_repos:
                        if i not in current_repos:
                            rid = self.yum_base.repos.getRepo(i)
                            a = rid.repoXML.repoid  # nopep8 - https://github.com/ansible/ansible/pull/21475#pullrequestreview-22404868
                    current_repos = new_repos
                except yum.Errors.YumBaseError as e:
                    self.module.fail_json(msg="Error setting/accessing repos: %s" % to_native(e))
        except yum.Errors.YumBaseError as e:
            self.module.fail_json(msg="Error accessing repos: %s" % to_native(e))
    if self.state == 'latest' or self.update_only:
        if self.disable_gpg_check:
            self.yum_basecmd.append('--nogpgcheck')
        if self.security:
            self.yum_basecmd.append('--security')
        if self.bugfix:
            self.yum_basecmd.append('--bugfix')
        res = self.latest(pkgs, repoq)
    elif self.state in ('installed', 'present'):
        if self.disable_gpg_check:
            self.yum_basecmd.append('--nogpgcheck')
        res = self.install(pkgs, repoq)
    elif self.state in ('removed', 'absent'):
        res = self.remove(pkgs, repoq)
    else:
        # should be caught by AnsibleModule argument_spec
        self.module.fail_json(
            msg="we should never get here unless this all failed",
            changed=False,
            results='',
            errors='unexpected state'
        )
    return res
@staticmethod
def has_yum():
    # True when the yum Python bindings imported successfully at module load.
    return HAS_YUM_PYTHON
def run(self):
    """
    actually execute the module code backend
    """
    error_msgs = []
    if not HAS_RPM_PYTHON:
        error_msgs.append('The Python 2 bindings for rpm are needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.')
    if not HAS_YUM_PYTHON:
        error_msgs.append('The Python 2 yum module is needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.')

    self.wait_for_lock()

    if error_msgs:
        self.module.fail_json(msg='. '.join(error_msgs))

    # fedora will redirect yum to dnf, which has incompatibilities
    # with how this module expects yum to operate. If yum-deprecated
    # is available, use that instead to emulate the old behaviors.
    if self.module.get_bin_path('yum-deprecated'):
        yumbin = self.module.get_bin_path('yum-deprecated')
    else:
        yumbin = self.module.get_bin_path('yum')

    # need debug level 2 to get 'Nothing to do' for groupinstall.
    self.yum_basecmd = [yumbin, '-d', '2', '-y']

    # `update_cache` alone (no names, no list) just refreshes metadata
    # and exits immediately.
    if self.update_cache and not self.names and not self.list:
        rc, stdout, stderr = self.module.run_command(self.yum_basecmd + ['clean', 'expire-cache'])
        if rc == 0:
            self.module.exit_json(
                changed=False,
                msg="Cache updated",
                rc=rc,
                results=[]
            )
        else:
            self.module.exit_json(
                changed=False,
                msg="Failed to update cache",
                rc=rc,
                results=[stderr],
            )

    repoquerybin = self.module.get_bin_path('repoquery', required=False)

    if self.install_repoquery and not repoquerybin and not self.module.check_mode:
        yum_path = self.module.get_bin_path('yum')
        if yum_path:
            # Best-effort install of yum-utils to obtain repoquery.
            self.module.run_command('%s -y install yum-utils' % yum_path)
        repoquerybin = self.module.get_bin_path('repoquery', required=False)

    if self.list:
        if not repoquerybin:
            self.module.fail_json(msg="repoquery is required to use list= with this module. Please install the yum-utils package.")
        results = {'results': self.list_stuff(repoquerybin, self.list)}
    else:
        # If rhn-plugin is installed and no rhn-certificate is available on
        # the system then users will see an error message using the yum API.
        # Use repoquery in those cases.
        repoquery = None
        try:
            yum_plugins = self.yum_base.plugins._plugins
        except AttributeError:
            pass
        else:
            if 'rhnplugin' in yum_plugins:
                if repoquerybin:
                    repoquery = [repoquerybin, '--show-duplicates', '--plugins', '--quiet']
                    if self.installroot != '/':
                        repoquery.extend(['--installroot', self.installroot])

                    if self.disable_excludes:
                        # repoquery does not support --disableexcludes,
                        # so make a temp copy of yum.conf and get rid of the 'exclude=' line there
                        try:
                            with open('/etc/yum.conf', 'r') as f:
                                content = f.readlines()

                            tmp_conf_file = tempfile.NamedTemporaryFile(dir=self.module.tmpdir, delete=False)
                            self.module.add_cleanup_file(tmp_conf_file.name)

                            tmp_conf_file.writelines([c for c in content if not c.startswith("exclude=")])
                            tmp_conf_file.close()
                        except Exception as e:
                            self.module.fail_json(msg="Failure setting up repoquery: %s" % to_native(e))

                        repoquery.extend(['-c', tmp_conf_file.name])

        results = self.ensure(repoquery)
        if repoquery:
            results['msg'] = '%s %s' % (
                results.get('msg', ''),
                'Warning: Due to potential bad behaviour with rhnplugin and certificates, used slower repoquery calls instead of Yum API.'
            )

    self.module.exit_json(**results)
def main():
    """Module entry point: build the AnsibleModule and run the yum backend."""
    # state=installed name=pkgspec
    # state=removed name=pkgspec
    # state=latest name=pkgspec
    #
    # informational commands:
    #   list=installed
    #   list=updates
    #   list=available
    #   list=repos
    #   list=pkgspec

    # Extend the shared yum/dnf argument spec with the yum-only backend choice.
    yumdnf_argument_spec['argument_spec']['use_backend'] = dict(default='auto', choices=['auto', 'yum', 'yum4', 'dnf'])

    module = AnsibleModule(
        **yumdnf_argument_spec
    )

    module_implementation = YumModule(module)
    module_implementation.run()
# Allow direct execution (Ansible runs modules as standalone scripts).
if __name__ == '__main__':
    main()
| gpl-3.0 |
rdipietro/tensorflow | tensorflow/python/framework/graph_util_test.py | 9 | 12229 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.graph_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops # pylint: disable=unused-import
# Utility device function to use for testing
def test_device_func_pin_variable_to_cpu(op):
if op.device:
return op.device
return "/cpu:0" if op.node_def.op == "Variable" else op.device
class DeviceFunctionsTest(tf.test.TestCase):
def testTwoDeviceFunctions(self):
  """A device function applies only to ops created inside its scope;
  an explicit inner device string overrides the pinning function."""
  with ops.Graph().as_default() as g:
    var_0 = gen_state_ops._variable(shape=[1], dtype=dtypes.float32,
                                    name="var_0", container="", shared_name="")
    with g.device(test_device_func_pin_variable_to_cpu):
      var_1 = gen_state_ops._variable(shape=[1], dtype=dtypes.float32,
                                      name="var_1", container="", shared_name="")
    var_2 = gen_state_ops._variable(shape=[1], dtype=dtypes.float32,
                                    name="var_2", container="", shared_name="")
    var_3 = gen_state_ops._variable(shape=[1], dtype=dtypes.float32,
                                    name="var_3", container="", shared_name="")
    with g.device(test_device_func_pin_variable_to_cpu):
      var_4 = gen_state_ops._variable(shape=[1], dtype=dtypes.float32,
                                      name="var_4", container="", shared_name="")
      with g.device("/device:GPU:0"):
        var_5 = gen_state_ops._variable(shape=[1], dtype=dtypes.float32,
                                        name="var_5", container="", shared_name="")
      var_6 = gen_state_ops._variable(shape=[1], dtype=dtypes.float32,
                                      name="var_6", container="", shared_name="")

  # Ops outside any device scope keep no device; ops under the pinning
  # function land on CPU unless a deeper explicit device wins.
  self.assertDeviceEqual(var_0.device, None)
  self.assertDeviceEqual(var_1.device, "/device:CPU:0")
  self.assertDeviceEqual(var_2.device, None)
  self.assertDeviceEqual(var_3.device, None)
  self.assertDeviceEqual(var_4.device, "/device:CPU:0")
  self.assertDeviceEqual(var_5.device, "/device:GPU:0")
  self.assertDeviceEqual(var_6.device, "/device:CPU:0")
def testNestedDeviceFunctions(self):
  """Nested device scopes: the innermost device specification wins for
  ops created inside it."""
  with tf.Graph().as_default():
    var_0 = tf.Variable(0)
    with tf.device(test_device_func_pin_variable_to_cpu):
      var_1 = tf.Variable(1)
      with tf.device(lambda op: "/gpu:0"):
        var_2 = tf.Variable(2)
        with tf.device("/gpu:0"):  # Implicit merging device function.
          var_3 = tf.Variable(3)

  self.assertDeviceEqual(var_0.device, None)
  self.assertDeviceEqual(var_1.device, "/device:CPU:0")
  self.assertDeviceEqual(var_2.device, "/device:GPU:0")
  self.assertDeviceEqual(var_3.device, "/device:GPU:0")
def testExplicitDevice(self):
with ops.Graph().as_default() as g:
const_0 = constant_op.constant(5.0)
with g.device("/device:GPU:0"):
const_1 = constant_op.constant(5.0)
with g.device("/device:GPU:1"):
const_2 = constant_op.constant(5.0)
with g.device("/device:CPU:0"):
const_3 = constant_op.constant(5.0)
with g.device("/device:CPU:1"):
const_4 = constant_op.constant(5.0)
with g.device("/job:ps"):
const_5 = constant_op.constant(5.0)
self.assertDeviceEqual(const_0.device, None)
self.assertDeviceEqual(const_1.device, "/device:GPU:0")
self.assertDeviceEqual(const_2.device, "/device:GPU:1")
self.assertDeviceEqual(const_3.device, "/device:CPU:0")
self.assertDeviceEqual(const_4.device, "/device:CPU:1")
self.assertDeviceEqual(const_5.device, "/job:ps")
def testDefaultDevice(self):
with ops.Graph().as_default() as g, g.device(
test_device_func_pin_variable_to_cpu):
with g.device("/job:ps"):
const_0 = constant_op.constant(5.0)
with g.device("/device:GPU:0"):
const_1 = constant_op.constant(5.0)
with g.device("/device:GPU:1"):
const_2 = constant_op.constant(5.0)
with g.device("/device:CPU:0"):
const_3 = constant_op.constant(5.0)
with g.device("/device:CPU:1"):
const_4 = constant_op.constant(5.0)
with g.device("/replica:0"):
const_5 = constant_op.constant(5.0)
self.assertDeviceEqual(const_0.device, "/job:ps")
self.assertDeviceEqual(const_1.device, "/device:GPU:0")
self.assertDeviceEqual(const_2.device, "/device:GPU:1")
self.assertDeviceEqual(const_3.device, "/device:CPU:0")
self.assertDeviceEqual(const_4.device, "/device:CPU:1")
self.assertDeviceEqual(const_5.device, "/replica:0")
def testExtractSubGraph(self):
graph_def = tf.GraphDef()
n1 = graph_def.node.add()
n1.name = "n1"
n1.input.extend(["n5"])
n2 = graph_def.node.add()
n2.name = "n2"
# Take the first output of the n1 node as the input.
n2.input.extend(["n1:0"])
n3 = graph_def.node.add()
n3.name = "n3"
# Add a control input (which isn't really needed by the kernel, but
# rather to enforce execution order between nodes).
n3.input.extend(["^n2"])
n4 = graph_def.node.add()
n4.name = "n4"
# It is fine to have a loops in the graph as well.
n5 = graph_def.node.add()
n5.name = "n5"
n5.input.extend(["n1"])
sub_graph = graph_util.extract_sub_graph(graph_def, ["n3"])
self.assertEqual("n1", sub_graph.node[0].name)
self.assertEqual("n2", sub_graph.node[1].name)
self.assertEqual("n3", sub_graph.node[2].name)
self.assertEqual("n5", sub_graph.node[3].name)
def testConvertVariablesToConsts(self):
with tf.Graph().as_default():
variable_node = tf.Variable(1.0, name="variable_node")
_ = tf.Variable(1.0, name="unused_variable_node")
output_node = tf.mul(variable_node, 2.0, name="output_node")
with tf.Session() as sess:
init = tf.initialize_variables([variable_node])
sess.run(init)
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
variable_graph_def = sess.graph.as_graph_def()
# First get the constant_graph_def when variable_names_whitelist is set,
# note that if variable_names_whitelist is not set an error will be
# thrown because unused_variable_node is not initialized.
constant_graph_def = graph_util.convert_variables_to_constants(
sess, variable_graph_def, ["output_node"],
variable_names_whitelist=set(["variable_node"]))
# Then initialize the unused variable, and get another
# constant_graph_def when variable_names_whitelist is not set.
sess.run(tf.global_variables_initializer())
constant_graph_def_without_variable_whitelist = (
graph_util.convert_variables_to_constants(
sess, variable_graph_def, ["output_node"]))
# The unused variable should be cleared so the two graphs should be
# equivalent.
self.assertEqual(str(constant_graph_def),
str(constant_graph_def_without_variable_whitelist))
# Now we make sure the variable is now a constant, and that the graph still
# produces the expected result.
with tf.Graph().as_default():
_ = tf.import_graph_def(constant_graph_def, name="")
self.assertEqual(4, len(constant_graph_def.node))
for node in constant_graph_def.node:
self.assertNotEqual("Variable", node.op)
with tf.Session() as sess:
output_node = sess.graph.get_tensor_by_name("output_node:0")
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
def create_node_def(self, op, name, inputs):
new_node = tf.NodeDef()
new_node.op = op
new_node.name = name
for input_name in inputs:
new_node.input.extend([input_name])
return new_node
def create_constant_node_def(self, name, value, dtype, shape=None):
node = self.create_node_def("Const", name, [])
self.set_attr_dtype(node, "dtype", dtype)
self.set_attr_tensor(node, "value", value, dtype, shape)
return node
def set_attr_dtype(self, node, key, value):
node.attr[key].CopyFrom(tf.AttrValue(type=value.as_datatype_enum))
def set_attr_tensor(self, node, key, value, dtype, shape=None):
node.attr[key].CopyFrom(tf.AttrValue(
tensor=tensor_util.make_tensor_proto(value,
dtype=dtype,
shape=shape)))
def testRemoveTrainingNodes(self):
a_constant_name = "a_constant"
b_constant_name = "b_constant"
a_check_name = "a_check"
b_check_name = "b_check"
a_identity_name = "a_identity"
b_identity_name = "b_identity"
add_name = "add"
graph_def = tf.GraphDef()
a_constant = self.create_constant_node_def(a_constant_name,
value=1,
dtype=tf.float32,
shape=[])
graph_def.node.extend([a_constant])
a_check_node = self.create_node_def("CheckNumerics", a_check_name,
[a_constant_name])
graph_def.node.extend([a_check_node])
a_identity_node = self.create_node_def("Identity", a_identity_name,
[a_constant_name,
"^" + a_check_name])
graph_def.node.extend([a_identity_node])
b_constant = self.create_constant_node_def(b_constant_name,
value=1,
dtype=tf.float32,
shape=[])
graph_def.node.extend([b_constant])
b_check_node = self.create_node_def("CheckNumerics", b_check_name,
[b_constant_name])
graph_def.node.extend([b_check_node])
b_identity_node = self.create_node_def("Identity", b_identity_name,
[b_constant_name,
"^" + b_check_name])
graph_def.node.extend([b_identity_node])
add_node = self.create_node_def("Add", add_name,
[a_identity_name,
b_identity_name])
self.set_attr_dtype(add_node, "T", tf.float32)
graph_def.node.extend([add_node])
expected_output = tf.GraphDef()
a_constant = self.create_constant_node_def(a_constant_name,
value=1,
dtype=tf.float32,
shape=[])
expected_output.node.extend([a_constant])
b_constant = self.create_constant_node_def(b_constant_name,
value=1,
dtype=tf.float32,
shape=[])
expected_output.node.extend([b_constant])
add_node = self.create_node_def("Add", add_name,
[a_constant_name,
b_constant_name])
self.set_attr_dtype(add_node, "T", tf.float32)
expected_output.node.extend([add_node])
output = graph_util.remove_training_nodes(graph_def)
self.assertProtoEquals(expected_output, output)
# Run all test cases in this file under the TensorFlow test runner.
if __name__ == "__main__":
  tf.test.main()
| apache-2.0 |
Docker-J/Sail_GEE_L | scripts/build-all.py | 1182 | 9486 | #! /usr/bin/env python
# Copyright (c) 2009-2011, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno
import glob
import os
import os.path
import shutil
import subprocess
import sys
from optparse import OptionParser
version = 'build-all.py, version 0.01'
# All per-target build output goes into subdirectories of build_dir.
build_dir = '../all-kernels'
# Default make goals; may be replaced via --oldconfig / --make-target.
make_command = ["vmlinux", "modules"]
# NOTE: this aliases (does not copy) the process environment, so the
# update() below also mutates os.environ for this process.
make_env = os.environ
make_env.update({
        'ARCH': 'arm',
        'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
        'KCONFIG_NOTIMESTAMP': 'true' })
# Replaced by the OptionParser result object in main().
all_options = {}
def error(msg):
    """Report a non-fatal error message on stderr.

    The message is interpolated through a single-element tuple so that a
    tuple-valued ``msg`` is printed verbatim instead of being unpacked as
    format arguments (which would raise or misformat with ``% msg``).
    """
    sys.stderr.write("error: %s\n" % (msg,))
def fail(msg):
    """Fail with a user-printed message"""
    # Report via error() so the message lands on stderr, then abort the
    # whole script with a non-zero exit status.
    error(msg)
    sys.exit(1)
def check_kernel():
    """Ensure that PWD is a kernel directory"""
    # The presence of both files marks the tree as an MSM kernel checkout.
    have_maintainers = os.path.isfile('MAINTAINERS')
    have_msm_kconfig = os.path.isfile('arch/arm/mach-msm/Kconfig')
    if not (have_maintainers and have_msm_kconfig):
        fail("This doesn't seem to be an MSM kernel dir")
def check_build():
    """Ensure that the build directory is present, creating it if needed.

    Tolerates a concurrent invocation creating the directory between the
    isdir() check and makedirs() by ignoring EEXIST.  Requires the
    ``errno`` module (previously referenced here without being imported,
    which turned the benign race into a NameError).
    """
    if not os.path.isdir(build_dir):
        try:
            os.makedirs(build_dir)
        except OSError as exc:
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise
def update_config(file, str):
    """Append the config option line *str* to the defconfig file *file*."""
    # NOTE: the parameter names shadow the builtins `file` and `str`.
    print 'Updating %s with \'%s\'\n' % (file, str)
    defconfig = open(file, 'a')
    defconfig.write(str + '\n')
    defconfig.close()
def scan_configs():
    """Get the full list of defconfigs appropriate for this tree."""
    names = {}
    # msm/fsm, qsd and apq defconfigs are all buildable by this script.
    patterns = ('arch/arm/configs/[fm]sm[0-9-]*_defconfig',
                'arch/arm/configs/qsd*_defconfig',
                'arch/arm/configs/apq*_defconfig')
    for pattern in patterns:
        for path in glob.glob(pattern):
            # Strip the trailing '_defconfig' to obtain the target name.
            names[os.path.basename(path)[:-10]] = path
    return names
class Builder:
    """Runs one build subprocess, teeing its output into a log file.

    Unless --verbose is given, progress is shown as one dot per output
    line, wrapped every 64 dots.
    """
    def __init__(self, logname):
        self.logname = logname
        self.fd = open(logname, 'w')

    def run(self, args):
        devnull = open('/dev/null', 'r')
        proc = subprocess.Popen(args, stdin=devnull,
                env=make_env,
                bufsize=0,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
        count = 0
        # for line in proc.stdout:
        # Read from the raw fd in chunks instead of iterating lines, so
        # output appears promptly even when the child does not flush.
        rawfd = proc.stdout.fileno()
        while True:
            line = os.read(rawfd, 1024)
            if not line:
                break
            self.fd.write(line)
            self.fd.flush()
            if all_options.verbose:
                sys.stdout.write(line)
                sys.stdout.flush()
            else:
                # One progress dot per newline seen in this chunk.
                for i in range(line.count('\n')):
                    count += 1
                    if count == 64:
                        count = 0
                        print
                    sys.stdout.write('.')
                    sys.stdout.flush()
        print
        result = proc.wait()
        self.fd.close()
        return result
# Targets that failed to build when --keep-going is in effect.
failed_targets = []
def build(target):
    """Configure and build a single target in its own output directory."""
    dest_dir = os.path.join(build_dir, target)
    log_name = '%s/log-%s.log' % (build_dir, target)
    print 'Building %s in %s log %s' % (target, dest_dir, log_name)
    if not os.path.isdir(dest_dir):
        os.mkdir(dest_dir)
    defconfig = 'arch/arm/configs/%s_defconfig' % target
    dotconfig = '%s/.config' % dest_dir
    savedefconfig = '%s/defconfig' % dest_dir
    shutil.copyfile(defconfig, dotconfig)
    # Run "make <target>_defconfig" to generate the full .config.
    devnull = open('/dev/null', 'r')
    subprocess.check_call(['make', 'O=%s' % dest_dir,
        '%s_defconfig' % target], env=make_env, stdin=devnull)
    devnull.close()
    # With --updateconfigs we only regenerate configs, skipping the build.
    if not all_options.updateconfigs:
        build = Builder(log_name)
        result = build.run(['make', 'O=%s' % dest_dir] + make_command)
        if result != 0:
            if all_options.keep_going:
                failed_targets.append(target)
                fail_or_error = error
            else:
                fail_or_error = fail
            fail_or_error("Failed to build %s, see %s" % (target, build.logname))
    # Copy the defconfig back.
    if all_options.configs or all_options.updateconfigs:
        devnull = open('/dev/null', 'r')
        subprocess.check_call(['make', 'O=%s' % dest_dir,
            'savedefconfig'], env=make_env, stdin=devnull)
        devnull.close()
        shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
    """Build each target in turn, failing at the end if any target failed."""
    print "Building %d target(s)" % len(targets)
    for target in targets:
        # Apply --updateconfigs edits to the defconfig before building.
        if all_options.updateconfigs:
            update_config(allconf[target], all_options.updateconfigs)
        build(target)
    # Only non-empty when --keep-going collected failures in build().
    if failed_targets:
        fail('\n '.join(["Failed targets:"] +
            [target for target in failed_targets]))
def main():
    """Entry point: parse command-line options and build the requested targets."""
    global make_command
    check_kernel()
    check_build()
    configs = scan_configs()
    usage = ("""
    %prog [options] all -- Build all targets
    %prog [options] target target ... -- List specific targets
    %prog [options] perf -- Build all perf targets
    %prog [options] noperf -- Build all non-perf targets""")
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--configs', action='store_true',
            dest='configs',
            help="Copy configs back into tree")
    parser.add_option('--list', action='store_true',
            dest='list',
            help='List available targets')
    parser.add_option('-v', '--verbose', action='store_true',
            dest='verbose',
            help='Output to stdout in addition to log file')
    parser.add_option('--oldconfig', action='store_true',
            dest='oldconfig',
            help='Only process "make oldconfig"')
    parser.add_option('--updateconfigs',
            dest='updateconfigs',
            help="Update defconfigs with provided option setting, "
                 "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
    parser.add_option('-j', '--jobs', type='int', dest="jobs",
            help="Number of simultaneous jobs")
    parser.add_option('-l', '--load-average', type='int',
            dest='load_average',
            help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
    parser.add_option('-k', '--keep-going', action='store_true',
            dest='keep_going', default=False,
            help="Keep building other targets if a target fails")
    parser.add_option('-m', '--make-target', action='append',
            help='Build the indicated make target (default: %s)' %
                 ' '.join(make_command))
    (options, args) = parser.parse_args()
    # Stash the parsed options where build()/build_many()/Builder read them.
    global all_options
    all_options = options
    if options.list:
        print "Available targets:"
        for target in configs.keys():
            print " %s" % target
        sys.exit(0)
    # --oldconfig replaces the make goals; --make-target overrides them.
    if options.oldconfig:
        make_command = ["oldconfig"]
    elif options.make_target:
        make_command = options.make_target
    if options.jobs:
        make_command.append("-j%d" % options.jobs)
    if options.load_average:
        make_command.append("-l%d" % options.load_average)
    # Positional arguments select which subset of defconfigs to build.
    if args == ['all']:
        build_many(configs, configs.keys())
    elif args == ['perf']:
        targets = []
        for t in configs.keys():
            if "perf" in t:
                targets.append(t)
        build_many(configs, targets)
    elif args == ['noperf']:
        targets = []
        for t in configs.keys():
            if "perf" not in t:
                targets.append(t)
        build_many(configs, targets)
    elif len(args) > 0:
        targets = []
        for t in args:
            if t not in configs.keys():
                parser.error("Target '%s' not one of %s" % (t, configs.keys()))
            targets.append(t)
        build_many(configs, targets)
    else:
        parser.error("Must specify a target to build, or 'all'")
# Allow the script to be imported without side effects.
if __name__ == "__main__":
    main()
| gpl-2.0 |
Barrog/C4-Datapack | data/jscript/quests/37_PleaseMakeMeFormalWear/__init__.py | 1 | 3555 | # Made by disKret
import sys
from net.sf.l2j.gameserver.model.quest import State
from net.sf.l2j.gameserver.model.quest import QuestState
from net.sf.l2j.gameserver.model.quest.jython import QuestJython as JQuest
# Item ids used by this quest.  The first four are the materials the
# player collects; FORMAL_WEAR is the final reward handed out at the end.
MYSTERIOUS_CLOTH = 7076
JEWEL_BOX = 7077
SEWING_KIT = 7078
DRESS_SHOES_BOX = 7113
FORMAL_WEAR = 6408
# Intermediate quest items given and taken while progressing through conds.
SIGNET_RING = 7164
ICE_WINE = 7160
BOX_OF_COOKIES = 7159
class Quest (JQuest) :
    """Quest 37 "Please Make Me Formal Wear": state machine for the
    dialog events (onEvent) and NPC conversations (onTalk)."""

    def __init__(self, id, name, descr):
        JQuest.__init__(self, id, name, descr)

    def onEvent(self, event, st):
        # By default the event name doubles as the html page to show.
        htmltext = event
        if event == "7842-1.htm":
            # Quest accepted: move to cond 1.
            st.set("cond", "1")
            st.setState(STARTED)
            st.playSound("ItemSound.quest_accept")
        elif event == "8520-1.htm":
            st.giveItems(SIGNET_RING, 1)
            st.set("cond", "2")
        elif event == "8521-1.htm":
            st.giveItems(ICE_WINE, 1)
            st.set("cond", "3")
        elif event == "8627-1.htm":
            # The wine must still be in inventory to hand it over.
            if st.getQuestItemsCount(ICE_WINE):
                st.takeItems(ICE_WINE, 1)
                st.set("cond", "4")
            else:
                htmltext = "You don't have enough materials"
        elif event == "8521-3.htm":
            st.giveItems(BOX_OF_COOKIES, 1)
            st.set("cond", "5")
        elif event == "8520-3.htm":
            st.set("cond", "6")
        elif event == "8520-5.htm":
            # All three materials are required before tailoring starts.
            if st.getQuestItemsCount(MYSTERIOUS_CLOTH) and st.getQuestItemsCount(JEWEL_BOX) and st.getQuestItemsCount(SEWING_KIT):
                st.takeItems(MYSTERIOUS_CLOTH, 1)
                st.takeItems(JEWEL_BOX, 1)
                st.takeItems(SEWING_KIT, 1)
                st.set("cond", "7")
            else:
                htmltext = "You don't have enough materials"
        elif event == "8520-7.htm":
            # Final exchange: shoes for the formal wear, quest complete.
            if st.getQuestItemsCount(DRESS_SHOES_BOX):
                st.takeItems(DRESS_SHOES_BOX, 1)
                st.giveItems(FORMAL_WEAR, 1)
                st.setState(COMPLETED)
                st.set("cond", "0")
                st.playSound("ItemSound.quest_finish")
            else:
                htmltext = "You don't have enough materials"
        return htmltext

    def onTalk(self, npc, st):
        htmltext = "<html><head><body>I have nothing to say you</body></html>"
        npcId = npc.getNpcId()
        id = st.getState()
        if id == CREATED:
            st.set("cond", "0")
        cond = int(st.get("cond"))
        if npcId == 7842 and cond == 0:
            # Quest starter NPC: check completion state and level gate.
            if id == COMPLETED:
                htmltext = "<html><head><body>This quest has already been completed.</body></html>"
            elif st.getPlayer().getLevel() >= 60:
                htmltext = "7842-0.htm"
                return htmltext
            else:
                htmltext = "7842-2.htm"
                st.exitQuest(1)
        elif npcId == 8520 and cond == 1:
            htmltext = "8520-0.htm"
        elif npcId == 8521 and st.getQuestItemsCount(SIGNET_RING):
            st.takeItems(SIGNET_RING, 1)
            htmltext = "8521-0.htm"
        elif npcId == 8627 and st.getQuestItemsCount(ICE_WINE):
            htmltext = "8627-0.htm"
        elif npcId == 8521 and cond == 4:
            htmltext = "8521-2.htm"
        elif npcId == 8520 and st.getQuestItemsCount(BOX_OF_COOKIES):
            st.takeItems(BOX_OF_COOKIES, 1)
            htmltext = "8520-2.htm"
        elif npcId == 8520 and st.getQuestItemsCount(MYSTERIOUS_CLOTH) and st.getQuestItemsCount(JEWEL_BOX) and st.getQuestItemsCount(SEWING_KIT):
            htmltext = "8520-4.htm"
        elif npcId == 8520 and st.getQuestItemsCount(DRESS_SHOES_BOX):
            htmltext = "8520-6.htm"
        return htmltext
# Instantiate the quest and its three states; the quest starts in CREATED.
QUEST = Quest(37,"37_PleaseMakeMeFormalWear","Please Make Me Formal Wear")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
# NPC 7842 starts the quest; 8520/8521/8627 only talk once it is underway.
QUEST.addStartNpc(7842)
CREATED.addTalkId(7842)
STARTED.addTalkId(8520)
STARTED.addTalkId(8521)
STARTED.addTalkId(8627)
print "importing quests: 37: Please Make Me Formal Wear"
| gpl-2.0 |
Laurawly/tvm-1 | python/tvm/relay/expr.py | 2 | 15511 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, invalid-name, unused-import
"""The expression nodes of Relay."""
from __future__ import absolute_import
from numbers import Number as _Number
import numpy as _np
import tvm._ffi
from tvm._ffi import base as _base
from tvm.runtime import NDArray, ndarray as _nd
from tvm.ir import RelayExpr, GlobalVar
from .base import RelayNode
from . import _ffi_api
from . import ty as _ty
# alias relay expr as Expr.
Expr = RelayExpr
# will be registered afterwards: left None here and filled in by the
# operator module once it is loaded (used by the ExprWithOp dunders).
_op_make = None
class ExprWithOp(RelayExpr):
    """Basetype of all relay expressions that defines op overloading."""

    def astype(self, dtype):
        """Cast the content type of the current data to dtype.

        Parameters
        ----------
        dtype : str
            The target data type.

        Note
        ----
        This function only works for TensorType Exprs.

        Returns
        -------
        result : tvm.relay.Expr
            The result expression.
        """
        return _ffi_api.cast(self, dtype)

    def _binary_op(self, op_name, other):
        # Shared dispatch for the arithmetic/comparison dunders below.
        # `other` must already be a relay expression: plain numbers have
        # to be wrapped by the caller with `const` first.
        if isinstance(other, Expr):
            return getattr(_op_make, op_name)(self, other)
        if isinstance(other, _Number):
            raise TypeError('convert "%s" with `const` first' % str(other))
        raise TypeError("type %s not supported" % str(type(other)))

    def _reflected_unsupported(self, other):
        # Reflected subtraction/division are never auto-wrapped either.
        if isinstance(other, _Number):
            raise TypeError('convert "%s" with `const` first' % str(other))
        raise TypeError("type %s not supported" % str(type(other)))

    def __neg__(self):
        return _op_make.negative(self)

    def __lt__(self, other):
        return self._binary_op("less", other)

    def __gt__(self, other):
        return self._binary_op("greater", other)

    def __ge__(self, other):
        return self._binary_op("greater_equal", other)

    def __le__(self, other):
        return self._binary_op("less_equal", other)

    def __add__(self, other):
        return self._binary_op("add", other)

    def __radd__(self, other):
        return self.__add__(other)

    def __sub__(self, other):
        return self._binary_op("subtract", other)

    def __rsub__(self, other):
        return self._reflected_unsupported(other)

    def __mul__(self, other):
        return self._binary_op("multiply", other)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __div__(self, other):
        return self._binary_op("divide", other)

    def __rdiv__(self, other):
        return self._reflected_unsupported(other)

    def __truediv__(self, other):
        return self.__div__(other)

    def __rtruediv__(self, other):
        return self.__rdiv__(other)

    def __call__(self, *args):
        """Call the variable (if it represents a function).

        Parameters
        ----------
        args: List[relay.Expr]
            The arguments to the call.

        Returns
        -------
        call: Call
            A call taking the variable as a function.
        """
        return Call(self, args)
@tvm._ffi.register_object("relay.Constant")
class Constant(ExprWithOp):
    """A constant expression in Relay.
    Parameters
    ----------
    data : tvm.nd.NDArray
        The data content of the constant expression.
    """
    def __init__(self, data):
        # Build the underlying C++ relay.Constant node through the FFI.
        self.__init_handle_by_constructor__(_ffi_api.Constant, data)
@tvm._ffi.register_object("relay.Tuple")
class Tuple(ExprWithOp):
    """Tuple expression that groups several fields together.
    Parameters
    ----------
    fields : List[tvm.relay.Expr]
        The fields in the tuple.
    span: Optional[tvm.relay.Span]
        Span that points to original source code
    """
    def __init__(self, fields, span=None):
        self.__init_handle_by_constructor__(_ffi_api.Tuple, fields, span)
    def __getitem__(self, index):
        # Guard only against overruns; other index values are passed
        # through to the underlying fields container unchanged.
        if index >= len(self):
            raise IndexError("Tuple index out of range")
        return self.fields[index]
    def __len__(self):
        return len(self.fields)
    def astype(self, _):
        # Casting is a tensor-only operation; tuples cannot be cast.
        raise TypeError("astype cannot be used on tuple")
@tvm._ffi.register_object("relay.Var")
class Var(ExprWithOp):
    """A local variable in Relay.
    Local variable can be used to declare input
    arguments to a function, or intermediate variables.
    Parameters
    ----------
    name_hint: str
        The name of the variable.
        This name only acts as a hint, and is not used
        for equality.
    type_annotation: tvm.relay.Type, optional
        The type annotation on the variable.
    """
    def __init__(self, name_hint, type_annotation=None):
        self.__init_handle_by_constructor__(_ffi_api.Var, name_hint, type_annotation)
    @property
    def name_hint(self):
        """Get name hint of the current var."""
        # The actual name lives on the variable's id node (`vid`).
        name = str(self.vid.name_hint)
        return name
@tvm._ffi.register_object("relay.Call")
class Call(ExprWithOp):
    """Function call node in Relay.
    Call node corresponds the operator application node
    in computational graph terminology.
    Parameters
    ----------
    op: tvm.ir.Op or any tvm.relay.Expr with function type.
        The operation to be called.
    args: List[tvm.relay.Expr]
        The arguments to the call.
    attrs: Optional[tvm.Attrs]
        Attributes to the call, can be None
    type_args: Optional[List[tvm.relay.Type]]
        The additional type arguments, this is only
        used in advanced usecase of template functions.
    span: Optional[tvm.relay.Span]
        Span that points to original source code
    """
    def __init__(self, op, args, attrs=None, type_args=None, span=None):
        # Normalize a missing/empty type_args to a fresh empty list for
        # the FFI constructor.
        if not type_args:
            type_args = []
        self.__init_handle_by_constructor__(_ffi_api.Call, op, args, attrs, type_args, span)
@tvm._ffi.register_object("relay.Let")
class Let(ExprWithOp):
    """Let variable binding expression.
    Parameters
    ----------
    variable: tvm.relay.Var
        The local variable to be bound.
    value: tvm.relay.Expr
        The value to be bound.
    body: tvm.relay.Expr
        The body of the let binding.
    """
    def __init__(self, variable, value, body):
        # Build the underlying C++ relay.Let node through the FFI.
        self.__init_handle_by_constructor__(_ffi_api.Let, variable, value, body)
@tvm._ffi.register_object("relay.If")
class If(ExprWithOp):
    """A conditional expression in Relay.
    Parameters
    ----------
    cond: tvm.relay.Expr
        The condition.
    true_branch: tvm.relay.Expr
        The expression evaluated when condition is true.
    false_branch: tvm.relay.Expr
        The expression evaluated when condition is false.
    """
    def __init__(self, cond, true_branch, false_branch):
        # Build the underlying C++ relay.If node through the FFI.
        self.__init_handle_by_constructor__(_ffi_api.If, cond, true_branch, false_branch)
@tvm._ffi.register_object("relay.TupleGetItem")
class TupleGetItem(ExprWithOp):
    """Get index-th item from a tuple.
    Parameters
    ----------
    tuple_value: tvm.relay.Expr
        The input tuple expression.
    index: int
        The index.
    """
    def __init__(self, tuple_value, index):
        # Build the underlying C++ relay.TupleGetItem node through the FFI.
        self.__init_handle_by_constructor__(_ffi_api.TupleGetItem, tuple_value, index)
@tvm._ffi.register_object("relay.RefCreate")
class RefCreate(ExprWithOp):
    """Create a new reference from initial value.
    Parameters
    ----------
    value: tvm.relay.Expr
        The initial value.
    """
    def __init__(self, value):
        # Build the underlying C++ relay.RefCreate node through the FFI.
        self.__init_handle_by_constructor__(_ffi_api.RefCreate, value)
@tvm._ffi.register_object("relay.RefRead")
class RefRead(ExprWithOp):
    """Get the value inside the reference.
    Parameters
    ----------
    ref: tvm.relay.Expr
        The reference.
    """
    def __init__(self, ref):
        # Build the underlying C++ relay.RefRead node through the FFI.
        self.__init_handle_by_constructor__(_ffi_api.RefRead, ref)
@tvm._ffi.register_object("relay.RefWrite")
class RefWrite(ExprWithOp):
    """
    Update the value inside the reference.
    The whole expression will evaluate to an empty tuple.
    Parameters
    ----------
    ref: tvm.relay.Expr
        The reference.
    value: tvm.relay.Expr
        The new value.
    """
    def __init__(self, ref, value):
        # Build the underlying C++ relay.RefWrite node through the FFI.
        self.__init_handle_by_constructor__(_ffi_api.RefWrite, ref, value)
class TempExpr(ExprWithOp):
    """Baseclass of all TempExpr.
    TempExprs are pass specific expression that can be
    useful to define intermediate result in the
    rewriting pass such as layout or type transformation.
    """
    def realize(self):
        """Convert the expression to a normal(non-temp) Expr.
        Returns
        -------
        The corresponding normal expression.
        """
        # Delegates to the C++ side, which knows how to lower each
        # concrete TempExpr subclass.
        return _ffi_api.TempExprRealize(self)
class TupleWrapper(object):
    """Python-side view over a relay tuple expression of known arity.

    Allows a fixed-size relay tuple to be indexed and measured like a
    native Python tuple while leaving the wrapped expression intact.

    Parameters
    ----------
    tuple_value: tvm.relay.Expr
        The input tuple

    size: int
        The size of the tuple.
    """

    def __init__(self, tuple_value, size):
        self.tuple_value = tuple_value
        self.size = size

    def astuple(self):
        """Returns the underlying Relay tuple if this wrapper is passed
        as an argument to an FFI function."""
        return self.tuple_value

    def astext(self):
        """Get the text format of the tuple expression.

        Returns
        -------
        text : str
            The text format of the tuple expression.
        """
        return self.tuple_value.astext()

    def __getitem__(self, index):
        # Only guard against overruns; the projection itself is lazy.
        if not index < len(self):
            raise IndexError("Tuple index out of range")
        return TupleGetItem(self.tuple_value, index)

    def __len__(self):
        return self.size

    def __repr__(self):
        return "TupleWrapper(" + repr(self.tuple_value) + ", " + str(self.size) + ")"

    def astype(self, _):
        # Casting is a tensor-only operation; tuples cannot be cast.
        raise TypeError("astype cannot be used on tuple")
def var(name_hint, type_annotation=None, shape=None, dtype="float32"):
    """Create a new tvm.relay.Var.

    A convenience wrapper that lets callers specify a shape and dtype
    directly instead of constructing a full type annotation.

    Parameters
    ----------
    name_hint: str
        The name of the variable.  This name only acts as a hint, and is
        not used for equality.

    type_annotation: Optional[tvm.relay.Type, str]
        The type annotation on the variable.  When given as a str, a
        scalar variable of that dtype is created.

    shape: Optional[List[tvm.Expr]]
        The shape of the tensor type; mutually exclusive with
        type_annotation.

    dtype: str, optional
        The data type of the tensor (only used together with shape).

    Examples
    --------
    .. code-block:: python

       # The following 4 lines are equivalent to each other
       x = tvm.relay.Var("x", tvm.relay.TensorType([1, 2]))
       x = tvm.relay.var("x", tvm.relay.TensorType([1, 2]))
       x = tvm.relay.var("x", shape=[1, 2])
       x = tvm.relay.var("x", shape=[1, 2], dtype="float32")

       # The following 2 lines are equivalent to each other.
       y = tvm.relay.var("x", "float32")
       y = tvm.relay.var("x", shape=(), dtype="float32")
    """
    if type_annotation is not None and shape is not None:
        raise ValueError("Can only specify either type_annotation or shape.")
    # A shape implies a TensorType annotation; a bare dtype string means
    # a scalar tensor of that dtype.
    if shape is not None:
        type_annotation = _ty.TensorType(shape, dtype)
    elif isinstance(type_annotation, str):
        type_annotation = _ty.TensorType((), type_annotation)
    return Var(name_hint, type_annotation)
def const(value, dtype=None):
    """Create a constant value.

    Parameters
    ----------
    value: Union[bool, int, float, numpy.ndarray, tvm.nd.NDArray]
        The constant value.

    dtype: str, optional
        The data type of the resulting constant.

    Note
    ----
    When dtype is None, we use the following rule:

    - int maps to "int32"
    - float maps to "float32"
    - bool maps to "bool"
    - other using the same default rule as numpy.
    """
    if isinstance(value, (_base.numeric_types, bool, list)):
        value = _np.array(value, dtype=dtype)
        if not dtype:
            # Narrow numpy's 64-bit defaults: int -> int32, float -> float32.
            narrow = {_np.dtype("int64"): _np.int32,
                      _np.dtype("float64"): _np.float32}
            dtype = narrow.get(value.dtype, None)
    if isinstance(value, (_np.ndarray, _np.generic)):
        if dtype is not None:
            value = value.astype(dtype)
        value = _nd.array(value)
    if not isinstance(value, _nd.NDArray):
        raise ValueError("value has to be scalar or NDArray")
    return Constant(value)
def bind(expr, binds):
    """Bind free variables in expr or function arguments.

    We can bind parameters of expr if it is a function.

    Parameters
    ----------
    expr : tvm.relay.Expr
        The input expression.
    binds : Map[tvm.relay.Var, tvm.relay.Expr]
        The specific bindings.

    Returns
    -------
    result : tvm.relay.Expr
        The expression or function after binding.
    """
    # Delegates to the C++ FFI implementation.
    return _ffi_api.Bind(expr, binds)
| apache-2.0 |
JasonCormie/ansible-modules-extras | storage/netapp/netapp_e_amg_role.py | 27 | 8160 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: netapp_e_amg_role
short_description: Update the role of a storage array within an Asynchronous Mirror Group (AMG).
description:
- Update a storage array to become the primary or secondary instance in an asynchronous mirror group
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
example:
- https://prod-1.wahoo.acme.com/devmgr/v2
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
ssid:
description:
- The ID of the primary storage array for the async mirror action
required: yes
role:
description:
- Whether the array should be the primary or secondary array for the AMG
required: yes
choices: ['primary', 'secondary']
noSync:
description:
- Whether to avoid synchronization prior to role reversal
required: no
default: no
choices: [yes, no]
force:
description:
- Whether to force the role reversal regardless of the online-state of the primary
required: no
default: no
"""
EXAMPLES = """
- name: Update the role of a storage array
netapp_e_amg_role:
name: updating amg role
role: primary
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
"""
RETURN = """
msg:
description: Failure message
returned: failure
type: string
sample: "No Async Mirror Group with the name."
"""
import json
import time

from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
# Common HTTP headers sent with every request to the WebServices proxy.
HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json",
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None,
            force_basic_auth=True, ignore_errors=False):
    """Issue an HTTP(S) request and decode the JSON response.

    Returns a tuple ``(status_code, data)`` where ``data`` is the parsed
    JSON response body or None when the body is empty.

    Raises Exception on a non-JSON body or an HTTP status >= 400 unless
    ``ignore_errors`` is True.
    """
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout,
                     validate_certs=validate_certs, url_username=url_username,
                     url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        # Error responses still carry a body; keep it so the caller can
        # inspect the status code and payload below.
        err = get_exception()
        r = err.fp
    raw_data = None
    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            # BUGFIX: previously the *request* payload (the ``data`` argument)
            # leaked back to the caller when the response body was empty.
            raw_data = None
            data = None
    except Exception:
        # Narrowed from a bare ``except:``; covers read and JSON decode errors.
        if not ignore_errors:
            raise Exception(raw_data)
    resp_code = r.getcode()
    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    return resp_code, data
def has_match(module, ssid, api_url, api_pwd, api_usr, body, name):
    """Look up the Async Mirror Group labelled *name* on array *ssid*.

    Returns a tuple ``(amg_exists, has_desired_role, amg_id, amg_data)``.
    If several AMGs share the label, the last match wins (original behavior).
    """
    amg_exists = False
    has_desired_role = False
    amg_id = None
    amg_data = None
    get_amgs = 'storage-systems/%s/async-mirrors' % ssid
    url = api_url + get_amgs
    try:
        amg_rc, amgs = request(url, url_username=api_usr, url_password=api_pwd,
                               headers=HEADERS)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt pass through.
        module.fail_json(msg="Failed to find AMGs on storage array. Id [%s]" % (ssid))
    for amg in amgs:
        if amg['label'] == name:
            amg_exists = True
            amg_id = amg['id']
            amg_data = amg
            if amg['localRole'] == body.get('role'):
                has_desired_role = True
    return amg_exists, has_desired_role, amg_id, amg_data
def update_amg(module, ssid, api_url, api_usr, api_pwd, body, amg_id):
    """POST a role-change request for an AMG and wait until it completes.

    Returns the final AMG status document from the proxy.
    Fails the module on any request error.
    """
    endpoint = 'storage-systems/%s/async-mirrors/%s/role' % (ssid, amg_id)
    url = api_url + endpoint
    post_data = json.dumps(body)
    try:
        request(url, data=post_data, method='POST', url_username=api_usr,
                url_password=api_pwd, headers=HEADERS)
    except Exception:
        err = get_exception()
        module.fail_json(
            msg="Failed to change role of AMG. Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, str(err)))
    status_endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, amg_id)
    status_url = api_url + status_endpoint
    try:
        rc, status = request(status_url, method='GET', url_username=api_usr,
                             url_password=api_pwd, headers=HEADERS)
    except Exception:
        err = get_exception()
        module.fail_json(
            msg="Failed to check status of AMG after role reversal. " +
                "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, str(err)))
    # Here we wait for the role reversal to complete
    if 'roleChangeProgress' in status:
        while status['roleChangeProgress'] != "none":
            # BUGFIX: poll once per second instead of hammering the proxy
            # in a tight busy-wait loop.
            time.sleep(1)
            try:
                rc, status = request(status_url, method='GET',
                                     url_username=api_usr, url_password=api_pwd, headers=HEADERS)
            except Exception:
                err = get_exception()
                module.fail_json(
                    msg="Failed to check status of AMG after role reversal. " +
                        "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, str(err)))
    return status
def main():
    """Module entry point: parse arguments and reverse the AMG role if needed."""
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True, type='str'),
        role=dict(required=True, choices=['primary', 'secondary']),
        noSync=dict(required=False, type='bool', default=False),
        force=dict(required=False, type='bool', default=False),
        ssid=dict(required=True, type='str'),
        api_url=dict(required=True),
        api_username=dict(required=False),
        api_password=dict(required=False, no_log=True),
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    # Pull connection details out of the params; what remains becomes
    # the request body for the role-change call.
    params = module.params
    ssid = params.pop('ssid')
    api_url = params.pop('api_url')
    user = params.pop('api_username')
    pwd = params.pop('api_password')
    name = params.pop('name')
    if not api_url.endswith('/'):
        api_url += '/'

    amg_exists, has_desired_role, amg_id, amg_data = has_match(
        module, ssid, api_url, pwd, user, params, name)

    if not amg_exists:
        module.fail_json(msg="No Async Mirror Group with the name: '%s' was found" % name)
    elif has_desired_role:
        # Nothing to do: the array already holds the requested role.
        module.exit_json(changed=False, **amg_data)
    else:
        amg_data = update_amg(module, ssid, api_url, user, pwd, params, amg_id)
        if amg_data:
            module.exit_json(changed=True, **amg_data)
        else:
            module.exit_json(changed=True, msg="AMG role changed.")


if __name__ == '__main__':
    main()
| gpl-3.0 |
crankyadmin/shadowsocks | shadowsocks/local.py | 1015 | 2248 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import logging
import signal
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from shadowsocks import shell, daemon, eventloop, tcprelay, udprelay, asyncdns
def main():
    """Start the shadowsocks local client.

    Reads the configuration, optionally daemonizes, then runs a TCP relay,
    a UDP relay and an async DNS resolver on a single event loop until
    interrupted.
    """
    shell.check_python()
    # fix py2exe
    if hasattr(sys, "frozen") and sys.frozen in \
            ("windows_exe", "console_exe"):
        p = os.path.dirname(os.path.abspath(sys.executable))
        os.chdir(p)
    config = shell.get_config(True)
    daemon.daemon_exec(config)
    try:
        logging.info("starting local at %s:%d" %
                     (config['local_address'], config['local_port']))
        dns_resolver = asyncdns.DNSResolver()
        tcp_server = tcprelay.TCPRelay(config, dns_resolver, True)
        udp_server = udprelay.UDPRelay(config, dns_resolver, True)
        loop = eventloop.EventLoop()
        dns_resolver.add_to_loop(loop)
        tcp_server.add_to_loop(loop)
        udp_server.add_to_loop(loop)

        def handler(signum, _):
            # BUGFIX: logging.warn() is a deprecated alias (removed in
            # Python 3.13); use logging.warning() instead.
            logging.warning('received SIGQUIT, doing graceful shutting down..')
            tcp_server.close(next_tick=True)
            udp_server.close(next_tick=True)
        # SIGQUIT does not exist on Windows; fall back to SIGTERM there.
        signal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM), handler)

        def int_handler(signum, _):
            sys.exit(1)
        signal.signal(signal.SIGINT, int_handler)

        daemon.set_user(config.get('user', None))
        loop.run()
    except Exception as e:
        shell.print_exception(e)
        sys.exit(1)


if __name__ == '__main__':
    main()
| apache-2.0 |
ESG-Leipzig/Homepage-2015 | manage.py | 2 | 2413 | import os
import re
import sys
from django.utils.crypto import get_random_string
DEPLOYMENT_DIRECTORY_NAME = 'esg_leipzig_homepage_2015_deployment'
def create_deployment_dir():
    """Create the deployment directory with a settings and a wsgi file.

    Does nothing if the directory already exists.
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))
    deployment_dir = os.path.join(base_dir, DEPLOYMENT_DIRECTORY_NAME)
    if os.path.exists(deployment_dir):
        return
    os.makedirs(deployment_dir)
    template_dir = os.path.join(base_dir, 'esg_leipzig_homepage_2015')

    # Render settings.py from the default template, filling in a fresh
    # SECRET_KEY and the WSGI application path.
    with open(os.path.join(template_dir, 'default_settings.py')) as default_settings:
        content = default_settings.read()
    secret_key = get_random_string(
        50,
        'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)')
    content = re.sub(
        r"SECRET_KEY = ''",
        "SECRET_KEY = '%s'" % secret_key,
        content)
    wsgi_app = '%s.wsgi.application' % DEPLOYMENT_DIRECTORY_NAME
    content = re.sub(
        r"WSGI_APPLICATION = ''",
        "WSGI_APPLICATION = '%s'" % wsgi_app,
        content)
    with open(os.path.join(deployment_dir, 'settings.py'), 'w') as new_settings:
        new_settings.write(content)

    # Render wsgi.py from the default template, pointing it at the
    # generated settings module.
    with open(os.path.join(template_dir, 'default_wsgi.py')) as default_wsgi:
        wsgi_content = default_wsgi.read()
    django_settings_module = '%s.settings' % DEPLOYMENT_DIRECTORY_NAME
    wsgi_content = re.sub(
        r"DJANGO_SETTINGS_MODULE = ''",
        "DJANGO_SETTINGS_MODULE = '%s'" % django_settings_module,
        wsgi_content)
    with open(os.path.join(deployment_dir, 'wsgi.py'), 'w') as new_wsgi:
        new_wsgi.write(wsgi_content)
if __name__ == "__main__":
    # Ensure the deployment directory (settings + wsgi) exists before
    # dispatching to Django's management commands.
    create_deployment_dir()
    os.environ.setdefault(
        'DJANGO_SETTINGS_MODULE',
        '%s.settings' % DEPLOYMENT_DIRECTORY_NAME)
    # Imported late: Django must see DJANGO_SETTINGS_MODULE first.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| mit |
tahmid-tanzim/youtube-dl | setup.py | 107 | 3233 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os.path
import warnings
import sys
try:
    from setuptools import setup
    setuptools_available = True
except ImportError:
    # Fall back to distutils when setuptools is not installed.
    from distutils.core import setup
    setuptools_available = False

try:
    # This will create an exe that needs Microsoft Visual C++ 2008
    # Redistributable Package
    import py2exe
except ImportError:
    if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
        print("Cannot import py2exe", file=sys.stderr)
        exit(1)

# py2exe build configuration: bundle everything into one compressed exe.
py2exe_options = {
    "bundle_files": 1,
    "compressed": 1,
    "optimize": 2,
    "dist_dir": '.',
    "dll_excludes": ['w9xpopen.exe'],
}

py2exe_console = [{
    "script": "./youtube_dl/__main__.py",
    "dest_base": "youtube-dl",
}]

py2exe_params = {
    'console': py2exe_console,
    'options': {"py2exe": py2exe_options},
    'zipfile': None
}

if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
    params = py2exe_params
else:
    # Normal install: ship shell completions, docs and the man page as
    # data files, skipping any that have not been generated yet.
    files_spec = [
        ('etc/bash_completion.d', ['youtube-dl.bash-completion']),
        ('etc/fish/completions', ['youtube-dl.fish']),
        ('share/doc/youtube_dl', ['README.txt']),
        ('share/man/man1', ['youtube-dl.1'])
    ]
    # NOTE(review): 'root' appears unused below — confirm before removing.
    root = os.path.dirname(os.path.abspath(__file__))
    data_files = []
    for dirname, files in files_spec:
        resfiles = []
        for fn in files:
            if not os.path.exists(fn):
                warnings.warn('Skipping file %s since it is not present. Type  make  to build all automatically generated files.' % fn)
            else:
                resfiles.append(fn)
        data_files.append((dirname, resfiles))
    params = {
        'data_files': data_files,
    }
    if setuptools_available:
        params['entry_points'] = {'console_scripts': ['youtube-dl = youtube_dl:main']}
    else:
        params['scripts'] = ['bin/youtube-dl']

# Get the version from youtube_dl/version.py without importing the package
exec(compile(open('youtube_dl/version.py').read(),
             'youtube_dl/version.py', 'exec'))

setup(
    name='youtube_dl',
    version=__version__,
    description='YouTube video downloader',
    long_description='Small command-line program to download videos from'
                     ' YouTube.com and other video sites.',
    url='https://github.com/rg3/youtube-dl',
    author='Ricardo Garcia',
    author_email='ytdl@yt-dl.org',
    maintainer='Philipp Hagemeister',
    maintainer_email='phihag@phihag.de',
    packages=[
        'youtube_dl',
        'youtube_dl.extractor', 'youtube_dl.downloader',
        'youtube_dl.postprocessor'],
    # Provokes warning on most systems (why?!)
    # test_suite = 'nose.collector',
    # test_requires = ['nosetest'],
    classifiers=[
        "Topic :: Multimedia :: Video",
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "License :: Public Domain",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
    ],
    **params
)
| unlicense |
lebrice/SimpleParsing | simple_parsing/helpers/flatten.py | 1 | 6257 | import dataclasses
import warnings
from typing import *
from ..logging_utils import get_logger
logger = get_logger(__file__)
class FlattenedAccess:
    """ Allows flattened access to the attributes of all children dataclasses.

    This is meant to simplify the adoption of dataclasses for argument
    hierarchies, rather than a single-level dictionary.
    Dataclasses allow for easy, neatly separated arguments, but suffer from 2
    potential drawbacks:
    - When using a highly nested structure, having long accesses is annoying
    - The dictionary access syntax is often more natural than using getattr()
      when reading an attribute whose name is a variable.
    """

    def attributes(self,
                   recursive: bool = True,
                   prefix: str = "") -> Iterable[Tuple[str, Any]]:
        """Returns an Iterator over the attributes of the dataclass.

        Parameters
        ----------
        - recursive : bool, optional, by default True
            Whether or not to recurse and yield all the elements of the children
            dataclass attributes.
        - prefix : str, optional, by default ""
            A prefix to prepend to all the attribute names before yielding them.

        Returns
        -------
        Iterable[Tuple[str, Any]]
            An iterable of attribute names and values.

        Yields
        -------
        Iterable[Tuple[str, Any]]
            A Tuple of the form <Attribute name, attribute_value>.
        """
        for field in dataclasses.fields(self):
            if field.name not in self.__dict__:
                # the dataclass isn't yet instantiated, or the attr was deleted.
                continue
            # get the field value (without needless recursion)
            field_value = self.__dict__[field.name]
            yield prefix + field.name, field_value
            if recursive and dataclasses.is_dataclass(field_value):
                # Recurse into nested dataclasses, extending the dotted prefix.
                yield from FlattenedAccess.attributes(
                    field_value,
                    recursive=True,
                    prefix=prefix + field.name + "."
                )

    def __getattr__(self, name: str):
        """Retrieves the attribute on self, or recursively on the children.

        NOTE: `__getattr__` is always called before `__getattr__`, hence we
        always get here because `self` does not have an attribute of `name`.
        """
        # potential parents and corresponding values.
        parents: List[str] = []
        values: List[Any] = []
        for attr_name, attr_value in FlattenedAccess.attributes(self):
            # if the attribute name's last part ends with `name`, we add it to
            # some list of potential parent attributes.
            name_parts = name.split(".")
            dest_parts = attr_name.split(".")
            if dest_parts[-len(name_parts):] == name_parts:
                parents.append(attr_name)
                values.append(attr_value)
        if not parents:
            raise AttributeError(
                f"{type(self)} object has no attribute '{name}', "
                "and neither does any of its children attributes."
            )
        elif len(parents) > 1:
            # More than one nested attribute matches: refuse to guess.
            raise AttributeError(
                f"Ambiguous Attribute access: name '{name}' may refer to:\n" +
                "\n".join(f"- '{parent}' (with a value of: '{value}')"
                          for parent, value in zip(parents, values)
                )
            )
        else:
            return values[0]

    def __setattr__(self, name: str, value: Any):
        """Write the attribute in self or in the children that has it.

        If more than one child has attributes that match the given one, an
        `AttributeError` is raised.
        """
        # potential parents and corresponding values.
        parents: List[str] = []
        values: List[Any] = []
        field_names = {field.name for field in dataclasses.fields(self)}
        if name in field_names:
            # Direct field of this dataclass: set it without any search.
            object.__setattr__(self, name, value)
            return
        for attr_name, attr_value in self.attributes():
            # if the attribute name of the attribute ends with `name`, we add it
            # to some list of potential parent attributes.
            name_parts = name.split(".")
            dest_parts = attr_name.split(".")
            if dest_parts[-len(name_parts):] == name_parts:
                parents.append(attr_name)
                values.append(attr_value)
        if not parents:
            # We set the value on the dataclass directly, since it wasn't found.
            warnings.warn(UserWarning(f"Setting a new attribute '{name}' on the"
                f" dataclass, but it does not have a field of the same name. \n"
                f"(Consider adding a field '{name}' of type {type(value)} to "
                f"{type(self)})"))
            object.__setattr__(self, name, value)
        elif len(parents) > 1:
            # more than one parent (ambiguous).
            raise AttributeError(
                f"Ambiguous Attribute access: name '{name}' may refer to:\n" +
                "\n".join(f"- '{parent}' (with a value of: '{value}')"
                          for parent, value in zip(parents, values)
                )
            )
        else:
            # We recursively set the attribute.
            attr_name = parents[0]
            lineage = attr_name.split(".")[:-1]
            parent: object = self
            for parent_name in lineage:
                # NOTE: we can't use getattr, otherwise we would recurse.
                parent = object.__getattribute__(parent, parent_name)
            # destination attribute name
            dest_name = name.split(".")[-1]
            # Set the attribute on the parent.
            object.__setattr__(parent, dest_name, value)

    def __getitem__(self, key):
        # Dict-style read access delegates to (flattened) attribute access.
        return getattr(self, key)

    def __setitem__(self, key, value):
        # Dict-style write access delegates to (flattened) attribute access.
        setattr(self, key, value)

    def asdict(self) -> Dict:
        # Plain-dict view of the (possibly nested) dataclass.
        return dataclasses.asdict(self)
| mit |
caseman/grease | test/field_test.py | 1 | 9571 | import unittest
class TestData(object):
    """Simple attribute bag standing in for per-entity component data."""

    def __init__(self, entity=None, **kw):
        self.entity = entity
        # Expose every keyword argument as an instance attribute.
        for attr, val in kw.items():
            setattr(self, attr, val)
class TestComponent(dict):
    """Dict-backed fake component that auto-creates a TestData per entity."""

    def __init__(self, entities=()):
        self.entities = set(entities)

    def __getitem__(self, entity):
        # Auto-vivify a data record the first time an entity is looked up.
        if entity not in self:
            dict.__setitem__(self, entity, TestData(entity))
        return dict.__getitem__(self, entity)
class TestField(object):
    """Minimal stand-in for Field: records component and name, casts to int."""

    def __init__(self, component, name):
        self.component, self.name = component, name
        self.cast = int
class TestAccessor(object):
    """Minimal stand-in for a field accessor: keeps the field and entity set."""

    def __init__(self, field, entities):
        self.entities = entities
        self.field = field
class FieldTestCase(unittest.TestCase):
    """Tests for grease.component.field.Field construction, casting and accessors."""

    def test_basics(self):
        from grease.component.field import Field
        comp = TestComponent()
        f = Field(comp, "myfield", int)
        self.assertTrue(f.component is comp, (f.component, comp))
        self.assertTrue(f.type is int, f.type)
        self.assertEqual(f.default(), 0)

    def test_cast(self):
        from grease.component.field import Field
        from grease.geometry import Vec2d
        f = Field(None, "string", str)
        self.assertEqual(f.cast(22), "22")
        f = Field(None, "int", int)
        self.assertEqual(f.cast("42"), 42)
        f = Field(None, "vec", Vec2d)
        self.assertEqual(f.cast((11,12)), Vec2d(11,12))

    def test_accessor_default_set(self):
        # With no subset given, the accessor uses the component's entity set.
        from grease.component.field import Field
        comp = TestComponent()
        f = Field(comp, "acc_default", str, TestAccessor)
        acc = f.accessor()
        self.assertTrue(acc.field is f, (acc.field, f))
        self.assertTrue(acc.entities is comp.entities, (acc.entities, comp.entities))

    def test_accessor_subset(self):
        # A given subset is intersected with the component's entities.
        from grease.component.field import Field
        comp = TestComponent((1,2,3,4))
        f = Field(comp, "acc_default", str, TestAccessor)
        acc = f.accessor(set([2,4,6,8]))
        self.assertTrue(acc.field is f, (acc.field, f))
        self.assertEqual(acc.entities, set([2,4]))
class FieldAccessorTestCase(unittest.TestCase):
    """Tests for grease.component.field.FieldAccessor: iteration, nested
    attribute access, bulk set, queries and in-place mutation — both on a
    single component and joined across two components."""

    def test_iter(self):
        from grease.component.field import FieldAccessor
        entities = set([1,5,9])
        comp = TestComponent()
        accessor = FieldAccessor(TestField(comp, 'entity'), entities)
        self.assertEqual(sorted(iter(accessor)), [1, 5, 9])

    def test_child_attr(self):
        # Accessing an attribute on an accessor yields a nested accessor.
        from grease.component.field import FieldAccessor
        entities = set([1,2,3])
        comp = TestComponent()
        comp[1] = TestData(foo=TestData(bar=100, baz=TestData(spam=-1)))
        comp[2] = TestData(foo=TestData(bar=200, baz=TestData(spam=-2)))
        comp[3] = TestData(foo=TestData(bar=300, baz=TestData(spam=-3)))
        accessor = FieldAccessor(TestField(comp, 'foo'), entities)
        bar_acc = accessor.bar
        self.assertTrue(isinstance(bar_acc, FieldAccessor))
        self.assertEqual(bar_acc[1], 100)
        self.assertEqual(bar_acc[2], 200)
        self.assertEqual(bar_acc[3], 300)
        self.assertRaises(KeyError, lambda: bar_acc[4])
        baz_spam_acc = accessor.baz.spam
        self.assertTrue(isinstance(baz_spam_acc, FieldAccessor))
        self.assertEqual(baz_spam_acc[1], -1)
        self.assertEqual(baz_spam_acc[2], -2)
        self.assertEqual(baz_spam_acc[3], -3)
        self.assertRaises(KeyError, lambda: baz_spam_acc[4])

    def test_set(self):
        # Bulk-assignment only touches the accessor's entity subset.
        from grease.component.field import FieldAccessor
        entities = set([3,7,8])
        comp = TestComponent()
        for i in range(9):
            comp[i] = TestData(id=i, xy=TestData(x=i*10, y=i*-10))
        id_accessor = FieldAccessor(TestField(comp, 'id'), entities)
        xy_accessor = FieldAccessor(TestField(comp, 'xy'), entities)
        id_accessor.__set__(10)
        xy_accessor.x = 0
        for i in range(9):
            if i in entities:
                self.assertEqual(comp[i].id, 10)
                self.assertEqual(comp[i].xy.x, 0)
                self.assertEqual(comp[i].xy.y, i*-10)
            else:
                self.assertEqual(comp[i].id, i)
                self.assertEqual(comp[i].xy.x, i*10)
                self.assertEqual(comp[i].xy.y, i*-10)

    def test_set_join(self):
        # Assigning one accessor to another copies values over the
        # intersection of the two entity sets.
        from grease.component.field import FieldAccessor
        entities1 = set([2,3,7,8])
        entities2 = set([1,2,3])
        comp1 = TestComponent()
        comp2 = TestComponent()
        for i in range(9):
            comp1[i] = TestData(foo=i)
            comp2[i] = TestData(bar=-i)
        foo_accessor = FieldAccessor(TestField(comp1, 'foo'), entities1)
        bar_accessor = FieldAccessor(TestField(comp2, 'bar'), entities2)
        foo_accessor.__set__(bar_accessor)
        for i in range(9):
            if i in entities1 & entities2:
                self.assertEqual(comp1[i].foo, -i)
            else:
                self.assertEqual(comp1[i].foo, i)
            self.assertEqual(comp2[i].bar, -i)
        bar_accessor = FieldAccessor(TestField(comp2, 'bar'), entities1)
        foo_accessor.__set__(bar_accessor)
        for i in range(9):
            if i in entities1:
                self.assertEqual(comp1[i].foo, -i)
            else:
                self.assertEqual(comp1[i].foo, i)
            self.assertEqual(comp2[i].bar, -i)
        bar_accessor.__set__(foo_accessor)
        for i in range(9):
            if i in entities1:
                self.assertEqual(comp1[i].foo, comp2[i].bar)
            else:
                self.assertEqual(comp1[i].foo, i)
                self.assertEqual(comp2[i].bar, -i)

    def test_use_as_bool(self):
        # Truthiness follows the (non-)emptiness of the entity set.
        from grease.component.field import FieldAccessor
        field = TestField(TestComponent(), 'test')
        self.assertFalse(FieldAccessor(field, set()))
        self.assertTrue(FieldAccessor(field, set([1,2,3])))

    def test_repr(self):
        from grease.component.field import FieldAccessor
        field = TestField(TestComponent(), 'test')
        accessor = FieldAccessor(field, set())
        self.assertTrue(
            repr(accessor).startswith('<FieldAccessor test '), repr(accessor))
        self.assertTrue(
            repr(accessor.foo.bar).startswith('<FieldAccessor test.foo.bar '),
            repr(accessor))

    def test_query_ops(self):
        # Comparison operators return the set of matching entities.
        from grease.component.field import FieldAccessor
        comp = TestComponent()
        for i in range(1,4):
            comp[i] = TestData(x=i*i, pos=TestData(x=i, y=-i))
            comp[i+3] = TestData(x=i*i, pos=TestData(x=i, y=-i))
            comp[i+6] = TestData(x=i*i, pos=TestData(x=i, y=-i))
        entities = set(range(1,7))
        x_accessor = FieldAccessor(TestField(comp, 'x'), entities)
        self.assertEqual(x_accessor == 4, set([2, 5]))
        self.assertEqual(x_accessor == 0, set())
        self.assertEqual(x_accessor != 1, set([2, 3, 5, 6]))
        self.assertEqual(x_accessor != 33, set([1, 2, 3, 4, 5, 6]))
        self.assertEqual(x_accessor > 5, set([3, 6]))
        self.assertEqual(x_accessor > 9, set())
        self.assertEqual(x_accessor >= 4, set([2, 3, 5, 6]))
        self.assertEqual(x_accessor >= 10, set())
        self.assertEqual(x_accessor < 2, set([1, 4]))
        self.assertEqual(x_accessor < 1, set())
        self.assertEqual(x_accessor <= 4, set([1, 2, 4, 5]))
        self.assertEqual(x_accessor <= -1, set())
        pos_accessor = FieldAccessor(TestField(comp, 'pos'), entities)
        self.assertEqual(pos_accessor.x == 3, set([3, 6]))
        self.assertEqual(pos_accessor.x < 3, set([1, 2, 4, 5]))

    def test_query_ops_join(self):
        # Comparing two accessors intersects their entity sets first.
        from grease.component.field import FieldAccessor
        comp = TestComponent()
        entities1 = set([2,3,7,8])
        entities2 = set([1,2,3,8])
        comp1 = TestComponent()
        comp2 = TestComponent()
        for i in range(9):
            comp1[i] = TestData(foo=i)
            comp2[i] = TestData(bar=6-i)
        foo_accessor = FieldAccessor(TestField(comp1, 'foo'), entities1)
        bar_accessor = FieldAccessor(TestField(comp2, 'bar'), entities2)
        self.assertEqual(foo_accessor == bar_accessor, set([3]))
        self.assertEqual(foo_accessor > bar_accessor, set([8]))
        self.assertEqual(foo_accessor >= bar_accessor, set([3, 8]))
        self.assertEqual(foo_accessor <= bar_accessor, set([2, 3]))
        self.assertEqual(foo_accessor < bar_accessor, set([2]))
        self.assertEqual(foo_accessor != bar_accessor, set([2,8]))

    def test_inplace_mutators(self):
        # Augmented assignment mutates only the accessor's entity subset
        # and returns the same accessor object.
        from grease.component.field import FieldAccessor
        entities = set([2,6,7])
        comp = TestComponent()
        for i in range(9):
            comp[i] = TestData(size=i, xy=TestData(x=i*10, y=i*-10))
        xy_accessor = FieldAccessor(TestField(comp, 'xy'), entities)
        sa = size_accessor = FieldAccessor(TestField(comp, 'size'), entities)
        size_accessor += 1
        xy_accessor.y += 5
        self.assertTrue(sa is size_accessor, (sa, size_accessor))
        for i in range(9):
            if i in entities:
                self.assertEqual(comp[i].size, i + 1)
                self.assertEqual(comp[i].xy.y, i*-10 + 5)
            else:
                self.assertEqual(comp[i].size, i)
                self.assertEqual(comp[i].xy.y, i*-10)
            self.assertEqual(comp[i].xy.x, i*10)
        size_accessor -= 24
        size_accessor *= -12
        size_accessor /= 2
        size_accessor //= 3
        size_accessor %= 7
        size_accessor **= 2
        # TODO: these operators are broken in Python3
        # size_accessor <<= 3
        # size_accessor >>= 1
        # size_accessor |= 0x888
        # size_accessor &= 0xDD7
        # size_accessor ^= 0xF
        for i in range(9):
            if i in entities:
                # Mirror the same chain of operations on a plain value.
                expected = i + 1 - 24
                expected *= -12
                expected /= 2
                expected //= 3
                expected %= 7
                expected **= 2
                # expected <<= 3
                # expected >>= 1
                # expected |= 0x888
                # expected &= 0xDD7
                # expected ^= 0xF
                self.assertEqual(comp[i].size, expected)
            else:
                self.assertEqual(comp[i].size, i)
            self.assertEqual(comp[i].xy.x, i*10)

    def test_inplace_mutators_join(self):
        # Augmented assignment between two accessors applies over the
        # intersection of their entity sets.
        from grease.component.field import FieldAccessor
        comp = TestComponent()
        entities1 = set([2,3,7,8])
        entities2 = set([1,2,3,8])
        comp1 = TestComponent()
        comp2 = TestComponent()
        for i in range(9):
            comp1[i] = TestData(foo=i)
            comp2[i] = TestData(bar=i/2 + 1)
        foo_accessor = FieldAccessor(TestField(comp1, 'foo'), entities1)
        bar_accessor = FieldAccessor(TestField(comp2, 'bar'), entities2)
        foo_accessor += bar_accessor
        for i in range(9):
            if i in entities1 & entities2:
                self.assertEqual(comp1[i].foo, i + i/2 + 1)
            else:
                self.assertEqual(comp1[i].foo, i)
            self.assertEqual(comp2[i].bar, i/2 + 1)
        foo_accessor -= bar_accessor
        for i in range(9):
            self.assertEqual(comp1[i].foo, i)
            self.assertEqual(comp2[i].bar, i/2 + 1)
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
| mit |
Telegea/Smarthub-software | src/mqttclient/mqttc-device.py | 2 | 21033 | #!/usr/bin/env python
###############################################################################
#
# MQTT client for the Telegea Smarthub device (CMD Receiver)
#
# Author: Ondrej Wisniewski
#
# Features:
# - handles commands sent from the Telegea server to the Smarthub
#
# Changelog:
# 02-10-2015: Initial version
# 12-11-2015: Added Service commands for ssh tunnel handling
# 30-11-2016: Integrate Modbus master
# Handle multiple Modbus operations request
# Added command Id to request and response message
# 05-12-2016: Changed default Timeout and Retries constants
# Changed Modbus request response format
# 27-02-2017: Add twos-complement conversion to do_modbusRequest
# 28-02-2017: Add do_thermostatData for handling thermostat data
# 28-02-2017: Replace plant id with unique client id
# Improve error handling
# 09-05-2017: Add season handling for thermostat data
# 05-07-2017: Add do_configData for handling configuration data
# 14-09-2017: Send dead message to broker on exit
# 18-01-2018: Add operation_mode definition to do_thermostatData
# 07-02-2018: Add Modbus40 workaround
# 27-02-2018: Remove twos complement conversion for Modbus values
#
# Copyright 2015-2018, DEK Italia
#
# This file is part of the Telegea platform.
#
# Telegea is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Telegea. If not, see <http://www.gnu.org/licenses/>.
#
#
# Initialization:
# * client connects to broker
# * client subscribes to command messages
# * client publishes "I'm alive" message
# * client waits for command
#
# Loop:
# * client receives command from server
# * client performs command
# * client publishes commands reply
#
# Uses the Paho MQTT Python client library
# https://www.eclipse.org/paho/clients/python
# Install with "pip install paho-mqtt"
#
# Uses the PyModbus Python library
# https://github.com/bashwork/pymodbus
# Install with "pip install -U pymodbus" or "apt-get install python-pymodbus"
#
###############################################################################
import paho.mqtt.client as mqtt
import argparse
import subprocess
import signal
import syslog
import json
import uuid
from pymodbus.client.sync import ModbusTcpClient
from pymodbus.constants import Defaults
# Client version string.
VERSION="0.10.2"

# Override the ModbusTcpClient default settings
Defaults.Timeout = 5 # default is 3
Defaults.Retries = 1 # default is 3

# Unique client Id (random UUID generated once at startup)
myId=str(uuid.uuid4())
clientId="smarthub-"+myId

# TLS certificate file used for the broker connection
caCerts="/root/mqttclient/ca.crt"
###########################################################
#
# Calculate the twos complement of a 16 bit interger value
#
###########################################################
def twos_comp16(val):
    """Interpret *val* as a signed 16-bit two's-complement integer.

    Values with bit 15 set map into the negative range; all others
    are returned unchanged.
    """
    return val - 0x10000 if val & 0x8000 else val
###########################################################
#
# Perform Modbus request (read or write)
#
###########################################################
def modbusReq(reg, mod, val):
    """Perform a single Modbus register read or write via the local TCP gateway.

    reg -- holding register address
    mod -- Modbus slave id; also selects the gateway TCP port (5000 + mod)
    val -- value to write, or None to read the register

    Returns the register value (read result, or the written value on a
    successful write), or None on any failure.
    """
    error = False
    ip_addr = ipAddr  # module-level gateway address set elsewhere
    tcp_port = 5000 + mod
    client = ModbusTcpClient(host=ip_addr, port=tcp_port)
    if not client.connect():
        syslog.syslog(syslog.LOG_ERR, "Connection to Modbus slave failed")
        return None
    if val is None:
        # Read request: expect function code 3 in the reply.
        fc = 3
        try:
            reply = client.read_holding_registers(address=reg, unit=mod)
        except Exception:
            # Narrowed from a bare ``except:``.
            error = True
    else:
        # Write request: expect function code 6 in the reply.
        fc = 6
        try:
            reply = client.write_register(address=reg, unit=mod, value=val)
        except Exception:
            error = True
    client.close()
    if error:
        syslog.syslog(syslog.LOG_ERR, "Error executing Modbus request")
        return None
    if reply is None:
        syslog.syslog(syslog.LOG_ERR, "No reply to Modbus request")
        return None
    if reply.function_code != fc:
        syslog.syslog(syslog.LOG_ERR, "Modbus request returned wrong function code: "+str(reply.function_code))
        return None
    if val is None:
        val = reply.registers[0]
    return val
###########################################################
#
# Command handler for Modbus requests
#
###########################################################
def do_modbusRequest(cprm):
    """Handle command type 1: a batch of Modbus read/write requests.

    Command format:
        1 <id> [{"mod":<modAddr>,"reg":<regAddr>,"val":<regVal>}, ...]
    ('val' omitted => read the register; present => write it.)

    Each request is executed via modbusReq() and the collected results
    are published on the reply topic as {"result": [...]}.
    """
    syslog.syslog(syslog.LOG_NOTICE, "Received Modbus request")
    reply = []
    if len(cprm) > 2:
        # First parameter is the command Id
        cmdId = cprm[1]
        # Second parameter is the Modbus request list in JSON format
        try:
            devCmd = json.loads(cprm[2])
            # Execute each request from the list in order.
            for req in devCmd:
                reg = req['reg']
                mod = req['mod']
                val = req.get('val')  # None => read request
                if reg > 40000:
                    # FIXME:NIBE Modbus40 workaround: registers above 40000
                    # are rebased and always routed to unit 5.
                    val = modbusReq(reg-40000, 5, val)
                else:
                    val = modbusReq(reg, mod, val)
                reply.append({'mod':mod,'reg':reg,'val':val})
        except (ValueError, KeyError, TypeError):
            syslog.syslog(syslog.LOG_ERR,"JSON format error in request")
        # Publish reply with command result (whitespace stripped to keep
        # the payload compact).
        result = {"result":reply}
        msg = cmdId+" "+json.dumps(result).replace(" ", "")
        client.publish(topic_reply, msg, 0, False)
    else:
        syslog.syslog(syslog.LOG_ERR, "Command missing required parameters")
    return
# Service command types used by do_serviceCmd(): maps the command type
# string received over MQTT to the shell command line to execute.
srvCmdType = {
    "1": "/etc/init.d/ssh_tunnel start",
    "2": "/etc/init.d/ssh_tunnel stop"
    # TODO: add more commands here when needed
}
###########################################################
#
# Command handler for Service command
#
###########################################################
def do_serviceCmd(cprm):
    """Handle command type 2: run a predefined service command.

    Command format:
        2 <id> <service command type>

    The type is looked up in srvCmdType and executed with subprocess;
    the command's output (or "ERROR") is published on the reply topic.
    """
    syslog.syslog(syslog.LOG_NOTICE, "Received Service command")
    if len(cprm) > 2:
        # First parameter is the command Id
        cmdId = cprm[1]
        # Second parameter is the service command type
        try:
            srvCmd = srvCmdType[cprm[2]]
            syslog.syslog(syslog.LOG_NOTICE, "srvCmd = %s" % srvCmd)
            try:
                reply = subprocess.check_output(srvCmd.split())
            except (subprocess.CalledProcessError, OSError):
                # Narrowed from a bare except (the intended exception types
                # were already noted in a comment): only command failures
                # are expected here, anything else should propagate.
                reply = "ERROR"
                syslog.syslog(syslog.LOG_ERR, "Service command execution failed")
        except KeyError:
            reply = "ERROR"
            syslog.syslog(syslog.LOG_ERR, "Unknown service command type: %s" % cprm[2])
        # Publish reply with command result
        # NOTE(review): on Python 3 check_output returns bytes, which would
        # break this str concatenation -- confirm the target interpreter.
        msg = cmdId+" "+reply
        client.publish(topic_reply, msg, 0, False)
    else:
        syslog.syslog(syslog.LOG_ERR, "Command missing required parameters")
    return
###########################################################
#
# Command handler for Thermostat data
#
###########################################################
def do_thermostatData(cprm):
    """Handle command type 3: update a zone's thermostat schedule.

    Command format:
        3 <id> {"zone":<zone>,"season":<season>,"daily_schedule":[...]}

    The received daily schedule is merged into the local file
    /media/data/thermostat<zone><season>.json (read-modify-write in
    'r+' mode) and the operation mode is forced to 'daily_schedule'.
    On success a {"result": "OK"} reply is published; on any error the
    problem is logged and no reply is sent.
    """
    DATADIR="/media/data/"
    BASENAME="thermostat"
    result = {"result":"OK"}
    # command format:
    # 3 <id> {"zone":<zone>,"season":<season>"daily_schedule":[...]}
    syslog.syslog(syslog.LOG_NOTICE, "Received Thermostat data")
    if len(cprm) > 2:
        # First parameter is the command Id
        cmdId = cprm[1]
        # Second parameter is the thermostat data JSON format
        try:
            thDataUpdate = json.loads(cprm[2])
            zone = thDataUpdate['zone']
            season = thDataUpdate['season']
            schedule = thDataUpdate['daily_schedule']
        except (ValueError, KeyError, TypeError):
            syslog.syslog(syslog.LOG_ERR,"JSON format error in request data")
            syslog.syslog(syslog.LOG_ERR,cprm[2])
            return
        # Update local thermostat data with received data
        filename = DATADIR+BASENAME+str(zone)+season+".json"
        try:
            with open(filename, 'r+') as jsonFile:
                try:
                    thData = json.load(jsonFile)
                    thData['daily_schedule'] = schedule
                    thData['operation_mode'] = 'daily_schedule'
                except (ValueError, KeyError, TypeError):
                    syslog.syslog(syslog.LOG_ERR,"JSON format error in local data")
                    return
                # Rewrite the file in place: rewind, dump the updated data,
                # then truncate leftover bytes from the old (possibly
                # longer) content.
                jsonFile.seek(0)
                json.dump(thData,jsonFile)
                jsonFile.truncate()
        except IOError:
            syslog.syslog(syslog.LOG_ERR,"Local JSON file "+filename+" not found")
            return
        # Publish reply with command result
        msg = cmdId+" "+json.dumps(result)
        client.publish(topic_reply, msg, 0, False)
    else:
        syslog.syslog(syslog.LOG_ERR, "Command missing required parameters")
    return;
###########################################################
#
# Command handler for Configuration data
#
###########################################################
def do_configData(cprm):
    """Handle command type 4: merge alarm configuration data.

    Command format:
        4 <id> {"sensor_threshold":[...],"control_timeout":[...]}

    Both sections are optional.  The received register lists are merged
    into the local alarmconfig.json file: entries whose 'reg' address
    matches an existing local entry are updated in place (optional keys
    missing from the update are deleted locally), unknown registers are
    appended.  A {"result": "OK"} reply is published on success; errors
    are logged and no reply is sent.
    """
    DATADIR="/media/data/"
    FILENAME="alarmconfig.json"
    result = {"result":"OK"}
    # command format:
    # 4 <id> {"sensor_threshold":[...],"control_timeout":[...]}
    syslog.syslog(syslog.LOG_NOTICE, "Received Configuration data")
    if len(cprm) > 2:
        # First parameter is the command Id
        cmdId = cprm[1]
        # Second parameter is the configuration data JSON format
        try:
            cDataUpdate = json.loads(cprm[2])
        except (ValueError, TypeError):
            syslog.syslog(syslog.LOG_ERR,"JSON format error in request data")
            syslog.syslog(syslog.LOG_ERR,cprm[2])
            return
        # Both sections are optional in the request; missing ones stay None.
        try:
            sensor_threshold_new = cDataUpdate['sensor_threshold']
        except (ValueError, KeyError, TypeError):
            sensor_threshold_new = None
        try:
            control_timeout_new = cDataUpdate['control_timeout']
        except (ValueError, KeyError, TypeError):
            control_timeout_new = None
        # Read local configuration data from file
        filename = DATADIR+FILENAME
        try:
            with open(filename, 'r+') as jsonFile:
                try:
                    cData = json.load(jsonFile)
                except (ValueError, KeyError, TypeError):
                    syslog.syslog(syslog.LOG_ERR,"JSON format error in local data")
                    return
                try:
                    sensor_threshold = cData['sensor_threshold']
                except (ValueError, KeyError, TypeError):
                    sensor_threshold = None
                try:
                    control_timeout = cData['control_timeout']
                except (ValueError, KeyError, TypeError):
                    control_timeout = None
                # Update sensor threshold configuration
                if sensor_threshold and sensor_threshold_new:
                    # Check if new register is already in local config
                    for reg_new in sensor_threshold_new:
                        found = False
                        for reg in sensor_threshold:
                            if reg_new['reg'] == reg['reg']:
                                found = True
                                break
                        if found:
                            # Register is already present,
                            # update with new data.
                            # NOTE: 'reg' deliberately still refers to the
                            # matching local entry left bound by the search
                            # loop above.
                            reg['alias'] = reg_new['alias']
                            # 'min'/'max' are optional: copy them when the
                            # update has them, otherwise drop them locally.
                            try:
                                reg['min'] = reg_new['min']
                            except KeyError:
                                try:
                                    del reg['min']
                                except KeyError:
                                    pass
                            try:
                                reg['max'] = reg_new['max']
                            except KeyError:
                                try:
                                    del reg['max']
                                except KeyError:
                                    pass
                        else:
                            # Insert new register config
                            sensor_threshold.append(reg_new)
                    cData['sensor_threshold'] = sensor_threshold
                else:
                    # No local section yet: take the received one verbatim.
                    if sensor_threshold_new:
                        cData['sensor_threshold'] = sensor_threshold_new
                # Update control timeout configuration
                if control_timeout and control_timeout_new:
                    # Check if new register is already in local config
                    for reg_new in control_timeout_new:
                        found = False
                        for reg in control_timeout:
                            if reg_new['reg'] == reg['reg']:
                                found = True
                                break
                        if found:
                            # Register is already present,
                            # update with new data
                            reg['alias'] = reg_new['alias']
                            # 'timeout' is optional, same handling as above.
                            try:
                                reg['timeout'] = reg_new['timeout']
                            except KeyError:
                                try:
                                    del reg['timeout']
                                except KeyError:
                                    pass
                        else:
                            # Insert new register config
                            control_timeout.append(reg_new)
                    cData['control_timeout'] = control_timeout
                else:
                    if control_timeout_new:
                        cData['control_timeout'] = control_timeout_new
                # Update local file in place: rewind, dump, truncate any
                # leftover bytes from the old content.
                jsonFile.seek(0)
                json.dump(cData,jsonFile)
                jsonFile.truncate()
        except IOError:
            syslog.syslog(syslog.LOG_ERR,"Local JSON file "+filename+" not found")
            return
        # Publish reply with command result
        msg = cmdId+" "+json.dumps(result)
        client.publish(topic_reply, msg, 0, False)
    else:
        syslog.syslog(syslog.LOG_ERR, "Command missing required parameters")
    return;
###########################################################
#
# Command handler for future use
#
###########################################################
def do_whatever(cprm):
    """Placeholder handler for command type 5 (reserved for future use).

    Logs the command and acknowledges it with a plain "OK" reply.
    """
    syslog.syslog(syslog.LOG_NOTICE, "Received Future use command")
    # Acknowledge unconditionally; no command data is inspected yet.
    client.publish(topic_reply, "OK", 0, False)
    return
###########################################################
#
# The callback for when the client receives a CONNACK
# response from the broker.
#
###########################################################
def on_connect(client, userdata, flags, rc):
    """Paho callback invoked when the broker acknowledges the connection.

    Bug fix: paho-mqtt 1.x (which this file requires, since it uses
    loop_forever(retry_first_connection=True)) invokes on_connect with
    four positional arguments (client, userdata, flags, rc).  The
    previous three-argument signature raised a TypeError on every
    CONNACK.
    """
    syslog.syslog(syslog.LOG_NOTICE, "Connected to MQTT broker")
    # Subscribing in on_connect() means that if we lose the connection and
    # reconnect then subscriptions will be renewed.
    client.subscribe(topic_command)
    # Send alive message to broker so subscribers know we are connected
    client.publish(topic_will, msg_alive, 0, False)
    return
###########################################################
#
# The callback for when the client disconnects from
# the broker.
#
###########################################################
def on_disconnect(client, userdata, rc):
    """Paho callback invoked when the connection to the broker drops.

    A zero result code means we disconnected deliberately; anything
    else is an unexpected loss of connection and is logged.
    """
    if rc == 0:
        return
    syslog.syslog(syslog.LOG_NOTICE, "Disconnected unexpectedly from MQTT with code " + str(rc))
# Command types used by on_message(): dispatch table mapping the command
# type string (first space-separated token of the MQTT payload) to its
# handler function.
cmdTypeHandler = {
    "1": do_modbusRequest,
    "2": do_serviceCmd,
    "3": do_thermostatData,
    "4": do_configData,
    "5": do_whatever
}
###########################################################
#
# The callback for when a PUBLISH message is received
# from the broker.
#
###########################################################
def on_message(client, userdata, msg):
    """Paho callback for PUBLISH messages on the command topic.

    Payload format: "<cmdType> <cmdId> <cmdData>".  The handler for
    cmdType is looked up in cmdTypeHandler; unknown types are logged.
    """
    # Bug fix: paho delivers the payload as bytes on Python 3; str() on
    # bytes yields "b'...'" which breaks parsing.  Decode instead (on
    # Python 2, str.decode() yields an equivalent unicode string).
    if isinstance(msg.payload, bytes):
        cmdstr = msg.payload.decode('utf-8', 'replace')
    else:
        cmdstr = str(msg.payload)
    syslog.syslog(syslog.LOG_NOTICE, "Received command from broker: " + cmdstr)
    # Check payload length
    if len(cmdstr) > 0:
        # get parameters seperated by space
        # Format: "<cmdType> <cmdId> <cmdData>"
        cmdprm = cmdstr.split(' ', 2)
        cmdType = cmdprm[0]
        # Handle command according to cmd type
        try:
            cmdTypeHandler[cmdType](cmdprm)
        except KeyError:
            syslog.syslog(syslog.LOG_ERR, "Unknown command type: %s" % cmdType)
    return
###########################################################
#
# The signal handler for the TERM and INT signal
#
###########################################################
def on_signal(signum, frame):
    """Signal handler for SIGTERM/SIGINT: shut the MQTT client down cleanly."""
    # Tell subscribers on the will topic that we are going away; a clean
    # disconnect prevents the broker from publishing the will for us.
    client.publish(topic_will, msg_dead, 0, False)
    # disconnect() makes loop_forever() return, which terminates the daemon.
    client.disconnect()
    syslog.syslog(syslog.LOG_NOTICE, "Exiting MQTT client daemon")
###########################################################
#
# Main program
#
###########################################################
# Parse command line
parser = argparse.ArgumentParser()
parser.add_argument('--brokerAddr', '-b', type=str, help='Broker address', required=True)
parser.add_argument('--plantId', '-p', type=int, help='Plant Id', required=True)
parser.add_argument('--ipAddr', '-i', type=str, help='ModbusTCP slave IP address', required=False)
args = parser.parse_args()
# Get parameters
brokerAddr = args.brokerAddr
plantId = str(args.plantId)
# Fall back to the local host when no Modbus slave address was given.
if args.ipAddr is not None:
    ipAddr = args.ipAddr
else:
    ipAddr = "127.0.0.1"
# Init syslog
syslog.openlog("mqttc", syslog.LOG_PID|syslog.LOG_CONS, syslog.LOG_USER)
syslog.syslog(syslog.LOG_NOTICE, "Starting MQTT client daemon (version "+VERSION+")")
syslog.syslog(syslog.LOG_NOTICE, " with parameters brokerAddr="+brokerAddr+" plantId="+plantId)
# Define topics (all scoped under the plant id)
topic_command = "smartbox/" + plantId + "/command"
topic_reply = "smartbox/" + plantId + "/reply"
topic_will = "smartbox/" + plantId + "/deadoralive"
# Define messages to publish on the dead-or-alive (will) topic
msg_alive = "plant " + plantId + " is alive"
msg_dead = "plant "+ plantId + " is dead"
# Init signal handlers for clean shutdown
signal.signal(signal.SIGTERM, on_signal)
signal.signal(signal.SIGINT, on_signal)
# Init mqtt client object
client = mqtt.Client(clientId)
client.on_connect = on_connect
client.on_disconnect = on_disconnect
client.on_message = on_message
client.will_set(topic_will, msg_dead, 0, False)
client.tls_set(ca_certs=caCerts)
# Handle first connection here ourselfs
# We leave this here as reference only, will handle first
# connection in the main client.loop_forever() instead
#########################################################
#connected = False
#while connected==False:
#    try:
#        connected = True
#        # Connect to the Telegea mqtt broker
#        client.connect(brokerAddr, 1883, 60)
#    except socket.error:
#        err = sys.exc_info()[0]
#        syslog.syslog(syslog.LOG_ERR, "Error %s reconnecting to broker" % err)
#        connected = False
#        time.sleep(10)
#        syslog.syslog(syslog.LOG_DEBUG, "DEBUG: try again")
#########################################################
# Connect asynchronously over TLS (port 8883); the network loop below
# performs the actual (re)connection.
client.connect_async(brokerAddr, 8883, 60)
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting (also for first connection).
client.loop_forever(retry_first_connection=True)
| gpl-3.0 |
MarkTseng/django-farmersale | farmersale-env/lib/python2.7/site-packages/django/contrib/gis/maps/google/overlays.py | 102 | 11931 | from __future__ import unicode_literals
from django.contrib.gis.geos import fromstr, Point, LineString, LinearRing, Polygon
from django.utils.functional import total_ordering
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class GEvent(object):
    """
    Python wrapper for the Google Maps API GEvent object.

    An event may be attached to any overlay derived from GOverlayBase
    via its add_event() method; rendering the event yields the argument
    list for the JavaScript GEvent call.

    See the Google Maps API Reference:
    http://code.google.com/apis/maps/documentation/reference.html#GEvent

    Example:

      from django.shortcuts import render_to_response
      from django.contrib.gis.maps.google import GoogleMap, GEvent, GPolyline

      def sample_request(request):
          polyline = GPolyline('LINESTRING(101 26, 112 26, 102 31)')
          event = GEvent('click',
                         'function() { location.href = "http://www.google.com"}')
          polyline.add_event(event)
          return render_to_response('mytemplate.html',
                                    {'google': GoogleMap(polylines=[polyline])})
    """

    def __init__(self, event, action):
        """
        Store the event name and its JavaScript handler.

        event  -- name of the event, e.g. 'click'; must be an event the
                  Google Maps API recognises for the target object.
                  Django performs no validation of the event type.
        action -- JavaScript function source, e.g.
                  'function() { location.href = "newurl";}'.
                  Must be valid JavaScript; again Django does not
                  validate the function.
        """
        self.event = event
        self.action = action

    def __str__(self):
        "Return the parameter part of a GEvent (event name plus handler)."
        return mark_safe('"{0}", {1}'.format(self.event, self.action))
@python_2_unicode_compatible
class GOverlayBase(object):
    """Common behaviour shared by every Google overlay wrapper."""

    def __init__(self):
        # GEvents attached via add_event(); rendered by the map template.
        self.events = []

    def latlng_from_coords(self, coords):
        "Generate a JavaScript array of GLatLng objects for the given coordinates."
        # Note the (x, y) -> (lat=y, lng=x) axis swap for the JS API.
        latlngs = ['new GLatLng(%s,%s)' % (y, x) for x, y in coords]
        return '[%s]' % ','.join(latlngs)

    def add_event(self, event):
        "Attach a GEvent to the overlay object."
        self.events.append(event)

    def __str__(self):
        "The string representation is the JavaScript API call."
        return mark_safe('%s(%s)' % (self.__class__.__name__, self.js_params))
class GPolygon(GOverlayBase):
    """
    A Python wrapper for the Google GPolygon object.  For more information
    please see the Google Maps API Reference:
    http://code.google.com/apis/maps/documentation/reference.html#GPolygon
    """
    def __init__(self, poly,
                 stroke_color='#0000ff', stroke_weight=2, stroke_opacity=1,
                 fill_color='#0000ff', fill_opacity=0.4):
        """
        The GPolygon object initializes on a GEOS Polygon or a parameter that
        may be instantiated into GEOS Polygon.  Please note that this will not
        depict a Polygon's internal rings.

        Keyword Options:

          stroke_color:
            The color of the polygon outline. Defaults to '#0000ff' (blue).

          stroke_weight:
            The width of the polygon outline, in pixels. Defaults to 2.

          stroke_opacity:
            The opacity of the polygon outline, between 0 and 1. Defaults to 1.

          fill_color:
            The color of the polygon fill. Defaults to '#0000ff' (blue).

          fill_opacity:
            The opacity of the polygon fill. Defaults to 0.4.
        """
        # Coerce WKT strings and coordinate sequences into a GEOS Polygon.
        if isinstance(poly, six.string_types):
            poly = fromstr(poly)
        if isinstance(poly, (tuple, list)):
            # A tuple/list is taken as ring coordinates for a new Polygon.
            poly = Polygon(poly)
        if not isinstance(poly, Polygon):
            raise TypeError('GPolygon may only initialize on GEOS Polygons.')
        # Getting the envelope of the input polygon (used for automatically
        # determining the zoom level).
        self.envelope = poly.envelope
        # Translating the coordinates into a JavaScript array of
        # Google `GLatLng` objects.  Only the exterior ring is rendered.
        self.points = self.latlng_from_coords(poly.shell.coords)
        # Stroke settings.
        self.stroke_color, self.stroke_opacity, self.stroke_weight = stroke_color, stroke_opacity, stroke_weight
        # Fill settings.
        self.fill_color, self.fill_opacity = fill_color, fill_opacity
        super(GPolygon, self).__init__()

    @property
    def js_params(self):
        "Render the argument list for the JavaScript GPolygon constructor."
        return '%s, "%s", %s, %s, "%s", %s' % (self.points, self.stroke_color, self.stroke_weight, self.stroke_opacity,
                                               self.fill_color, self.fill_opacity)
class GPolyline(GOverlayBase):
    """
    A Python wrapper for the Google GPolyline object.  For more information
    please see the Google Maps API Reference:
    http://code.google.com/apis/maps/documentation/reference.html#GPolyline
    """
    def __init__(self, geom, color='#0000ff', weight=2, opacity=1):
        """
        The GPolyline object may be initialized on GEOS LineString, LinearRing,
        and Polygon objects (internal rings not supported) or a parameter that
        may be instantiated into one of the above geometries.

        Keyword Options:

          color:
            The color to use for the polyline. Defaults to '#0000ff' (blue).

          weight:
            The width of the polyline, in pixels. Defaults to 2.

          opacity:
            The opacity of the polyline, between 0 and 1. Defaults to 1.
        """
        # If a GEOS geometry isn't passed in, try to construct one.
        if isinstance(geom, six.string_types):
            geom = fromstr(geom)
        if isinstance(geom, (tuple, list)):
            # NOTE(review): a coordinate sequence is wrapped in a Polygon,
            # so only its exterior ring becomes the polyline -- confirm
            # this (rather than LineString) is the intended behaviour.
            geom = Polygon(geom)
        # Generating the lat/lng coordinate pairs.
        if isinstance(geom, (LineString, LinearRing)):
            self.latlngs = self.latlng_from_coords(geom.coords)
        elif isinstance(geom, Polygon):
            # Polygons contribute only their exterior ring.
            self.latlngs = self.latlng_from_coords(geom.shell.coords)
        else:
            raise TypeError('GPolyline may only initialize on GEOS LineString, LinearRing, and/or Polygon geometries.')
        # Getting the envelope for automatic zoom determination.
        self.envelope = geom.envelope
        self.color, self.weight, self.opacity = color, weight, opacity
        super(GPolyline, self).__init__()

    @property
    def js_params(self):
        "Render the argument list for the JavaScript GPolyline constructor."
        return '%s, "%s", %s, %s' % (self.latlngs, self.color, self.weight, self.opacity)
@total_ordering
class GIcon(object):
    """
    Creates a GIcon object to pass into a Gmarker object.

    The keyword arguments map to instance attributes of the same name. These,
    in turn, correspond to a subset of the attributes of the official GIcon
    javascript object:

    http://code.google.com/apis/maps/documentation/reference.html#GIcon

    Because a Google map often uses several different icons, a name field has
    been added to the required arguments.

    Required Arguments:
        varname:
            A string which will become the basis for the js variable name of
            the marker, for this reason, your code should assign a unique
            name for each GIcon you instantiate, otherwise there will be
            name space collisions in your javascript.

    Keyword Options:
        image:
            The url of the image to be used as the icon on the map defaults
            to 'G_DEFAULT_ICON'

        iconsize:
            a tuple representing the pixel size of the foreground (not the
            shadow) image of the icon, in the format: (width, height) ex.:

            GIcon('fast_food',
                  image="/media/icon/star.png",
                  iconsize=(15,10))

            Would indicate your custom icon was 15px wide and 10px height.

        shadow:
            the url of the image of the icon's shadow

        shadowsize:
            a tuple representing the pixel size of the shadow image, format is
            the same as ``iconsize``

        iconanchor:
            a tuple representing the pixel coordinate relative to the top left
            corner of the icon image at which this icon is anchored to the map.
            In (x, y) format.  x increases to the right in the Google Maps
            coordinate system and y increases downwards in the Google Maps
            coordinate system.)

        infowindowanchor:
            The pixel coordinate relative to the top left corner of the icon
            image at which the info window is anchored to this icon.
    """
    def __init__(self, varname, image=None, iconsize=None,
                 shadow=None, shadowsize=None, iconanchor=None,
                 infowindowanchor=None):
        self.varname = varname
        self.image = image
        self.iconsize = iconsize
        self.shadow = shadow
        self.shadowsize = shadowsize
        self.iconanchor = iconanchor
        self.infowindowanchor = infowindowanchor

    def __eq__(self, other):
        # Icons are identified by their JS variable name.  Bug fix: defer
        # to the other operand for non-GIcon comparisons instead of
        # raising AttributeError on other.varname.
        if not isinstance(other, GIcon):
            return NotImplemented
        return self.varname == other.varname

    def __lt__(self, other):
        # total_ordering derives the remaining comparisons from this
        # and __eq__.
        if not isinstance(other, GIcon):
            return NotImplemented
        return self.varname < other.varname

    def __hash__(self):
        # XOR with hash of GIcon type so that hash('varname') won't
        # equal hash(GIcon('varname')).
        return hash(self.__class__) ^ hash(self.varname)
class GMarker(GOverlayBase):
    """
    A Python wrapper for the Google GMarker object.  For more information
    please see the Google Maps API Reference:
    http://code.google.com/apis/maps/documentation/reference.html#GMarker

    Example:

      from django.shortcuts import render_to_response
      from django.contrib.gis.maps.google.overlays import GMarker, GEvent

      def sample_request(request):
          marker = GMarker('POINT(101 26)')
          event = GEvent('click',
                         'function() { location.href = "http://www.google.com"}')
          marker.add_event(event)
          return render_to_response('mytemplate.html',
                                    {'google' : GoogleMap(markers=[marker])})
    """
    def __init__(self, geom, title=None, draggable=False, icon=None):
        """
        The GMarker object may initialize on GEOS Points or a parameter
        that may be instantiated into a GEOS point.  Keyword options map to
        GMarkerOptions -- so far only the title option is supported.

        Keyword Options:
         title:
           Title option for GMarker, will be displayed as a tooltip.

         draggable:
           Draggable option for GMarker, disabled by default.

         icon:
           Optional GIcon instance used instead of the default marker icon.
        """
        # If a GEOS geometry isn't passed in, try to construct one.
        if isinstance(geom, six.string_types):
            geom = fromstr(geom)
        if isinstance(geom, (tuple, list)):
            # A coordinate pair becomes a GEOS Point.
            geom = Point(geom)
        if isinstance(geom, Point):
            self.latlng = self.latlng_from_coords(geom.coords)
        else:
            raise TypeError('GMarker may only initialize on GEOS Point geometry.')
        # Getting the envelope for automatic zoom determination.
        self.envelope = geom.envelope
        # TODO: Add support for more GMarkerOptions
        self.title = title
        self.draggable = draggable
        self.icon = icon
        super(GMarker, self).__init__()

    def latlng_from_coords(self, coords):
        # Overrides the base implementation: a marker has a single
        # coordinate pair, rendered as one GLatLng (lat=y, lng=x).
        return 'new GLatLng(%s,%s)' % (coords[1], coords[0])

    def options(self):
        # Build the JavaScript GMarkerOptions object literal from the
        # options that were actually set.
        result = []
        if self.title:
            result.append('title: "%s"' % self.title)
        if self.icon:
            result.append('icon: %s' % self.icon.varname)
        if self.draggable:
            result.append('draggable: true')
        return '{%s}' % ','.join(result)

    @property
    def js_params(self):
        "Render the argument list for the JavaScript GMarker constructor."
        return '%s, %s' % (self.latlng, self.options())
| mit |
Nitaco/ansible | lib/ansible/module_utils/aws/cloudfront_facts.py | 40 | 11396 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Willem van Ketwich
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#
# Author:
# - Willem van Ketwich <willem@vanketwich.com.au>
#
# Common functionality to be used by the modules:
# - cloudfront_distribution
# - cloudfront_invalidation
# - cloudfront_origin_access_identity
"""
Common cloudfront facts shared between modules
"""
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn
from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
try:
import botocore
except ImportError:
pass
class CloudFrontFactsServiceManager(object):
    """Thin wrapper around the boto3 CloudFront client.

    Provides the describe/list/summary helpers shared by the
    cloudfront_distribution, cloudfront_invalidation and
    cloudfront_origin_access_identity modules.  All AWS errors are
    reported through module.fail_json_aws().
    """

    def __init__(self, module):
        self.module = module
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        self.client = boto3_conn(module, conn_type='client',
                                 resource='cloudfront', region=region,
                                 endpoint=ec2_url, **aws_connect_kwargs)

    def get_distribution(self, distribution_id):
        """Return the full description of the given distribution."""
        try:
            return self.client.get_distribution(Id=distribution_id)
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error describing distribution")

    def get_distribution_config(self, distribution_id):
        """Return only the configuration of the given distribution."""
        try:
            return self.client.get_distribution_config(Id=distribution_id)
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error describing distribution configuration")

    def get_origin_access_identity(self, origin_access_identity_id):
        """Return the full description of the given origin access identity."""
        try:
            return self.client.get_cloud_front_origin_access_identity(Id=origin_access_identity_id)
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error describing origin access identity")

    def get_origin_access_identity_config(self, origin_access_identity_id):
        """Return only the configuration of the given origin access identity."""
        try:
            return self.client.get_cloud_front_origin_access_identity_config(Id=origin_access_identity_id)
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error describing origin access identity configuration")

    def get_invalidation(self, distribution_id, invalidation_id):
        """Return the given invalidation of the given distribution."""
        try:
            return self.client.get_invalidation(DistributionId=distribution_id, Id=invalidation_id)
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error describing invalidation")

    def get_streaming_distribution(self, distribution_id):
        """Return the full description of the given streaming distribution."""
        try:
            return self.client.get_streaming_distribution(Id=distribution_id)
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error describing streaming distribution")

    def get_streaming_distribution_config(self, distribution_id):
        """Return only the configuration of the given streaming distribution."""
        try:
            return self.client.get_streaming_distribution_config(Id=distribution_id)
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error describing streaming distribution")

    def list_origin_access_identities(self):
        """Return all origin access identities as a (possibly empty) list."""
        try:
            paginator = self.client.get_paginator('list_cloud_front_origin_access_identities')
            result = paginator.paginate().build_full_result()['CloudFrontOriginAccessIdentityList']
            return result.get('Items', [])
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error listing cloud front origin access identities")

    def list_distributions(self, keyed=True):
        """Return all distributions.

        With keyed=True (default) the result is a dict keyed by both
        distribution id and every alias; otherwise it is a plain list.
        """
        try:
            paginator = self.client.get_paginator('list_distributions')
            result = paginator.paginate().build_full_result().get('DistributionList', {})
            distribution_list = result.get('Items', [])
            if not keyed:
                return distribution_list
            return self.keyed_list_helper(distribution_list)
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error listing distributions")

    def list_distributions_by_web_acl_id(self, web_acl_id):
        """Return distributions using the given web ACL, keyed by id/alias."""
        try:
            # Bug fix: boto3 expects the keyword 'WebACLId'; the previous
            # 'WebAclId' spelling failed boto3 parameter validation.
            result = self.client.list_distributions_by_web_acl_id(WebACLId=web_acl_id)
            distribution_list = result.get('DistributionList', {}).get('Items', [])
            return self.keyed_list_helper(distribution_list)
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error listing distributions by web acl id")

    def list_invalidations(self, distribution_id):
        """Return all invalidations of the given distribution as a list."""
        try:
            paginator = self.client.get_paginator('list_invalidations')
            result = paginator.paginate(DistributionId=distribution_id).build_full_result()
            return result.get('InvalidationList', {}).get('Items', [])
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error listing invalidations")

    def list_streaming_distributions(self, keyed=True):
        """Return all streaming distributions (see list_distributions)."""
        try:
            paginator = self.client.get_paginator('list_streaming_distributions')
            result = paginator.paginate().build_full_result()
            streaming_distribution_list = result.get('StreamingDistributionList', {}).get('Items', [])
            if not keyed:
                return streaming_distribution_list
            return self.keyed_list_helper(streaming_distribution_list)
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error listing streaming distributions")

    def summary(self):
        """Return a combined summary of distributions, streaming
        distributions and origin access identities."""
        summary_dict = {}
        summary_dict.update(self.summary_get_distribution_list(False))
        summary_dict.update(self.summary_get_distribution_list(True))
        summary_dict.update(self.summary_get_origin_access_identity_list())
        return summary_dict

    def summary_get_origin_access_identity_list(self):
        """Return {'origin_access_identities': [{'Id', 'ETag'}, ...]}."""
        try:
            origin_access_identity_list = {'origin_access_identities': []}
            origin_access_identities = self.list_origin_access_identities()
            for origin_access_identity in origin_access_identities:
                oai_id = origin_access_identity['Id']
                # A per-identity call is needed to obtain the ETag.
                oai_full_response = self.get_origin_access_identity(oai_id)
                oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']}
                origin_access_identity_list['origin_access_identities'].append(oai_summary)
            return origin_access_identity_list
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error generating summary of origin access identities")

    def summary_get_distribution_list(self, streaming=False):
        """Return a condensed listing of (streaming) distributions.

        Each entry carries the common summary keys plus aliases, ETag
        and -- for regular distributions -- web ACL id, invalidations
        and tags.
        """
        try:
            list_name = 'streaming_distributions' if streaming else 'distributions'
            key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled']
            distribution_list = {list_name: []}
            distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False)
            for dist in distributions:
                temp_distribution = {}
                for key_name in key_list:
                    temp_distribution[key_name] = dist[key_name]
                temp_distribution['Aliases'] = [alias for alias in dist['Aliases'].get('Items', [])]
                temp_distribution['ETag'] = self.get_etag_from_distribution_id(dist['Id'], streaming)
                if not streaming:
                    temp_distribution['WebACLId'] = dist['WebACLId']
                    invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist['Id'])
                    if invalidation_ids:
                        temp_distribution['Invalidations'] = invalidation_ids
                resource_tags = self.client.list_tags_for_resource(Resource=dist['ARN'])
                temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', []))
                distribution_list[list_name].append(temp_distribution)
            return distribution_list
        except Exception as e:
            # Merged the previously duplicated ClientError/Exception
            # clauses -- both performed the exact same failure handling.
            self.module.fail_json_aws(e, msg="Error generating summary of distributions")

    def get_etag_from_distribution_id(self, distribution_id, streaming):
        """Return the ETag of a (streaming) distribution."""
        if not streaming:
            distribution = self.get_distribution(distribution_id)
        else:
            distribution = self.get_streaming_distribution(distribution_id)
        return distribution['ETag']

    def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id):
        """Return the ids of all invalidations of the given distribution."""
        try:
            invalidation_ids = []
            invalidations = self.list_invalidations(distribution_id)
            for invalidation in invalidations:
                invalidation_ids.append(invalidation['Id'])
            return invalidation_ids
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error getting list of invalidation ids")

    def get_distribution_id_from_domain_name(self, domain_name):
        """Return the id of the first (streaming) distribution that carries
        domain_name as an alias (case-insensitive), or "" when none does."""
        try:
            distributions = self.list_distributions(False)
            distributions += self.list_streaming_distributions(False)
            for dist in distributions:
                if 'Items' in dist['Aliases']:
                    for alias in dist['Aliases']['Items']:
                        if str(alias).lower() == domain_name.lower():
                            # Bug fix: the previous 'break' only left the
                            # inner loop, so a later distribution could
                            # silently override an earlier match.
                            return dist['Id']
            return ""
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error getting distribution id from domain name")

    def get_aliases_from_distribution_id(self, distribution_id):
        """Return the alias list configured on the given distribution."""
        try:
            distribution = self.get_distribution(distribution_id)
            return distribution['DistributionConfig']['Aliases'].get('Items', [])
        except botocore.exceptions.ClientError as e:
            self.module.fail_json_aws(e, msg="Error getting list of aliases from distribution_id")

    def keyed_list_helper(self, list_to_key):
        """Index a distribution list by both id and every alias.

        Note: each item appears once per key, so entries are shared
        (not copied) between keys.
        """
        keyed_list = dict()
        for item in list_to_key:
            distribution_id = item['Id']
            if 'Items' in item['Aliases']:
                aliases = item['Aliases']['Items']
                for alias in aliases:
                    keyed_list.update({alias: item})
            keyed_list.update({distribution_id: item})
        return keyed_list
| gpl-3.0 |
nkgilley/home-assistant | homeassistant/components/wink/alarm_control_panel.py | 7 | 2331 | """Support Wink alarm control panels."""
import logging
import pywink
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
)
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED,
)
from . import DOMAIN, WinkDevice
_LOGGER = logging.getLogger(__name__)
STATE_ALARM_PRIVACY = "Private"
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Wink platform."""
    known_ids = hass.data[DOMAIN]["unique_ids"]
    for device in pywink.get_cameras():
        # get_cameras() returns multiple device types; only devices that
        # raise AttributeError from capability() are added here (the
        # others are sensors handled elsewhere).
        try:
            device.capability()
        except AttributeError:
            unique_id = device.object_id() + device.name()
            if unique_id not in known_ids:
                add_entities([WinkCameraDevice(device, hass)])
class WinkCameraDevice(WinkDevice, alarm.AlarmControlPanelEntity):
    """Representation a Wink camera alarm."""

    async def async_added_to_hass(self):
        """Call when entity is added to hass."""
        self.hass.data[DOMAIN]["entities"]["alarm_control_panel"].append(self)

    @property
    def state(self):
        """Return the state of the device."""
        # Wink mode -> HA alarm state; "home" means disarmed on Wink,
        # "night" maps to armed-home. Unknown modes yield None.
        mode_to_state = {
            "away": STATE_ALARM_ARMED_AWAY,
            "home": STATE_ALARM_DISARMED,
            "night": STATE_ALARM_ARMED_HOME,
        }
        return mode_to_state.get(self.wink.state())

    @property
    def supported_features(self) -> int:
        """Return the list of supported features."""
        return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY

    def alarm_disarm(self, code=None):
        """Send disarm command."""
        self.wink.set_mode("home")

    def alarm_arm_home(self, code=None):
        """Send arm home command."""
        self.wink.set_mode("night")

    def alarm_arm_away(self, code=None):
        """Send arm away command."""
        self.wink.set_mode("away")

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return {"private": self.wink.private()}
| apache-2.0 |
mozilla/firefox-flicks | vendor-local/lib/python/celery/tests/events/test_state.py | 1 | 11017 | from __future__ import absolute_import
from time import time
from itertools import count
from celery import states
from celery.events import Event
from celery.events.state import State, Worker, Task, HEARTBEAT_EXPIRE_WINDOW
from celery.utils import uuid
from celery.tests.utils import Case
class replay(object):
    """Feed a canned list of events into a :class:`State`.

    Subclasses populate ``self.events`` in :meth:`setup`; iterating the
    instance (or calling :meth:`play`) pushes the events into the state
    one at a time.
    """

    def __init__(self, state):
        self.state = state
        self.rewind()
        self.setup()

    def setup(self):
        """Hook for subclasses to populate ``self.events``."""
        pass

    def __iter__(self):
        return self

    def __next__(self):
        try:
            self.state.event(self.events[self.position()])
        except IndexError:
            raise StopIteration()
    next = __next__  # Python 2 iterator protocol

    def rewind(self):
        """Reset the replay position to the first event."""
        # Fix: ``count(0).next`` is Python-2-only (py3 iterators have no
        # ``.next`` attribute); the ``next()`` builtin works on both, in
        # keeping with the ``next = __next__`` shim above.
        stream = count(0)
        self.position = lambda: next(stream)
        return self

    def play(self):
        """Replay every remaining event into the state."""
        for _ in self:
            pass
class ev_worker_online_offline(replay):
    """Replay script: a single worker comes online, then goes offline."""

    def setup(self):
        host = 'utest1'
        self.events = [
            Event('worker-online', hostname=host),
            Event('worker-offline', hostname=host),
        ]
class ev_worker_heartbeats(replay):
    """Replay script: an already-expired heartbeat followed by a fresh one."""

    def setup(self):
        host = 'utest1'
        stale = time() - HEARTBEAT_EXPIRE_WINDOW * 2
        self.events = [
            Event('worker-heartbeat', hostname=host, timestamp=stale),
            Event('worker-heartbeat', hostname=host),
        ]
class ev_task_states(replay):
    """Replay script driving one task through every state transition."""

    def setup(self):
        tid = self.tid = uuid()
        host = 'utest1'
        self.events = [
            Event('task-received', uuid=tid, name='task1',
                  args='(2, 2)', kwargs="{'foo': 'bar'}",
                  retries=0, eta=None, hostname=host),
            Event('task-started', uuid=tid, hostname=host),
            Event('task-revoked', uuid=tid, hostname=host),
            Event('task-retried', uuid=tid, exception="KeyError('bar')",
                  traceback='line 2 at main', hostname=host),
            Event('task-failed', uuid=tid, exception="KeyError('foo')",
                  traceback='line 1 at main', hostname=host),
            Event('task-succeeded', uuid=tid, result='4',
                  runtime=0.1234, hostname=host),
        ]
class ev_snapshot(replay):
    """Replay script: three workers online plus 20 received tasks,
    alternating between utest1/task1 and utest2/task2."""

    def setup(self):
        self.events = [
            Event('worker-online', hostname='utest1'),
            Event('worker-online', hostname='utest2'),
            Event('worker-online', hostname='utest3'),
        ]
        for i in range(20):
            # Even i -> task2 on utest2, odd i -> task1 on utest1.
            host = 'utest2' if not i % 2 else 'utest1'
            task_name = 'task2' if not i % 2 else 'task1'
            self.events.append(Event('task-received', name=task_name,
                                     uuid=uuid(), hostname=host))
class test_Worker(Case):
    """Unit tests for the event-state Worker record."""

    def test_survives_missing_timestamp(self):
        # A heartbeat without a timestamp must be ignored, not crash.
        w = Worker(hostname='foo')
        w.on_heartbeat(timestamp=None)
        self.assertEqual(w.heartbeats, [])

    def test_repr(self):
        # repr() must be non-empty and not raise.
        self.assertTrue(repr(Worker(hostname='foo')))
class test_Task(Case):
    """Unit tests for the event-state Task record."""

    def test_info(self):
        # info() must expose exactly the declared _info_fields, honor the
        # ``extra`` argument, and accept an explicit field list.
        task = Task(uuid='abcdefg',
                    name='tasks.add',
                    args='(2, 2)',
                    kwargs='{}',
                    retries=2,
                    result=42,
                    eta=1,
                    runtime=0.0001,
                    expires=1,
                    exception=1,
                    received=time() - 10,
                    started=time() - 8,
                    exchange='celery',
                    routing_key='celery',
                    succeeded=time())
        self.assertEqual(sorted(list(task._info_fields)),
                         sorted(task.info().keys()))
        self.assertEqual(sorted(list(task._info_fields + ('received', ))),
                         sorted(task.info(extra=('received', ))))
        self.assertEqual(sorted(['args', 'kwargs']),
                         sorted(task.info(['args', 'kwargs']).keys()))

    def test_ready(self):
        # ``ready`` flips to True once the task reaches a terminal state.
        task = Task(uuid='abcdefg',
                    name='tasks.add')
        task.on_received(timestamp=time())
        self.assertFalse(task.ready)
        task.on_succeeded(timestamp=time())
        self.assertTrue(task.ready)

    def test_sent(self):
        # A sent event leaves the task in PENDING.
        task = Task(uuid='abcdefg',
                    name='tasks.add')
        task.on_sent(timestamp=time())
        self.assertEqual(task.state, states.PENDING)

    def test_merge(self):
        # Out-of-order events must merge fields without regressing the
        # state that later events already established.
        task = Task()
        task.on_failed(timestamp=time())
        task.on_started(timestamp=time())
        task.on_received(timestamp=time(), name='tasks.add', args=(2, 2))
        self.assertEqual(task.state, states.FAILURE)
        self.assertEqual(task.name, 'tasks.add')
        self.assertTupleEqual(task.args, (2, 2))
        task.on_retried(timestamp=time())
        self.assertEqual(task.state, states.RETRY)

    def test_repr(self):
        # repr() must be non-empty and not raise.
        self.assertTrue(repr(Task(uuid='xxx', name='tasks.add')))
class test_State(Case):
    """Unit tests for the cluster-state aggregator, driven by the replay
    scripts defined above."""

    def test_repr(self):
        self.assertTrue(repr(State()))

    def test_worker_online_offline(self):
        # After the online event the worker is alive; after offline it is not.
        r = ev_worker_online_offline(State())
        r.next()
        self.assertTrue(r.state.alive_workers())
        self.assertTrue(r.state.workers['utest1'].alive)
        r.play()
        self.assertFalse(r.state.alive_workers())
        self.assertFalse(r.state.workers['utest1'].alive)

    def test_itertasks(self):
        # itertasks(limit=n) yields at most n tasks.
        s = State()
        s.tasks = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}
        self.assertEqual(len(list(s.itertasks(limit=2))), 2)

    def test_worker_heartbeat_expire(self):
        # An expired heartbeat leaves the worker dead; a fresh one revives it.
        r = ev_worker_heartbeats(State())
        r.next()
        self.assertFalse(r.state.alive_workers())
        self.assertFalse(r.state.workers['utest1'].alive)
        r.play()
        self.assertTrue(r.state.alive_workers())
        self.assertTrue(r.state.workers['utest1'].alive)

    def test_task_states(self):
        # Walk one task through every transition and check the recorded
        # state, timestamp, and worker after each replayed event.
        r = ev_task_states(State())
        # RECEIVED
        r.next()
        self.assertTrue(r.tid in r.state.tasks)
        task = r.state.tasks[r.tid]
        self.assertEqual(task.state, states.RECEIVED)
        self.assertTrue(task.received)
        self.assertEqual(task.timestamp, task.received)
        self.assertEqual(task.worker.hostname, 'utest1')
        # STARTED
        r.next()
        self.assertTrue(r.state.workers['utest1'].alive,
                        'any task event adds worker heartbeat')
        self.assertEqual(task.state, states.STARTED)
        self.assertTrue(task.started)
        self.assertEqual(task.timestamp, task.started)
        self.assertEqual(task.worker.hostname, 'utest1')
        # REVOKED
        r.next()
        self.assertEqual(task.state, states.REVOKED)
        self.assertTrue(task.revoked)
        self.assertEqual(task.timestamp, task.revoked)
        self.assertEqual(task.worker.hostname, 'utest1')
        # RETRY
        r.next()
        self.assertEqual(task.state, states.RETRY)
        self.assertTrue(task.retried)
        self.assertEqual(task.timestamp, task.retried)
        self.assertEqual(task.worker.hostname, 'utest1')
        self.assertEqual(task.exception, "KeyError('bar')")
        self.assertEqual(task.traceback, 'line 2 at main')
        # FAILURE
        r.next()
        self.assertEqual(task.state, states.FAILURE)
        self.assertTrue(task.failed)
        self.assertEqual(task.timestamp, task.failed)
        self.assertEqual(task.worker.hostname, 'utest1')
        self.assertEqual(task.exception, "KeyError('foo')")
        self.assertEqual(task.traceback, 'line 1 at main')
        # SUCCESS
        r.next()
        self.assertEqual(task.state, states.SUCCESS)
        self.assertTrue(task.succeeded)
        self.assertEqual(task.timestamp, task.succeeded)
        self.assertEqual(task.worker.hostname, 'utest1')
        self.assertEqual(task.result, '4')
        self.assertEqual(task.runtime, 0.1234)

    # Helper assertions shared by the snapshot tests below.
    def assertStateEmpty(self, state):
        self.assertFalse(state.tasks)
        self.assertFalse(state.workers)
        self.assertFalse(state.event_count)
        self.assertFalse(state.task_count)

    def assertState(self, state):
        self.assertTrue(state.tasks)
        self.assertTrue(state.workers)
        self.assertTrue(state.event_count)
        self.assertTrue(state.task_count)

    def test_freeze_while(self):
        s = State()
        r = ev_snapshot(s)
        r.play()

        def work():
            pass

        # clear_after=True wipes counters once the callable returns ...
        s.freeze_while(work, clear_after=True)
        self.assertFalse(s.event_count)
        # ... clear_after=False keeps them.
        s2 = State()
        r = ev_snapshot(s2)
        r.play()
        s2.freeze_while(work, clear_after=False)
        self.assertTrue(s2.event_count)

    def test_clear_tasks(self):
        s = State()
        r = ev_snapshot(s)
        r.play()
        self.assertTrue(s.tasks)
        s.clear_tasks(ready=False)
        self.assertFalse(s.tasks)

    def test_clear(self):
        r = ev_snapshot(State())
        r.play()
        self.assertTrue(r.state.event_count)
        self.assertTrue(r.state.workers)
        self.assertTrue(r.state.tasks)
        self.assertTrue(r.state.task_count)
        # clear() resets counters/workers but keeps tasks ...
        r.state.clear()
        self.assertFalse(r.state.event_count)
        self.assertFalse(r.state.workers)
        self.assertTrue(r.state.tasks)
        self.assertFalse(r.state.task_count)
        # ... clear(False) drops the tasks as well.
        r.state.clear(False)
        self.assertFalse(r.state.tasks)

    def test_task_types(self):
        r = ev_snapshot(State())
        r.play()
        self.assertEqual(sorted(r.state.task_types()), ['task1', 'task2'])

    def test_tasks_by_timestamp(self):
        r = ev_snapshot(State())
        r.play()
        self.assertEqual(len(r.state.tasks_by_timestamp()), 20)

    def test_tasks_by_type(self):
        # The snapshot alternates task1/task2 evenly across 20 events.
        r = ev_snapshot(State())
        r.play()
        self.assertEqual(len(r.state.tasks_by_type('task1')), 10)
        self.assertEqual(len(r.state.tasks_by_type('task2')), 10)

    def test_alive_workers(self):
        r = ev_snapshot(State())
        r.play()
        self.assertEqual(len(r.state.alive_workers()), 3)

    def test_tasks_by_worker(self):
        r = ev_snapshot(State())
        r.play()
        self.assertEqual(len(r.state.tasks_by_worker('utest1')), 10)
        self.assertEqual(len(r.state.tasks_by_worker('utest2')), 10)

    def test_survives_unknown_worker_event(self):
        # Unknown event types (with or without a hostname) must not raise.
        s = State()
        s.worker_event('worker-unknown-event-xxx', {'foo': 'bar'})
        s.worker_event('worker-unknown-event-xxx', {'hostname': 'xxx',
                                                    'foo': 'bar'})

    def test_survives_unknown_task_event(self):
        s = State()
        s.task_event('task-unknown-event-xxx', {'foo': 'bar',
                                                'uuid': 'x',
                                                'hostname': 'y'})

    def test_callback(self):
        # The state callback fires for every received event.
        scratch = {}

        def callback(state, event):
            scratch['recv'] = True

        s = State(callback=callback)
        s.event({'type': 'worker-online'})
        self.assertTrue(scratch.get('recv'))
| bsd-3-clause |
FCP-INDI/nipype | nipype/interfaces/ants/tests/test_auto_MultiplyImages.py | 12 | 1335 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..utils import MultiplyImages
def test_MultiplyImages_inputs():
    """Yield one equality check per declared metadata entry of the
    MultiplyImages input spec (nose-style generator test)."""
    expected = {
        'args': dict(argstr='%s'),
        'dimension': dict(argstr='%d',
                          mandatory=True,
                          position=0,
                          usedefault=False),
        'environ': dict(nohash=True,
                        usedefault=True),
        'first_input': dict(argstr='%s',
                            mandatory=True,
                            position=1),
        'ignore_exception': dict(nohash=True,
                                 usedefault=True),
        'num_threads': dict(nohash=True,
                            usedefault=True),
        'output_product_image': dict(argstr='%s',
                                     mandatory=True,
                                     position=3),
        'second_input': dict(argstr='%s',
                             mandatory=True,
                             position=2),
        'terminal_output': dict(nohash=True),
    }
    spec = MultiplyImages.input_spec()
    for trait_name, metadata in list(expected.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(spec.traits()[trait_name], metakey), value
def test_MultiplyImages_outputs():
    """Yield one equality check per declared metadata entry of the
    MultiplyImages output spec (nose-style generator test)."""
    expected = {
        'output_product_image': dict(),
    }
    spec = MultiplyImages.output_spec()
    for trait_name, metadata in list(expected.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(spec.traits()[trait_name], metakey), value
| bsd-3-clause |
johnkeepmoving/oss-ftp | python27/win32/Lib/distutils/tests/test_build_py.py | 46 | 5064 | """Tests for distutils.command.build_py."""
import os
import sys
import StringIO
import unittest
from distutils.command.build_py import build_py
from distutils.core import Distribution
from distutils.errors import DistutilsFileError
from distutils.tests import support
from test.test_support import run_unittest
class BuildPyTestCase(support.TempdirManager,
                      support.LoggingSilencer,
                      unittest.TestCase):
    """Tests for the distutils ``build_py`` command."""

    def test_package_data(self):
        # Build a one-package distribution whose package_data includes a
        # README, then check both get_outputs() and the files actually
        # copied into the build directory.
        sources = self.mkdtemp()
        f = open(os.path.join(sources, "__init__.py"), "w")
        try:
            f.write("# Pretend this is a package.")
        finally:
            f.close()
        f = open(os.path.join(sources, "README.txt"), "w")
        try:
            f.write("Info about this package")
        finally:
            f.close()
        destination = self.mkdtemp()
        dist = Distribution({"packages": ["pkg"],
                             "package_dir": {"pkg": sources}})
        # script_name need not exist, it just need to be initialized
        dist.script_name = os.path.join(sources, "setup.py")
        dist.command_obj["build"] = support.DummyCommand(
            force=0,
            build_lib=destination)
        dist.packages = ["pkg"]
        dist.package_data = {"pkg": ["README.txt"]}
        dist.package_dir = {"pkg": sources}
        cmd = build_py(dist)
        cmd.compile = 1
        cmd.ensure_finalized()
        self.assertEqual(cmd.package_data, dist.package_data)
        cmd.run()
        # This makes sure the list of outputs includes byte-compiled
        # files for Python modules but not for package data files
        # (there shouldn't *be* byte-code files for those!).
        #
        self.assertEqual(len(cmd.get_outputs()), 3)
        pkgdest = os.path.join(destination, "pkg")
        files = os.listdir(pkgdest)
        self.assertIn("__init__.py", files)
        self.assertIn("README.txt", files)
        # XXX even with -O, distutils writes pyc, not pyo; bug?
        if sys.dont_write_bytecode:
            self.assertNotIn("__init__.pyc", files)
        else:
            self.assertIn("__init__.pyc", files)

    def test_empty_package_dir(self):
        # A package_dir entry of '' must not break package_data globbing.
        # See SF 1668596/1720897.
        cwd = os.getcwd()
        # create the distribution files.
        sources = self.mkdtemp()
        open(os.path.join(sources, "__init__.py"), "w").close()
        testdir = os.path.join(sources, "doc")
        os.mkdir(testdir)
        open(os.path.join(testdir, "testfile"), "w").close()
        os.chdir(sources)
        old_stdout = sys.stdout
        sys.stdout = StringIO.StringIO()  # silence the build command output
        try:
            dist = Distribution({"packages": ["pkg"],
                                 "package_dir": {"pkg": ""},
                                 "package_data": {"pkg": ["doc/*"]}})
            # script_name need not exist, it just need to be initialized
            dist.script_name = os.path.join(sources, "setup.py")
            dist.script_args = ["build"]
            dist.parse_command_line()
            try:
                dist.run_commands()
            except DistutilsFileError:
                self.fail("failed package_data test when package_dir is ''")
        finally:
            # Restore state.
            os.chdir(cwd)
            sys.stdout = old_stdout

    def test_dir_in_package_data(self):
        """
        A directory in package_data should not be added to the filelist.
        """
        # See bug 19286
        sources = self.mkdtemp()
        pkg_dir = os.path.join(sources, "pkg")
        os.mkdir(pkg_dir)
        open(os.path.join(pkg_dir, "__init__.py"), "w").close()
        docdir = os.path.join(pkg_dir, "doc")
        os.mkdir(docdir)
        open(os.path.join(docdir, "testfile"), "w").close()
        # create the directory that could be incorrectly detected as a file
        os.mkdir(os.path.join(docdir, 'otherdir'))
        os.chdir(sources)
        dist = Distribution({"packages": ["pkg"],
                             "package_data": {"pkg": ["doc/*"]}})
        # script_name need not exist, it just need to be initialized
        dist.script_name = os.path.join(sources, "setup.py")
        dist.script_args = ["build"]
        dist.parse_command_line()
        try:
            dist.run_commands()
        except DistutilsFileError:
            self.fail("failed package_data when data dir includes a dir")

    def test_dont_write_bytecode(self):
        # makes sure byte_compile is not used when
        # sys.dont_write_bytecode is set.
        pkg_dir, dist = self.create_dist()
        cmd = build_py(dist)
        cmd.compile = 1
        cmd.optimize = 1
        old_dont_write_bytecode = sys.dont_write_bytecode
        sys.dont_write_bytecode = True
        try:
            cmd.byte_compile([])
        finally:
            sys.dont_write_bytecode = old_dont_write_bytecode
        self.assertIn('byte-compiling is disabled', self.logs[0][1])
def test_suite():
    """Return all build_py tests as a unittest suite."""
    return unittest.TestLoader().loadTestsFromTestCase(BuildPyTestCase)
if __name__ == "__main__":
    # Allow running this test module directly.
    run_unittest(test_suite())
| mit |
nanolearningllc/edx-platform-cypress | common/lib/xmodule/xmodule/editing_module.py | 120 | 4149 | """Descriptors for XBlocks/Xmodules, that provide editing of atrributes"""
from pkg_resources import resource_string
from xmodule.mako_module import MakoModuleDescriptor
from xblock.fields import Scope, String
import logging
log = logging.getLogger(__name__)
class EditingFields(object):
    """Contains specific template information (the raw data body)"""
    # Raw definition body of the module, stored as course content.
    data = String(scope=Scope.content, default='')
class EditingDescriptor(EditingFields, MakoModuleDescriptor):
    """
    Mixin descriptor exposing a raw editing view of its data and children.

    No validation is performed on the definition --- it is passed along
    to the browser as-is.
    """
    mako_template = "widgets/raw-edit.html"

    @property
    def non_editable_metadata_fields(self):
        """
        `data` should not be editable in the Studio settings editor.
        """
        fields = super(EditingDescriptor, self).non_editable_metadata_fields
        fields.append(self.fields['data'])
        return fields

    def get_context(self):
        """Extend the parent template context with the raw data body."""
        # Call the parent implementation directly (as the original code
        # did), then add our specific template information.
        context = MakoModuleDescriptor.get_context(self)
        context['data'] = self.data
        return context
class TabsEditingDescriptor(EditingFields, MakoModuleDescriptor):
    """
    Module that provides a raw editing view of its data and children. It does not
    perform any validation on its definition---just passes it along to the browser.

    This class is intended to be used as a mixin.

    Engine (module_edit.js) wants for metadata editor
    template to be always loaded, so don't forget to include
    settings tab in your module descriptor.
    """
    mako_template = "widgets/tabs-aggregator.html"
    css = {'scss': [resource_string(__name__, 'css/tabs/tabs.scss')]}
    js = {'coffee': [resource_string(
        __name__, 'js/src/tabs/tabs-aggregator.coffee')]}
    js_module_name = "TabsEditingDescriptor"
    # Subclasses override this with their list of tab definitions (dicts).
    tabs = []

    def get_context(self):
        """Template context: the tab list, a DOM id, and the raw data body."""
        _context = super(TabsEditingDescriptor, self).get_context()
        _context.update({
            'tabs': self.tabs,
            'html_id': self.location.html_id(),  # element_id
            'data': self.data,
        })
        return _context

    @classmethod
    def get_css(cls):
        # load every tab's css
        # NOTE(review): this extends the class-level ``cls.css`` dict in
        # place, so calling it repeatedly would re-append the same tab
        # styles -- presumably invoked once per class; verify before reuse.
        for tab in cls.tabs:
            tab_styles = tab.get('css', {})
            for css_type, css_content in tab_styles.items():
                if css_type in cls.css:
                    cls.css[css_type].extend(css_content)
                else:
                    cls.css[css_type] = css_content
        return cls.css
class XMLEditingDescriptor(EditingDescriptor):
    """
    Module that provides a raw editing view of its data as XML. It does not perform
    any validation of its definition
    """
    # CodeMirror-based XML editor assets.
    css = {'scss': [resource_string(__name__, 'css/codemirror/codemirror.scss')]}
    js = {'coffee': [resource_string(__name__, 'js/src/raw/edit/xml.coffee')]}
    js_module_name = "XMLEditingDescriptor"
class MetadataOnlyEditingDescriptor(EditingDescriptor):
    """
    Module which only provides an editing interface for the metadata, it does
    not expose a UI for editing the module data
    """
    # No data editor is loaded -- only the metadata (settings) editor.
    js = {'coffee': [resource_string(__name__, 'js/src/raw/edit/metadata-only.coffee')]}
    js_module_name = "MetadataOnlyEditingDescriptor"
    mako_template = "widgets/metadata-only-edit.html"
class JSONEditingDescriptor(EditingDescriptor):
    """
    Module that provides a raw editing view of its data as JSON. It does not
    perform any validation of its definition.
    """
    # Docstring fixed: it previously said "as XML" (copy-paste from
    # XMLEditingDescriptor); the assets below load the JSON editor.
    css = {'scss': [resource_string(__name__, 'css/codemirror/codemirror.scss')]}
    js = {'coffee': [resource_string(__name__, 'js/src/raw/edit/json.coffee')]}
    js_module_name = "JSONEditingDescriptor"
| agpl-3.0 |
DVSBA/ajenti | plugins/network/ncs_bsd_ipv4.py | 17 | 1104 | from ajenti.ui import *
from api import *
class BSDIPv4NetworkConfigSet(NetworkConfigBit):
    """IPv4 settings editor bit for BSD-style network interfaces."""
    cls = 'bsd-ipv4'
    title = 'IPv4'
    autovars = ['address', 'netmask', 'broadcast', 'metric', 'mtu']

    def get_ui(self):
        """Build a form with one text input per IPv4 interface variable."""
        # (field name, form label) pairs, in display order.
        field_specs = [
            ('address', 'Address'),
            ('netmask', 'Network mask'),
            ('broadcast', 'Broadcast'),
            ('metric', 'Routing metric'),
            ('mtu', 'MTU'),
        ]
        rows = [
            UI.Formline(
                UI.TextInput(name=field, value=self.iface[field]),
                text=label,
            )
            for field, label in field_specs
        ]
        return UI.Container(*rows)
| lgpl-3.0 |
sss/calibre-at-bzr | manual/plugin_examples/interface_demo/__init__.py | 10 | 3187 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
# The class that all Interface Action plugin wrappers must inherit from
from calibre.customize import InterfaceActionBase
class InterfacePluginDemo(InterfaceActionBase):
    '''
    This class is a simple wrapper that provides information about the actual
    plugin class. The actual interface plugin class is called InterfacePlugin
    and is defined in the ui.py file, as specified in the actual_plugin field
    below.

    The reason for having two classes is that it allows the command line
    calibre utilities to run without needing to load the GUI libraries.
    '''
    name = 'Interface Plugin Demo'
    description = 'An advanced plugin demo'
    supported_platforms = ['windows', 'osx', 'linux']
    author = 'Kovid Goyal'
    version = (1, 0, 0)
    minimum_calibre_version = (0, 7, 53)

    #: This field defines the GUI plugin class that contains all the code
    #: that actually does something. Its format is module_path:class_name
    #: The specified class must be defined in the specified module.
    actual_plugin = 'calibre_plugins.interface_demo.ui:InterfacePlugin'

    def is_customizable(self):
        '''
        This method must return True to enable customization via
        Preferences->Plugins
        '''
        return True

    def config_widget(self):
        '''
        Implement this method and :meth:`save_settings` in your plugin to
        use a custom configuration dialog.

        This method, if implemented, must return a QWidget. The widget can have
        an optional method validate() that takes no arguments and is called
        immediately after the user clicks OK. Changes are applied if and only
        if the method returns True.

        If for some reason you cannot perform the configuration at this time,
        return a tuple of two strings (message, details), these will be
        displayed as a warning dialog to the user and the process will be
        aborted.

        The base class implementation of this method raises NotImplementedError
        so by default no user configuration is possible.
        '''
        # It is important to put this import statement here rather than at the
        # top of the module as importing the config class will also cause the
        # GUI libraries to be loaded, which we do not want when using calibre
        # from the command line
        from calibre_plugins.interface_demo.config import ConfigWidget
        return ConfigWidget()

    def save_settings(self, config_widget):
        '''
        Save the settings specified by the user with config_widget.

        :param config_widget: The widget returned by :meth:`config_widget`.
        '''
        config_widget.save_settings()
        # Apply the changes to the running GUI plugin instance, if loaded.
        ac = self.actual_plugin_
        if ac is not None:
            ac.apply_settings()
| gpl-3.0 |
huertasdanny/inertialsense | src/scripts/IS_raw2msg.py | 1 | 1078 | #!/usr/bin/env python
import rospy
import serial
import checksum
from serial import SerialException
from std_msgs.msg import String
from create_dummy_msg import msg_creator
from sensor_msgs.msg import Imu
from sensor_msgs.msg import MagneticField as Mag
from inertialsense.msg import Rawgps
from inertialsense.msg import Bar
# code for using inertial sense library to convert binary data to ROS messages
def IS_raw2msg(ser):
    """Drain the serial port and build the ROS messages selected by the
    'dataDesired' ROS parameter.

    Returns a dict mapping topic name -> message for every enabled topic
    (empty when no serial data was available).
    """
    msgs = dict()
    desired = rospy.get_param('dataDesired')
    # takes in the serial connection information
    bitstream = ser.read(ser.inWaiting())
    if len(bitstream) != 0:
        rospy.loginfo('Reading data from serial')
        ## here is where function calls to IS c++ lib are made to parse data.
        # for now creating dummy data
        for topic, makemsg in desired.items():
            if makemsg:
                msgs[topic] = msg_creator(topic)
                rospy.loginfo('Received parsed %s message', topic)
    else:
        rospy.loginfo('no data received')
    return msgs
tahir24434/hydra | src/integrationtest/python/mesos_marathon_tests.py | 1 | 10264 | __author__ = 'sushil'
from sys import path
path.append("src/main/python")
# from mockito import mock, verify
import unittest
import numbers
import requests
import time
import zmq
from pprint import pprint
from hydra.lib.hydrabase import HydraBase
'''
tests :
-Check connectivity to mesos
-check connectivity to marathon
-check number of slaves
- check cpu/memory etc. on slave
- check app server deployment
- chack launching of app-s
- check communication to app-s
- check destroy of app-s
- check launch of client app-c
- check communication between app-s, app-c
- check scale up of app-s
check scale down of app-s
check cli outputs
'''
# def setUpModule(): # NOQA
# # gets called only once per run
# print("SETUPMODULE CALLED")
# def teadDownModule(): # NOQA
# # gets called only once per run
# print("TearDown Module Called")
def send_zmq_message(this, sock, message, expected_val=None):
    """Send *message* on the REQ socket *sock* and return the decoded reply.

    When a truthy *expected_val* is given, assert (via the test case
    *this*) that the reply equals it.
    """
    # pprint('sending message:' + message)
    sock.send_string(message)
    reply = sock.recv().decode("utf-8")
    # pprint('\tgot response :' + m)
    if expected_val:
        this.assertEqual(reply, expected_val)
    return reply
class hydraUnitTest(unittest.TestCase):  # NOQA
    """Integration tests for Mesos/Marathon connectivity and app
    lifecycle via :class:`HydraBase` (requires a live cluster)."""

    @classmethod
    def setUpClass(cls):
        # Gets called only once per class invocation
        # print("SETUP CLASS CALLED")
        # rt = cls()
        # cls._rt = .init('basicTest', 'hydra.ini')
        cls.runtest = HydraBase('basicTest', None, None,
                                startappserver=False)
        # return rt

    @classmethod
    def tearDownClass(cls):
        # get's called only once per class invocation
        # print("TearDown Class Called")
        cls.runtest.stop_appserver()
        del cls.runtest
        cls.runtest = None

    def setUp(self):
        # get's called per test
        # print ("SETUP Called")
        self.rt = self.__class__.runtest
        self.rt.init_mesos()
        self.rt.init_marathon()
        self.rt.init_appserver_dir()
        self.rt.start_appserver()

    def tearDown(self):
        # gets called per test
        # print("TearDown Caleldd")
        self.rt = None

    def test_mesos_health(self):
        """Mesos reports healthy and returns a non-empty version string."""
        # slaveCount = self.rt.mesos.get_slave_cnt()
        # self.assertTrue(slaveCount > 0)
        self.assertTrue(self.rt.get_mesos_health(), "Unable to get health status from mesos")
        ver = self.rt.get_mesos_version()
        self.assertNotEqual(ver, None, "Unable to get version information from mesos")
        self.assertTrue(len(ver['version']) > 0, "Unable to get a valid version for mesos")

    def test_mesos_version(self):
        ver = self.rt.get_mesos_version()
        self.assertNotEqual(ver, None, "Unable to get version information from mesos")
        self.assertTrue(len(ver['version']) > 0, "Unable to get a valid version for mesos")

    def test_mesos_stats(self):
        """Cluster stats expose positive numeric cpu/memory totals."""
        stats = self.rt.get_mesos_stats()
        self.assertNotEqual(stats, None, "Unable to get stats information from mesos")
        self.assertTrue(isinstance(stats['cpus_total'], numbers.Number))
        self.assertTrue(isinstance(stats['mem_total_bytes'], numbers.Number))
        self.assertTrue(isinstance(stats['mem_free_bytes'], numbers.Number))
        self.assertTrue(stats['cpus_total'] > 0)
        self.assertTrue(stats['mem_total_bytes'] > 0)
        self.assertTrue(stats['mem_free_bytes'] > 0)

    def test_mesos_slaves(self):
        slave_count = self.rt.get_mesos_slave_count()
        self.assertTrue(slave_count > 0, 'No slaves detected on mesos cluster')

    def test_marathon_connectivity(self):
        a = self.rt.ping()
        self.assertEqual(a, 'pong\n')

    def test_app_server_start(self):
        """The local app file server comes up and serves the test bundle."""
        for idx in range(0, 2):
            try:
                r = requests.get('http://127.0.0.1:' + str(self.rt.myport) + '/')
                break
            except:
                # try for 2 seconds to get the connection
                pass
            time.sleep(1)
        r = requests.get('http://127.0.0.1:' + str(self.rt.myport) + '/')
        self.assertEqual(r.status_code, 200)
        self.assertTrue(r.content.decode("utf-8").find('basicTest.tgz') >= 0)

    def test_app_launch(self):
        """An app can be launched on Marathon, gets an ip:port, and can
        be deleted again."""
        tapp = 'testapp1'
        # clean up any previous app by this name
        self.rt.delete_app(tapp)
        self.rt.create_hydra_app(name=tapp, app_path='hydra.selftest.agents.Test',
                                 app_args='5598 0',
                                 ports=[0],
                                 cpus=0.01, mem=32)
        taskip = self.rt.find_ip_uniqueapp(tapp)
        tasks = self.rt.get_app_tasks(tapp)
        self.assertTrue(len(tasks) == 1)
        self.assertTrue(len(tasks[0].ports) == 1)
        taskport = str(tasks[0].ports[0])
        pprint('task is launched at ip=' + taskip + ":" + taskport)
        # stop and clean up
        self.rt.delete_app(tapp)
        self.assertTrue(len(taskip) >= 7)
        a = self.rt.get_app(tapp)
        self.assertTrue(not a)

    def test_app_communication(self):
        """A launched app answers a zmq REQ 'ping' with 'pong'."""
        tapp = 'testapp2'
        # clean up any previous app by this name
        self.rt.delete_app(tapp)
        self.rt.create_hydra_app(name=tapp, app_path='hydra.selftest.agents.Test',
                                 app_args='5598 0',
                                 cpus=0.01, mem=32)
        taskip = self.rt.find_ip_uniqueapp(tapp)
        tasks = self.rt.get_app_tasks(tapp)
        self.assertTrue(len(tasks) == 1)
        self.assertTrue(len(tasks[0].ports) == 1)
        taskport = str(tasks[0].ports[0])
        pprint('task is launched at ip=' + taskip + ":" + taskport)
        # now send a message to this app to find out how it's doing
        zctx = zmq.Context()
        zsocket = zctx.socket(zmq.REQ)
        zsocket.connect("tcp://%s:%s" % (taskip, taskport))
        zsocket.send_string('ping')
        message = zsocket.recv().decode("utf-8")
        # stop and clean up
        self.rt.delete_app(tapp)
        self.assertEqual(message, 'pong')
'''
TODO: Enable this test case.
def test_multiple_apps(self):
tapp_cli0 = 'testapp.c0'
tapp_srv = 'testapp.s'
# clean up any previous app by this name
self.rt.delete_app(tapp_cli0)
self.rt.delete_app(tapp_srv)
self.rt.create_hydra_app(name=tapp_srv, app_path='hydra.selftest.agents.Test',
app_args='5598 0',
ports=[0],
cpus=0.01, mem=32)
srvip = self.rt.find_ip_uniqueapp(tapp_srv)
tasks = self.rt.get_app_tasks(tapp_srv)
self.assertTrue(len(tasks) == 1)
self.assertTrue(len(tasks[0].ports) == 1)
taskport = str(tasks[0].ports[0])
srvipport = srvip + ':' + taskport
self.rt.create_hydra_app(name=tapp_cli0, app_path='hydra.selftest.agents.Test',
ports=[0],
app_args='0 %s:5598' % srvip,
cpus=0.01, mem=32)
cliip0 = self.rt.find_ip_uniqueapp(tapp_cli0)
tasks = self.rt.get_app_tasks(tapp_cli0)
self.assertTrue(len(tasks) == 1)
self.assertTrue(len(tasks[0].ports) == 1)
taskport = str(tasks[0].ports[0])
cliip0 += ':' + taskport
pprint('task is launched at srvip=' + srvipport + ' cliip0=' + cliip0)
# now send a message to this app to find out how it's doing
zctx = zmq.Context()
clisocket0 = zctx.socket(zmq.REQ)
srvsocket = zctx.socket(zmq.REQ)
clisocket0.connect("tcp://%s" % cliip0)
srvsocket.connect("tcp://%s" % srvipport)
# check if we can talk to both client and server programs
send_zmq_message(self, clisocket0, 'ping', 'pong')
send_zmq_message(self, srvsocket, 'ping', 'pong')
# find out what port is reported by the marathon
tasks0 = self.rt.get_app_tasks(tapp_cli0)
for task in tasks0:
print(" PORTS reported by api taskid[" + task.id + " PORT = " + pformat(task.ports))
send_zmq_message(self, srvsocket, 'disable_pub', 'ok')
send_zmq_message(self, srvsocket, 'reset_pub', 'ok')
send_zmq_message(self, clisocket0, 'reset_sub', 'ok')
send_zmq_message(self, srvsocket, 'delay:0.01', 'ok')
send_zmq_message(self, srvsocket, 'enable_pub', 'ok')
time.sleep(1)
send_zmq_message(self, srvsocket, 'disable_pub', 'ok')
srv_cnt = send_zmq_message(self, srvsocket, 'cnt_pub')
cli_cnt = send_zmq_message(self, clisocket0, 'cnt_sub')
pprint("Srv_cnt = " + str(srv_cnt))
pprint("cli_cnt = " + str(cli_cnt))
self.assertEqual(srv_cnt, cli_cnt)
send_zmq_message(self, srvsocket, 'reset_pub', 'ok')
self.rt.__mt.scale_app(tapp_cli0, 10)
self.rt.__mt.wait_app_ready(tapp_cli0, 10)
cliiplist = []
tasks0 = self.rt.get_app_tasks(tapp_cli0)
for task in tasks0:
taskip = self.rt.get_ip_hostname(task.host)
print(" PORTS reported by api taskid[" + task.id + " IP_PORT = " + taskip + ":" + pformat(task.ports[0]))
cliiplist.append(taskip + ':' + str(task.ports[0]))
# launch 2 more clients
clisockets = []
for ipp in cliiplist:
sock = zctx.socket(zmq.REQ)
sock.connect("tcp://%s" % ipp)
clisockets.append(sock)
for cli in clisockets:
send_zmq_message(self, cli, 'ping', 'pong')
send_zmq_message(self, cli, 'reset_sub', 'ok')
send_zmq_message(self, srvsocket, 'enable_pub', 'ok')
time.sleep(1)
send_zmq_message(self, srvsocket, 'disable_pub', 'ok')
srv_cnt = send_zmq_message(self, srvsocket, 'cnt_pub')
cli_cnt = []
for cli in clisockets:
cli_cnt.append(send_zmq_message(self, cli, 'cnt_sub'))
for idx in range(0, len(cli_cnt)):
pprint(' cli_cnt%d = %s' % (idx, cli_cnt[idx]))
# stop and clean up
self.rt.delete_app(tapp_srv)
self.rt.delete_app(tapp_cli0)
for idx in range(0, len(cli_cnt)):
self.assertEqual(srv_cnt, cli_cnt[idx])
'''
if __name__ == '__main__':
    # Allow running this integration-test module directly.
    unittest.main()
| apache-2.0 |
jakirkham/bokeh | bokeh/core/property/instance.py | 3 | 5059 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide the Instance property.
The Instance property is used to construct object graphs of Bokeh models,
where one Bokeh model refers to another.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from importlib import import_module
# External imports
from six import iteritems, string_types
# Bokeh imports
from .bases import DeserializationError, Property
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Instance',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Instance(Property):
    ''' Accept values that are instances of |HasProps|.
    '''
    def __init__(self, instance_type, default=None, help=None):
        # ``instance_type`` may be given as a class or as a dotted-path
        # string; strings are resolved lazily by the ``instance_type``
        # property below (avoids import cycles between model modules).
        if not isinstance(instance_type, (type,) + string_types):
            raise ValueError("expected a type or string, got %s" % instance_type)
        from ..has_props import HasProps
        if isinstance(instance_type, type) and not issubclass(instance_type, HasProps):
            raise ValueError("expected a subclass of HasProps, got %s" % instance_type)
        self._instance_type = instance_type
        super(Instance, self).__init__(default=default, help=help)
    def __str__(self):
        return "%s(%s)" % (self.__class__.__name__, self.instance_type.__name__)
    @property
    def has_ref(self):
        # Instance properties always hold references to other objects.
        return True
    @property
    def instance_type(self):
        # Resolve a "module.ClassName" string to the actual class on first
        # access and cache it back into ``_instance_type``.
        if isinstance(self._instance_type, string_types):
            module, name = self._instance_type.rsplit(".", 1)
            self._instance_type = getattr(import_module(module, "bokeh"), name)
        return self._instance_type
    def from_json(self, json, models=None):
        ''' Deserialize ``json`` into an ``instance_type`` value.

        For Model subclasses the JSON is a reference dict ``{"id": ...}``
        that must be resolved through ``models``; for plain HasProps the
        dict carries attribute values, each deserialized by the matching
        property descriptor. ``None`` passes through unchanged.
        '''
        if json is None:
            return None
        elif isinstance(json, dict):
            from ...model import Model
            if issubclass(self.instance_type, Model):
                if models is None:
                    raise DeserializationError("%s can't deserialize without models" % self)
                else:
                    model = models.get(json["id"])
                    if model is not None:
                        return model
                    else:
                        raise DeserializationError("%s failed to deserialize reference to %s" % (self, json))
            else:
                attrs = {}
                for name, value in iteritems(json):
                    prop_descriptor = self.instance_type.lookup(name).property
                    attrs[name] = prop_descriptor.from_json(value, models)
                # XXX: this doesn't work when Instance(Superclass) := Subclass()
                # Serialization dict must carry type information to resolve this.
                return self.instance_type(**attrs)
        else:
            raise DeserializationError("%s expected a dict or None, got %s" % (self, json))
    def validate(self, value, detail=True):
        ''' Raise ValueError unless ``value`` is None or an instance of
        ``instance_type``; ``detail=False`` suppresses the error message.
        '''
        super(Instance, self).validate(value, detail)
        if value is not None:
            if not isinstance(value, self.instance_type):
                msg = "" if not detail else "expected an instance of type %s, got %s of type %s" % (self.instance_type.__name__, value, type(value).__name__)
                raise ValueError(msg)
    def _may_have_unstable_default(self):
        # because the instance value is mutable
        return True
    def _sphinx_type(self):
        # Rendered form used in generated Sphinx docs.
        fullname = "%s.%s" % (self.instance_type.__module__, self.instance_type.__name__)
        return self._sphinx_prop_link() + "( %s )" % self._sphinx_model_link(fullname)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
alien4cloud/alien4cloud-cloudify3-provider | alien4cloud-cloudify3-provider/src/test/resources/outputs/blueprints/openstack/artifact_test/wrapper/Tomcat/tosca.interfaces.node.lifecycle.Standard/create/_a4c_create.py | 6 | 15469 |
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils
# Build the REST client for the Cloudify manager, switching to HTTPS when
# the MANAGER_REST_PROTOCOL environment variable requests it.
if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
    client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', trust_all=True)
else:
    client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port())
def convert_env_value_to_string(envDict):
    """Coerce every key and value of *envDict* to str, in place.

    Iterates over a snapshot of the items: the original code iterated the
    live dict while popping/re-inserting entries, which raises
    RuntimeError on Python 3 whenever a key changes under str().
    """
    for key, value in list(envDict.items()):
        envDict.pop(key)
        envDict[str(key)] = str(value)
def get_attribute_user(ctx):
    """Return the execution user: the 'user' attribute when set, otherwise
    the user recorded in the 'cloudify_agent' runtime attribute."""
    if not get_attribute(ctx, 'user'):
        return get_attribute(ctx, 'cloudify_agent')['user']
    return get_attribute(ctx, 'user')
def get_attribute_key(ctx):
    """Return the execution key: the 'key' attribute when set, otherwise
    the key recorded in the 'cloudify_agent' runtime attribute."""
    if not get_attribute(ctx, 'key'):
        return get_attribute(ctx, 'cloudify_agent')['key']
    return get_attribute(ctx, 'key')
def get_host(entity):
    """Return the target of the entity's 'contained_in' relationship (its
    host), or None when the entity has no such relationship."""
    relationships = entity.instance.relationships
    if relationships:
        for rel in relationships:
            if 'cloudify.relationships.contained_in' in rel.type_hierarchy:
                return rel.target
    return None
def has_attribute_mapping(entity, attribute_name):
    """Return True when the node declares an '_a4c_att_<name>' mapping that
    redirects *attribute_name* somewhere other than itself on SELF."""
    ctx.logger.info('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
    mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
    if mapping_configuration is not None:
        # A SELF mapping onto the same attribute name is the identity; it is
        # reported as "no mapping" so get_attribute does not recurse forever.
        if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
            return False
        else:
            return True
    return False
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
    """Follow the node's '_a4c_att_<name>' mapping and fetch the mapped
    value via *data_retriever_function*, either from the entity itself
    (SELF) or from a relationship target (TARGET). Returns "" when a
    TARGET mapping matches no relationship."""
    # This is where attribute mapping is defined in the cloudify type
    mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
    ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
    # If the mapping configuration exist and if it concerns SELF then just get attribute of the mapped attribute name
    # Else if it concerns TARGET then follow the relationship and retrieved the mapped attribute name from the TARGET
    if mapping_configuration['parameters'][0] == 'SELF':
        return data_retriever_function(entity, mapping_configuration['parameters'][1])
    elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
        for relationship in entity.instance.relationships:
            if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
                return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
    return ""
def get_nested_attribute(entity, attribute_names):
    """Resolve a nested attribute path: fetch attribute_names[0] from the
    entity, then descend into the result one key at a time. Returns ""
    as soon as an intermediate level is missing."""
    current = get_attribute(entity, attribute_names[0])
    for name in attribute_names[1:]:
        if current is None:
            return ""
        current = current.get(name, None)
    return current
def _all_instances_get_nested_attribute(entity, attribute_names):
    """Multi-instance nested attribute lookup is not supported here;
    always returns None."""
    return None
def get_attribute(entity, attribute_name):
    """Resolve *attribute_name* for *entity*: declared mapping first, then
    runtime properties, then node properties, then recursively the host
    node. Returns "" when nothing is found anywhere."""
    if has_attribute_mapping(entity, attribute_name):
        # First check if any mapping exist for attribute
        mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
        ctx.logger.info('Mapping exists for attribute {0} with value {1}'.format(attribute_name, mapped_value))
        return mapped_value
    # No mapping exist, try to get directly the attribute from the entity
    attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
    if attribute_value is not None:
        ctx.logger.info('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, attribute_value, entity.node.id))
        return attribute_value
    # Attribute retrieval fails, fall back to property
    property_value = entity.node.properties.get(attribute_name, None)
    if property_value is not None:
        return property_value
    # Property retrieval fails, fall back to host instance
    host = get_host(entity)
    if host is not None:
        ctx.logger.info('Attribute not found {0} go up to the parent node {1}'.format(attribute_name, host.node.id))
        return get_attribute(host, attribute_name)
    # Nothing is found
    return ""
def _all_instances_get_attribute(entity, attribute_name):
    """Collect *attribute_name* from every instance of the entity's node,
    resolved via the manager REST client; keys are '<instance_id>_'."""
    result_map = {}
    # get all instances data using cfy rest client
    # we have to get the node using the rest client with node_instance.node_id
    # then we will have the relationships
    node = client.nodes.get(ctx.deployment.id, entity.node.id)
    all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
    for node_instance in all_node_instances:
        prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
        if prop_value is not None:
            ctx.logger.info('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, prop_value, entity.node.id,
                                                                                                                  node_instance.id))
            result_map[node_instance.id + '_'] = prop_value
    return result_map
def get_property(entity, property_name):
    """Return *property_name* from the entity's node, falling back up the
    host (contained_in) chain; "" when absent everywhere."""
    # Try to get the property value on the node
    property_value = entity.node.properties.get(property_name, None)
    if property_value is not None:
        ctx.logger.info('Found the property {0} with value {1} on the node {2}'.format(property_name, property_value, entity.node.id))
        return property_value
    # No property found on the node, fall back to the host
    host = get_host(entity)
    if host is not None:
        ctx.logger.info('Property not found {0} go up to the parent node {1}'.format(property_name, host.node.id))
        return get_property(host, property_name)
    return ""
def get_instance_list(node_id):
    """Return a comma-separated string of all instance ids of *node_id*."""
    all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
    return ','.join(node_instance.id for node_instance in all_node_instances)
def get_host_node_name(instance):
    """Return the node id of the host this instance is contained in, or
    None when there is no 'contained_in' relationship."""
    for rel in instance.relationships:
        if 'cloudify.relationships.contained_in' in rel.type_hierarchy:
            return rel.target.node.id
    return None
def __get_relationship(node, target_name, relationship_type):
    """Find the relationship of *node* whose target is *target_name* and
    whose type hierarchy contains *relationship_type*; None when absent."""
    for candidate in node.relationships:
        if candidate.get('target_id') == target_name and relationship_type in candidate.get('type_hierarchy'):
            return candidate
    return None
def __has_attribute_mapping(node, attribute_name):
    """REST-data variant of has_attribute_mapping: True when *node* declares
    a non-identity '_a4c_att_<name>' mapping for *attribute_name*."""
    ctx.logger.info('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name, node.properties))
    mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
    if mapping_configuration is not None:
        # Identity SELF mappings count as "no mapping" (prevents recursion).
        if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
            return False
        else:
            return True
    return False
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
    """REST-data variant of process_attribute_mapping: resolve the
    '_a4c_att_<name>' mapping against a node / node_instance pair fetched
    through the manager REST client."""
    # This is where attribute mapping is defined in the cloudify type
    mapping_configuration = node.properties['_a4c_att_' + attribute_name]
    ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
    # If the mapping configuration exist and if it concerns SELF then just get attribute of the mapped attribute name
    # Else if it concerns TARGET then follow the relationship and retrieved the mapped attribute name from the TARGET
    if mapping_configuration['parameters'][0] == 'SELF':
        return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
    elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
        for rel in node_instance.relationships:
            relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
            if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
                target_instance = client.node_instances.get(rel.get('target_id'))
                target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
                return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
    return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
    """Resolve *attribute_name* for a node instance using REST data:
    mapping first, then runtime properties, then the containing (parent)
    instance. Returns None when nothing is found."""
    if __has_attribute_mapping(node, attribute_name):
        return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
    attribute_value = node_instance.runtime_properties.get(attribute_name, None)
    if attribute_value is not None:
        return attribute_value
    elif node_instance.relationships:
        for rel in node_instance.relationships:
            # on rel we have target_name, target_id (instanceId), type
            relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
            if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
                parent_instance = client.node_instances.get(rel.get('target_id'))
                parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
                return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
        return None
    else:
        return None
# Environment passed to the install script: node/instance identity, the
# execution host credentials, and the Tomcat-specific deployment inputs.
env_map = {}
env_map['NODE'] = ctx.node.id
env_map['INSTANCE'] = ctx.instance.id
env_map['INSTANCES'] = get_instance_list(ctx.node.id)
env_map['HOST'] = get_host_node_name(ctx.instance)
env_map['A4C_EXECUTION_HOST'] = get_attribute(ctx, 'ip_address')
env_map['A4C_EXECUTION_USER'] = get_attribute_user(ctx)
env_map['A4C_EXECUTION_KEY'] = get_attribute_key(ctx)
env_map['TOMCAT_HOME'] = r'/opt/tomcat'
env_map['TOMCAT_PORT'] = r'80'
env_map['TOMCAT_URL'] = r'http://mirrors.ircam.fr/pub/apache/tomcat/tomcat-8/v8.0.29/bin/apache-tomcat-8.0.29.tar.gz'
# Caller-supplied env vars (operation inputs) override/extend the defaults.
if inputs.get('process', None) is not None and inputs['process'].get('env', None) is not None:
    ctx.logger.info('Operation is executed with environment variable {0}'.format(inputs['process']['env']))
    env_map.update(inputs['process']['env'])
def parse_output(output):
    """Split wrapper stdout into declared operation outputs and the last
    plain line, which by convention carries the operation result."""
    expected = re.compile(r'EXPECTED_OUTPUT_(\w+)=(.*)')
    outputs = {}
    last_output = None
    for line in output.splitlines():
        found = expected.match(line)
        if found:
            outputs[found.group(1)] = found.group(2)
        else:
            last_output = line
    return {'last_output': last_output, 'outputs': outputs}
def execute(script_path, process, outputNames, command_prefix=None, cwd=None):
    """Run *script_path* in a subprocess (Python 2 only: octal ``0755``
    literals and ``unicode`` below), wrapping it with scriptWrapper when
    operation outputs are expected, and return the parsed stdout.

    Raises NonRecoverableError on a non-zero exit code.
    """
    os.chmod(script_path, 0755)
    on_posix = 'posix' in sys.builtin_module_names
    env = os.environ.copy()
    process_env = process.get('env', {})
    env.update(process_env)
    if outputNames is not None:
        # Outputs are captured by running the script through a platform
        # specific wrapper that echoes EXPECTED_OUTPUT_<name>=<value> lines.
        env['EXPECTED_OUTPUTS'] = outputNames
        if platform.system() == 'Windows':
            wrapper_path = ctx.download_resource("scriptWrapper.bat")
        else:
            wrapper_path = ctx.download_resource("scriptWrapper.sh")
        os.chmod(wrapper_path, 0755)
        command = '{0} {1}'.format(wrapper_path, script_path)
    else:
        command = script_path
    if command_prefix is not None:
        command = "{0} {1}".format(command_prefix, command)
    ctx.logger.info('Executing: {0} in env {1}'.format(command, env))
    process = subprocess.Popen(command,
                               shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               env=env,
                               cwd=cwd,
                               bufsize=1,
                               close_fds=on_posix)
    return_code = None
    # Drain both pipes on background threads to avoid pipe-buffer deadlock.
    stdout_consumer = OutputConsumer(process.stdout)
    stderr_consumer = OutputConsumer(process.stderr)
    while True:
        return_code = process.poll()
        if return_code is not None:
            break
        time.sleep(0.1)
    stdout_consumer.join()
    stderr_consumer.join()
    parsed_output = parse_output(stdout_consumer.buffer.getvalue())
    if outputNames is not None:
        outputNameList = outputNames.split(';')
        for outputName in outputNameList:
            ctx.logger.info('Ouput name: {0} value : {1}'.format(outputName, parsed_output['outputs'].get(outputName, None)))
    if return_code != 0:
        error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
                                                                                                                             stdout_consumer.buffer.getvalue(),
                                                                                                                             stderr_consumer.buffer.getvalue())
        error_message = str(unicode(error_message, errors='ignore'))
        ctx.logger.error(error_message)
        raise NonRecoverableError(error_message)
    else:
        ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
                                                                                                         stderr_consumer.buffer.getvalue())
        ok_message = str(unicode(ok_message, errors='ignore'))
        ctx.logger.info(ok_message)
    return parsed_output
class OutputConsumer(object):
    """Drains a subprocess pipe on a background daemon thread, collecting
    everything that was read into an in-memory buffer."""
    def __init__(self, out):
        self.out = out
        self.buffer = StringIO()
        self.consumer = threading.Thread(target=self.consume_output)
        self.consumer.daemon = True
        self.consumer.start()
    def consume_output(self):
        """Read the stream line by line until EOF, then close it."""
        line = self.out.readline()
        while line != b'':
            self.buffer.write(line)
            line = self.out.readline()
        self.out.close()
    def join(self):
        """Block until the reader thread has fully drained the stream."""
        self.consumer.join()
# Run the Tomcat install script with the assembled environment, then store
# its declared outputs and the computed server_url as runtime properties.
new_script_process = {'env': env_map}
operationOutputNames = None
convert_env_value_to_string(new_script_process['env'])
parsed_output = execute(ctx.download_resource('_a4c_impl_artifact/Tomcat/tosca.interfaces.node.lifecycle.Standard/create/tomcat_install.sh'), new_script_process, operationOutputNames)
outputs = parsed_output['outputs'].items()
for k,v in outputs:
    ctx.logger.info('Output name: {0} value: {1}'.format(k, v))
    ctx.instance.runtime_properties['_a4c_OO:tosca.interfaces.node.lifecycle.Standard:create:{0}'.format(k)] = v
ctx.instance.runtime_properties['server_url'] = r'http://' + get_attribute(ctx, 'public_ip_address') + r':' + r'80'
ctx.instance.update()
| apache-2.0 |
Dhivyap/ansible | lib/ansible/modules/cloud/azure/azure_rm_applicationsecuritygroup.py | 27 | 8584 | #!/usr/bin/python
#
# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_applicationsecuritygroup
version_added: "2.8"
short_description: Manage Azure Application Security Group
description:
- Create, update and delete instance of Azure Application Security Group.
options:
resource_group:
description:
- The name of the resource group.
required: True
name:
description:
- The name of the application security group.
required: True
location:
description:
- Resource location. If not set, location from the resource group will be used as default.
state:
description:
- Assert the state of the Application Security Group.
- Use C(present) to create or update an Application Security Group and C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- Yunge Zhu (@yungezz)
'''
EXAMPLES = '''
- name: Create application security group
azure_rm_applicationsecuritygroup:
resource_group: myResourceGroup
name: mySecurityGroup
location: eastus
tags:
foo: bar
'''
RETURN = '''
id:
description:
- Resource id of the application security group.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/applicationSecurityGroups/
mySecurityGroup"
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from msrestazure.azure_operation import AzureOperationPoller
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
    """Enumeration of the operations the module may decide to perform."""
    NoAction = 0
    CreateOrUpdate = 1
    Delete = 2
class AzureRMApplicationSecurityGroup(AzureRMModuleBase):
    """Configuration class for an Azure RM Application Security Group resource"""
    def __init__(self):
        # Argument spec mirrors the DOCUMENTATION block above; tags and the
        # common Azure arguments are injected by AzureRMModuleBase.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            location=dict(
                type='str'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )
        self.resource_group = None
        self.location = None
        self.name = None
        self.tags = None
        self.state = None
        self.results = dict(changed=False)
        self.to_do = Actions.NoAction
        super(AzureRMApplicationSecurityGroup, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                              supports_check_mode=True,
                                                              supports_tags=True)
    def exec_module(self, **kwargs):
        """Main module execution method"""
        # Copy validated module parameters onto matching instance attributes.
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            # Default the location to the resource group's location.
            self.location = resource_group.location
        old_response = self.get_applicationsecuritygroup()
        if not old_response:
            self.log("Application Security Group instance doesn't exist")
            if self.state == 'present':
                self.to_do = Actions.CreateOrUpdate
            else:
                self.log("Old instance didn't exist")
        else:
            self.log("Application Security Group instance already exists")
            if self.state == 'present':
                if self.check_update(old_response):
                    self.to_do = Actions.CreateOrUpdate
                # Tag changes alone are enough to trigger an update.
                update_tags, self.tags = self.update_tags(old_response.get('tags', None))
                if update_tags:
                    self.to_do = Actions.CreateOrUpdate
            elif self.state == 'absent':
                self.to_do = Actions.Delete
        if self.to_do == Actions.CreateOrUpdate:
            self.log("Need to Create / Update the Application Security Group instance")
            self.results['changed'] = True
            if self.check_mode:
                # Check mode: report the pending change without applying it.
                return self.results
            response = self.create_update_applicationsecuritygroup()
            self.results['id'] = response['id']
        elif self.to_do == Actions.Delete:
            self.log("Delete Application Security Group instance")
            self.results['changed'] = True
            if self.check_mode:
                return self.results
            self.delete_applicationsecuritygroup()
        return self.results
    def check_update(self, existing_asg):
        # NOTE(review): this returns a falsy value on every path, so only
        # tag changes ever trigger an update in exec_module — presumably
        # intentional because location is immutable; confirm.
        if self.location and self.location.lower() != existing_asg['location'].lower():
            self.module.warn("location cannot be updated. Existing {0}, input {1}".format(existing_asg['location'], self.location))
        return False
    def create_update_applicationsecuritygroup(self):
        '''
        Create or update Application Security Group.
        :return: deserialized Application Security Group instance state dictionary
        '''
        self.log("Creating / Updating the Application Security Group instance {0}".format(self.name))
        param = dict(name=self.name,
                     tags=self.tags,
                     location=self.location)
        try:
            response = self.network_client.application_security_groups.create_or_update(resource_group_name=self.resource_group,
                                                                                        application_security_group_name=self.name,
                                                                                        parameters=param)
            # Long-running operations come back as pollers; wait for them.
            if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
                response = self.get_poller_result(response)
        except CloudError as exc:
            self.log('Error creating/updating Application Security Group instance.')
            self.fail("Error creating/updating Application Security Group instance: {0}".format(str(exc)))
        return response.as_dict()
    def delete_applicationsecuritygroup(self):
        '''
        Deletes specified Application Security Group instance.
        :return: True
        '''
        self.log("Deleting the Application Security Group instance {0}".format(self.name))
        try:
            response = self.network_client.application_security_groups.delete(resource_group_name=self.resource_group,
                                                                              application_security_group_name=self.name)
        except CloudError as e:
            self.log('Error deleting the Application Security Group instance.')
            self.fail("Error deleting the Application Security Group instance: {0}".format(str(e)))
        return True
    def get_applicationsecuritygroup(self):
        '''
        Gets the properties of the specified Application Security Group.
        :return: deserialized Application Security Group instance state dictionary
        '''
        self.log("Checking if the Application Security Group instance {0} is present".format(self.name))
        # NOTE(review): 'found' is assigned but never used.
        found = False
        try:
            response = self.network_client.application_security_groups.get(resource_group_name=self.resource_group,
                                                                           application_security_group_name=self.name)
            self.log("Response : {0}".format(response))
            self.log("Application Security Group instance : {0} found".format(response.name))
            return response.as_dict()
        except CloudError as e:
            # A missing resource surfaces as CloudError; report "not found".
            self.log('Did not find the Application Security Group instance.')
            return False
def main():
    """Main execution"""
    # Instantiating the module class runs the whole Ansible module lifecycle.
    AzureRMApplicationSecurityGroup()
if __name__ == '__main__':
    main()
| gpl-3.0 |
OpenDrift/opendrift | tests/models/test_readers.py | 1 | 29038 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of OpenDrift.
#
# OpenDrift is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2
#
# OpenDrift is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenDrift. If not, see <https://www.gnu.org/licenses/>.
#
# Copyright 2015, Knut-Frode Dagestad, MET Norway
import unittest
from datetime import datetime, timedelta
import numpy as np
from opendrift.models.oceandrift import OceanDrift
from opendrift.models.leeway import Leeway
from opendrift.models.openoil import OpenOil
from opendrift.readers import reader_netCDF_CF_generic
from opendrift.readers import reader_ROMS_native
from opendrift.readers import reader_global_landmask
from opendrift.readers import reader_constant
from opendrift.readers import reader_lazy
from opendrift.readers import reader_from_url
from opendrift.models.pelagicegg import PelagicEggDrift
from opendrift.readers import reader_current_from_track
# Shared fixtures: a model instance used to locate the test data folder, and
# a reader list that deliberately mixes valid files with bogus URLs/paths.
o = OceanDrift(loglevel=20)
reader_list = [
    'www.nonexistingurl.com',
    o.test_data_folder() +
    '2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc',
    '/nonexistingdisk/nonexistingfile.ext',
    o.test_data_folder() +
    '2Feb2016_Nordic_sigma_3d/AROME_MetCoOp_00_DEF_20160202_subset.nc']
class TestReaders(unittest.TestCase):
"""Tests for readers"""
    def test_adding_readers(self):
        # Reader priority per variable should follow the order readers are added.
        o = OceanDrift()
        landmask = reader_global_landmask.Reader(
            extent=[-1.5, 7, 59, 64])
        r = reader_ROMS_native.Reader(o.test_data_folder() +
            '2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
        o.add_reader([r, landmask])
        self.assertEqual(o.priority_list['land_binary_mask'],
                         ['roms native', 'global_landmask'])
        self.assertEqual(o.priority_list['x_sea_water_velocity'],
                         ['roms native'])
        # Switch order
        o = OceanDrift()
        o.add_reader([landmask, r])
        self.assertEqual(o.priority_list['land_binary_mask'],
                         ['global_landmask', 'roms native'])
        self.assertEqual(o.priority_list['x_sea_water_velocity'],
                         ['roms native'])
        # Test add_readers_from_list
        o = OceanDrift()
        o.add_readers_from_list(reader_list, lazy=False)
        self.assertEqual(o.priority_list['x_sea_water_velocity'],
                         ['roms native'])
        self.assertEqual(o.priority_list['x_wind'],
                         [o.test_data_folder() +
            '2Feb2016_Nordic_sigma_3d/AROME_MetCoOp_00_DEF_20160202_subset.nc'])
    def test_repeated_run(self):
        # Re-seeding and re-running the same object must reproduce results;
        # changed config or start time must change them.
        # NOTE: this test fails if outfile is not None
        #outfile = 'leeway_test.nc'
        outfile = None
        o = OceanDrift(loglevel=50)
        o.set_config('drift:vertical_mixing', False)
        o.add_readers_from_list(reader_list)
        o.seed_elements(lon=14, lat=67.85,
                        time=datetime(2016, 2, 2, 12))
        o.run(steps=5, outfile=outfile)
        lon1 = o.get_property('lon')[0]
        # Repeated run with same object
        o.seed_elements(lon=14, lat=67.85,
                        time=datetime(2016, 2, 2, 12))
        o.run(steps=5, outfile=outfile)
        lon2 = o.get_property('lon')[0]
        # Third run, with different config
        o.seed_elements(lon=14, lat=67.85,
                        time=datetime(2016, 2, 2, 12),
                        wind_drift_factor=.1)
        o.run(steps=5)
        lon3 = o.get_property('lon')[0]
        # Fourth run, with different time
        o.reset() # Reset is needed due to new start_time
        o.seed_elements(lon=14, lat=67.85,
                        time=datetime(2016, 2, 2, 13),
                        wind_drift_factor=.1)
        o.run(steps=5, outfile=outfile)
        lon4 = o.get_property('lon')[0]
        # Check results
        self.assertEqual(lon1[-1][0], lon2[-1][0])
        self.assertNotEqual(lon3[-1][0], lon2[-1][0])
        #os.remove(outfile)
    def test_reader_from_url(self):
        # Invalid entries in the list should yield None, valid files a reader.
        readers = reader_from_url(reader_list)
        self.assertIsNone(readers[0])
        self.assertTrue(isinstance(readers[1],
                                   reader_ROMS_native.Reader))
        self.assertIsNone(readers[2])
        self.assertTrue(isinstance(readers[3],
                                   reader_netCDF_CF_generic.Reader))
    def test_lazy_reader(self):
        # A lazy reader initialises on first use and must then expose the
        # same attributes as the equivalent eagerly constructed reader.
        o = OceanDrift(loglevel=20)
        lr = reader_lazy.Reader(o.test_data_folder() +
            '2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
        self.assertFalse(lr.initialised)
        self.assertEqual(len(lr.covers_positions([15], [69])[0]), 1)
        self.assertEqual(len(lr.covers_positions([0], [0])[0]), 0)
        self.assertTrue(lr.initialised)
        # Make a corresponding, unlazy reader
        rr = reader_ROMS_native.Reader(o.test_data_folder() +
            '2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
        self.assertEqual(len(rr.covers_positions([15], [69])[0]), 1)
        self.assertEqual(len(rr.covers_positions([0], [0])[0]), 0)
        # Check that both readers provide the same attributes
        for att in rr.__dict__:
            self.assertEqual(type(lr.__getattr__(att)),
                             type(getattr(rr, att)))
            if type(getattr(rr, att)) in [float, int, dict, str, list,
                                          datetime, timedelta, bool,
                                          np.float64]:
                self.assertEqual(lr.__getattr__(att),
                                 getattr(rr, att))
            elif type(getattr(rr, att)) in [np.ndarray]:
                self.assertIsNone(np.testing.assert_array_equal(
                                  lr.__getattr__(att),
                                  getattr(rr, att)))
            else:
                print('Skipping: ' + att + ' ' +
                      str(type(getattr(rr, att))))
    def test_lazy_reader_oildrift(self):
        # Lazy readers that fail to initialise should be discarded during a run.
        o = OpenOil(loglevel=0)
        reader_constant_wind = \
            reader_constant.Reader({'x_wind':5, 'y_wind': 6,
                                    'sea_ice_area_fraction': 0})
        # Added ice area to prevent problems with masking
        # with older versions of netCDF library
        o.add_reader(reader_constant_wind)
        o.add_readers_from_list(reader_list, lazy=True)
        self.assertEqual(len(o._lazy_readers()), 4)
        o.seed_elements(lon=14, lat=67.85,
                        time=datetime(2016, 2, 2, 12))
        o.run(steps=5)
        print(o) # Debug, this fails for old libraries
        self.assertEqual(len(o._lazy_readers()), 2)
        self.assertEqual(len(o.discarded_readers), 1)
    def test_ROMS_native_stranding(self):
        # With onshore wind, some elements should strand against the ROMS
        # native landmask (auto landmask disabled).
        o = OceanDrift(loglevel=0)
        r = reader_ROMS_native.Reader(o.test_data_folder() +
            '2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
        o.add_reader(r)
        o.set_config('general:use_auto_landmask', False)
        o.set_config('drift:vertical_mixing', False)
        o.set_config('environment:fallback:x_wind', 0)
        o.set_config('environment:fallback:y_wind', 10)
        o.seed_elements(lon=15.2, lat=68.3, time=r.start_time,
                        wind_drift_factor=.02,
                        number=10, radius=1000)
        o.run(steps=8)
        self.assertEqual(o.num_elements_deactivated(), 2)
#def test_lazy_readers_and_corrupt_data(self):
# o = OceanDrift(loglevel=0)
# o.add_readers_from_list([o.test_data_folder() +
# '2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc'])
# reader_constant_current_corrupt = \
# reader_constant.Reader({'x_sea_water_velocity': np.nan,
# 'y_sea_water_velocity': np.nan})
# o.add_reader(reader_constant_current_corrupt)
# o.add_readers_from_list([o.test_data_folder() +
# '2Feb2016_Nordic_sigma_3d/Arctic20_1to5Feb_2016.nc'])
# print o
# o.seed_elements(lon=14.5, lat=68, time=datetime(2016,2,4))
# o.set_config('environment:fallback:'x_wind', 0)
# o.set_config('environment:fallback:'y_wind', 0)
# o.set_config('environment:fallback:'x_sea_water_velocity', None)
# o.set_config('environment:fallback:'y_sea_water_velocity', None)
# o.set_config('environment:fallback:'land_binary_mask', 0)
# print o
# o.run(steps=1)
#def test_oildrift_backwards(self):
# o = OpenOil(loglevel=20)
# reader_constant_wind = \
# reader_constant.Reader({'x_wind':5, 'y_wind': 6})
# o.add_reader(reader_constant_wind)
# o.add_readers_from_list(reader_list, lazy=True)
# self.assertEqual(len(o._lazy_readers()), 4)
# o.seed_elements(lon=14, lat=67.85,
# time=datetime(2016, 2, 2, 12))
# o.set_config()
# o.run(steps=5)
# self.assertEqual(len(o._lazy_readers()), 2)
# self.assertEqual(len(o.discarded_readers), 1)
#def test_lazy_reader_oildrift_real(self):
# o = OpenOil(loglevel=0)
# o.add_readers_from_file(o.test_data_folder() +
# '../../opendrift/scripts/data_sources.txt')
# o.seed_elements(lon=4, lat=60.0,
# time=datetime(2018, 7, 2, 12))
# o.run(steps=5)
# print o
    def test_lazy_reader_leeway_compare(self):
        # Lazy and eager reader setups should give (nearly) identical runs.
        o1 = Leeway(loglevel=0)
        #o1.set_config('environment:fallback:land_binary_mask', 0)
        o1.required_variables = [r for r in o1.required_variables
                                 if r != 'land_binary_mask']
        o1.add_readers_from_list(reader_list, lazy=False)
        time = o1.readers['roms native'].start_time
        o1.seed_elements(lat=67.85, lon=14, time=time)
        o1.run(steps=5)
        o2 = Leeway(loglevel=20)
        #o2.set_config('environment:fallback:land_binary_mask', 0)
        o2.required_variables = [r for r in o1.required_variables
                                 if r != 'land_binary_mask']
        o2.add_readers_from_list(reader_list, lazy=True)
        o2.seed_elements(lat=67.85, lon=14, time=time)
        o2.run(steps=5)
        # Some differences in wind and current components
        # due to different coordinate system
        for var in o1.history.dtype.names:
            if var in ['x_wind', 'y_wind', 'x_sea_water_velocity',
                       'y_sea_water_velocity']:
                tolerance = 1
            else:
                tolerance = 5
            self.assertIsNone(np.testing.assert_array_almost_equal(
                o1.history[var], o2.history[var], tolerance))
def test_constant_and_lazy_reader_leeway(self):
    """Constant wind/current readers shall combine with lazy list readers."""
    const_wind = reader_constant.Reader({'x_wind': 5, 'y_wind': 6})
    const_current = reader_constant.Reader({'x_sea_water_velocity': 0,
                                            'y_sea_water_velocity': .2})
    sim = Leeway(loglevel=20)
    sim.add_reader([const_wind, const_current])
    sim.add_readers_from_list(reader_list)
    sim.set_config('environment:fallback:x_sea_water_velocity', 0.0)
    sim.set_config('environment:fallback:y_sea_water_velocity', 0.1)
    seed_time = datetime(2016, 2, 2, 12)
    sim.seed_elements(lat=67.85, lon=14, time=seed_time)
    sim.run(steps=2)
    # Position after two steps is deterministic for this constant forcing
    self.assertAlmostEqual(sim.elements.lat[0], 67.8548, 3)
def test_automatic_landmask(self):
    """run() shall fail before seeding, and succeed once elements exist."""
    sim = OceanDrift(loglevel=20)
    # No elements seeded yet, so running must raise
    self.assertRaises(ValueError, sim.run)
    sim.seed_elements(lon=4, lat=60, time=datetime(2016, 9, 1))
    sim.run(steps=2)
def test_reader_coverage(self):
    """Check spatial-coverage logic of a generic netCDF reader.

    A point outside the reader domain shall be rejected by
    covers_positions() and check_arguments(); a point inside shall be
    accepted and echoed back unchanged.
    """
    # NOTE(review): ``o`` is not defined in this method; presumably a
    # module-level simulation object provides test_data_folder() — confirm.
    r = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
        '16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
    # Element outside reader domain
    self.assertEqual(len(r.covers_positions(5, 80)[0]), 0)
    x, y = r.lonlat2xy(5, 80)
    self.assertRaises(ValueError, r.check_arguments,
                      'y_sea_water_velocity', r.start_time, x, y, 0)
    # Element inside reader domain
    self.assertEqual(len(r.covers_positions(5, 60)[0]), 1)
    x, y = r.lonlat2xy(5, 60)
    # check_arguments() shall return validated values unchanged
    var, time, x2, y2, z2, outside = \
        r.check_arguments('y_sea_water_velocity', r.start_time, x, y, 0)
    self.assertEqual(var, ['y_sea_water_velocity'])
    self.assertEqual(time, r.start_time)
    self.assertEqual(x, x2)
    self.assertEqual(y, y2)
    self.assertEqual(0, z2)
    self.assertEqual(len(outside), 0)
def test_outside_reader_time_coverage(self):
    """Fallback value shall be used outside a reader's time coverage."""
    o = PelagicEggDrift()
    reader = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
        '16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
    o.add_reader(reader)
    o.set_config('environment:fallback:x_sea_water_velocity', 1)
    o.set_config('environment:fallback:land_binary_mask', 0)
    o.set_config('drift:vertical_mixing', False)
    # Seeding at reader.end_time makes subsequent steps fall outside
    # the reader's temporal coverage
    o.seed_elements(lon=4.8, lat=60, number=1, time=reader.end_time)
    o.run(steps=2)
    # Check that fallback value is used when outside time coverage
    self.assertEqual(o.history['x_sea_water_velocity'][0][-1], 1.0)
def test_reader_netcdf(self):
    """Check reader functionality.

    For both a generic netCDF reader and a ROMS native reader, verify
    spatial coverage of boundary/interior/exterior points, temporal
    coverage at the boundaries, and that the projection is non-geographic.
    """
    reader1 = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
        '16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
    reader2 = reader_ROMS_native.Reader(o.test_data_folder() +
        '2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
    readers = [reader1, reader2]
    for r in readers:
        print(r)
        # Make four points:
        #  1) outside lower left, 2) lower left, 3) center of domain
        #  4) outside upper right
        # and assure that only 2) and 3) are marked as covered
        # Upper right is skipped, as lonlat2xy may lie slightly outside
        x = np.array([r.xmin - r.delta_x, r.xmin, (r.xmin + r.xmax)/2,
                      r.xmax + r.delta_x])
        y = np.array([r.ymin - r.delta_y, r.ymin, (r.ymin + r.ymax)/2,
                      r.ymax + r.delta_y])
        lons, lats = r.xy2lonlat(x, y)
        covered = r.covers_positions(lons, lats, 0)[0]
        if len(covered) != 1:
            self.assertEqual(covered.tolist(), [1, 2])
        else:
            # Exactly one covered point is tolerated only if it is the
            # center (the boundary point was lost to rounding)
            if covered == [2]:
                print('#'*60)
                print('#'*60)
                print('WARNING: A point on the boundary is considered ' \
                      'outside after conversion x,y -> lon,lat -> x,y. ' \
                      'This is different from "standard", but is due to ' \
                      'rounding differences and not considered to be an ' \
                      'error. Numpy version is %s' % (np.__version__))
                print('#'*60)
                print('#'*60)
            else:
                self.assertTrue(False)  # Should never happen!
        # Temporal coverage: start_time is covered, one step earlier is not
        self.assertTrue(r.covers_time(r.start_time))
        self.assertFalse(r.covers_time(r.start_time - r.time_step))
        self.assertFalse(r.proj.crs.is_geographic)
def test_vertical_profiles(self):
    """get_variables() shall return expected depth levels and values."""
    norkyst3d = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
        '14Jan2016_NorKyst_z_3d/NorKyst-800m_ZDEPTHS_his_00_3Dsubset.nc')
    lon = np.array([4.73])
    lat = np.array([62.35])
    # Fix: second entry was a copy-paste duplicate of
    # 'x_sea_water_velocity'; the y-component was clearly intended.
    variables = ['x_sea_water_velocity', 'y_sea_water_velocity',
                 'sea_water_temperature']
    x, y = norkyst3d.lonlat2xy(lon, lat)
    data = norkyst3d.get_variables(variables,
                                   time=norkyst3d.start_time,
                                   x=x, y=y, z=[0, -100])
    # Fifth depth level of this dataset is 25 m
    # (an exact duplicate of this assertion was removed)
    self.assertEqual(data['z'][4], -25)
    self.assertAlmostEqual(data['sea_water_temperature'][:,0,0][7],
                           9.220000267028809)
def test_vertical_interpolation(self):
    """Interpolated values shall match profiles at surface and 33 m depth."""
    norkyst3d = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
        '14Jan2016_NorKyst_z_3d/NorKyst-800m_ZDEPTHS_his_00_3Dsubset.nc')
    lon = np.array([4.73, 4.75])
    lat = np.array([62.35, 62.30])
    z = np.array([0, -33])
    # Fix: second entry was a copy-paste duplicate of
    # 'x_sea_water_velocity'; the y-component was clearly intended.
    variables = ['x_sea_water_velocity', 'y_sea_water_velocity',
                 'sea_water_temperature']
    # Call get_variables_interpolated which interpolates both in
    # space (horizontally, vertically) and then in time
    data, profiles = norkyst3d.get_variables_interpolated(
        variables, profiles=['sea_water_temperature'],
        profiles_depth=[-100, 0],
        time=norkyst3d.start_time + timedelta(seconds=900),
        lon=lon, lat=lat, z=z)
    # Check surface value
    self.assertEqual(data['sea_water_temperature'][0],
                     profiles['sea_water_temperature'][0,0])
    # Check interpolated temperature at 33 m depth
    self.assertAlmostEqual(data['sea_water_temperature'][1],
                           8.32, 2)
    #import matplotlib.pyplot as plt
    #plt.plot(profiles['sea_water_temperature'][:,0])
    #plt.plot(profiles['sea_water_temperature'][:,1], 'r')
    #plt.show()
def test_vertical_interpolation_sigma(self):
    """Check temperatures sampled from a sigma-coordinate (ROMS) reader."""
    nordic3d = reader_ROMS_native.Reader(o.test_data_folder() +
        '2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
    lon = np.array([12.46, 12.46, 12.46])
    lat = np.array([68.21, 69.31, 69.31])
    z = np.array([-33, 0, -2500])
    x, y = nordic3d.lonlat2xy(lon, lat)
    variables = ['x_sea_water_velocity', 'y_sea_water_velocity',
                 'sea_water_temperature']
    # Sample raw variables on the native grid (get_variables, not the
    # interpolated variant, despite the depth values requested above)
    data = nordic3d.get_variables(variables,
                                  time = nordic3d.start_time + timedelta(seconds=900),
                                  x=x, y=y, z=z)
    # Reference values below were updated at some point; the commented
    # numbers are the previous expectations
    self.assertAlmostEqual(data['sea_water_temperature'][0,60, 60],
                           3.447, 2)
    #3.59, 2)
    self.assertAlmostEqual(data['sea_water_temperature'][-1,60, 60],
                           -0.783, 2)
    #-0.803, 2)
def test_get_environment(self):
    """get_environment() shall blend overlapping readers and fallbacks.

    Seeds five points: one covered only by the Nordic reader, two covered
    by both readers, and two covered by neither (which shall receive
    fallback values).
    """
    o = PelagicEggDrift(loglevel=0)
    reader_nordic = reader_ROMS_native.Reader(o.test_data_folder() + '2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc', name='Nordic')
    reader_arctic = reader_netCDF_CF_generic.Reader(o.test_data_folder() + '2Feb2016_Nordic_sigma_3d/Arctic20_1to5Feb_2016.nc', name='Arctic')
    ######################################################
    # Vertical interpolation is another issue to be fixed:
    reader_nordic.zlevels = reader_arctic.z
    ######################################################
    o.add_reader([reader_nordic, reader_arctic])
    # One point covered only by Nordic, two points covered
    # by both readers, and two points covered by none of the readers
    testlon = np.array((14.0, 20.0, 20.1, 4, 5))
    testlat = np.array((70.1, 76.0, 76.1, 60, 60))
    testz = np.random.uniform(0, 0, len(testlon))
    # Only point 0 is inside the Nordic domain
    self.assertIsNone(np.testing.assert_array_almost_equal(
        [0], reader_nordic.covers_positions(testlon, testlat, testz)[0]))
    # Points 0-2 are inside the Arctic domain
    self.assertIsNone(np.testing.assert_array_almost_equal(
        [0, 1, 2],
        reader_arctic.covers_positions(testlon, testlat, testz)[0]))
    o.seed_elements(testlon, testlat, z=testz, time=reader_nordic.start_time)
    o.set_config('environment:fallback:land_binary_mask', 0)
    env, env_profiles, missing = \
        o.get_environment(list(o.required_variables),
                          reader_nordic.start_time,
                          testlon, testlat, testz,
                          o.required_profiles)
    # Point 0 from Nordic, point 1 from Arctic, point 4 from fallback (10.0)
    self.assertAlmostEqual(env['sea_water_temperature'][0], 4.251, 2)
    self.assertAlmostEqual(env['sea_water_temperature'][1], 0.122, 3)
    self.assertAlmostEqual(env['sea_water_temperature'][4], 10.0)
    self.assertIsNone(np.testing.assert_array_almost_equal(
        missing, [False,False,False,False,False]))
    self.assertAlmostEqual(env_profiles['sea_water_temperature'][0,0],
                           4.251, 2)
    self.assertAlmostEqual(env_profiles['sea_water_temperature'][0,4], 10)
    #self.assertAlmostEqual(env_profiles['sea_water_temperature'][8,2], 10)
    self.assertAlmostEqual(env_profiles['sea_water_temperature'][7,2],
                           2.159, 3)
    # Get separate data
    env2, env_profiles2, missing2 = \
        o.get_environment(['x_sea_water_velocity', 'y_sea_water_velocity',
                           'sea_water_temperature'],
                          reader_nordic.start_time,
                          testlon, testlat, testz,
                          ['sea_water_temperature'])
    self.assertTrue(env_profiles2 is not None)
    self.assertEqual(set(env_profiles2.keys()),
                     set(['z', 'sea_water_temperature']))
    # Get separate data, without profile
    env3, env_profiles3, missing3 = \
        o.get_environment(['x_sea_water_velocity', 'y_sea_water_velocity',
                           'sea_water_temperature'],
                          reader_nordic.start_time,
                          testlon, testlat, testz,
                          profiles=None)
    self.assertTrue(env_profiles3 is None)
    # Get separate data
    env4, env_profiles4, missing4 = \
        o.get_environment(['x_sea_water_velocity', 'y_sea_water_velocity',
                           'sea_water_temperature'],
                          reader_nordic.start_time,
                          testlon, testlat, testz,
                          ['sea_water_temperature'])
    # Identical requests shall give identical environments and profiles
    self.assertIsNone(np.testing.assert_array_almost_equal(
        env['x_sea_water_velocity'],
        env2['x_sea_water_velocity']))
    self.assertIsNone(np.testing.assert_array_almost_equal(
        env_profiles2['sea_water_temperature'].ravel(),
        env_profiles4['sea_water_temperature'].ravel()))
def test_constant_reader(self):
    """Constant readers shall combine with a netCDF reader in one run."""
    o = OpenOil(loglevel=0)
    cw = reader_constant.Reader({'x_wind':5, 'y_wind': 6})
    cc = reader_constant.Reader({'x_sea_water_velocity':0, 'y_sea_water_velocity': .2})
    # cs is only referenced by the disabled add_reader line below
    cs = reader_constant.Reader({'sea_water_temperature': 278})
    r = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
        '16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
    o.add_reader([cw, cc, r])
    # TODO: should check why adding constant reader with
    # sea_water_temperature gives Deprecated warning
    #o.add_reader([cw, cc, cs, r])
    o.seed_elements(lon=4, lat=60, time=r.start_time, number=5)
    o.run(steps=3)
def test_clip_domain(self):
    """clip_boundary_pixels() shall shrink the domain and end runs earlier.

    Runs the same seeding with clipped and unclipped readers; the clipped
    run is deactivated sooner, but the common part of the trajectories
    must agree.
    """
    o = OceanDrift(loglevel=50)
    r1 = reader_ROMS_native.Reader(o.test_data_folder() +
        '2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
    r1.clip_boundary_pixels(20)
    r2 = reader_ROMS_native.Reader(o.test_data_folder() +
        '2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
    # Clipping 20 pixels from each side reduces both dimensions by 40
    self.assertEqual(r2.shape, (151, 81))
    self.assertEqual(r1.shape, (111, 41))
    self.assertEqual(r1.xmin, 20)
    o1 = OceanDrift(loglevel=50)
    o1.set_config('environment:fallback:x_sea_water_velocity', None)
    o1.add_reader(r1)
    o1.seed_elements(lon=15, lat=70.1, time=r1.start_time)
    o1.set_config('environment:fallback:land_binary_mask', 0)
    o1.run(time_step=3600*3, duration=timedelta(hours=48))
    o2 = OceanDrift(loglevel=50)
    o2.set_config('environment:fallback:x_sea_water_velocity', None)
    o2.add_reader(r2)
    o2.seed_elements(lon=15, lat=70.1, time=r1.start_time)
    o2.set_config('environment:fallback:land_binary_mask', 0)
    o2.run(time_step=3600*3, duration=timedelta(hours=48))
    # Compare: clipped run stops earlier, overlapping part agrees
    lat1 = o1.get_property('lat')[0]
    lat2 = o2.get_property('lat')[0]
    self.assertEqual(len(lat1), 13)
    self.assertEqual(len(lat2), 17)
    self.assertIsNone(np.testing.assert_allclose(
        lat1[0:12], lat2[0:12]))
    # Test reader netCDF_CF_generic
    r = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
        '16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
    self.assertEqual(r.shape, (301, 201))
    o3 = OceanDrift(loglevel=50)
    o3.set_config('environment:fallback:x_sea_water_velocity', None)
    o3.set_config('environment:fallback:land_binary_mask', 0)
    o3.add_reader(r)
    o3.seed_elements(lon=4.36, lat=61.7, time=r.start_time)
    o3.run(steps=24)
    # Clip in place after the first run; o4 then sees the reduced domain
    r.clip_boundary_pixels(10)
    self.assertEqual(r.shape, (281, 181))
    o4 = OceanDrift(loglevel=50)
    o4.set_config('environment:fallback:x_sea_water_velocity', None)
    o4.set_config('environment:fallback:land_binary_mask', 0)
    o4.add_reader(r)
    o4.seed_elements(lon=4.36, lat=61.7, time=r.start_time)
    o4.run(steps=24)
    # Compare
    lat3 = o3.get_property('lat')[0]
    lat4 = o4.get_property('lat')[0]
    self.assertEqual(len(lat3), 25)
    self.assertEqual(len(lat4), 13)
    self.assertIsNone(np.testing.assert_allclose(
        lat3[0:12], lat4[0:12]))
def test_reader_current_from_track(self):
    """Check if extrapolated currents are of expected value."""
    # Two drifter observations six hours apart define the track
    obslon = [3.1, 3.123456]
    obslat = [61.1, 61.132198]
    obstime = [datetime(2015, 11, 16, 0), datetime(2015, 11, 16, 6)]
    o = OceanDrift(loglevel=20)
    reader_wind = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
        '16Nov2015_NorKyst_z_surface/arome_subset_16Nov2015.nc')
    # Current is derived from the track displacement, corrected for a
    # wind-drag factor of 1.8%
    reader_current = reader_current_from_track.Reader(obslon, obslat, obstime,
        wind_east=0, wind_north=0, windreader=reader_wind, wind_factor=0.018)
    self.assertAlmostEqual(reader_current.x_sea_water_velocity.data[0],0.2236, 4)
def test_valid_minmax(self):
    """Check that invalid values are replaced with fallback."""
    o = OceanDrift(loglevel=20)
    from opendrift.readers.basereader import variables
    # NOTE: standard_names is a module-level table; it is mutated here and
    # restored below, so test order matters if the reset line is skipped
    minval = variables.standard_names['x_wind']['valid_min']
    # Setting valid_min to -5, to check that replacement works
    variables.standard_names['x_wind']['valid_min'] = -5
    reader_wind = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
        '16Nov2015_NorKyst_z_surface/arome_subset_16Nov2015.nc')
    o.add_reader(reader_wind)
    o.set_config('environment:fallback:x_sea_water_velocity', 0)
    o.set_config('environment:fallback:x_wind', 2.0)
    o.set_config('environment:fallback:y_sea_water_velocity', 0)
    o.set_config('environment:fallback:land_binary_mask', 0)
    o.seed_elements(lon=4, lat=60, time=reader_wind.start_time)
    o.run(steps=1)
    variables.standard_names['x_wind']['valid_min'] = minval  # reset
    # Wind was masked as invalid, so the fallback (2.0) must appear
    w = o.get_property('x_wind')[0][0]
    self.assertAlmostEqual(w, 2.0, 1)
def test_valid_minmax_nanvalues(self):
    """Values above a reduced valid_max shall be masked out."""
    from opendrift.readers.basereader import variables
    # Reducing max current speed to test masking
    # NOTE: mutates the module-level standard_names table; restored below
    maxval = variables.standard_names['x_sea_water_velocity']['valid_max']
    variables.standard_names['x_sea_water_velocity']['valid_max'] = .1
    o = OceanDrift(loglevel=20)
    o.set_config('environment:fallback:land_binary_mask', 0)
    norkyst = reader_netCDF_CF_generic.Reader(o.test_data_folder() + '14Jan2016_NorKyst_z_3d/NorKyst-800m_ZDEPTHS_his_00_3Dsubset.nc')
    o.add_reader(norkyst)
    o.seed_elements(lon=4.95, lat=62, number=10, time=norkyst.start_time)
    o.run(steps=2)
    variables.standard_names['x_sea_water_velocity']['valid_max'] = maxval  # reset
    # With velocities above 0.1 masked, the maximum must stay negative
    u = o.get_property('x_sea_water_velocity')[0]
    self.assertAlmostEqual(u.max(), -.069, 3)  # Some numerical error allowed
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
frank-tancf/scikit-learn | examples/feature_selection/plot_f_test_vs_mi.py | 75 | 1647 | """
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1]; the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is, the third feature is completely irrelevant.
The code below plots the dependency of y against individual x_i and normalized
values of univariate F-tests statistics and mutual information.
As F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly mark x_3 as irrelevant.
"""
# Example script: plot each feature against the target and annotate the
# panels with normalized F-test and mutual-information scores.
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression
np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
# Normalize both score vectors to [0, 1] so they are comparable in titles
f_scores, _ = f_regression(X, y)
f_scores /= np.max(f_scores)
mi_scores = mutual_info_regression(X, y)
mi_scores /= np.max(mi_scores)
plt.figure(figsize=(15, 5))
for idx in range(3):
    plt.subplot(1, 3, idx + 1)
    plt.scatter(X[:, idx], y)
    plt.xlabel("$x_{}$".format(idx + 1), fontsize=14)
    if idx == 0:
        plt.ylabel("$y$", fontsize=14)
    plt.title("F-test={:.2f}, MI={:.2f}".format(f_scores[idx], mi_scores[idx]),
              fontsize=16)
plt.show()
| bsd-3-clause |
monikagrabowska/osf.io | tests/framework_tests/test_analytics.py | 3 | 7534 | # -*- coding: utf-8 -*-
"""
Unit tests for analytics logic in framework/analytics/__init__.py
"""
import unittest
import pytest
from django.utils import timezone
from nose.tools import * # flake8: noqa (PEP8 asserts)
from flask import Flask
from datetime import datetime
from framework import analytics, sessions
from framework.sessions import session
from osf.models import PageCounter
from tests.base import OsfTestCase
from osf_tests.factories import UserFactory, ProjectFactory
pytestmark = pytest.mark.django_db
class TestAnalytics(OsfTestCase):
    """Exercise the activity-point counting helpers in framework.analytics."""

    def test_get_total_activity_count(self):
        """Total activity count starts at zero and tracks increments."""
        user = UserFactory()
        now = timezone.now()
        assert_equal(analytics.get_total_activity_count(user._id), 0)
        assert_equal(analytics.get_total_activity_count(user._id),
                     user.get_activity_points(db=None))
        analytics.increment_user_activity_counters(
            user._id, 'project_created', now.isoformat(), db=None)
        assert_equal(analytics.get_total_activity_count(user._id, db=None), 1)
        assert_equal(analytics.get_total_activity_count(user._id, db=None),
                     user.get_activity_points(db=None))

    def test_increment_user_activity_counters(self):
        """Incrementing a counter raises the user's activity points by one."""
        user = UserFactory()
        now = timezone.now()
        assert_equal(user.get_activity_points(db=None), 0)
        analytics.increment_user_activity_counters(
            user._id, 'project_created', now.isoformat(), db=None)
        assert_equal(user.get_activity_points(db=None), 1)
class UpdateCountersTestCase(OsfTestCase):
    """Base class pushing a Flask request context and a fresh session.

    The analytics decorators read the active session, so each test needs a
    request context with a clean session object.
    """

    def setUp(self):
        decoratorapp = Flask('decorators')
        self.ctx = decoratorapp.test_request_context()
        self.ctx.push()
        # TODO: Think of something better @sloria @jmcarp
        sessions.set_session(sessions.Session())

    def tearDown(self):
        self.ctx.pop()
class TestUpdateCounters(UpdateCountersTestCase):
    """Tests for the analytics.update_counters decorator and page counters."""

    def setUp(self):
        super(TestUpdateCounters, self).setUp()
        self.node = ProjectFactory()
        self.fid = 'foo'   # file id used to build counter keys
        self.vid = 1       # file version id
        self.userid = 'test123'
        # Fake node info whose contributor list includes self.userid
        self.node_info = {
            'contributors': ['test123', 'test234']
        }

    def test_update_counters_file(self):
        """First download counts as unique+total; a repeat visit only total."""
        @analytics.update_counters('download:{target_id}:{fid}', db=None)
        def download_file_(**kwargs):
            return kwargs.get('node') or kwargs.get('project')
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, self.fid), db=None)
        assert_equal(count, (None, None))
        download_file_(node=self.node, fid=self.fid)
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, self.fid), db=None)
        assert_equal(count, (1, 1))
        # Mark the page as already visited in this session
        page = 'download:{0}:{1}'.format(self.node._id, self.fid)
        session.data['visited'].append(page)
        download_file_(node=self.node, fid=self.fid)
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, self.fid), db=None)
        assert_equal(count, (1, 2))

    def test_update_counters_file_user_is_contributor(self):
        """Downloads by project contributors shall not bump the counters."""
        @analytics.update_counters('download:{target_id}:{fid}', db=None, node_info=self.node_info)
        def download_file_(**kwargs):
            return kwargs.get('node') or kwargs.get('project')
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, self.fid), db=None)
        assert_equal(count, (None, None))
        download_file_(node=self.node, fid=self.fid)
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, self.fid), db=None)
        assert_equal(count, (1, 1))
        page = 'download:{0}:{1}'.format(self.node._id, self.fid)
        session.data['visited'].append(page)
        # Authenticated user is in the contributor list above
        session.data['auth_user_id'] = self.userid
        download_file_(node=self.node, fid=self.fid)
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, self.fid), db=None)
        assert_equal(count, (1, 1))

    def test_update_counters_file_user_is_not_contributor(self):
        """Downloads by non-contributors shall bump the total count."""
        @analytics.update_counters('download:{target_id}:{fid}', db=None, node_info=self.node_info)
        def download_file_(**kwargs):
            return kwargs.get('node') or kwargs.get('project')
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, self.fid), db=None)
        assert_equal(count, (None, None))
        download_file_(node=self.node, fid=self.fid)
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, self.fid), db=None)
        assert_equal(count, (1, 1))
        page = 'download:{0}:{1}'.format(self.node._id, self.fid)
        session.data['visited'].append(page)
        # Authenticated user is NOT in the contributor list
        session.data['auth_user_id'] = "asv12uey821vavshl"
        download_file_(node=self.node, fid=self.fid)
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, self.fid), db=None)
        assert_equal(count, (1, 2))

    def test_update_counters_file_version(self):
        """Counter keys may include a file-version component."""
        @analytics.update_counters('download:{target_id}:{fid}:{vid}', db=None)
        def download_file_version_(**kwargs):
            return kwargs.get('node') or kwargs.get('project')
        count = analytics.get_basic_counters('download:{0}:{1}:{2}'.format(self.node._id, self.fid, self.vid), db=None)
        assert_equal(count, (None, None))
        download_file_version_(node=self.node, fid=self.fid, vid=self.vid)
        count = analytics.get_basic_counters('download:{0}:{1}:{2}'.format(self.node._id, self.fid, self.vid), db=None)
        assert_equal(count, (1, 1))
        page = 'download:{0}:{1}:{2}'.format(self.node._id, self.fid, self.vid)
        session.data['visited'].append(page)
        download_file_version_(node=self.node, fid=self.fid, vid=self.vid)
        count = analytics.get_basic_counters('download:{0}:{1}:{2}'.format(self.node._id, self.fid, self.vid), db=None)
        assert_equal(count, (1, 2))

    def test_get_basic_counters(self):
        """get_basic_counters() returns (unique, total) from PageCounter."""
        page = 'node:' + str(self.node._id)
        PageCounter.objects.create(_id=page, total=5, unique=3)
        count = analytics.get_basic_counters(page, db=None)
        assert_equal(count, (3, 5))

    @unittest.skip('Reverted the fix for #2281. Unskip this once we use GUIDs for keys in the download counts collection')
    def test_update_counters_different_files(self):
        # Regression test for https://github.com/CenterForOpenScience/osf.io/issues/2281
        @analytics.update_counters('download:{target_id}:{fid}', db=None)
        def download_file_(**kwargs):
            return kwargs.get('node') or kwargs.get('project')
        # Two distinct file ids that collide under dotted-key flattening
        fid1 = 'test.analytics.py'
        fid2 = 'test_analytics.py'
        download_file_(node=self.node, fid=fid1)
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, fid1), db=None)
        assert_equal(count, (1, 1))
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, fid2), db=None)
        assert_equal(count, (None, None))
        page = 'download:{0}:{1}'.format(self.node._id, fid1)
        session.data['visited'].append(page)
        download_file_(node=self.node, fid=fid1)
        download_file_(node=self.node, fid=fid2)
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, fid1), db=None)
        assert_equal(count, (1, 2))
        count = analytics.get_basic_counters('download:{0}:{1}'.format(self.node._id, fid2), db=None)
        assert_equal(count, (1, 1))
| apache-2.0 |
sbuss/voteswap | lib/networkx/algorithms/link_analysis/hits_alg.py | 10 | 9437 | """Hubs and authorities analysis of graph structure.
"""
# Copyright (C) 2008-2012 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
# NetworkX:http://networkx.github.io/
import networkx as nx
from networkx.exception import NetworkXError
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
__all__ = ['hits','hits_numpy','hits_scipy','authority_matrix','hub_matrix']
def hits(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True):
    """Return HITS hubs and authorities values for nodes.

    Runs Kleinberg's HITS power iteration on ``G``: authority values
    estimate a node from its incoming links, hub values from its
    outgoing links.

    Parameters
    ----------
    G : graph
        A NetworkX graph (multigraphs are rejected).
    max_iter : integer, optional
        Maximum number of power-iteration steps.
    tol : float, optional
        L1 tolerance on successive hub vectors used to detect convergence.
    nstart : dictionary, optional
        Optional starting hub value for each node.
    normalized : bool (default=True)
        If True, scale each returned vector so its values sum to 1.

    Returns
    -------
    (hubs, authorities) : two-tuple of dictionaries
        Hub and authority value per node.

    Raises
    ------
    NetworkXError
        If the power iteration fails to converge within ``max_iter``
        iterations.

    Notes
    -----
    HITS was designed for directed graphs, but the input is not checked
    for directedness and undirected graphs are processed as-is.

    References
    ----------
    Jon Kleinberg, "Authoritative sources in a hyperlinked environment",
    Journal of the ACM 46 (5): 604-632, 1999. doi:10.1145/324133.324140
    """
    if type(G) == nx.MultiGraph or type(G) == nx.MultiDiGraph:
        raise Exception("hits() not defined for graphs with multiedges.")
    if len(G) == 0:
        return {}, {}
    # Starting hub vector: uniform unless supplied by the caller.
    if nstart is None:
        hubs = dict.fromkeys(G, 1.0 / G.number_of_nodes())
    else:
        hubs = nstart
    # L1-normalize the starting vector.
    scale = 1.0 / sum(hubs.values())
    for node in hubs:
        hubs[node] *= scale
    iteration = 0
    while True:  # power iteration: make up to max_iter iterations
        last_hubs = hubs
        hubs = dict.fromkeys(last_hubs.keys(), 0)
        auths = dict.fromkeys(last_hubs.keys(), 0)
        # Left multiply: a^T = hlast^T * G
        for node in hubs:
            for nbr, edge in G[node].items():
                auths[nbr] += last_hubs[node] * edge.get('weight', 1)
        # Then h = G * a
        for node in hubs:
            for nbr, edge in G[node].items():
                hubs[node] += auths[nbr] * edge.get('weight', 1)
        # Rescale both vectors by their maxima.
        scale = 1.0 / max(hubs.values())
        for node in hubs:
            hubs[node] *= scale
        scale = 1.0 / max(auths.values())
        for node in auths:
            auths[node] *= scale
        # Convergence is judged on the L1 change of the hub vector.
        err = sum(abs(hubs[node] - last_hubs[node]) for node in hubs)
        if err < tol:
            break
        if iteration > max_iter:
            raise NetworkXError(
                "HITS: power iteration failed to converge in %d iterations." % (iteration + 1))
        iteration += 1
    if normalized:
        scale = 1.0 / sum(auths.values())
        for node in auths:
            auths[node] *= scale
        scale = 1.0 / sum(hubs.values())
        for node in hubs:
            hubs[node] *= scale
    return hubs, auths
def authority_matrix(G, nodelist=None):
    """Return the HITS authority matrix (A^T A) of ``G``."""
    adjacency = nx.to_numpy_matrix(G, nodelist=nodelist)
    return adjacency.T * adjacency
def hub_matrix(G, nodelist=None):
    """Return the HITS hub matrix (A A^T) of ``G``."""
    adjacency = nx.to_numpy_matrix(G, nodelist=nodelist)
    return adjacency * adjacency.T
def hits_numpy(G, normalized=True):
    """Return HITS hubs and authorities values for nodes.

    Dense eigenvector variant of :func:`hits`: the hub and authority
    vectors are the dominant eigenvectors of the hub matrix (A A^T) and
    authority matrix (A^T A), computed via NumPy's LAPACK interface.

    Parameters
    ----------
    G : graph
        A NetworkX graph.
    normalized : bool (default=True)
        If True, scale each result vector so its values sum to 1;
        otherwise scale by the maximum value.

    Returns
    -------
    (hubs, authorities) : two-tuple of dictionaries
        Hub and authority value per node.

    References
    ----------
    Jon Kleinberg, "Authoritative sources in a hyperlinked environment",
    Journal of the ACM 46 (5): 604-632, 1999. doi:10.1145/324133.324140
    """
    try:
        import numpy as np
    except ImportError:
        raise ImportError(
            "hits_numpy() requires NumPy: http://scipy.org/")
    if len(G) == 0:
        return {}, {}
    node_order = G.nodes()
    eigenvalues, eigenvectors = np.linalg.eig(nx.hub_matrix(G, node_order))
    idx = eigenvalues.argsort()[-1]  # index of the largest eigenvalue
    h = np.array(eigenvectors[:, idx]).flatten()
    eigenvalues, eigenvectors = np.linalg.eig(nx.authority_matrix(G, node_order))
    idx = eigenvalues.argsort()[-1]  # index of the largest eigenvalue
    a = np.array(eigenvectors[:, idx]).flatten()
    if normalized:
        h = h / h.sum()
        a = a / a.sum()
    else:
        h = h / h.max()
        a = a / a.max()
    hubs = dict(zip(node_order, map(float, h)))
    authorities = dict(zip(node_order, map(float, a)))
    return hubs, authorities
def hits_scipy(G, max_iter=100, tol=1.0e-6, normalized=True):
    """Return HITS hubs and authorities values for nodes.

    Sparse-matrix variant of :func:`hits`, using SciPy sparse matrices
    for the power iteration on the authority matrix A^T A.

    Parameters
    ----------
    G : graph
        A NetworkX graph.
    max_iter : integer, optional
        Maximum number of iterations in power method.
    tol : float, optional
        Error tolerance used to check convergence in power method iteration.
    normalized : bool (default=True)
        Normalize results by the sum of all of the values.

    Returns
    -------
    (hubs, authorities) : two-tuple of dictionaries
        Two dictionaries keyed by node containing the hub and authority
        values.

    Raises
    ------
    NetworkXError
        If the power iteration fails to converge within ``max_iter``
        iterations.

    References
    ----------
    Jon Kleinberg, "Authoritative sources in a hyperlinked environment",
    Journal of the ACM 46 (5): 604-632, 1999. doi:10.1145/324133.324140
    """
    try:
        import scipy.sparse
        import numpy as np
    except ImportError:
        raise ImportError(
            "hits_scipy() requires SciPy: http://scipy.org/")
    if len(G) == 0:
        return {}, {}
    M = nx.to_scipy_sparse_matrix(G, nodelist=G.nodes())
    (n, m) = M.shape  # should be square
    A = M.T * M  # authority matrix
    # Fix: use numpy for the dense helpers. ``scipy.ones`` and
    # ``scipy.absolute`` were deprecated top-level aliases of the numpy
    # functions and have been removed from SciPy; they also relied on
    # ``import scipy.sparse`` binding the bare ``scipy`` name.
    x = np.ones((n, 1)) / n  # initial guess
    # power iteration on authority matrix
    i = 0
    while True:
        xlast = x
        x = A * x
        x = x / x.max()
        # check convergence, l1 norm
        err = np.absolute(x - xlast).sum()
        if err < tol:
            break
        if i > max_iter:
            raise NetworkXError(
                "HITS: power iteration failed to converge in %d iterations." % (i + 1))
        i += 1
    a = np.asarray(x).flatten()
    # h = M * a
    h = np.asarray(M * a).flatten()
    if normalized:
        h = h / h.sum()
        a = a / a.sum()
    hubs = dict(zip(G.nodes(), map(float, h)))
    authorities = dict(zip(G.nodes(), map(float, a)))
    return hubs, authorities
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
except:
raise SkipTest("NumPy not available")
try:
import scipy
except:
raise SkipTest("SciPy not available")
| mit |
tchellomello/home-assistant | homeassistant/components/counter/reproduce_state.py | 16 | 2573 | """Reproduce an Counter state."""
import asyncio
import logging
from typing import Any, Dict, Iterable, Optional
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import Context, State
from homeassistant.helpers.typing import HomeAssistantType
from . import (
ATTR_INITIAL,
ATTR_MAXIMUM,
ATTR_MINIMUM,
ATTR_STEP,
DOMAIN,
SERVICE_CONFIGURE,
VALUE,
)
_LOGGER = logging.getLogger(__name__)
async def _async_reproduce_state(
    hass: HomeAssistantType,
    state: State,
    *,
    context: Optional[Context] = None,
    reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
    """Reproduce a single state."""
    cur_state = hass.states.get(state.entity_id)
    if cur_state is None:
        _LOGGER.warning("Unable to find entity %s", state.entity_id)
        return
    # Counter states are non-negative integers rendered as strings.
    if not state.state.isdigit():
        _LOGGER.warning(
            "Invalid state specified for %s: %s", state.entity_id, state.state
        )
        return
    config_attrs = (ATTR_INITIAL, ATTR_MAXIMUM, ATTR_MINIMUM, ATTR_STEP)
    # Nothing to do when the value and every configuration attribute
    # already match the target state.
    if cur_state.state == state.state and all(
        cur_state.attributes.get(attr) == state.attributes.get(attr)
        for attr in config_attrs
    ):
        return
    service_data = {ATTR_ENTITY_ID: state.entity_id, VALUE: state.state}
    # Forward only the configuration attributes present on the target state.
    for attr in config_attrs:
        if attr in state.attributes:
            service_data[attr] = state.attributes[attr]
    await hass.services.async_call(
        DOMAIN, SERVICE_CONFIGURE, service_data, context=context, blocking=True
    )
async def async_reproduce_states(
    hass: HomeAssistantType,
    states: Iterable[State],
    *,
    context: Optional[Context] = None,
    reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
    """Reproduce Counter states."""
    # Start one reproduction coroutine per target state and wait for all
    # of them to complete.
    pending = [
        _async_reproduce_state(
            hass, state, context=context, reproduce_options=reproduce_options
        )
        for state in states
    ]
    await asyncio.gather(*pending)
| apache-2.0 |
leiferikb/bitpop | src/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py | 2 | 6117 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import time
import webkitpy.thirdparty.unittest2 as unittest
from webkitpy.layout_tests.port.factory import PortFactory
from webkitpy.layout_tests.port import server_process
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.common.system.outputcapture import OutputCapture
class TrivialMockPort(object):
    """Minimal stand-in for a Port: just enough surface for ServerProcess
    (a host whose kill_process is a no-op, a fake results directory, and
    a kill timeout)."""

    def __init__(self):
        self.host = MockSystemHost()
        # Stub out process killing so stop() can never touch a real
        # process. (The original had this assignment duplicated on two
        # consecutive lines; the redundant copy is removed.)
        self.host.executive.kill_process = lambda x: None

    def results_directory(self):
        """Return the fake directory test results would be written to."""
        return "/mock-results"

    def process_kill_time(self):
        """Seconds to wait after a kill before giving up on the process."""
        return 1
class MockFile(object):
    """Fake file object whose write() always simulates a broken pipe.

    Each failed write records the stream on its owner's ``broken_pipes``
    list so tests can assert exactly which stream broke.
    """

    def __init__(self, server_process):
        # Owner that collects the streams whose pipes have "broken".
        self._server_process = server_process
        self.closed = False

    def fileno(self):
        """Pretend to be backed by a real OS-level file descriptor."""
        return 1

    def write(self, line):
        """Record the failure on the owner, then fail like a closed pipe."""
        broken = self._server_process.broken_pipes
        broken.append(self)
        raise IOError

    def close(self):
        """Mark the stream closed (checked by the cleanup tests)."""
        self.closed = True
class MockProc(object):
    """Fake subprocess.Popen-alike: its standard streams are MockFiles
    and it always reports that it has already exited."""

    def __init__(self, server_process):
        # All three standard streams report broken pipes to the same owner.
        for stream_name in ('stdin', 'stdout', 'stderr'):
            setattr(self, stream_name, MockFile(server_process))
        self.pid = 1

    def poll(self):
        """A non-None return code means "the process has exited"."""
        return 1

    def wait(self):
        """Pretend the process exited cleanly."""
        return 0
class FakeServerProcess(server_process.ServerProcess):
    # ServerProcess whose _start() wires up a MockProc instead of
    # launching a real child process, so tests can exercise the
    # stop/cleanup/broken-pipe paths without any subprocess.
    def _start(self):
        self._proc = MockProc(self)
        # Mirror the real _start(): expose the child's streams and pid
        # directly on the server-process object.
        self.stdin = self._proc.stdin
        self.stdout = self._proc.stdout
        self.stderr = self._proc.stderr
        self._pid = self._proc.pid
        # Streams that raised IOError on write; appended to by MockFile.
        self.broken_pipes = []
class TestServerProcess(unittest.TestCase):
    def test_basic(self):
        """End-to-end check against a real child process.

        The -c script (Python 2 syntax, matching the interpreter these
        tests targeted) sleeps briefly, then writes one line to stdout
        and one to stderr.
        """
        cmd = [sys.executable, '-c', 'import sys; import time; time.sleep(0.02); print "stdout"; sys.stdout.flush(); print >>sys.stderr, "stderr"']
        host = SystemHost()
        factory = PortFactory(host)
        port = factory.get()
        now = time.time()
        proc = server_process.ServerProcess(port, 'python', cmd)
        proc.write('')
        # The child is still sleeping, so it has neither exited nor crashed.
        self.assertEqual(proc.poll(), None)
        self.assertFalse(proc.has_crashed())
        # check that doing a read after an expired deadline returns
        # nothing immediately.
        line = proc.read_stdout_line(now - 1)
        self.assertEqual(line, None)
        # FIXME: This part appears to be flaky. line should always be non-None.
        # FIXME: https://bugs.webkit.org/show_bug.cgi?id=88280
        line = proc.read_stdout_line(now + 1.0)
        if line:
            self.assertEqual(line.strip(), "stdout")
        line = proc.read_stderr_line(now + 1.0)
        if line:
            self.assertEqual(line.strip(), "stderr")
        proc.stop(0)
    def test_cleanup(self):
        """stop() must close all three child streams."""
        port_obj = TrivialMockPort()
        # NOTE: the local name deliberately shadows the server_process
        # module within this test.
        server_process = FakeServerProcess(port_obj=port_obj, name="test", cmd=["test"])
        server_process._start()
        server_process.stop()
        self.assertTrue(server_process.stdin.closed)
        self.assertTrue(server_process.stdout.closed)
        self.assertTrue(server_process.stderr.closed)
    def test_broken_pipe(self):
        """A failed write must mark the process crashed and drop the child
        handle, on both the Windows and POSIX platform-name code paths."""
        port_obj = TrivialMockPort()
        port_obj.host.platform.os_name = 'win'
        server_process = FakeServerProcess(port_obj=port_obj, name="test", cmd=["test"])
        server_process.write("should break")
        self.assertTrue(server_process.has_crashed())
        self.assertIsNotNone(server_process.pid())
        self.assertIsNone(server_process._proc)
        self.assertEqual(server_process.broken_pipes, [server_process.stdin])
        # Same scenario with a POSIX platform name.
        port_obj.host.platform.os_name = 'mac'
        server_process = FakeServerProcess(port_obj=port_obj, name="test", cmd=["test"])
        server_process.write("should break")
        self.assertTrue(server_process.has_crashed())
        self.assertIsNone(server_process._proc)
        self.assertEqual(server_process.broken_pipes, [server_process.stdin])
class TestQuoteData(unittest.TestCase):
    """Checks that quote_data makes whitespace and binary bytes visible."""

    def _quoted(self, data):
        # Shared alias so each test reads as a single assertion.
        return server_process.quote_data(data)

    def test_plain(self):
        # Ordinary text passes through unchanged.
        self.assertEqual(self._quoted("foo"), ["foo"])

    def test_trailing_spaces(self):
        # Trailing spaces are rewritten as explicit \x20 escapes.
        # NOTE(review): the input literal's trailing whitespace looks
        # truncated relative to the expected "\x20\x20" -- confirm
        # against the upstream source.
        self.assertEqual(self._quoted("foo "), ["foo\x20\x20"])

    def test_newlines(self):
        # Newlines become literal backslash-n, one list entry per line.
        self.assertEqual(self._quoted("foo \nbar\n"), ["foo\x20\\n", "bar\\n"])

    def test_binary_data(self):
        # Non-printable bytes are hex-escaped.
        self.assertEqual(self._quoted("\x00\x01ab"), ["\\x00\\x01ab"])
| gpl-3.0 |
minlexx/pyevemon | esi_client/models/post_characters_character_id_fittings_internal_server_error.py | 1 | 3184 | # coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.4.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class PostCharactersCharacterIdFittingsInternalServerError(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Swagger model for an internal-server-error response carrying a single
    ``error`` message.
    """

    def __init__(self, error=None):
        """
        PostCharactersCharacterIdFittingsInternalServerError - a model defined in Swagger

        :param str error: Internal server error message.
        """
        # Maps attribute name -> swagger type; drives to_dict()'s traversal.
        self.swagger_types = {
            'error': 'str'
        }
        # Maps attribute name -> key used in the JSON wire format.
        self.attribute_map = {
            'error': 'error'
        }
        self._error = error

    @property
    def error(self):
        """
        Gets the error of this PostCharactersCharacterIdFittingsInternalServerError.
        Internal server error message

        :return: The error of this PostCharactersCharacterIdFittingsInternalServerError.
        :rtype: str
        """
        return self._error

    @error.setter
    def error(self, error):
        """
        Sets the error of this PostCharactersCharacterIdFittingsInternalServerError.
        Internal server error message

        :param error: The error of this PostCharactersCharacterIdFittingsInternalServerError.
        :type: str
        """
        self._error = error

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # dict.items() behaves identically on Python 2.7 and 3 and removes
        # this method's dependency on six.iteritems.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, PostCharactersCharacterIdFittingsInternalServerError):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| gpl-3.0 |
trondeau/gnuradio | gr-wxgui/python/wxgui/pubsub.py | 92 | 4862 | #!/usr/bin/env python
#
# Copyright 2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Abstract GNU Radio publisher/subscriber interface
This is a proof of concept implementation, will likely change significantly.
"""
class pubsub(dict):
    """Dict with publish/subscribe semantics per key.

    For every key this tracks an optional publisher (a zero-argument
    callable producing the current value), a list of subscribers
    (callables invoked with the new value whenever the key is set), and
    an optional proxy forwarding the key to another pubsub instance.
    Lookup precedence on read is proxy, then publisher, then the cached
    dict value.
    """

    def __init__(self):
        self._publishers = { }
        self._subscribers = { }
        self._proxies = { }

    def __missing__(self, key, value=None):
        # First touch of a key: create its value slot and bookkeeping.
        dict.__setitem__(self, key, value)
        self._publishers[key] = None
        self._subscribers[key] = []
        self._proxies[key] = None

    def __setitem__(self, key, val):
        # 'key in self' replaces the deprecated dict.has_key(), which was
        # removed in Python 3; the semantics are identical.
        if key not in self:
            self.__missing__(key, val)
        elif self._proxies[key] is not None:
            (p, pkey) = self._proxies[key]
            p[pkey] = val
        else:
            dict.__setitem__(self, key, val)
        for sub in self._subscribers[key]:
            # Note this means subscribers will get called in the thread
            # context of the 'set' caller.
            sub(val)

    def __getitem__(self, key):
        if key not in self:
            self.__missing__(key)
        if self._proxies[key] is not None:
            (p, pkey) = self._proxies[key]
            return p[pkey]
        elif self._publishers[key] is not None:
            return self._publishers[key]()
        else:
            return dict.__getitem__(self, key)

    def publish(self, key, publisher):
        """Install `publisher` as the value source for `key`."""
        if key not in self:
            self.__missing__(key)
        if self._proxies[key] is not None:
            (p, pkey) = self._proxies[key]
            p.publish(pkey, publisher)
        else:
            self._publishers[key] = publisher

    def subscribe(self, key, subscriber):
        """Call `subscriber(value)` every time `key` is set."""
        if key not in self:
            self.__missing__(key)
        if self._proxies[key] is not None:
            (p, pkey) = self._proxies[key]
            p.subscribe(pkey, subscriber)
        else:
            self._subscribers[key].append(subscriber)

    def unpublish(self, key):
        """Remove the publisher for `key` (cached values remain readable)."""
        if self._proxies[key] is not None:
            (p, pkey) = self._proxies[key]
            p.unpublish(pkey)
        else:
            self._publishers[key] = None

    def unsubscribe(self, key, subscriber):
        """Stop notifying `subscriber` about updates to `key`."""
        if self._proxies[key] is not None:
            (p, pkey) = self._proxies[key]
            p.unsubscribe(pkey, subscriber)
        else:
            self._subscribers[key].remove(subscriber)

    def proxy(self, key, p, pkey=None):
        """Forward `key` to key `pkey` (default: same name) of pubsub `p`."""
        if key not in self:
            self.__missing__(key)
        if pkey is None:
            pkey = key
        self._proxies[key] = (p, pkey)

    def unproxy(self, key):
        """Stop forwarding `key` to another pubsub instance."""
        self._proxies[key] = None
# Test code
if __name__ == "__main__":
    # Smoke-test demo. Python 2 only: uses print statements and backtick
    # repr syntax, neither of which exists in Python 3.
    import sys
    o = pubsub()
    # Non-existent key gets auto-created with None value
    print "Auto-created key 'foo' value:", o['foo']
    # Add some subscribers
    # First is a bare function
    def print_len(x):
        print "len=%i" % (len(x), )
    o.subscribe('foo', print_len)
    # The second is a class member function
    class subber(object):
        def __init__(self, param):
            self._param = param
        def printer(self, x):
            print self._param, `x`
    s = subber('param')
    o.subscribe('foo', s.printer)
    # The third is a lambda function
    o.subscribe('foo', lambda x: sys.stdout.write('val='+`x`+'\n'))
    # Update key 'foo', will notify subscribers
    print "Updating 'foo' with three subscribers:"
    o['foo'] = 'bar';
    # Remove first subscriber
    o.unsubscribe('foo', print_len)
    # Update now will only trigger second and third subscriber
    print "Updating 'foo' after removing a subscriber:"
    o['foo'] = 'bar2';
    # Publish a key as a function, in this case, a lambda function
    o.publish('baz', lambda : 42)
    print "Published value of 'baz':", o['baz']
    # Unpublish the key
    o.unpublish('baz')
    # This will return None, as there is no publisher
    print "Value of 'baz' with no publisher:", o['baz']
    # Set 'baz' key, it gets cached
    o['baz'] = 'bazzz'
    # Now will return cached value, since no provider
    print "Cached value of 'baz' after being set:", o['baz']
| gpl-3.0 |
shanemcd/ansible | lib/ansible/playbook/handler.py | 133 | 1974 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.task import Task
class Handler(Task):
    """A task that runs only when another task notifies it."""

    _listen = FieldAttribute(isa='list')

    def __init__(self, block=None, role=None, task_include=None):
        # Hosts that have notified this handler during the current run.
        self._flagged_hosts = []
        super(Handler, self).__init__(block=block, role=role, task_include=task_include)

    def __repr__(self):
        ''' returns a human readable representation of the handler '''
        return "HANDLER: %s" % self.get_name()

    @staticmethod
    def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
        """Build a Handler from raw playbook data."""
        handler = Handler(block=block, role=role, task_include=task_include)
        return handler.load_data(data, variable_manager=variable_manager, loader=loader)

    def flag_for_host(self, host):
        """Record that *host* has notified this handler (idempotent)."""
        if host in self._flagged_hosts:
            return
        self._flagged_hosts.append(host)

    def has_triggered(self, host):
        """Return True if *host* has notified this handler."""
        return host in self._flagged_hosts

    def serialize(self):
        # Tag the serialized form so deserialization can rebuild a
        # Handler rather than a plain Task.
        result = super(Handler, self).serialize()
        result['is_handler'] = True
        return result
| gpl-3.0 |
kaplun/inspire-next | tests/integration/crossref/test_crossref_views.py | 3 | 3342 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
import json
import os
import pkg_resources
import pytest
import requests_mock
from flask_security.utils import hash_password
from invenio_accounts.models import SessionActivity, User
from invenio_accounts.testutils import login_user_via_session
from invenio_db import db
@pytest.fixture(scope='module')
def users(app):
    """Create users needed in this test module."""
    # One unprivileged account shared by every test in the module.
    scientist = User(
        email='scientist@inspirehep.net',
        password=hash_password('scientist'),
        active=True,
    )
    db.session.add(scientist)
    db.session.commit()
    yield
    # Teardown: drop the account so later modules see a clean database.
    User.query.filter_by(email='scientist@inspirehep.net').delete()
    db.session.commit()
@pytest.fixture(scope='function')
def log_in_as_scientist(users, app_client):
    """Ensure that we're logged in as an unprivileged user."""
    login_user_via_session(app_client, email='scientist@inspirehep.net')
    yield
    # Teardown: clear session records so the login does not leak into the
    # next test.
    SessionActivity.query.delete()
    db.session.commit()
def test_crossref_search_handles_the_response_when_the_request_is_valid(log_in_as_scientist, app_client):
    """A DOI known to Crossref yields a successful, populated payload."""
    fixture_body = pkg_resources.resource_string(
        __name__, os.path.join('fixtures', '10.1086_305772.json'))

    with requests_mock.Mocker() as mocker:
        # Serve the canned Crossref answer for this DOI.
        mocker.register_uri(
            'GET', 'http://api.crossref.org/works/10.1086/305772',
            text=fixture_body,
        )
        response = app_client.get('/doi/search?doi=10.1086/305772')

    assert response.status_code == 200

    result = json.loads(response.data)
    assert result['query'] != {}
    assert result['source'] == 'crossref'
    assert result['status'] == 'success'
def test_crossref_search_handles_the_response_when_the_request_asks_for_a_non_existing_doi(log_in_as_scientist, app_client):
    """An unknown DOI propagates Crossref's 404 as a 'notfound' payload."""
    fixture_body = pkg_resources.resource_string(
        __name__, os.path.join('fixtures', 'does-not-exist'))

    with requests_mock.Mocker() as mocker:
        # Crossref answers 404 for a DOI it does not know about.
        mocker.register_uri(
            'GET', 'http://api.crossref.org/works/does-not-exist',
            status_code=404,
            text=fixture_body,
        )
        response = app_client.get('/doi/search?doi=does-not-exist')

    assert response.status_code == 404

    result = json.loads(response.data)
    assert result['query'] == {}
    assert result['source'] == 'crossref'
    assert result['status'] == 'notfound'
| gpl-3.0 |
auready/django | tests/custom_pk/tests.py | 31 | 7291 | from django.db import IntegrityError, transaction
from django.test import TestCase, skipIfDBFeature
from .models import Bar, Business, Employee, Foo
class BasicCustomPKTests(TestCase):
    # Employee's primary key is the integer field employee_code; Business's
    # primary key is its name (a character field). Together they exercise
    # custom primary keys of both integer and string types.
    @classmethod
    def setUpTestData(cls):
        # Shared fixtures: two employees working for one business.
        cls.dan = Employee.objects.create(
            employee_code=123, first_name="Dan", last_name="Jones",
        )
        cls.fran = Employee.objects.create(
            employee_code=456, first_name="Fran", last_name="Bones",
        )
        cls.business = Business.objects.create(name="Sears")
        cls.business.employees.add(cls.dan, cls.fran)
    def test_querysets(self):
        """
        Both pk and custom attribute_name can be used in filter and friends
        """
        self.assertQuerysetEqual(
            Employee.objects.filter(pk=123), [
                "Dan Jones",
            ],
            str
        )
        self.assertQuerysetEqual(
            Employee.objects.filter(employee_code=123), [
                "Dan Jones",
            ],
            str
        )
        self.assertQuerysetEqual(
            Employee.objects.filter(pk__in=[123, 456]), [
                "Fran Bones",
                "Dan Jones",
            ],
            str
        )
        self.assertQuerysetEqual(
            Employee.objects.all(), [
                "Fran Bones",
                "Dan Jones",
            ],
            str
        )
        # String primary keys work the same way.
        self.assertQuerysetEqual(
            Business.objects.filter(name="Sears"), [
                "Sears"
            ],
            lambda b: b.name
        )
        self.assertQuerysetEqual(
            Business.objects.filter(pk="Sears"), [
                "Sears",
            ],
            lambda b: b.name
        )
    def test_querysets_related_name(self):
        """
        Custom pk doesn't affect related_name based lookups
        """
        self.assertQuerysetEqual(
            self.business.employees.all(), [
                "Fran Bones",
                "Dan Jones",
            ],
            str
        )
        self.assertQuerysetEqual(
            self.fran.business_set.all(), [
                "Sears",
            ],
            lambda b: b.name
        )
    def test_querysets_relational(self):
        """
        Queries across tables, involving primary key
        """
        self.assertQuerysetEqual(
            Employee.objects.filter(business__name="Sears"), [
                "Fran Bones",
                "Dan Jones",
            ],
            str,
        )
        # Spanning lookups accept pk as an alias for the remote model's
        # custom primary key as well.
        self.assertQuerysetEqual(
            Employee.objects.filter(business__pk="Sears"), [
                "Fran Bones",
                "Dan Jones",
            ],
            str,
        )
        self.assertQuerysetEqual(
            Business.objects.filter(employees__employee_code=123), [
                "Sears",
            ],
            lambda b: b.name
        )
        self.assertQuerysetEqual(
            Business.objects.filter(employees__pk=123), [
                "Sears",
            ],
            lambda b: b.name,
        )
        self.assertQuerysetEqual(
            Business.objects.filter(employees__first_name__startswith="Fran"), [
                "Sears",
            ],
            lambda b: b.name
        )
    def test_get(self):
        """
        Get can accept pk or the real attribute name
        """
        self.assertEqual(Employee.objects.get(pk=123), self.dan)
        self.assertEqual(Employee.objects.get(pk=456), self.fran)
        with self.assertRaises(Employee.DoesNotExist):
            Employee.objects.get(pk=42)
        # Use the name of the primary key, rather than pk.
        self.assertEqual(Employee.objects.get(employee_code=123), self.dan)
    def test_pk_attributes(self):
        """
        pk and attribute name are available on the model
        No default id attribute is added
        """
        # pk can be used as a substitute for the primary key.
        # The primary key can be accessed via the pk property on the model.
        e = Employee.objects.get(pk=123)
        self.assertEqual(e.pk, 123)
        # Or we can use the real attribute name for the primary key:
        self.assertEqual(e.employee_code, 123)
        with self.assertRaises(AttributeError):
            e.id
    def test_in_bulk(self):
        """
        Custom pks work with in_bulk, both for integer and non-integer types
        """
        emps = Employee.objects.in_bulk([123, 456])
        self.assertEqual(emps[123], self.dan)
        self.assertEqual(Business.objects.in_bulk(["Sears"]), {
            "Sears": self.business,
        })
    def test_save(self):
        """
        custom pks do not affect save
        """
        fran = Employee.objects.get(pk=456)
        fran.last_name = "Jones"
        fran.save()
        self.assertQuerysetEqual(
            Employee.objects.filter(last_name="Jones"), [
                "Dan Jones",
                "Fran Jones",
            ],
            str
        )
class CustomPKTests(TestCase):
    """Creation and validation of objects whose models use custom primary keys."""

    def test_custom_pk_create(self):
        """
        New objects can be created both with pk and the custom name
        """
        Employee.objects.create(employee_code=1234, first_name="Foo", last_name="Bar")
        Employee.objects.create(pk=1235, first_name="Foo", last_name="Baz")
        Business.objects.create(name="Bears")
        Business.objects.create(pk="Tears")

    def test_unicode_pk(self):
        # Primary key may be unicode string
        Business.objects.create(name='jaźń')

    def test_unique_pk(self):
        # The primary key must also obviously be unique, so trying to create a
        # new object with the same primary key will fail.
        Employee.objects.create(
            employee_code=123, first_name="Frank", last_name="Jones"
        )
        with self.assertRaises(IntegrityError):
            with transaction.atomic():
                Employee.objects.create(employee_code=123, first_name="Fred", last_name="Jones")

    def test_zero_non_autoincrement_pk(self):
        # Zero is a legal value for a non-autoincrement primary key.
        Employee.objects.create(
            employee_code=0, first_name="Frank", last_name="Jones"
        )
        employee = Employee.objects.get(pk=0)
        self.assertEqual(employee.employee_code, 0)

    def test_custom_field_pk(self):
        # Regression for #10785 -- Custom fields can be used for primary keys.
        new_bar = Bar.objects.create()
        new_foo = Foo.objects.create(bar=new_bar)
        # Lookups work both with the raw pk value and the related instance.
        f = Foo.objects.get(bar=new_bar.pk)
        self.assertEqual(f, new_foo)
        self.assertEqual(f.bar, new_bar)
        f = Foo.objects.get(bar=new_bar)
        # Fixed: a stray trailing comma previously turned this assertion
        # into a pointless one-element tuple expression.
        self.assertEqual(f, new_foo)
        self.assertEqual(f.bar, new_bar)

    # SQLite lets objects be saved with an empty primary key, even though an
    # integer is expected. So we can't check for an error being raised in that
    # case for SQLite. Remove it from the suite for this next bit.
    @skipIfDBFeature('supports_unspecified_pk')
    def test_required_pk(self):
        # The primary key must be specified, so an error is raised if you
        # try to create an object without it.
        with self.assertRaises(IntegrityError):
            with transaction.atomic():
                Employee.objects.create(first_name="Tom", last_name="Smith")
| bsd-3-clause |
ouyangshiliang/hg255 | scripts/dl_cleanup.py | 131 | 5871 | #!/usr/bin/env python
"""
# OpenWRT download directory cleanup utility.
# Delete all but the very last version of the program tarballs.
#
# Copyright (c) 2010 Michael Buesch <mb@bu3sch.de>
"""
import sys
import os
import re
import getopt
# Commandline options
opt_dryrun = False
def parseVer_1234(match, filepath):
    """Build a (progname, sortable-version) pair from an a.b.c.d match.

    The four numeric components are packed into disjoint bit ranges of a
    single integer so versions compare correctly with plain >=.
    """
    major, minor, patch, sub = (int(match.group(i)) for i in (2, 3, 4, 5))
    version = (major << 64) | (minor << 48) | (patch << 32) | (sub << 16)
    return (match.group(1), version)
def parseVer_123(match, filepath):
    """Build a (progname, sortable-version) pair from an a.b.c[x] match.

    An optional trailing patch letter (e.g. "1.2.3a") is folded into the
    low bits via its ordinal, so "1.2.3a" sorts above "1.2.3".
    """
    progname = match.group(1)
    # Not every pattern routed here captures a patch-letter group; treat
    # a missing group the same as an absent letter.
    # ("except IndexError:" replaces the Python-2-only
    # "except (IndexError), e:" form; the bound name was unused.)
    try:
        patchlevel = match.group(5)
    except IndexError:
        patchlevel = None
    if patchlevel:
        patchlevel = ord(patchlevel[0])
    else:
        patchlevel = 0
    progversion = (int(match.group(2)) << 64) |\
            (int(match.group(3)) << 48) |\
            (int(match.group(4)) << 32) |\
            patchlevel
    return (progname, progversion)
def parseVer_12(match, filepath):
    """Build a (progname, sortable-version) pair from an a.b[x] match.

    An optional trailing patch letter (e.g. "1.2a") is folded into the
    low bits via its ordinal, so "1.2a" sorts above "1.2".
    """
    progname = match.group(1)
    # The patch-letter group may be absent depending on which pattern
    # matched; a missing group means no patch letter.
    # ("except IndexError:" replaces the Python-2-only
    # "except (IndexError), e:" form; the bound name was unused.)
    try:
        patchlevel = match.group(4)
    except IndexError:
        patchlevel = None
    if patchlevel:
        patchlevel = ord(patchlevel[0])
    else:
        patchlevel = 0
    progversion = (int(match.group(2)) << 64) |\
            (int(match.group(3)) << 48) |\
            patchlevel
    return (progname, progversion)
def parseVer_r(match, filepath):
    """Build a (progname, sortable-version) pair from an rNNNN revision match."""
    revision = int(match.group(2))
    return (match.group(1), revision << 64)
def parseVer_ymd(match, filepath):
    """Build a (progname, sortable-version) pair from a YYYY-MM-DD match."""
    year, month, day = (int(match.group(i)) for i in (2, 3, 4))
    return (match.group(1), (year << 64) | (month << 48) | (day << 32))
def parseVer_GIT(match, filepath):
    """Build a (progname, sortable-version) pair for a git-SHA tarball.

    Git hashes carry no ordering, so the tarball's modification time
    stands in as the version.
    """
    mtime = int(os.stat(filepath).st_mtime)
    return (match.group(1), mtime << 64)
# Recognized archive extensions, stripped from filenames before version
# parsing; files with any other extension are ignored.
extensions = (
    ".tar.gz",
    ".tar.bz2",
    ".orig.tar.gz",
    ".orig.tar.bz2",
    ".zip",
    ".tgz",
    ".tbz",
)
# (compiled pattern, parser) pairs tried in order; the first pattern that
# matches decides how the version is decoded into a sortable integer, so
# the most specific patterns (git SHA, four-part versions) come first.
versionRegex = (
    (re.compile(r"(.+)[-_]([0-9a-fA-F]{40,40})"), parseVer_GIT), # xxx-GIT_SHASUM
    (re.compile(r"(.+)[-_](\d+)\.(\d+)\.(\d+)\.(\d+)"), parseVer_1234), # xxx-1.2.3.4
    (re.compile(r"(.+)[-_](\d\d\d\d)-?(\d\d)-?(\d\d)"), parseVer_ymd), # xxx-YYYY-MM-DD
    (re.compile(r"(.+)[-_](\d+)\.(\d+)\.(\d+)(\w?)"), parseVer_123), # xxx-1.2.3a
    (re.compile(r"(.+)[-_](\d+)_(\d+)_(\d+)"), parseVer_123), # xxx-1_2_3
    (re.compile(r"(.+)[-_](\d+)\.(\d+)(\w?)"), parseVer_12), # xxx-1.2a
    (re.compile(r"(.+)[-_]r?(\d+)"), parseVer_r), # xxx-r1111
)
# Filenames matching any of these patterns are never considered for
# deletion (kernel tarballs, toolchains, firmware blobs). Items can be
# removed at runtime via the -w/--whitelist option.
blacklist = [
    ("linux", re.compile(r"linux-.*")),
    ("gcc", re.compile(r"gcc-.*")),
    ("wl_apsta", re.compile(r"wl_apsta.*")),
    (".fw", re.compile(r".*\.fw")),
    (".arm", re.compile(r".*\.arm")),
    (".bin", re.compile(r".*\.bin")),
    ("rt-firmware", re.compile(r"RT[\d\w]+_Firmware.*")),
]
class EntryParseError(Exception): pass
class Entry:
    # One downloaded tarball, parsed into a program name plus a
    # comparable version key. (Python 2 code: uses print statements.)
    def __init__(self, directory, filename):
        self.directory = directory
        self.filename = filename
        self.progname = ""
        self.fileext = ""
        # Strip a known archive extension; reject unknown extensions.
        for ext in extensions:
            if filename.endswith(ext):
                filename = filename[0:0-len(ext)]  # drop the extension suffix
                self.fileext = ext
                break
        else:
            print self.filename, "has an unknown file-extension"
            raise EntryParseError("ext")
        # Try each version pattern in order; the matching parser yields
        # (progname, version) where version is a sortable integer key.
        for (regex, parseVersion) in versionRegex:
            match = regex.match(filename)
            if match:
                (self.progname, self.version) = parseVersion(
                    match, directory + "/" + filename + self.fileext)
                break
        else:
            print self.filename, "has an unknown version pattern"
            raise EntryParseError("ver")
    def deleteFile(self):
        # Remove the tarball from disk (honors the global dry-run flag).
        path = (self.directory + "/" + self.filename).replace("//", "/")
        print "Deleting", path
        if not opt_dryrun:
            os.unlink(path)
    def __eq__(self, y):
        # Entries are identified by their full filename.
        return self.filename == y.filename
    def __ge__(self, y):
        # Ordering compares parsed version keys only (used by main() to
        # find the newest version of each program).
        return self.version >= y.version
def usage():
    # Print command-line help to stdout (Python 2 print statements).
    print "OpenWRT download directory cleanup utility"
    print "Usage: " + sys.argv[0] + " [OPTIONS] <path/to/dl>"
    print ""
    print " -d|--dry-run Do a dry-run. Don't delete any files"
    print " -B|--show-blacklist Show the blacklist and exit"
    print " -w|--whitelist ITEM Remove ITEM from blacklist"
def main(argv):
    # Entry point: parse options, scan the download directory, and keep
    # only the newest version of each program's tarball.
    # Returns a process exit code (0 on success).
    global opt_dryrun
    try:
        (opts, args) = getopt.getopt(argv[1:],
            "hdBw:",
            [ "help", "dry-run", "show-blacklist", "whitelist=", ])
        if len(args) != 1:
            usage()
            return 1
    except getopt.GetoptError:
        usage()
        return 1
    directory = args[0]
    for (o, v) in opts:
        if o in ("-h", "--help"):
            usage()
            return 0
        if o in ("-d", "--dry-run"):
            opt_dryrun = True
        if o in ("-w", "--whitelist"):
            # Remove the named item from the blacklist so its files
            # become eligible for cleanup.
            for i in range(0, len(blacklist)):
                (name, regex) = blacklist[i]
                if name == v:
                    del blacklist[i]
                    break
            else:
                print "Whitelist error: Item", v,\
                      "is not in blacklist"
                return 1
        if o in ("-B", "--show-blacklist"):
            for (name, regex) in blacklist:
                print name
            return 0
    # Create a directory listing and parse the file names.
    entries = []
    for filename in os.listdir(directory):
        if filename == "." or filename == "..":
            continue
        # Blacklisted filenames are skipped entirely; unparseable ones
        # are silently ignored (EntryParseError).
        for (name, regex) in blacklist:
            if regex.match(filename):
                if opt_dryrun:
                    print filename, "is blacklisted"
                break
        else:
            try:
                entries.append(Entry(directory, filename))
            except (EntryParseError), e: pass
    # Create a map of programs
    progmap = {}
    for entry in entries:
        if entry.progname in progmap.keys():
            progmap[entry.progname].append(entry)
        else:
            progmap[entry.progname] = [entry,]
    # Traverse the program map and delete everything but the last version
    for prog in progmap:
        lastVersion = None
        versions = progmap[prog]
        for version in versions:
            if lastVersion is None or version >= lastVersion:
                lastVersion = version
        if lastVersion:
            for version in versions:
                if version != lastVersion:
                    version.deleteFile()
            if opt_dryrun:
                print "Keeping", lastVersion.filename
    return 0
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| gpl-2.0 |
xingyepei/edx-platform | cms/djangoapps/contentstore/views/tests/test_course_updates.py | 117 | 14061 | """
unit tests for course_info views and models.
"""
import json
from mock import patch
from django.test.utils import override_settings
from contentstore.models import PushNotificationConfig
from contentstore.tests.test_course_settings import CourseTestCase
from contentstore.utils import reverse_course_url, reverse_usage_url
from opaque_keys.edx.keys import UsageKey
from xmodule.modulestore.django import modulestore
class CourseUpdateTest(CourseTestCase):
    """Tests for the course_info_update handler (create/read/update/delete of course updates)."""

    def create_update_url(self, provided_id=None, course_key=None):
        """Build the course-update handler URL, optionally addressing a single update id."""
        if course_key is None:
            course_key = self.course.id
        kwargs = {'provided_id': str(provided_id)} if provided_id else None
        return reverse_course_url('course_info_update_handler', course_key, kwargs=kwargs)

    # The do all and end all of unit test cases.
    def test_course_update(self):
        """Go through each interface and ensure it works."""
        def get_response(content, date):
            """
            Helper method for making call to server and returning response.

            Does not supply a provided_id.
            """
            payload = {'content': content, 'date': date}
            url = self.create_update_url()
            resp = self.client.ajax_post(url, payload)
            self.assertContains(resp, '', status_code=200)
            return json.loads(resp.content)

        resp = self.client.get_html(
            reverse_course_url('course_info_handler', self.course.id)
        )
        self.assertContains(resp, 'Course Updates', status_code=200)

        init_content = '<iframe width="560" height="315" src="http://www.youtube.com/embed/RocY-Jd93XU" frameborder="0">'
        content = init_content + '</iframe>'
        payload = get_response(content, 'January 8, 2013')
        self.assertHTMLEqual(payload['content'], content)

        first_update_url = self.create_update_url(provided_id=payload['id'])
        content += '<div>div <p>p<br/></p></div>'
        payload['content'] = content
        # POST requests were coming in w/ these header values causing an error; so, repro error here
        resp = self.client.ajax_post(
            first_update_url, payload, HTTP_X_HTTP_METHOD_OVERRIDE="PUT", REQUEST_METHOD="POST"
        )
        self.assertHTMLEqual(content, json.loads(resp.content)['content'],
                             "iframe w/ div")

        # refetch using provided id
        refetched = self.client.get_json(first_update_url)
        self.assertHTMLEqual(
            content, json.loads(refetched.content)['content'], "get w/ provided id"
        )

        # now put in an evil update
        content = '<ol/>'
        payload = get_response(content, 'January 11, 2013')
        self.assertHTMLEqual(content, payload['content'], "self closing ol")

        course_update_url = self.create_update_url()
        resp = self.client.get_json(course_update_url)
        payload = json.loads(resp.content)
        self.assertTrue(len(payload) == 2)

        # try json w/o required fields
        self.assertContains(
            self.client.ajax_post(course_update_url, {'garbage': 1}),
            'Failed to save', status_code=400
        )

        # test an update with text in the tail of the header
        content = 'outside <strong>inside</strong> after'
        payload = get_response(content, 'June 22, 2000')
        self.assertHTMLEqual(content, payload['content'], "text outside tag")

        # now try to update a non-existent update
        content = 'blah blah'
        payload = {'content': content, 'date': 'January 21, 2013'}
        self.assertContains(
            self.client.ajax_post(course_update_url + '9', payload),
            'Failed to save', status_code=400
        )

        # update w/ malformed html
        content = '<garbage tag No closing brace to force <span>error</span>'
        payload = {'content': content,
                   'date': 'January 11, 2013'}
        self.assertContains(
            self.client.ajax_post(course_update_url, payload),
            '<garbage'
        )

        # set to valid html which would break an xml parser
        content = "<p><br><br></p>"
        payload = get_response(content, 'January 11, 2013')
        self.assertHTMLEqual(content, payload['content'])

        # now try to delete a non-existent update
        self.assertContains(self.client.delete(course_update_url + '19'), "delete", status_code=400)

        # now delete a real update
        content = 'blah blah'
        payload = get_response(content, 'January 28, 2013')
        this_id = payload['id']
        self.assertHTMLEqual(content, payload['content'], "single iframe")
        # first count the entries
        resp = self.client.get_json(course_update_url)
        payload = json.loads(resp.content)
        before_delete = len(payload)

        url = self.create_update_url(provided_id=this_id)
        resp = self.client.delete(url)
        payload = json.loads(resp.content)
        self.assertTrue(len(payload) == before_delete - 1)

    def test_course_updates_compatibility(self):
        '''
        Test that course updates doesn't break on old data (content in 'data' field).
        Note: new data will save as list in 'items' field.
        '''
        # get the updates and populate 'data' field with some data.
        location = self.course.id.make_usage_key('course_info', 'updates')
        course_updates = modulestore().create_item(
            self.user.id,
            location.course_key,
            location.block_type,
            block_id=location.block_id
        )
        update_date = u"January 23, 2014"
        update_content = u"Hello world!"
        update_data = u"<ol><li><h2>" + update_date + "</h2>" + update_content + "</li></ol>"
        course_updates.data = update_data
        modulestore().update_item(course_updates, self.user.id)

        # test getting all updates list
        course_update_url = self.create_update_url()
        resp = self.client.get_json(course_update_url)
        payload = json.loads(resp.content)
        self.assertEqual(payload, [{u'date': update_date, u'content': update_content, u'id': 1}])
        self.assertTrue(len(payload) == 1)

        # test getting single update item
        first_update_url = self.create_update_url(provided_id=payload[0]['id'])
        resp = self.client.get_json(first_update_url)
        payload = json.loads(resp.content)
        self.assertEqual(payload, {u'date': u'January 23, 2014', u'content': u'Hello world!', u'id': 1})
        self.assertHTMLEqual(update_date, payload['date'])
        self.assertHTMLEqual(update_content, payload['content'])

        # test that while updating it converts old data (with string format in 'data' field)
        # to new data (with list format in 'items' field) and respectively updates 'data' field.
        course_updates = modulestore().get_item(location)
        self.assertEqual(course_updates.items, [])
        # now try to update first update item
        update_content = 'Testing'
        payload = {'content': update_content, 'date': update_date}
        resp = self.client.ajax_post(
            course_update_url + '1', payload, HTTP_X_HTTP_METHOD_OVERRIDE="PUT", REQUEST_METHOD="POST"
        )
        self.assertHTMLEqual(update_content, json.loads(resp.content)['content'])
        course_updates = modulestore().get_item(location)
        self.assertEqual(course_updates.items, [{u'date': update_date, u'content': update_content, u'id': 1}])
        # course_updates 'data' field should update accordingly
        update_data = u"<section><article><h2>{date}</h2>{content}</article></section>".format(date=update_date, content=update_content)
        self.assertEqual(course_updates.data, update_data)

        # test delete course update item (soft delete)
        course_updates = modulestore().get_item(location)
        self.assertEqual(course_updates.items, [{u'date': update_date, u'content': update_content, u'id': 1}])
        # now try to delete first update item
        resp = self.client.delete(course_update_url + '1')
        self.assertEqual(json.loads(resp.content), [])
        # confirm that course update is soft deleted ('status' flag set to 'deleted') in db
        course_updates = modulestore().get_item(location)
        self.assertEqual(course_updates.items,
                         [{u'date': update_date, u'content': update_content, u'id': 1, u'status': 'deleted'}])

        # now try to get deleted update
        resp = self.client.get_json(course_update_url + '1')
        payload = json.loads(resp.content)
        self.assertEqual(payload.get('error'), u"Course update not found.")
        self.assertEqual(resp.status_code, 404)

        # now check that course update don't munges html
        update_content = u"""<problem>
<p></p>
<multiplechoiceresponse>
<pre><problem>
<p></p></pre>
<div><foo>bar</foo></div>"""
        payload = {'content': update_content, 'date': update_date}
        resp = self.client.ajax_post(
            course_update_url, payload, REQUEST_METHOD="POST"
        )
        self.assertHTMLEqual(update_content, json.loads(resp.content)['content'])

    def test_no_ol_course_update(self):
        '''Test trying to add to a saved course_update which is not an ol.'''
        # get the updates and set to something wrong
        location = self.course.id.make_usage_key('course_info', 'updates')
        modulestore().create_item(
            self.user.id,
            location.course_key,
            location.block_type,
            block_id=location.block_id
        )
        course_updates = modulestore().get_item(location)
        course_updates.data = 'bad news'
        modulestore().update_item(course_updates, self.user.id)

        init_content = '<iframe width="560" height="315" src="http://www.youtube.com/embed/RocY-Jd93XU" frameborder="0">'
        content = init_content + '</iframe>'
        payload = {'content': content, 'date': 'January 8, 2013'}

        course_update_url = self.create_update_url()
        resp = self.client.ajax_post(course_update_url, payload)
        payload = json.loads(resp.content)
        self.assertHTMLEqual(payload['content'], content)

        # now confirm that the bad news and the iframe make up single update
        resp = self.client.get_json(course_update_url)
        payload = json.loads(resp.content)
        self.assertTrue(len(payload) == 1)

    def post_course_update(self, send_push_notification=False):
        """
        Posts an update to the course
        """
        course_update_url = self.create_update_url(course_key=self.course.id)
        # create a course via the view handler
        self.client.ajax_post(course_update_url)

        content = u"Sample update"
        payload = {'content': content, 'date': 'January 8, 2013'}
        if send_push_notification:
            payload['push_notification_selected'] = True
        resp = self.client.ajax_post(course_update_url, payload)

        # check that response status is 200 not 400
        self.assertEqual(resp.status_code, 200)

        payload = json.loads(resp.content)
        self.assertHTMLEqual(payload['content'], content)

    @patch("contentstore.push_notification.send_push_course_update")
    def test_post_course_update(self, mock_push_update):
        """
        Test that a user can successfully post on course updates and handouts of a course
        """
        self.post_course_update()

        # check that push notifications are not sent
        self.assertFalse(mock_push_update.called)

        updates_location = self.course.id.make_usage_key('course_info', 'updates')
        self.assertTrue(isinstance(updates_location, UsageKey))
        self.assertEqual(updates_location.name, u'updates')

        # check posting on handouts
        handouts_location = self.course.id.make_usage_key('course_info', 'handouts')
        course_handouts_url = reverse_usage_url('xblock_handler', handouts_location)

        content = u"Sample handout"
        payload = {'data': content}
        resp = self.client.ajax_post(course_handouts_url, payload)

        # check that response status is 200 not 500
        self.assertEqual(resp.status_code, 200)

        payload = json.loads(resp.content)
        self.assertHTMLEqual(payload['data'], content)

    @patch("contentstore.push_notification.send_push_course_update")
    def test_notifications_enabled_but_not_requested(self, mock_push_update):
        # Enabling the feature alone must not trigger a push; the update has
        # to opt in via push_notification_selected.
        PushNotificationConfig(enabled=True).save()
        self.post_course_update()
        self.assertFalse(mock_push_update.called)

    @patch("contentstore.push_notification.send_push_course_update")
    def test_notifications_enabled_and_sent(self, mock_push_update):
        PushNotificationConfig(enabled=True).save()
        self.post_course_update(send_push_notification=True)
        self.assertTrue(mock_push_update.called)

    @override_settings(PARSE_KEYS={"APPLICATION_ID": "TEST_APPLICATION_ID", "REST_API_KEY": "TEST_REST_API_KEY"})
    @patch("contentstore.push_notification.Push")
    def test_notifications_sent_to_parse(self, mock_parse_push):
        # NOTE(review): expects exactly two alert calls per update — presumably
        # one per mobile platform; confirm in contentstore.push_notification.
        PushNotificationConfig(enabled=True).save()
        self.post_course_update(send_push_notification=True)
        self.assertEquals(mock_parse_push.alert.call_count, 2)

    @override_settings(PARSE_KEYS={"APPLICATION_ID": "TEST_APPLICATION_ID", "REST_API_KEY": "TEST_REST_API_KEY"})
    @patch("contentstore.push_notification.log_exception")
    @patch("contentstore.push_notification.Push")
    def test_notifications_error_from_parse(self, mock_parse_push, mock_log_exception):
        PushNotificationConfig(enabled=True).save()
        from parse_rest.core import ParseError
        mock_parse_push.alert.side_effect = ParseError
        self.post_course_update(send_push_notification=True)
        self.assertTrue(mock_log_exception.called)
| agpl-3.0 |
dombrno/PG | Source/jugfile.py | 1 | 2178 | import numpy as np
import sys
from jug import TaskGenerator
from os.path import expanduser
HOME = expanduser("~")
if "storage" in HOME:
HOME = "/storage/home/geffroy"
sys.path.append(HOME + "/Code/PG/Source")
from phase_fluctuations import DWaveModel
from MCMC import MCMCDriver
# pylint: disable=E1101
# Simulation sweep configuration: N_RUNS independent runs, each executed at
# every temperature, with per-run RNG seeds derived from the two root seeds.
N_RUNS = 32
TARGET_SNAPSHOTS = 32
TEMPERATURES = [275.0, 320.0, 360.0, 395.0,
                430.0, 470.0, 490.0, 550.0, 575.0, 625.0]
THERMALIZATION_STEPS = 150
ROOT_PHASE_SEED = 123456789
ROOT_MC_SEED = 234567
OBSERVABLE_NAME = "correlation_length"
OBSERVABLE_NAME2 = "DOS"
MC_SEEDS = [ROOT_MC_SEED + offset for offset in np.arange(N_RUNS)]
PHASE_SEEDS = [ROOT_PHASE_SEED + offset for offset in np.arange(N_RUNS)]
# One parameter dict per (run, temperature) pair; runs vary in the outer
# position so each run's temperatures are contiguous.
COMPUTATION_PARAMS = [
    {"mc_seed": mc_seed,
     "phase_seed": phase_seed,
     "temperature": temperature}
    for mc_seed, phase_seed in zip(MC_SEEDS, PHASE_SEEDS)
    for temperature in TEMPERATURES
]
@TaskGenerator
def get_result(in_phase_seed, in_mc_seed, in_temperature):
    """Build a DWaveModel and MCMCDriver, thermalize, execute, return the result."""
    hopping = 0.25
    model = DWaveModel({
        "width": 32,
        "chem_potential": 0.0,
        "hopping_constant": hopping,
        "J_constant": 0.1 * hopping / 0.89,
        "g_constant": 0.25,
        "delta": 1.0 * hopping,
        "use_assaad": True,
        "uniform_phase": False,
        "temperature": in_temperature,
        "seed": in_phase_seed,
    })
    driver = MCMCDriver(model, {
        "seed": in_mc_seed,
        "intervals": 5,
        "target_snapshots": TARGET_SNAPSHOTS,
        "observable_list": [OBSERVABLE_NAME, OBSERVABLE_NAME2],
        "algorithm": "cluster",
    })
    driver.thermalize(THERMALIZATION_STEPS)
    driver.execute()
    return driver.result
@TaskGenerator
def join(partials):
    """Collect the per-run results into a single numpy array.

    The name intentionally matches the original jug task name and is kept
    even though it shadows str.join-style naming conventions.
    """
    # np.array consumes the sequence directly; the identity comprehension
    # ([x for x in partials]) in the original added nothing.
    return np.array(list(partials))
# Fan out one get_result task per (seed, temperature) combination and gather
# all their results into a single array-producing task.
fullresults = join([get_result(elem["phase_seed"],
                               elem["mc_seed"],
                               elem["temperature"])
                    for elem in COMPUTATION_PARAMS])
| bsd-2-clause |
rahul67/hue | apps/sqoop/src/sqoop/api/job.py | 25 | 7728 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from sqoop import client, conf
from sqoop.client.exception import SqoopException
from decorators import get_job_or_exception
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions import StructuredException
from desktop.lib.rest.http_client import RestException
from exception import handle_rest_exception
from utils import list_to_dict
__all__ = ['get_jobs', 'create_job', 'update_job', 'job', 'jobs', 'job_clone', 'job_delete', 'job_start', 'job_stop', 'job_status']
LOG = logging.getLogger(__name__)
@never_cache
def get_jobs(request):
    """Return a JSON listing of all Sqoop jobs known to the server."""
    response = {
        'status': 0,
        'errors': None,
        'jobs': []
    }
    try:
        c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
        jobs = c.get_jobs()
        response['jobs'] = list_to_dict(jobs)
    # 'except X as e' replaces the Python-2-only 'except X, e' form; it is
    # valid on Python 2.6+ and required on Python 3.
    except RestException as e:
        response.update(handle_rest_exception(e, _('Could not get jobs.')))
    return JsonResponse(response)
@never_cache
def create_job(request):
    """Create a new Sqoop job from the JSON 'job' field of a POST request."""
    if request.method != 'POST':
        raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)

    response = {
        'status': 0,
        'errors': None,
        'job': None
    }

    if 'job' not in request.POST:
        raise StructuredException(code="INVALID_REQUEST_ERROR", message=_('Error saving job'), data={'errors': 'job is missing.'}, error_code=400)

    d = json.loads(smart_str(request.POST['job']))
    job = client.Job.from_dict(d)

    try:
        c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
        response['job'] = c.create_job(job).to_dict()
    # Python-3-compatible 'as' syntax (original used the Python-2-only comma form).
    except RestException as e:
        response.update(handle_rest_exception(e, _('Could not create job.')))
    except SqoopException as e:
        response['status'] = 100
        response['errors'] = e.to_dict()
    return JsonResponse(response)
@never_cache
def update_job(request, job):
    """Update an existing job from the JSON 'job' field of a POST request."""
    if request.method != 'POST':
        raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)

    response = {
        'status': 0,
        'errors': None,
        'job': None
    }

    if 'job' not in request.POST:
        raise StructuredException(code="INVALID_REQUEST_ERROR", message=_('Error saving job'), data={'errors': 'job is missing.'}, error_code=400)

    job.update_from_dict(json.loads(smart_str(request.POST['job'])))

    try:
        c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
        response['job'] = c.update_job(job).to_dict()
    # Python-3-compatible 'as' syntax (original used the Python-2-only comma form).
    except RestException as e:
        response.update(handle_rest_exception(e, _('Could not update job.')))
    except SqoopException as e:
        response['status'] = 100
        response['errors'] = e.to_dict()
    return JsonResponse(response)
@never_cache
def jobs(request):
    """Dispatch /jobs requests: GET lists jobs, POST creates one."""
    handlers = {'GET': get_jobs, 'POST': create_job}
    handler = handlers.get(request.method)
    if handler is None:
        raise StructuredException(code="INVALID_METHOD", message=_('GET or POST request required.'), error_code=405)
    return handler(request)
@never_cache
@get_job_or_exception()
def job(request, job):
    """Dispatch a single-job request: GET returns the job as JSON, POST updates it."""
    if request.method == 'POST':
        return update_job(request, job)
    if request.method != 'GET':
        raise StructuredException(code="INVALID_METHOD", message=_('GET or POST request required.'), error_code=405)
    return JsonResponse({
        'status': 0,
        'errors': None,
        'job': job.to_dict(),
    })
@never_cache
@get_job_or_exception()
def job_clone(request, job):
    """Clone an existing job on the Sqoop server under the name '<name>-copy'."""
    if request.method != 'POST':
        raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)

    response = {
        'status': 0,
        'errors': None,
        'job': None
    }

    # Reset the id so the server allocates a fresh one for the copy.
    job.id = -1
    job.name = '%s-copy' % job.name
    try:
        c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
        response['job'] = c.create_job(job).to_dict()
    # Python-3-compatible 'as' syntax (original used the Python-2-only comma form).
    except RestException as e:
        response.update(handle_rest_exception(e, _('Could not clone job.')))
    except SqoopException as e:
        response['status'] = 100
        response['errors'] = e.to_dict()
    return JsonResponse(response)
@never_cache
@get_job_or_exception()
def job_delete(request, job):
    """Delete a job on the Sqoop server."""
    if request.method != 'POST':
        raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)

    response = {
        'status': 0,
        'errors': None,
        'job': None
    }

    try:
        c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
        c.delete_job(job)
    # Python-3-compatible 'as' syntax (original used the Python-2-only comma form).
    except RestException as e:
        response.update(handle_rest_exception(e, _('Could not delete job.')))
    except SqoopException as e:
        response['status'] = 100
        response['errors'] = e.to_dict()
    return JsonResponse(response)
@never_cache
@get_job_or_exception()
def job_start(request, job):
    """Start a job on the Sqoop server and return its submission state."""
    if request.method != 'POST':
        raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)

    response = {
        'status': 0,
        'errors': None,
        'submission': None
    }

    try:
        c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
        response['submission'] = c.start_job(job).to_dict()
    # Python-3-compatible 'as' syntax (original used the Python-2-only comma form).
    except RestException as e:
        response.update(handle_rest_exception(e, _('Could not start job.')))
    except SqoopException as e:
        response['status'] = 100
        # NOTE(review): unlike the other views in this module this wraps the
        # error dict in a list; confirm which shape the client expects before
        # unifying.
        response['errors'] = [e.to_dict()]
    return JsonResponse(response)
@never_cache
@get_job_or_exception()
def job_stop(request, job):
    """Stop a running job on the Sqoop server and return its submission state."""
    if request.method != 'POST':
        raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)

    response = {
        'status': 0,
        'errors': None,
        'submission': None
    }

    try:
        c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
        response['submission'] = c.stop_job(job).to_dict()
    # Python-3-compatible 'as' syntax (original used the Python-2-only comma form).
    except RestException as e:
        response.update(handle_rest_exception(e, _('Could not stop job.')))
    except SqoopException as e:
        response['status'] = 100
        response['errors'] = e.to_dict()
    return JsonResponse(response)
@never_cache
@get_job_or_exception()
def job_status(request, job):
    """Return the submission status of a job (GET only)."""
    if request.method != 'GET':
        raise StructuredException(code="INVALID_METHOD", message=_('GET request required.'), error_code=405)

    response = {
        'status': 0,
        'errors': None,
        'submission': None
    }

    try:
        c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
        response['submission'] = c.get_job_status(job).to_dict()
    # Python-3-compatible 'as' syntax (original used the Python-2-only comma form).
    except RestException as e:
        response.update(handle_rest_exception(e, _('Could not get job status.')))
    except SqoopException as e:
        response['status'] = 100
        response['errors'] = e.to_dict()
    return JsonResponse(response)
| apache-2.0 |
JiscPER/magnificent-octopus | octopus/modules/store/store.py | 2 | 7571 | from octopus.core import app
from octopus.lib import plugin
import os, shutil, codecs, requests
class StoreException(Exception):
    """Raised for store configuration or access failures."""
    pass
class StoreFactory(object):
    """Instantiates the Store implementations named in the app configuration."""

    @classmethod
    def get(cls):
        """
        Returns an implementation of the base Store class
        """
        class_path = app.config.get("STORE_IMPL")
        impl = plugin.load_class(class_path)
        return impl()

    @classmethod
    def tmp(cls):
        """
        Returns an implementation of the base Store class which should be able
        to provide local temp storage to the app. In addition to the methods supplied
        by Store, it must also provide a "path" function to give the path on-disk to
        the file
        """
        class_path = app.config.get("STORE_TMP_IMPL")
        impl = plugin.load_class(class_path)
        return impl()
class Store(object):
    """Abstract interface for container-based file storage backends."""

    def store(self, container_id, target_name, source_path=None, source_stream=None):
        pass

    def exists(self, container_id):
        return False

    def list(self, container_id):
        pass

    def get(self, container_id, target_name):
        return None

    def delete(self, container_id, target_name=None):
        pass


class StoreLocal(Store):
    """
    Primitive local storage system. Use this for testing in place of remote store
    """

    def __init__(self):
        self.dir = app.config.get("STORE_LOCAL_DIR")
        if self.dir is None:
            raise StoreException("STORE_LOCAL_DIR is not defined in config")

    def _container_path(self, container_id):
        # All of a container's files live in one sub-directory of self.dir.
        return os.path.join(self.dir, container_id)

    def store(self, container_id, target_name, source_path=None, source_stream=None):
        container = self._container_path(container_id)
        if not os.path.exists(container):
            os.makedirs(container)
        target = os.path.join(container, target_name)
        if source_path:
            shutil.copyfile(source_path, target)
        elif source_stream:
            with codecs.open(target, "wb") as out:
                out.write(source_stream.read())

    def exists(self, container_id):
        container = self._container_path(container_id)
        return os.path.exists(container) and os.path.isdir(container)

    def list(self, container_id):
        return os.listdir(self._container_path(container_id))

    def get(self, container_id, target_name):
        # Returns an open (text-mode) file handle, or None when absent;
        # the caller owns closing the handle.
        fpath = os.path.join(self.dir, container_id, target_name)
        if os.path.exists(fpath) and os.path.isfile(fpath):
            return codecs.open(fpath, "r")

    def delete(self, container_id, target_name=None):
        target = self._container_path(container_id)
        if target_name is not None:
            target = os.path.join(target, target_name)
        if not os.path.exists(target):
            return
        if os.path.isfile(target):
            os.remove(target)
        else:
            shutil.rmtree(target)
class StoreJper(Store):
    """Remote store backed by the JPER HTTP storage API (STORE_JPER_URL).

    Containers map to URL path segments; files are uploaded as multipart
    POSTs. All logging is strictly best-effort and must never make a storage
    operation fail.
    """
    # to update this, it is in octopus so go into octopus then pull. then merge if necessary.
    # then push these changes to octopus develop. then go back up to jper and it should show
    # the commit of octopus has changed so then commit jper again.

    def __init__(self):
        self.url = app.config.get("STORE_JPER_URL")
        if self.url is None:
            raise StoreException("STORE_JPER_URL is not defined in config")

    def _log(self, container_id, message):
        """Best-effort info logging; swallow any logger failure.

        Factors out the try/except boilerplate the original repeated around
        every app.logger.info() call; messages are byte-identical.
        """
        try:
            app.logger.info('Store - Container:' + container_id + ' ' + message)
        except Exception:
            pass

    def store(self, container_id, target_name, source_path=None, source_stream=None):
        """Upload a file (from a path or a stream) into a container, creating it if needed."""
        cpath = os.path.join(self.url, container_id)
        r = requests.get(cpath)
        if r.status_code != 200:
            requests.put(cpath)
            self._log(container_id, cpath + ' container to be created ' + str(r.status_code))
        else:
            self._log(container_id, cpath + ' container already exists ' + str(r.status_code))
        tpath = os.path.join(cpath, target_name)
        if source_path is not None:
            self._log(container_id, 'attempting to save source path to ' + tpath)
            with open(source_path, 'rb') as payload:
                r = requests.post(tpath, files={'file': payload})
        elif source_stream is not None:
            self._log(container_id, 'attempting to save source stream to ' + tpath)
            r = requests.post(tpath, files={'file': source_stream})
        # When neither source is supplied this logs the status of the initial
        # container GET, as the original did.
        self._log(container_id, tpath + ' request resulted in ' + str(r.status_code))

    def exists(self, container_id):
        """True when the container URL answers 200 with a JSON list body."""
        cpath = os.path.join(self.url, container_id)
        r = requests.get(cpath)
        self._log(container_id, 'checking existence ' + str(r.status_code))
        if r.status_code != 200:
            return False
        # The original had an unreachable 'return True' after this
        # try/except (both branches return); it has been dropped.
        try:
            listing = r.json()
            return isinstance(listing, list)
        except Exception:
            return False

    def list(self, container_id):
        """Return the container's file listing, or [] when the body is not JSON."""
        cpath = os.path.join(self.url, container_id)
        r = requests.get(cpath)
        self._log(container_id, 'listing requested and returned')
        try:
            return r.json()
        except Exception:
            return []

    def get(self, container_id, target_name):
        """Return the raw response stream for a stored file, or False when missing."""
        cpath = os.path.join(self.url, container_id, target_name)
        r = requests.get(cpath, stream=True)
        if r.status_code == 200:
            self._log(container_id, cpath + ' retrieved and returning raw')
            return r.raw
        self._log(container_id, cpath + ' could not be retrieved')
        return False

    def delete(self, container_id, target_name=None):
        """Delete one file, or the whole container when target_name is None."""
        cpath = os.path.join(self.url, container_id)
        if target_name is not None:
            cpath = os.path.join(cpath, target_name)
        self._log(container_id, cpath + ' is being deleted')
        requests.delete(cpath)
class TempStore(StoreLocal):
    """Local store rooted at the app's temporary directory (STORE_TMP_DIR)."""

    def __init__(self):
        self.dir = app.config.get("STORE_TMP_DIR")
        if self.dir is None:
            raise StoreException("STORE_TMP_DIR is not defined in config")

    def path(self, container_id, filename, must_exist=True):
        """Return the on-disk path of *filename* within *container_id*.

        Raises StoreException when must_exist is True and the file is absent.
        """
        fpath = os.path.join(self.dir, container_id, filename)
        # Check must_exist first so we skip the stat when it is irrelevant.
        # The original message said "Unable to create path", which was
        # misleading: nothing is created here — the file simply does not exist.
        if must_exist and not os.path.exists(fpath):
            raise StoreException("Path does not exist for container {x}, file {y}".format(x=container_id, y=filename))
        return fpath

    def list_container_ids(self):
        """Return the ids of all containers (sub-directories) in the temp dir."""
        return [x for x in os.listdir(self.dir) if os.path.isdir(os.path.join(self.dir, x))]
| apache-2.0 |
awkspace/ansible | lib/ansible/plugins/callback/osx_say.py | 10 | 2870 | # (c) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: osx_say
type: notification
requirements:
- whitelisting in configuration
- the '/usr/bin/say' command line program (standard on macOS)
short_description: oneline Ansible screen output
version_added: historical
description:
- This plugin will use the 'say' program to "speak" about play events.
'''
import subprocess
import os
from ansible.plugins.callback import CallbackBase
# macOS voices used to distinguish event categories audibly.
FAILED_VOICE = "Zarvox"     # failures / unreachable hosts
REGULAR_VOICE = "Trinoids"  # routine progress messages
HAPPY_VOICE = "Cellos"      # play start / completion
LASER_VOICE = "Princess"    # short "pew" acknowledgements
# Standard text-to-speech binary on macOS; plugin disables itself if absent.
SAY_CMD = "/usr/bin/say"
class CallbackModule(CallbackBase):
    """
    makes Ansible much more exciting on macOS.
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'osx_say'
    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self):
        super(CallbackModule, self).__init__()

        # Disable ourselves when the 'say' binary is absent; Ansible skips
        # callbacks whose disabled flag is set.
        if not os.path.exists(SAY_CMD):
            self.disabled = True
            self._display.warning("%s does not exist, plugin %s disabled" % (SAY_CMD, os.path.basename(__file__)))

    def say(self, msg, voice):
        command = [SAY_CMD, msg, "--voice=%s" % (voice)]
        subprocess.call(command)

    def _fail(self, host):
        # Shared announcement for failed/unreachable hosts.
        self.say("Failure on host %s" % host, FAILED_VOICE)

    def _pew(self):
        # Short acknowledgement sound for minor events.
        self.say("pew", LASER_VOICE)

    def runner_on_failed(self, host, res, ignore_errors=False):
        self._fail(host)

    def runner_on_ok(self, host, res):
        self._pew()

    def runner_on_skipped(self, host, item=None):
        self._pew()

    def runner_on_unreachable(self, host, res):
        self._fail(host)

    def runner_on_async_ok(self, host, res, jid):
        self._pew()

    def runner_on_async_failed(self, host, res, jid):
        self._fail(host)

    def playbook_on_start(self):
        self.say("Running Playbook", REGULAR_VOICE)

    def playbook_on_notify(self, host, handler):
        self._pew()

    def playbook_on_task_start(self, name, is_conditional):
        prefix = "Notifying task" if is_conditional else "Starting task"
        self.say("%s: %s" % (prefix, name), REGULAR_VOICE)

    def playbook_on_setup(self):
        self.say("Gathering facts", REGULAR_VOICE)

    def playbook_on_play_start(self, name):
        self.say("Starting play: %s" % name, HAPPY_VOICE)

    def playbook_on_stats(self, stats):
        self.say("Play complete", HAPPY_VOICE)
| gpl-3.0 |
CS-SI/QGIS | tests/src/python/test_provider_oracle.py | 11 | 7039 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for the Oracle provider.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '2016-07-06'
__copyright__ = 'Copyright 2016, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
from qgis.core import QgsSettings, QgsVectorLayer, QgsFeatureRequest, NULL
from qgis.PyQt.QtCore import QDate, QTime, QDateTime, QVariant
from utilities import unitTestDataPath
from qgis.testing import start_app, unittest
from providertestbase import ProviderTestCase
# Initialise the QGIS application once for the whole test module.
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestPyQgsOracleProvider(unittest.TestCase, ProviderTestCase):
    """Runs the shared ProviderTestCase suite against an Oracle database."""

    @classmethod
    def setUpClass(cls):
        """Run before all tests"""
        # Default connection string; override through QGIS_ORACLETEST_DB.
        cls.dbconn = "host=localhost port=1521 user='QGIS' password='qgis'"
        if 'QGIS_ORACLETEST_DB' in os.environ:
            cls.dbconn = os.environ['QGIS_ORACLETEST_DB']
        # Create test layers
        cls.vl = QgsVectorLayer(
            cls.dbconn + ' sslmode=disable key=\'pk\' srid=4326 type=POINT table="QGIS"."SOME_DATA" (GEOM) sql=', 'test', 'oracle')
        assert(cls.vl.isValid())
        cls.source = cls.vl.dataProvider()
        cls.poly_vl = QgsVectorLayer(
            cls.dbconn + ' sslmode=disable key=\'pk\' srid=4326 type=POLYGON table="QGIS"."SOME_POLY_DATA" (GEOM) sql=', 'test', 'oracle')
        assert(cls.poly_vl.isValid())
        cls.poly_provider = cls.poly_vl.dataProvider()

    @classmethod
    def tearDownClass(cls):
        """Run after all tests"""

    def enableCompiler(self):
        # Turn on server-side expression compilation for the shared tests.
        QgsSettings().setValue('/qgis/compileExpressions', True)

    def disableCompiler(self):
        QgsSettings().setValue('/qgis/compileExpressions', False)

    def uncompiledFilters(self):
        """Expressions the Oracle provider cannot compile to SQL (evaluated client-side)."""
        filters = set([
            '(name = \'Apple\') is not null',
            '"name" || \' \' || "name" = \'Orange Orange\'',
            '"name" || \' \' || "cnt" = \'Orange 100\'',
            '\'x\' || "name" IS NOT NULL',
            '\'x\' || "name" IS NULL',
            'false and NULL',
            'true and NULL',
            'NULL and false',
            'NULL and true',
            'NULL and NULL',
            'false or NULL',
            'true or NULL',
            'NULL or false',
            'NULL or true',
            'NULL or NULL',
            'not null',
            'sqrt(pk) >= 2',
            'radians(cnt) < 2',
            'degrees(pk) <= 200',
            'abs(cnt) <= 200',
            'cos(pk) < 0',
            'sin(pk) < 0',
            'tan(pk) < 0',
            'acos(-1) < pk',
            'asin(1) < pk',
            'atan(3.14) < pk',
            'atan2(3.14, pk) < 1',
            'exp(pk) < 10',
            'ln(pk) <= 1',
            'log(3, pk) <= 1',
            'log10(pk) < 0.5',
            'round(3.14) <= pk',
            'floor(3.14) <= pk',
            'ceil(3.14) <= pk',
            'pk < pi()',
            'round(cnt / 66.67) <= 2',
            'floor(cnt / 66.67) <= 2',
            'ceil(cnt / 66.67) <= 2',
            'pk < pi() / 2',
            'x($geometry) < -70',
            'y($geometry) > 70',
            'xmin($geometry) < -70',
            'ymin($geometry) > 70',
            'xmax($geometry) < -70',
            'ymax($geometry) > 70',
            'disjoint($geometry,geom_from_wkt( \'Polygon ((-72.2 66.1, -65.2 66.1, -65.2 72.0, -72.2 72.0, -72.2 66.1))\'))',
            'intersects($geometry,geom_from_wkt( \'Polygon ((-72.2 66.1, -65.2 66.1, -65.2 72.0, -72.2 72.0, -72.2 66.1))\'))',
            'contains(geom_from_wkt( \'Polygon ((-72.2 66.1, -65.2 66.1, -65.2 72.0, -72.2 72.0, -72.2 66.1))\'),$geometry)',
            'distance($geometry,geom_from_wkt( \'Point (-70 70)\')) > 7',
            'intersects($geometry,geom_from_gml( \'<gml:Polygon srsName="EPSG:4326"><gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>-72.2,66.1 -65.2,66.1 -65.2,72.0 -72.2,72.0 -72.2,66.1</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs></gml:Polygon>\'))',
            'x($geometry) < -70',
            'y($geometry) > 79',
            'xmin($geometry) < -70',
            'ymin($geometry) < 76',
            'xmax($geometry) > -68',
            'ymax($geometry) > 80',
            'area($geometry) > 10',
            'perimeter($geometry) < 12',
            'relate($geometry,geom_from_wkt( \'Polygon ((-68.2 82.1, -66.95 82.1, -66.95 79.05, -68.2 79.05, -68.2 82.1))\')) = \'FF2FF1212\'',
            'relate($geometry,geom_from_wkt( \'Polygon ((-68.2 82.1, -66.95 82.1, -66.95 79.05, -68.2 79.05, -68.2 82.1))\'), \'****F****\')',
            'crosses($geometry,geom_from_wkt( \'Linestring (-68.2 82.1, -66.95 82.1, -66.95 79.05)\'))',
            'overlaps($geometry,geom_from_wkt( \'Polygon ((-68.2 82.1, -66.95 82.1, -66.95 79.05, -68.2 79.05, -68.2 82.1))\'))',
            'within($geometry,geom_from_wkt( \'Polygon ((-75.1 76.1, -75.1 81.6, -68.8 81.6, -68.8 76.1, -75.1 76.1))\'))',
            'overlaps(translate($geometry,-1,-1),geom_from_wkt( \'Polygon ((-75.1 76.1, -75.1 81.6, -68.8 81.6, -68.8 76.1, -75.1 76.1))\'))',
            'overlaps(buffer($geometry,1),geom_from_wkt( \'Polygon ((-75.1 76.1, -75.1 81.6, -68.8 81.6, -68.8 76.1, -75.1 76.1))\'))',
            'intersects(centroid($geometry),geom_from_wkt( \'Polygon ((-74.4 78.2, -74.4 79.1, -66.8 79.1, -66.8 78.2, -74.4 78.2))\'))',
            'intersects(point_on_surface($geometry),geom_from_wkt( \'Polygon ((-74.4 78.2, -74.4 79.1, -66.8 79.1, -66.8 78.2, -74.4 78.2))\'))'
        ])
        return filters

    # HERE GO THE PROVIDER SPECIFIC TESTS
    def testDateTimeTypes(self):
        """Oracle DATE/TIMESTAMP columns must map to QDate/QDateTime attributes."""
        vl = QgsVectorLayer('%s table="QGIS"."DATE_TIMES" sql=' %
                            (self.dbconn), "testdatetimes", "oracle")
        self.assertTrue(vl.isValid())

        fields = vl.dataProvider().fields()
        self.assertEqual(fields.at(fields.indexFromName(
            'date_field')).type(), QVariant.Date)
        self.assertEqual(fields.at(fields.indexFromName(
            'datetime_field')).type(), QVariant.DateTime)

        f = next(vl.getFeatures(QgsFeatureRequest()))

        date_idx = vl.fields().lookupField('date_field')
        self.assertIsInstance(f.attributes()[date_idx], QDate)
        self.assertEqual(f.attributes()[date_idx], QDate(2004, 3, 4))
        datetime_idx = vl.fields().lookupField('datetime_field')
        self.assertIsInstance(f.attributes()[datetime_idx], QDateTime)
        self.assertEqual(f.attributes()[datetime_idx], QDateTime(
            QDate(2004, 3, 4), QTime(13, 41, 52)))

    def testDefaultValue(self):
        self.assertEqual(self.source.defaultValue(1), NULL)
        self.assertEqual(self.source.defaultValue(2), "'qgis'")
# Run the provider test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
martyngigg/pyqt-msvc | examples/webkit/googlechat/ui_form.py | 6 | 5672 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'form.ui'
#
# Created: Mon Nov 29 16:57:47 2010
# by: PyQt4 UI code generator snapshot-4.8.2-241fbaf4620d
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# PyQt4 API v1 exposes QString.fromUtf8; under API v2 plain Python strings
# are used instead, so fall back to the identity function there.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
class Ui_Form(object):
    """UI scaffold generated by pyuic4 from form.ui.

    NOTE: this class is machine-generated; per the file header, any manual
    change is lost when the .ui file is recompiled.
    """
    def setupUi(self, Form):
        """Build the widget tree on *Form*: a QStackedWidget with three pages
        (page_3 = loading/progress, page = login form, page_2 = web view)."""
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(286, 413)
        # Outer layout hosting the stacked widget, flush with the window edges.
        self.verticalLayout = QtGui.QVBoxLayout(Form)
        self.verticalLayout.setSpacing(0)
        self.verticalLayout.setMargin(0)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.stackedWidget = QtGui.QStackedWidget(Form)
        self.stackedWidget.setObjectName(_fromUtf8("stackedWidget"))
        # --- page_3: loading page (centred status label + progress bar) ---
        self.page_3 = QtGui.QWidget()
        self.page_3.setObjectName(_fromUtf8("page_3"))
        self.verticalLayout_4 = QtGui.QVBoxLayout(self.page_3)
        self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
        spacerItem = QtGui.QSpacerItem(20, 170, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout_4.addItem(spacerItem)
        self.statusLabel = QtGui.QLabel(self.page_3)
        self.statusLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.statusLabel.setWordWrap(True)
        self.statusLabel.setObjectName(_fromUtf8("statusLabel"))
        self.verticalLayout_4.addWidget(self.statusLabel)
        self.progressBar = QtGui.QProgressBar(self.page_3)
        self.progressBar.setProperty(_fromUtf8("value"), 24)  # placeholder value from Designer
        self.progressBar.setObjectName(_fromUtf8("progressBar"))
        self.verticalLayout_4.addWidget(self.progressBar)
        spacerItem1 = QtGui.QSpacerItem(20, 169, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout_4.addItem(spacerItem1)
        self.stackedWidget.addWidget(self.page_3)
        # --- page: login page (username/password fields + centred Login button) ---
        self.page = QtGui.QWidget()
        self.page.setObjectName(_fromUtf8("page"))
        self.verticalLayout_2 = QtGui.QVBoxLayout(self.page)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        spacerItem2 = QtGui.QSpacerItem(20, 119, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout_2.addItem(spacerItem2)
        self.userNameLabel = QtGui.QLabel(self.page)
        self.userNameLabel.setObjectName(_fromUtf8("userNameLabel"))
        self.verticalLayout_2.addWidget(self.userNameLabel)
        self.userNameEdit = QtGui.QLineEdit(self.page)
        self.userNameEdit.setObjectName(_fromUtf8("userNameEdit"))
        self.verticalLayout_2.addWidget(self.userNameEdit)
        spacerItem3 = QtGui.QSpacerItem(20, 17, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        self.verticalLayout_2.addItem(spacerItem3)
        self.passwordLabel = QtGui.QLabel(self.page)
        self.passwordLabel.setObjectName(_fromUtf8("passwordLabel"))
        self.verticalLayout_2.addWidget(self.passwordLabel)
        self.passwordEdit = QtGui.QLineEdit(self.page)
        self.passwordEdit.setEchoMode(QtGui.QLineEdit.Password)  # mask typed characters
        self.passwordEdit.setObjectName(_fromUtf8("passwordEdit"))
        self.verticalLayout_2.addWidget(self.passwordEdit)
        spacerItem4 = QtGui.QSpacerItem(20, 118, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout_2.addItem(spacerItem4)
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem5)
        self.loginButton = QtGui.QPushButton(self.page)
        self.loginButton.setDefault(True)  # Enter key triggers login
        self.loginButton.setObjectName(_fromUtf8("loginButton"))
        self.horizontalLayout.addWidget(self.loginButton)
        spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem6)
        self.verticalLayout_2.addLayout(self.horizontalLayout)
        self.stackedWidget.addWidget(self.page)
        # --- page_2: web view page (QWebView filling the whole page) ---
        self.page_2 = QtGui.QWidget()
        self.page_2.setObjectName(_fromUtf8("page_2"))
        self.verticalLayout_3 = QtGui.QVBoxLayout(self.page_2)
        self.verticalLayout_3.setSpacing(0)
        self.verticalLayout_3.setMargin(0)
        self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
        self.webView = QtWebKit.QWebView(self.page_2)  # QtWebKit is imported at module bottom
        self.webView.setUrl(QtCore.QUrl(_fromUtf8("about:blank")))
        self.webView.setObjectName(_fromUtf8("webView"))
        self.verticalLayout_3.addWidget(self.webView)
        self.stackedWidget.addWidget(self.page_2)
        self.verticalLayout.addWidget(self.stackedWidget)
        self.retranslateUi(Form)
        # Index 1 is 'page' (the login form), since page_3 was added first.
        self.stackedWidget.setCurrentIndex(1)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Assign translatable display texts to the window and its widgets."""
        Form.setWindowTitle(QtGui.QApplication.translate("Form", "Google Talk Client", None, QtGui.QApplication.UnicodeUTF8))
        self.statusLabel.setText(QtGui.QApplication.translate("Form", "TextLabel", None, QtGui.QApplication.UnicodeUTF8))
        self.userNameLabel.setText(QtGui.QApplication.translate("Form", "Google username:", None, QtGui.QApplication.UnicodeUTF8))
        self.passwordLabel.setText(QtGui.QApplication.translate("Form", "Password:", None, QtGui.QApplication.UnicodeUTF8))
        self.loginButton.setText(QtGui.QApplication.translate("Form", "Login", None, QtGui.QApplication.UnicodeUTF8))
from PyQt4 import QtWebKit
| gpl-3.0 |
doublebits/osf.io | scripts/clone_wiki_pages.py | 17 | 2766 | """
Create copies of wiki pages for existing forks and registrations instead of
using the same NodeWikiPage objects as the original node.
"""
import logging
import sys
from modularodm import Q
from framework.mongo import database as db
from framework.transactions.context import TokuTransaction
from website.addons.wiki.model import NodeWikiPage
from website.models import Node
from website.app import init_app
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
# Mongo collection that archives wiki pages whose parent node no longer exists.
BACKUP_COLLECTION = 'unmigratedwikipages'
def main():
    """Stream every node's wiki bookkeeping fields and clone shared pages."""
    projection = {'_id': True, 'wiki_pages_versions': True, 'wiki_pages_current': True}
    cursor = db.node.find({}, projection).batch_size(200)
    update_wiki_pages(cursor)
def update_wiki_pages(nodes):
    """Give each node its own copies of wiki pages it shares with another node.

    Forks and registrations historically reused the original node's
    NodeWikiPage documents; this clones any page whose stored owner differs
    from the node being processed and rewrites the node's
    ``wiki_pages_versions`` / ``wiki_pages_current`` maps accordingly.

    :param nodes: iterable of raw node documents containing ``_id``,
        ``wiki_pages_versions`` and ``wiki_pages_current``.
    """
    # Note: the original loop used enumerate() but never used the index.
    for node in nodes:
        if node['wiki_pages_versions']:
            cloned_wiki_pages = {}
            for key, wiki_versions in node['wiki_pages_versions'].items():
                cloned_wiki_pages[key] = []
                for wiki_id in wiki_versions:
                    node_wiki = NodeWikiPage.load(wiki_id)
                    if not node_wiki:
                        continue
                    if node_wiki.to_storage()['node'] != node['_id']:
                        # Page is owned by another node. If that node is gone,
                        # archive the page; otherwise clone it for this node.
                        if not node_wiki.node:
                            move_to_backup_collection(node_wiki)
                            continue
                        clone = node_wiki.clone_wiki(node['_id'])
                        logger.info('Cloned wiki page {} from node {} to {}'.format(wiki_id, node_wiki.node, node['_id']))
                        cloned_wiki_pages[key].append(clone._id)
                        # Keep wiki_pages_current pointing at the clone.
                        if node_wiki.is_current:
                            wiki_pages_current = node['wiki_pages_current']
                            wiki_pages_current[key] = clone._id
                            db.node.update({'_id': node['_id']}, {'$set': {'wiki_pages_current': wiki_pages_current}})
                    else:
                        # Already owned by this node; keep the existing id.
                        cloned_wiki_pages[key].append(wiki_id)
            db.node.update({'_id': node['_id']}, {'$set': {'wiki_pages_versions': cloned_wiki_pages}})
# Wiki pages whose owning node no longer exists are pulled out of the
# NodeWikiPage collection and archived in a separate backup collection.
def move_to_backup_collection(node_wiki_page):
    """Archive *node_wiki_page* in the backup collection, then delete it."""
    db[BACKUP_COLLECTION].insert(node_wiki_page.to_storage())
    NodeWikiPage.remove_one(Q('_id', 'eq', node_wiki_page._id))
if __name__ == '__main__':
    # --dry runs the whole migration inside a transaction and then aborts
    # it, leaving the database untouched.
    dry = '--dry' in sys.argv
    if not dry:
        script_utils.add_file_logger(logger, __file__)
    init_app(routes=False, set_backends=True)
    with TokuTransaction():
        main()
        if dry:
            # Raising inside the transaction context rolls everything back.
            raise Exception('Dry Run -- Aborting Transaction')
| apache-2.0 |
nikolas/edx-platform | openedx/core/djangoapps/course_groups/management/commands/tests/test_remove_users_from_multiple_cohorts.py | 91 | 3951 | """
Tests for cleanup of users which are added in multiple cohorts of a course
"""
from django.core.exceptions import MultipleObjectsReturned
from django.core.management import call_command
from django.test.client import RequestFactory
from openedx.core.djangoapps.course_groups.views import cohort_handler
from openedx.core.djangoapps.course_groups.cohorts import get_cohort, get_cohort_by_name
from openedx.core.djangoapps.course_groups.tests.helpers import config_course_cohorts
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class TestMultipleCohortUsers(ModuleStoreTestCase):
    """
    Tests covering cleanup of users that ended up in several cohorts
    of the same course.
    """
    def setUp(self):
        """
        Create two courses, two staff users and a dummy request.
        """
        super(TestMultipleCohortUsers, self).setUp()
        self.course1 = CourseFactory.create()
        self.course2 = CourseFactory.create()
        self.user1 = UserFactory(is_staff=True)
        self.user2 = UserFactory(is_staff=True)
        self.request = RequestFactory().get("dummy_url")
        self.request.user = self.user1

    def test_users_with_multiple_cohorts_cleanup(self):
        """
        A user forced into two cohorts of one course breaks cohort lookup;
        the cleanup command must remove such users from all cohorts while
        leaving correctly-cohorted users untouched.
        """
        # Configure two auto cohort groups on each course.
        for course, groups in ((self.course1, ["Course1AutoGroup1", "Course1AutoGroup2"]),
                               (self.course2, ["Course2AutoGroup1", "Course2AutoGroup2"])):
            config_course_cohorts(course, is_cohorted=True, auto_cohorts=groups)

        # Fetching the cohorts causes the auto cohorts to be created.
        cohort_handler(self.request, unicode(self.course1.id))
        cohort_handler(self.request, unicode(self.course2.id))
        c1_group1 = get_cohort_by_name(self.course1.id, "Course1AutoGroup1")
        c1_group2 = get_cohort_by_name(self.course1.id, "Course1AutoGroup2")
        c2_group1 = get_cohort_by_name(self.course2.id, "Course2AutoGroup1")

        # user1 is forced into two cohorts of the same course (invalid state);
        # user2 is placed in one cohort per course (valid state).
        c1_group1.users.add(self.user1)
        c1_group2.users.add(self.user1)
        c1_group1.users.add(self.user2)
        c2_group1.users.add(self.user2)

        # Cohort lookup now raises for user1 but still works for user2.
        with self.assertRaises(MultipleObjectsReturned):
            get_cohort(self.user1, self.course1.id)
        get_cohort(self.user2, self.course1.id)
        get_cohort(self.user2, self.course2.id)

        # The cleanup command strips multi-cohort users from every cohort,
        # leaving correctly-cohorted users alone.
        call_command('remove_users_from_multiple_cohorts')
        self.assertEqual(self.user1.course_groups.count(), 0)
        self.assertEqual(self.user2.course_groups.count(), 2)
        self.assertEqual(
            list(self.user2.course_groups.values_list('name', flat=True)),
            ['Course1AutoGroup1', 'Course2AutoGroup1']
        )

        # user1 can fetch cohorts again without an exception.
        response = cohort_handler(self.request, unicode(self.course1.id))
        self.assertEqual(response.status_code, 200)
| agpl-3.0 |
chenjun0210/tensorflow | tensorflow/contrib/keras/api/keras/layers/__init__.py | 29 | 8012 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras layers API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Generic layers.
# pylint: disable=g-bad-import-order
from tensorflow.contrib.keras.python.keras.engine import Input
from tensorflow.contrib.keras.python.keras.engine import InputLayer
from tensorflow.contrib.keras.python.keras.engine import InputSpec
from tensorflow.contrib.keras.python.keras.engine import Layer
# Advanced activations.
from tensorflow.contrib.keras.python.keras.layers.advanced_activations import LeakyReLU
from tensorflow.contrib.keras.python.keras.layers.advanced_activations import PReLU
from tensorflow.contrib.keras.python.keras.layers.advanced_activations import ELU
from tensorflow.contrib.keras.python.keras.layers.advanced_activations import ThresholdedReLU
# Convolution layers.
from tensorflow.contrib.keras.python.keras.layers.convolutional import Conv1D
from tensorflow.contrib.keras.python.keras.layers.convolutional import Conv2D
from tensorflow.contrib.keras.python.keras.layers.convolutional import Conv3D
from tensorflow.contrib.keras.python.keras.layers.convolutional import Conv2DTranspose
from tensorflow.contrib.keras.python.keras.layers.convolutional import SeparableConv2D
# Convolution layer aliases.
from tensorflow.contrib.keras.python.keras.layers.convolutional import Convolution1D
from tensorflow.contrib.keras.python.keras.layers.convolutional import Convolution2D
from tensorflow.contrib.keras.python.keras.layers.convolutional import Convolution3D
from tensorflow.contrib.keras.python.keras.layers.convolutional import Convolution2DTranspose
from tensorflow.contrib.keras.python.keras.layers.convolutional import SeparableConvolution2D
# Image processing layers.
from tensorflow.contrib.keras.python.keras.layers.convolutional import UpSampling1D
from tensorflow.contrib.keras.python.keras.layers.convolutional import UpSampling2D
from tensorflow.contrib.keras.python.keras.layers.convolutional import UpSampling3D
from tensorflow.contrib.keras.python.keras.layers.convolutional import ZeroPadding1D
from tensorflow.contrib.keras.python.keras.layers.convolutional import ZeroPadding2D
from tensorflow.contrib.keras.python.keras.layers.convolutional import ZeroPadding3D
from tensorflow.contrib.keras.python.keras.layers.convolutional import Cropping1D
from tensorflow.contrib.keras.python.keras.layers.convolutional import Cropping2D
from tensorflow.contrib.keras.python.keras.layers.convolutional import Cropping3D
# Convolutional-recurrent layers.
from tensorflow.contrib.keras.python.keras.layers.convolutional_recurrent import ConvLSTM2D
# Core layers.
from tensorflow.contrib.keras.python.keras.layers.core import Masking
from tensorflow.contrib.keras.python.keras.layers.core import Dropout
from tensorflow.contrib.keras.python.keras.layers.core import SpatialDropout1D
from tensorflow.contrib.keras.python.keras.layers.core import SpatialDropout2D
from tensorflow.contrib.keras.python.keras.layers.core import SpatialDropout3D
from tensorflow.contrib.keras.python.keras.layers.core import Activation
from tensorflow.contrib.keras.python.keras.layers.core import Reshape
from tensorflow.contrib.keras.python.keras.layers.core import Permute
from tensorflow.contrib.keras.python.keras.layers.core import Flatten
from tensorflow.contrib.keras.python.keras.layers.core import RepeatVector
from tensorflow.contrib.keras.python.keras.layers.core import Lambda
from tensorflow.contrib.keras.python.keras.layers.core import Dense
from tensorflow.contrib.keras.python.keras.layers.core import ActivityRegularization
# Embedding layers.
from tensorflow.contrib.keras.python.keras.layers.embeddings import Embedding
# Locally-connected layers.
from tensorflow.contrib.keras.python.keras.layers.local import LocallyConnected1D
from tensorflow.contrib.keras.python.keras.layers.local import LocallyConnected2D
# Merge layers.
from tensorflow.contrib.keras.python.keras.layers.merge import Add
from tensorflow.contrib.keras.python.keras.layers.merge import Multiply
from tensorflow.contrib.keras.python.keras.layers.merge import Average
from tensorflow.contrib.keras.python.keras.layers.merge import Maximum
from tensorflow.contrib.keras.python.keras.layers.merge import Concatenate
from tensorflow.contrib.keras.python.keras.layers.merge import Dot
from tensorflow.contrib.keras.python.keras.layers.merge import add
from tensorflow.contrib.keras.python.keras.layers.merge import multiply
from tensorflow.contrib.keras.python.keras.layers.merge import average
from tensorflow.contrib.keras.python.keras.layers.merge import maximum
from tensorflow.contrib.keras.python.keras.layers.merge import concatenate
from tensorflow.contrib.keras.python.keras.layers.merge import dot
# Noise layers.
from tensorflow.contrib.keras.python.keras.layers.noise import GaussianNoise
from tensorflow.contrib.keras.python.keras.layers.noise import GaussianDropout
# Normalization layers.
from tensorflow.contrib.keras.python.keras.layers.normalization import BatchNormalization
# Pooling layers.
from tensorflow.contrib.keras.python.keras.layers.pooling import MaxPooling1D
from tensorflow.contrib.keras.python.keras.layers.pooling import MaxPooling2D
from tensorflow.contrib.keras.python.keras.layers.pooling import MaxPooling3D
from tensorflow.contrib.keras.python.keras.layers.pooling import AveragePooling1D
from tensorflow.contrib.keras.python.keras.layers.pooling import AveragePooling2D
from tensorflow.contrib.keras.python.keras.layers.pooling import AveragePooling3D
from tensorflow.contrib.keras.python.keras.layers.pooling import GlobalAveragePooling1D
from tensorflow.contrib.keras.python.keras.layers.pooling import GlobalAveragePooling2D
from tensorflow.contrib.keras.python.keras.layers.pooling import GlobalAveragePooling3D
from tensorflow.contrib.keras.python.keras.layers.pooling import GlobalMaxPooling1D
from tensorflow.contrib.keras.python.keras.layers.pooling import GlobalMaxPooling2D
from tensorflow.contrib.keras.python.keras.layers.pooling import GlobalMaxPooling3D
# Pooling layer aliases.
from tensorflow.contrib.keras.python.keras.layers.pooling import MaxPool1D
from tensorflow.contrib.keras.python.keras.layers.pooling import MaxPool2D
from tensorflow.contrib.keras.python.keras.layers.pooling import MaxPool3D
from tensorflow.contrib.keras.python.keras.layers.pooling import AvgPool1D
from tensorflow.contrib.keras.python.keras.layers.pooling import AvgPool2D
from tensorflow.contrib.keras.python.keras.layers.pooling import AvgPool3D
from tensorflow.contrib.keras.python.keras.layers.pooling import GlobalAvgPool1D
from tensorflow.contrib.keras.python.keras.layers.pooling import GlobalAvgPool2D
from tensorflow.contrib.keras.python.keras.layers.pooling import GlobalAvgPool3D
from tensorflow.contrib.keras.python.keras.layers.pooling import GlobalMaxPool1D
from tensorflow.contrib.keras.python.keras.layers.pooling import GlobalMaxPool2D
from tensorflow.contrib.keras.python.keras.layers.pooling import GlobalMaxPool3D
# Recurrent layers.
from tensorflow.contrib.keras.python.keras.layers.recurrent import SimpleRNN
from tensorflow.contrib.keras.python.keras.layers.recurrent import GRU
from tensorflow.contrib.keras.python.keras.layers.recurrent import LSTM
# Remove the __future__ names so they are not re-exported as part of the
# public keras.layers namespace.
del absolute_import
del division
del print_function
| apache-2.0 |
amenonsen/ansible | lib/ansible/modules/network/vyos/vyos_facts.py | 1 | 5074 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The module file for vyos_facts
"""
# Ansible module metadata: maturity status and supporting team.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': [u'preview'],
                    'supported_by': 'network'}
DOCUMENTATION = """
---
module: vyos_facts
version_added: 2.2
short_description: Get facts about vyos devices.
description:
- Collects facts from network devices running the vyos operating
system. This module places the facts gathered in the fact tree keyed by the
respective resource name. The facts module will always collect a
base set of facts from the device and can enable or disable
collection of additional facts.
author:
- Nathaniel Case (@qalthos)
- Nilashish Chakraborty (@Nilashishc)
- Rohit Thakur (@rohitthakur2590)
extends_documentation_fragment: vyos
notes:
- Tested against VyOS 1.1.8 (helium).
- This module works with connection C(network_cli). See L(the VyOS OS Platform Options,../network/user_guide/platform_vyos.html).
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, default, config, and neighbors. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: "!config"
gather_network_resources:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all and the resources like interfaces.
Can specify a list of values to include a larger subset. Values
can also be used with an initial C(M(!)) to specify that a
specific subset should not be collected.
required: false
version_added: "2.9"
choices: ['all', 'interfaces', '!interfaces', 'l3_interfaces', '!l3_interfaces','lag_interfaces', '!lag_interfaces',
'lldp_global', '!lldp_global','lldp_interfaces', '!lldp_interfaces']
"""
EXAMPLES = """
# Gather all facts
- vyos_facts:
gather_subset: all
gather_network_resources: all
# collect only the config and default facts
- vyos_facts:
gather_subset: config
# collect everything exception the config
- vyos_facts:
gather_subset: "!config"
# Collect only the interfaces facts
- vyos_facts:
gather_subset:
- '!all'
- '!min'
gather_network_resources:
- interfaces
# Do not collect interfaces facts
- vyos_facts:
gather_network_resources:
- "!interfaces"
# Collect interfaces and minimal default facts
- vyos_facts:
gather_subset: min
gather_network_resources: interfaces
"""
RETURN = """
ansible_net_config:
description: The running-config from the device
returned: when config is configured
type: str
ansible_net_commits:
description: The set of available configuration revisions
returned: when present
type: list
ansible_net_hostname:
description: The configured system hostname
returned: always
type: str
ansible_net_model:
description: The device model string
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the device
returned: always
type: str
ansible_net_version:
description: The version of the software running
returned: always
type: str
ansible_net_neighbors:
description: The set of LLDP neighbors
returned: when interface is configured
type: list
ansible_net_gather_subset:
description: The list of subsets gathered by the module
returned: always
type: list
ansible_net_api:
description: The name of the transport
returned: always
type: str
ansible_net_python_version:
description: The Python version Ansible controller is using
returned: always
type: str
ansible_net_gather_network_resources:
description: The list of fact resource subsets collected from the device
returned: always
type: list
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.vyos.argspec.facts.facts import FactsArgs
from ansible.module_utils.network.vyos.facts.facts import Facts
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
def main():
    """
    Entry point for module execution: gather VyOS facts and exit
    with the collected ``ansible_facts``.
    """
    argument_spec = FactsArgs.argument_spec
    argument_spec.update(vyos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    # Deprecation notice surfaced to the user alongside any facts warnings.
    warnings = ['default value for `gather_subset` '
                'will be changed to `min` from `!config` v2.11 onwards']

    ansible_facts, additional_warnings = Facts(module).get_facts()
    warnings.extend(additional_warnings)
    module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
# Allow direct execution; Ansible invokes main() the same way.
if __name__ == '__main__':
    main()
| gpl-3.0 |
abirger/compliance-checker | compliance_checker/cf/appendix_f.py | 2 | 2157 | #!/usr/bin/env python
'''
Appendix F. Grid Mappings
---
Each recognized grid mapping is described in one of the sections below. Each
section contains: the valid name that is used with the grid_mapping_name
attribute; a list of the specific attributes that may be used to assign values
to the mapping's parameters; the standard names used to identify the coordinate
variables that contain the mapping's independent variables; and references to
the mapping's definition or other information that may help in using the
mapping. Since the attributes used to set a mapping's parameters may be shared
among several mappings, their definitions are contained in a table in the final
section. The attributes which describe the ellipsoid and prime meridian may be
included, when applicable, with any grid mapping.
We have used the FGDC "Content Standard for Digital Geospatial Metadata" [FGDC]
as a guide in choosing the values for grid_mapping_name and the attribute names
for the parameters describing map projections.
'''
# Valid values of the grid_mapping_name attribute (CF Appendix F).
grid_mapping_names = """
    albers_conical_equal_area
    azimuthal_equidistant
    lambert_azimuthal_equal_area
    lambert_conformal_conic
    lambert_cylindrical_equal_area
    latitude_longitude
    mercator
    orthographic
    polar_stereographic
    rotated_latitude_longitude
    stereographic
    transverse_mercator
    vertical_perspective
""".split()

# Attributes that may assign values to a grid mapping's parameters,
# including the ellipsoid / prime meridian attributes shared by all mappings.
grid_mapping_attrs = """
    earth_radius
    false_easting
    false_northing
    grid_mapping_name
    grid_north_pole_latitude
    grid_north_pole_longitude
    inverse_flattening
    latitude_of_projection_origin
    longitude_of_central_meridian
    longitude_of_prime_meridian
    longitude_of_projection_origin
    north_pole_grid_longitude
    perspective_point_height
    scale_factor_at_central_meridian
    scale_factor_at_projection_origin
    semi_major_axis
    semi_minor_axis
    standard_parallel
    straight_vertical_longitude_from_pole
""".split()
| apache-2.0 |
nchammas/asyncssh | asyncssh/crypto/__init__.py | 1 | 1106 | # Copyright (c) 2014-2015 by Ron Frederick <ronf@timeheart.net>.
# All rights reserved.
#
# This program and the accompanying materials are made available under
# the terms of the Eclipse Public License v1.0 which accompanies this
# distribution and is available at:
#
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Ron Frederick - initial implementation, API, and documentation
"""A shim for accessing cryptographic primitives needed by asyncssh"""
import importlib
from .cipher import register_cipher, lookup_cipher
from . import chacha
import importlib.util

# importlib.find_loader() was deprecated in Python 3.4 and removed in 3.12;
# find_spec() is the supported way to test for an importable package.
# Truthiness of the result is preserved for existing callers.
pyca_available = importlib.util.find_spec('cryptography') is not None
pycrypto_available = importlib.util.find_spec('Crypto') is not None

if pyca_available:
    from . import pyca

if pycrypto_available:
    from . import pycrypto

# Prefer the pyca 'cryptography' backend for public key support, falling
# back to PyCrypto; fail loudly if neither library is installed.
if pyca_available:
    from .pyca.dsa import DSAPrivateKey, DSAPublicKey
    from .pyca.rsa import RSAPrivateKey, RSAPublicKey
elif pycrypto_available:
    from .pycrypto.dsa import DSAPrivateKey, DSAPublicKey
    from .pycrypto.rsa import RSAPrivateKey, RSAPublicKey
else:
    raise ImportError('No suitable crypto library found.')
| epl-1.0 |
JHUISI/charm | charm/test/schemes/abenc/abenc_waters09_test.py | 2 | 1130 | import unittest
from charm.schemes.abenc.abenc_waters09 import CPabe09
from charm.toolbox.pairinggroup import PairingGroup, GT
# Flip to True to print intermediate keys/ciphertexts while the test runs.
debug = False
class CPabe09Test(unittest.TestCase):
    def testCPabe(self):
        """Round-trip a random GT element through the Waters09 CP-ABE scheme."""
        # Pairing group providing the bilinear map the scheme requires.
        group = PairingGroup('SS512')
        scheme = CPabe09(group)
        (msk, pk) = scheme.setup()

        policy = '((ONE or THREE) and (TWO or FOUR))'
        attributes = ['THREE', 'ONE', 'TWO']
        if debug:
            print('Acces Policy: %s' % policy)
            print('User credential list: %s' % attributes)

        message = group.random(GT)
        key = scheme.keygen(pk, msk, attributes)
        if debug:
            print("\nSecret key: %s" % attributes)
            group.debug(key)

        ciphertext = scheme.encrypt(pk, message, policy)
        if debug:
            print("\nCiphertext...")
            group.debug(ciphertext)

        recovered = scheme.decrypt(pk, key, ciphertext)
        assert message == recovered, 'FAILED Decryption!!!'
        if debug:
            print('Successful Decryption!')
        del group
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
MattNolanLab/Ramsden_MEC | ABAFunctions/ABA_errors.py | 1 | 4010 | '''
Code for error analysis
Copyright (c) 2014, Helen Ramsden
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import Image, ImageChops
import numpy as np
from scipy import ndimage
from GenericFunctions import checkOSpath, adjust_spines,st
import matplotlib.pyplot as plt
plt.rc('ytick', labelsize=12)
plt.rc('xtick', labelsize=12)
plt.rc('axes', labelsize=12)
plt.rc('axes', titlesize=20)
def checksegmented(segmaskfilepath, filedict, resultsfilepath):
    '''
    Check every segmented mask: record its centre of mass and its size
    (number of black pixels).

    input: directory of segmented mask images (e.g. SegmentedMask/) and an
        iterable of mask file names
    output: tab-separated file 'maskstatssize.txt' with one row per mask:
        name, centre-of-mass row, centre-of-mass column, mask size
    '''
    # 'with' guarantees the stats file is flushed and closed even if a mask
    # fails to load (the original code leaked the handle).
    with open(resultsfilepath + 'maskstatssize.txt', 'w') as newfile:
        for f in filedict:
            newfile.write(f)
            maskim = Image.open(segmaskfilepath + f).convert('L')  # 8-bit, not RGB
            maskim = ImageChops.invert(maskim)
            maskarray = np.array(maskim)
            # Centre of mass of the (inverted) mask image.
            com = ndimage.center_of_mass(maskarray)
            # Mask size = count of black pixels after inversion.
            blackpixels = np.nonzero(maskarray == 0)
            masksize = len(blackpixels[0])
            newfile.write('\t' + '\t'.join([str(com[0]), str(com[1]), str(masksize)]))
            newfile.write('\n')
def plotmi():
    '''
    Plot the distribution of MI (joint entropy) scores from the registration
    output and save the histogram to 'MIlogdata.png'.

    Columns 2 and 3 of 'alllogdata.txt' hold the pre- and post-registration
    scores. NaNs are masked out and scores <= -1000 are treated as failed
    registrations and dropped before plotting overlaid step histograms.
    '''
    milog = np.loadtxt('alllogdata.txt',delimiter = '\t',dtype = float,usecols=[2,3])
    # Get rid of nans, then drop extreme sentinel values (<= -1000).
    milogmaskpre = np.ma.masked_array(milog[:,0],np.isnan(milog[:,0]))
    milogmaskpost = np.ma.masked_array(milog[:,1],np.isnan(milog[:,1]))
    milogmaskpre = milogmaskpre[milogmaskpre>-1000]
    milogmaskpost = milogmaskpost[milogmaskpost>-1000]
    fig = plt.figure(figsize = (8,8))
    fig.subplots_adjust(bottom=0.2)
    fig.subplots_adjust(left=0.2)
    ax = fig.add_subplot(1,1,1)
    adjust_spines(ax, ['left','bottom'])
    # Overlaid step histograms: blue = pre-registration, green = post.
    ax.hist(milogmaskpre, bins=20,histtype='step',color='b', range = [-600,0])
    ax.hist(milogmaskpost,bins=20,histtype='step',color='g', range = [-600,0]) # normed=True,
    # Show only the extreme ticks so the axes stay uncluttered.
    [xmin, xmax, ymin, ymax] = ax.axis()
    ax.set_yticks([ymin,ymax])
    ax.set_yticklabels([int(ymin),int(ymax)], fontsize = 25)
    ax.xaxis.set_label_coords(0.5, -0.15)
    ax.set_xticks([xmin,xmax])
    ax.set_xticklabels([xmin,xmax], fontsize = 25)
    ax.set_xlabel('Joint Entropy', fontsize = 25)
    ax.set_ylabel('Frequency', fontsize = 25)
    ax.yaxis.set_label_coords( -0.15, 0.5)
    fig.savefig('MIlogdata.png', transparent = True)
| bsd-3-clause |
a-bioinformatician/BreaKmer | breakmer.py | 1 | 6445 | #! /usr/bin/local/python
# -*- coding: utf-8 -*-
import argparse
import breakmer.params as params
import breakmer.processor.analysis as breakmer_analysis
__author__ = "Ryan Abo"
__copyright__ = "Copyright 2015, Ryan Abo"
__email__ = "ryanabo@gmail.com"
__license__ = "MIT"
'''
Main script that initiates the BreaKmer analysis or auxiliary functions to setup BreaKmer for analysis.
There are three functions provided:
1. run = perform analysis to detect structural variation.
2. start_blat_server = start the blat server in the background for analysis.
3. prepare_reference_data = prepare the reference data for the target regions that are specified in the input files.
The blat server provides a challenge in workflow. The best method is to:
1. prepare reference data using 'prepare_reference_data' function
2. start the blat server using 'start_blat_server' function
breakmer.py start_blat_server -p <port_number> --hostname <hostname> -c <config file>
3. run the analysis and keep the blat server alive in the background for use in other analyses.
breakmer.py run -k -p <port_number> --hostname <hostname> -c <config file> -n <nprocessors> -g <gene_list>
'''
# Command-line interface: one top-level parser with three sub-commands
# (run, start_blat_server, prepare_reference_data), dispatched via 'fncCmd'.
PARSER = argparse.ArgumentParser(description='Program to identify structural variants within targeted locations.', usage='%(prog)s [options]', add_help=True)
SUBPARSERS = PARSER.add_subparsers(help='Program mode (run, start_blat_server, prepare_reference_data).', dest='fncCmd')
# Setup three separate parsers for the three different functions.
RUN_PARSER = SUBPARSERS.add_parser('run', help='Run analysis to detect structural variants.')
SERVER_PARSER = SUBPARSERS.add_parser('start_blat_server', help='Start the blat server prior to performing the analysis.')
REF_PARSER = SUBPARSERS.add_parser('prepare_reference_data', help='Prepare the reference sequence data for target regions prior to analysis.')
# Run parser: thresholds and filters that control variant calling.
RUN_PARSER.add_argument('--log_level', dest='log_level', default='DEBUG', help='Log level [default: DEBUG]')
RUN_PARSER.add_argument('--indel_size', dest='indel_size', default=15, type=int, help='Indel size filter. [default: %(default)s]')
RUN_PARSER.add_argument('--trl_sr_thresh', dest='trl_sr_thresh', default=2, type=int, help='Split read support threshold for translocations. [default: %(default)s]')
RUN_PARSER.add_argument('--indel_sr_thresh', dest='indel_sr_thresh', default=5, type=int, help='Split read support threshold for indels. [default: %(default)s]')
RUN_PARSER.add_argument('--rearr_sr_thresh', dest='rearr_sr_thresh', default=2, type=int, help='Split read support threshold for rearrangements. [default: %(default)s]')
RUN_PARSER.add_argument('--rearr_min_seg_len', dest='rearr_minseg_len', default=30, type=int, help='Threshold for minimum segment to be rearranged. [default: %(default)s]')
RUN_PARSER.add_argument('--trl_min_seg_len', dest='trl_minseg_len', default=25, type=int, help='Threshold for minimum length of translocation segment. [default: %(default)s]')
# BUGFIX: align_thresh is a fraction (default .90) and must be parsed as a
# float; the original 'type=int' rejected/truncated any user-supplied value.
RUN_PARSER.add_argument('--align_thresh', dest='align_thresh', default=.90, type=float, help='Threshold for minimum read alignment for assembly. [default: %(default)s]')
RUN_PARSER.add_argument('--no_output_header', dest='no_output_header', default=False, action='store_true', help='Suppress output headers. [default: %(default)s]')
RUN_PARSER.add_argument('--discread_only_thresh', dest='discread_only_thresh', default=2, type=int, help='The number of discordant read pairs in a cluster to output without evidence from a split read event. [default: %(default)s]')
RUN_PARSER.add_argument('--generate_image', dest='generate_image', default=False, action='store_true', help='Generate pileup image for events. [default: %(default)s]')
RUN_PARSER.add_argument('--hostname', dest='blat_hostname', default='localhost', help='The hostname for the blat server. Localhost will be used if not specified. [default: %(default)s]')
RUN_PARSER.add_argument('-g', '--gene_list', dest='gene_list', default=None, help='Gene list to consider for analysis. [default: %(default)s]')
RUN_PARSER.add_argument('-f', '--filter_list', dest='filterList', default=None, help='Input a set of events to filter out. [default: %(default)s]')
RUN_PARSER.add_argument('-n', '--nprocessors', dest='nprocs', default=1, type=int, help='The number of processors to use for analysis. [default: %(default)s]')
RUN_PARSER.add_argument('-s', '--start_blat_server', dest='start_blat_server', default=False, action='store_true', help='Start the blat server. Random port number and localhost will be used if neither specified. [default: %(default)s]')
RUN_PARSER.add_argument('-k', '--keep_blat_server', dest='keep_blat_server', default=False, action='store_true', help='Keep the blat server alive. [default: %(default)s]')
RUN_PARSER.add_argument('-p', '--port_number', dest='blat_port', default=None, type=int, help='The port number for the blat server. A random port number (8000-9500) will be used if not specified. [default: %(default)s]')
RUN_PARSER.add_argument('-c', '--config', dest='config_fn', default=None, required=True, help='The configuration filename that contains additional parameters. [default: %(default)s]')
# Server parser: options to launch the standalone blat server.
SERVER_PARSER.add_argument('-p', '--port_number', dest='blat_port', default=None, type=int, help='The port number for the blat server. A random port number (8000-9500) will be used if not specified. [default: %(default)s]')
SERVER_PARSER.add_argument('--hostname', dest='blat_hostname', default='localhost', help='The hostname for the blat server. Localhost will be used if not specified. [default: %(default)s]')
SERVER_PARSER.add_argument('-c', '--config', dest='config_fn', default=None, required=True, help='The configuration filename that contains additional parameters. [default: %(default)s]')
# Setup reference parser: options to pre-extract target reference sequences.
REF_PARSER.add_argument('-g', '--gene_list', dest='gene_list', default=None, help='Gene list to consider for analysis. [default: %(default)s]')
REF_PARSER.add_argument('-c', '--config', dest='config_fn', default=None, required=True, help='The configuration filename that contains additional parameters. [default: %(default)s]')
REF_PARSER.add_argument('-n', '--nprocessors', dest='nprocs', default=1, type=int, help='The number of processors to use for analysis. [default: %(default)s]')
# Start analysis.
if __name__ == '__main__':
    # Guard the kickoff so that importing this module (e.g. for testing) does
    # not parse sys.argv and launch a full analysis as a side effect.
    RUN_TRACKER = breakmer_analysis.RunTracker(params.ParamManager(PARSER.parse_args()))
    RUN_TRACKER.run()
| mit |
jiajiechen/mxnet | example/python-howto/multiple_outputs.py | 41 | 1505 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Create a Multiple output configuration.
This example shows how to create a multiple output configuration.
"""
import mxnet as mx
# Build a simple two-layer MLP; fc1's raw activations will also be exposed.
net = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(data=net, name='fc1', num_hidden=128)
net = mx.symbol.Activation(data=fc1, name='relu1', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='fc2', num_hidden=64)
out = mx.symbol.SoftmaxOutput(data=net, name='softmax')
# group fc1 and out together so a single executor yields both outputs
group = mx.symbol.Group([fc1, out])
# Use the print() function form so the example runs under Python 2 and 3
# (the original used the Python-2-only print statement).
print(group.list_outputs())
# You can go ahead and bind on the group
# executor = group.simple_bind(data=data_shape)
# executor.forward()
# executor.output[0] will be value of fc1
# executor.output[1] will be value of softmax
| apache-2.0 |
hfp/tensorflow-xsmm | tensorflow/contrib/layers/python/layers/embedding_ops.py | 16 | 42456 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Embedding functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
__all__ = [
"safe_embedding_lookup_sparse", "scattered_embedding_lookup",
"scattered_embedding_lookup_sparse", "embedding_lookup_unique",
"embedding_lookup_sparse_with_distributed_aggregation"
]
def safe_embedding_lookup_sparse(embedding_weights,
                                 sparse_ids,
                                 sparse_weights=None,
                                 combiner=None,
                                 default_id=None,
                                 name=None,
                                 partition_strategy="div",
                                 max_norm=None):
  """Lookup embedding results, accounting for invalid IDs and empty features.

  The partitioned embedding in `embedding_weights` must all be the same shape
  except for the first dimension. The first dimension is allowed to vary as the
  vocabulary size is not necessarily a multiple of `P`. `embedding_weights`
  may be a `PartitionedVariable` as returned by using `tf.get_variable()` with a
  partitioner.

  Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
  with non-positive weight. For an entry with no features, the embedding vector
  for `default_id` is returned, or the 0-vector if `default_id` is not supplied.

  The ids and weights may be multi-dimensional. Embeddings are always aggregated
  along the last dimension.

  Args:
    embedding_weights: A list of `P` float tensors or values representing
      partitioned embedding tensors. Alternatively, a `PartitionedVariable`,
      created by partitioning along dimension 0. The total unpartitioned
      shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the
      vocab size and `e_1, ..., e_m` are the embedding dimensions.
    sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
      ids. `d_0` is typically batch size.
    sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
      float weights corresponding to `sparse_ids`, or `None` if all weights
      are be assumed to be 1.0.
    combiner: A string specifying how to combine embedding results for each
      entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
      the default.
    default_id: The id to use for an entry with no features.
    name: A name for this operation (optional).
    partition_strategy: A string specifying the partitioning strategy.
      Currently `"div"` and `"mod"` are supported. Default is `"div"`.
    max_norm: If not None, all embeddings are l2-normalized to max_norm before
      combining.

  Returns:
    Dense tensor of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.

  Raises:
    ValueError: if `embedding_weights` is empty.
  """
  if combiner is None:
    logging.warn("The default value of combiner will change from \"mean\" "
                 "to \"sqrtn\" after 2016/11/01.")
    combiner = "mean"
  if embedding_weights is None:
    raise ValueError("Missing embedding_weights %s." % embedding_weights)
  if isinstance(embedding_weights, variables.PartitionedVariable):
    embedding_weights = list(embedding_weights)  # get underlying Variables.
  if not isinstance(embedding_weights, list):
    embedding_weights = [embedding_weights]
  if len(embedding_weights) < 1:
    raise ValueError("Missing embedding_weights %s." % embedding_weights)

  dtype = sparse_weights.dtype if sparse_weights is not None else None
  # At this point embedding_weights is always a plain list (the
  # PartitionedVariable case was unwrapped above), so the original's second
  # isinstance(..., PartitionedVariable) check here was dead code and has
  # been removed.
  embedding_weights = [
      ops.convert_to_tensor(w, dtype=dtype) for w in embedding_weights
  ]

  contrib_tensor_util.assert_same_float_dtype(embedding_weights +
                                              [sparse_weights])

  with ops.name_scope(name, "embedding_lookup",
                      embedding_weights + [sparse_ids,
                                           sparse_weights]) as scope:
    # Reshape higher-rank sparse ids and weights to linear segment ids.
    original_shape = sparse_ids.dense_shape
    original_rank_dim = tensor_shape.Dimension(tensor_shape.dimension_value(
        sparse_ids.dense_shape.get_shape()[0]))
    original_rank = (
        array_ops.size(original_shape)
        if original_rank_dim.value is None
        else original_rank_dim.value)
    # Collapse leading dims: [d_0, ..., d_{n-1}, d_n] -> [prod(d_0..d_{n-1}), d_n].
    sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
        math_ops.reduce_prod(
            array_ops.slice(original_shape, [0], [original_rank - 1])),
        array_ops.gather(original_shape, original_rank - 1)])
    if sparse_weights is not None:
      sparse_weights = sparse_tensor.SparseTensor(
          sparse_ids.indices,
          sparse_weights.values, sparse_ids.dense_shape)

    # Prune invalid ids and weights.
    sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)
    if combiner != "sum":
      sparse_ids, sparse_weights = _prune_invalid_weights(
          sparse_ids, sparse_weights)

    # Fill in dummy values for empty features, if necessary.
    sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(sparse_ids,
                                                                 default_id or
                                                                 0)
    if sparse_weights is not None:
      sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)

    result = embedding_ops.embedding_lookup_sparse(
        embedding_weights,
        sparse_ids,
        sparse_weights,
        combiner=combiner,
        partition_strategy=partition_strategy,
        name=None if default_id is None else scope,
        max_norm=max_norm)

    if default_id is None:
      # Broadcast is_row_empty to the same shape as embedding_lookup_result,
      # for use in Select.
      is_row_empty = array_ops.tile(
          array_ops.reshape(is_row_empty, [-1, 1]),
          array_ops.stack([1, array_ops.shape(result)[1]]))

      # Rows that were filled with the dummy id get the 0-vector instead.
      result = array_ops.where(is_row_empty,
                               array_ops.zeros_like(result),
                               result,
                               name=scope)

    # Reshape back from linear ids back into higher-dimensional dense result.
    final_result = array_ops.reshape(
        result,
        array_ops.concat([
            array_ops.slice(
                math_ops.cast(original_shape, dtypes.int32), [0],
                [original_rank - 1]),
            array_ops.slice(array_ops.shape(result), [1], [-1])
        ], 0))
    final_result.set_shape(tensor_shape.unknown_shape(
        (original_rank_dim - 1).value).concatenate(result.get_shape()[1:]))
    return final_result
def _prune_invalid_ids(sparse_ids, sparse_weights):
  """Prune invalid IDs (< 0) from the input ids and weights."""
  is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
  if sparse_weights is not None:
    # NOTE(review): ANDing with an all-True ones_like tensor is a no-op; it
    # looks like the intent was to also validate the weights here (compare
    # _prune_invalid_weights below) -- confirm before changing behavior.
    is_id_valid = math_ops.logical_and(
        is_id_valid,
        array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool))
  # Retain only the entries whose id is valid, in both tensors, so ids and
  # weights stay index-aligned.
  sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
  if sparse_weights is not None:
    sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
  return sparse_ids, sparse_weights
def _prune_invalid_weights(sparse_ids, sparse_weights):
  """Prune invalid weights (< 0) from the input ids and weights."""
  # Nothing to prune when no weights were supplied.
  if sparse_weights is None:
    return sparse_ids, sparse_weights
  # Keep only entries with strictly positive weight, applied to both tensors
  # so ids and weights remain index-aligned.
  keep_mask = math_ops.greater(sparse_weights.values, 0)
  pruned_ids = sparse_ops.sparse_retain(sparse_ids, keep_mask)
  pruned_weights = sparse_ops.sparse_retain(sparse_weights, keep_mask)
  return pruned_ids, pruned_weights
def scattered_embedding_lookup(params,
                               values,
                               dimension,
                               name=None,
                               hash_key=None):
  """Looks up embeddings using parameter hashing for each value in `values`.

  The i-th embedding component of a value v in `values` is the weight whose
  index is a fingerprint of the pair (v, i) -- "feature hashing" as explored
  for model compression in http://arxiv.org/pdf/1504.04788.pdf. This avoids
  needing a pre-determined vocabulary and keeps memory fixed regardless of
  the number of distinct features, at the cost of one hash per embedding
  scalar. Unlike shared out-of-vocabulary "hash buckets", each token's
  embedding is extremely likely to be unique.

  If `params` is a list, it represents a partition of the embedding
  parameters. Each tensor in the list should have the same length, except for
  the first ones which may have an additional element. For instance 10
  parameters can be partitioned in 4 tensors with length `[3, 3, 2, 2]`.

  Args:
    params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`.
      Each tensor must be of rank 1 with fully-defined shape.
    values: `Tensor` of values to be embedded with shape `[d0, ..., dn]`.
    dimension: Embedding dimension.
    name: An optional name for this op.
    hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
      function to combine the crosses fingerprints on SparseFeatureCrossOp
      (optional).

  Returns:
    A `Tensor` with shape `[d0, ..., dn, dimension]`.

  Raises:
    ValueError: if dimension is not positive or the partition size is invalid.
  """
  # Validate up front, then delegate to the shared implementation with no
  # candidate sampling (i.e. look up every embedding dimension).
  if dimension is None:
    raise ValueError("You must specify dimension.")
  return _sampled_scattered_embedding_lookup(
      params,
      values,
      dimension=dimension,
      sampled_candidates=None,
      hash_key=hash_key,
      name=name)
def _sampled_scattered_embedding_lookup(
    params, values, dimension=None, sampled_candidates=None, hash_key=None,
    name=None):
  """Looks up embeddings using parameter hashing for each value in `values`.

  This method looks up selected embedding dimensions if `sampled_candidates` is
  given, otherwise looks up all dimensions.

  The i-th embedding component of a value v in `values` is found by retrieving
  the weight whose index is a fingerprint of the pair (v,i).
  The concept is explored as "feature hashing" for model compression in this
  paper: http://arxiv.org/pdf/1504.04788.pdf

  Feature hashing has the pleasant effect of allowing us to compute an embedding
  without needing a pre-determined vocabulary, relieving some amount of process
  complexity. It also allows for us to maintain embeddings for possibly
  trillions of features with a fixed amount of memory.

  Note that this is superior to out-of-vocabulary shared "hash buckets" in that
  the embedding is extremely likely to be unique for each token as opposed to
  being shared across probably-colliding tokens. The price is that we must
  compute a hash once for each scalar in the token's embedding as opposed to
  once per token.

  If `params` is a list, it represents a partition of the embedding parameters.
  Each tensor in the list should have the same length, except for the first ones
  which may have an additional element. For instance 10 parameters can be
  partitioned in 4 tensors with length `[3, 3, 2, 2]`.

  Args:
    params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`.
      Each tensor must be of rank 1 with fully-defined shape.
    values: `Tensor` of values to be embedded with shape `[d0, ..., dn]`.
    dimension: Embedding dimension. The user must specify either `dimension` or
      `sampled_candidates`.
    sampled_candidates: An optional `Tensor` of slice indices to keep along the
      final dimension with shape `[d0, ..., dn, N]`. If given, `dimension` is
      ignored. If `None`, looks up all candidates.
    hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
      function to combine the crosses fingerprints on SparseFeatureCrossOp
      (optional).
    name: An optional name for this op.

  Returns:
    A `Tensor` with shape `[d0, ..., dn, dimension]`.
    If `sampled_candidates` is given, the output shape is `[d0, ..., dn, N]`

  Raises:
    ValueError: if dimension is not positive or the partition size is invalid.
  """
  # Normalize params to a plain list of rank-1 tensors.
  if isinstance(params, variables.PartitionedVariable):
    params = list(params)
  if not isinstance(params, list):
    params = [params]
  with ops.name_scope(name, "scattered_embedding_lookup",
                      params + [dimension, values]):
    # Flatten the values
    values_shape = array_ops.shape(values)
    values = array_ops.reshape(values, [-1, 1])
    if sampled_candidates is None:
      # No sampling: every value is paired with every dimension index
      # [0, dimension).
      if dimension is None:
        raise ValueError(
            "You must specify either dimension or sampled_candidates.")
      if dimension <= 0:
        raise ValueError("Dimension must be >0. Given is %d" % dimension)
      sampled_candidates = array_ops.tile(array_ops.expand_dims(
          math_ops.range(0, dimension), 0), array_ops.shape(values))
    else:
      # Sampling: infer `dimension` from the last axis of sampled_candidates
      # and assert (at graph run time) that its shape matches values' shape.
      dimension = array_ops.shape(sampled_candidates)[
          math_ops.subtract(array_ops.rank(sampled_candidates), 1)]
      sampled_candidates_shape = array_ops.shape(sampled_candidates)
      dimension_tensor = array_ops.reshape(dimension, shape=[1,])
      expected_shape = array_ops.concat([values_shape, dimension_tensor], 0)
      with ops.control_dependencies([control_flow_ops.Assert(
          math_ops.reduce_all(math_ops.equal(sampled_candidates_shape,
                                             expected_shape)),
          ["The shape of sampled_candidates: ", sampled_candidates_shape,
           " does not match the shape of values: ", values_shape])]):
        # Flatten sampled_candidates, same way as values are flattened.
        sampled_candidates = array_ops.reshape(sampled_candidates,
                                               [-1, dimension])
    # Partition sizes must be statically known so ids can be bounded below.
    num_partitions = len(params)
    partition_sizes = []
    for p in range(num_partitions):
      shape = params[p].get_shape()
      shape.assert_has_rank(1)
      shape.assert_is_fully_defined()
      partition_sizes.append(tensor_shape.dimension_value(shape[0]))
    num_params = sum(partition_sizes)  # Total number of parameters.
    # Assert the size of each partition.
    for p in range(num_partitions):
      expected_size = (num_params - p - 1) // num_partitions + 1
      if partition_sizes[p] != expected_size:
        raise ValueError("Tensor %d in params has size %d, expected %d." %
                         (p, partition_sizes[p], expected_size))
    # With two values v1 and v2 and 3 dimensions, we will cross
    # [[0, 1, 2], [0, 1, 2]] with [[v1], [v2]].
    tensors_to_cross = [sampled_candidates, values]
    ids = sparse_feature_cross_op.sparse_feature_cross(
        tensors_to_cross, hashed_output=True, num_buckets=num_params,
        hash_key=hash_key)
    ids = sparse_ops.sparse_tensor_to_dense(ids)
    # No need to validate the indices since we have checked the params
    # dimensions and we know the largest id.
    result = embedding_ops.embedding_lookup(
        params, ids, partition_strategy="div")
    # Restore the original leading shape, with the embedding dim appended.
    return array_ops.reshape(result,
                             array_ops.concat([values_shape, [dimension]], 0))
def scattered_embedding_lookup_sparse(params,
                                      sparse_values,
                                      dimension,
                                      combiner=None,
                                      default_value=None,
                                      name=None,
                                      hash_key=None):
  """Looks up embeddings of a sparse feature using parameter hashing.

  See `tf.contrib.layers.scattered_embedding_lookup` for embedding with hashing.

  Args:
    params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`.
      Each tensor must be of rank 1 with fully-defined shape.
    sparse_values: A 2-D `SparseTensor` containing the values to be embedded.
      Some rows may be empty.
    dimension: Embedding dimension
    combiner: A string specifying how to combine embedding results for each
      entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
      the default.
    default_value: The value to use for an entry with no features.
    name: An optional name for this op.
    hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
      function to combine the crosses fingerprints on SparseFeatureCrossOp
      (optional).

  Returns:
    Dense tensor with shape [N, dimension] with N the number of rows in
      sparse_values.

  Raises:
    TypeError: If sparse_values is not a SparseTensor.
    ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
  """
  if combiner is None:
    logging.warn("The default value of combiner will change from \"mean\" "
                 "to \"sqrtn\" after 2016/11/01.")
    combiner = "mean"
  # Normalize params to a plain list.
  if isinstance(params, variables.PartitionedVariable):
    params = list(params)
  if not isinstance(params, list):
    params = [params]
  if not isinstance(sparse_values, sparse_tensor.SparseTensor):
    raise TypeError("sparse_values must be SparseTensor")
  with ops.name_scope(name, "scattered_embedding_lookup_sparse",
                      params + [sparse_values]) as scope:
    # Fill in the empty rows.
    if default_value is None:
      # Random default values to reduce the risk of collision.
      if sparse_values.dtype == dtypes.string:
        default_value = "6ZxWzWOHxZ"
      else:
        default_value = 1288896567
    sparse_values, _ = sparse_ops.sparse_fill_empty_rows(
        sparse_values, default_value)
    # Row index of each value becomes its segment id for the combine step.
    segment_ids = sparse_values.indices[:, 0]
    if segment_ids.dtype != dtypes.int32:
      segment_ids = math_ops.cast(segment_ids, dtypes.int32)
    values = sparse_values.values
    # Deduplicate so each distinct value is embedded only once; `idx` maps
    # each original value back to its row in `embeddings`.
    values, idx = array_ops.unique(values)
    embeddings = scattered_embedding_lookup(
        params, values, dimension, hash_key=hash_key)
    # Combine per-row embeddings according to `combiner`.
    if combiner == "sum":
      embeddings = math_ops.sparse_segment_sum(embeddings, idx, segment_ids,
                                               name=scope)
    elif combiner == "mean":
      embeddings = math_ops.sparse_segment_mean(embeddings, idx, segment_ids,
                                                name=scope)
    elif combiner == "sqrtn":
      embeddings = math_ops.sparse_segment_sqrt_n(embeddings, idx, segment_ids,
                                                  name=scope)
    else:
      raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'.")
    return embeddings
def embedding_lookup_unique(params, ids, partition_strategy="mod", name=None):
  """Version of embedding_lookup that avoids duplicate lookups.

  This can save communication in the case of repeated ids: each distinct id
  is looked up exactly once and the results are gathered back into place.
  Same interface as embedding_lookup, except that multi-dimensional `ids`
  are supported without callers having to reshape input/output around gather.

  Args:
    params: A list of tensors with the same shape and type, or a
      `PartitionedVariable`. Shape `[index, d1, d2, ...]`.
    ids: A one-dimensional `Tensor` with type `int32` or `int64` containing
      the ids to be looked up in `params`. Shape `[ids1, ids2, ...]`.
    partition_strategy: A string specifying the partitioning strategy, relevant
      if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
      is `"mod"`.
    name: A name for this operation (optional).

  Returns:
    A `Tensor` with the same type as the tensors in `params` and dimension of
    `[ids1, ids2, d1, d2, ...]`.

  Raises:
    ValueError: If `params` is empty.
  """
  with ops.name_scope(name, "EmbeddingLookupUnique", [params, ids]):
    ids = ops.convert_to_tensor(ids)
    ids_shape = array_ops.shape(ids)
    # Flatten to 1-D so `unique` can deduplicate across all dimensions.
    flat_ids = array_ops.reshape(
        ids, math_ops.reduce_prod(ids_shape, keepdims=True))
    distinct_ids, positions = array_ops.unique(flat_ids)
    # Look up each distinct id once, then scatter results back via gather.
    distinct_embeds = embedding_ops.embedding_lookup(params, distinct_ids,
                                                     partition_strategy)
    flat_embeds = array_ops.gather(distinct_embeds, positions)
    # Restore the original id shape, with the embedding dims appended.
    out_shape = array_ops.concat(
        [ids_shape, array_ops.shape(distinct_embeds)[1:]], 0)
    result = array_ops.reshape(flat_embeds, out_shape)
    result.set_shape(ids.get_shape().concatenate(
        distinct_embeds.get_shape()[1:]))
    return result
def _sampled_scattered_embedding_lookup_sparse(params,
                                               sp_values,
                                               dimension=None,
                                               sampled_candidates=None,
                                               hash_key=None,
                                               with_sign_hash=False,
                                               name=None):
  """Looks up embeddings using parameter hashing for sparse values.

  This method looks up selected embedding dimensions if `sampled_candidates` is
  given, otherwise looks up all dimensions.

  The i-th embedding component of a value v in `values` is found by retrieving
  the weight whose index is a fingerprint of the pair (v,i).
  The concept is explored as "feature hashing" for model compression in this
  paper: http://arxiv.org/pdf/1504.04788.pdf

  This is logically equivalent to:
  * Transforming `sp_values` (which has shape `[d0, d1]`) into a one-hot
    `Tensor` of shape `[d0, N]`.
  * Multiplying with a `Tensor` `h` of shape `[N, dimension]`, where
    `h(i, j) = params[hash(i, j)]`.

  Args:
    params: A float `Tensor` with rank 1 and fully-defined shape.
    sp_values: A 2D `SparseTensor` to be embedded with shape `[d0, d1]`.
    dimension: An int `Tensor` of the final dimension. The user needs to provide
      either `dimension` or `sampled_candidates`.
    sampled_candidates: An optional `Tensor` of column indices to keep along
      the final dimension with shape `[d0, N]`. If given, `dimension` is
      ignored. If `None`, looks up all candidates.
    hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
      function to combine the crosses fingerprints on SparseFeatureCrossOp
      (optional).
    with_sign_hash: A `bool` indicating whether `h(i, j)` should be multiplied
      by `+1` or `-1`, where the value selected is determined by hashing
      `(i, j)`. This is often necessary to remove bias resulting from hash
      collisions.
    name: An optional name for this op.

  Returns:
    A `Tensor` of shape `[d0, dimension]`.
    If `sampled_candidates` is given, the output shape is `[d0, N]`.

  Raises:
    TypeError: If sp_values is not `SparseTensor`.
    ValueError: If both `dimension` and `sampled_candidates` are `None`.
  """
  if not isinstance(sp_values, sparse_tensor.SparseTensor):
    raise TypeError("sp_values must be SparseTensor")
  with ops.name_scope(
      name=name,
      default_name="sampled_scattered_embedding_lookup_sparse",
      values=[sp_values, params, dimension, sampled_candidates]) as name_scope:
    # Row index of each sparse value; used to sum per-row at the end.
    segment_ids = sp_values.indices[:, 0]
    if sampled_candidates is not None:
      # Tile sampled_candidates so there is one line corresponding to each
      # element in sp_values.values
      sampled_candidates = array_ops.gather(sampled_candidates, segment_ids)
    embeddings = _sampled_scattered_embedding_lookup(
        params, sp_values.values, dimension=dimension,
        sampled_candidates=sampled_candidates,
        hash_key=hash_key, name="values_lookup")
    if with_sign_hash:
      # Hash each (value, dim) pair to +/-1 and multiply, to de-bias
      # hash collisions.
      signs = _sampled_scattered_embedding_lookup(
          array_ops.constant([-1., 1.]), sp_values.values, dimension=dimension,
          sampled_candidates=sampled_candidates, hash_key=hash_key,
          name="signs_lookup")
      embeddings = math_ops.multiply(signs, embeddings, name="signs_hash")
    if segment_ids.dtype != dtypes.int32:
      segment_ids = math_ops.cast(segment_ids, dtypes.int32)
    # Sum the per-value embeddings within each row of sp_values.
    num_segments = array_ops.shape(sp_values)[0]
    return math_ops.unsorted_segment_sum(embeddings, segment_ids,
                                         num_segments=num_segments,
                                         name=name_scope)
def embedding_lookup_sparse_with_distributed_aggregation(
    params,
    sp_ids,
    sp_weights,
    partition_strategy="mod",
    name=None,
    combiner=None,
    max_norm=None):
  """Computes embeddings for the given ids and weights.
  Embeddings belonging to same param are aggregated on that device first. This
  op is intended to decrease data transmission and improve parallelism. See
  `tf.nn.embedding_lookup_sparse` for the functionality and example of this op.
  Args:
    params: A single tensor representing the complete embedding tensor,
      or a list of P tensors all of same shape except for the first dimension,
      representing sharded embedding tensors. Alternatively, a
      `PartitionedVariable`, created by partitioning along dimension 0. Each
      element must be appropriately sized for the given `partition_strategy`.
    sp_ids: N x M SparseTensor of int64 ids (typically from FeatureValueToId),
      where N is typically batch size and M is arbitrary.
    sp_weights: either a SparseTensor of float / double weights, or None to
      indicate all weights should be taken to be 1. If specified, sp_weights
      must have exactly the same shape and indices as sp_ids.
    partition_strategy: A string specifying the partitioning strategy, relevant
      if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
      is `"mod"`. See `tf.nn.embedding_lookup` for more details.
    name: Optional name for the op.
    combiner: A string specifying the reduction op. Currently "mean", "sqrtn"
      and "sum" are supported.
      "sum" computes the weighted sum of the embedding results for each row.
      "mean" is the weighted sum divided by the total weight.
      "sqrtn" is the weighted sum divided by the square root of the sum of the
        squares of the weights.
    max_norm: If not None, each embedding is normalized to have l2 norm equal
      to max_norm before combining.
  Returns:
    A dense tensor representing the combined embeddings for the
    sparse ids. For each row in the dense tensor represented by sp_ids, the op
    looks up the embeddings for all ids in that row, multiplies them by the
    corresponding weight, and combines these embeddings as specified.
  Raises:
    TypeError: If sp_ids is not a SparseTensor, or if sp_weights is neither
      None nor SparseTensor.
    ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
  """
  if combiner is None:
    logging.warn("The default value of combiner will change from \"mean\" "
                 "to \"sqrtn\" after 2016/11/01.")
    combiner = "mean"
  if combiner not in ("mean", "sqrtn", "sum"):
    raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'")
  if isinstance(params, variables.PartitionedVariable):
    params = list(params)  # Iterate to get the underlying Variables.
  if not isinstance(params, list):
    params = [params]
  if not isinstance(sp_ids, sparse_tensor.SparseTensor):
    raise TypeError("sp_ids must be SparseTensor")
  # Weights are optional; when given they must align exactly with sp_ids.
  ignore_weights = sp_weights is None
  if not ignore_weights:
    if not isinstance(sp_weights, sparse_tensor.SparseTensor):
      raise TypeError("sp_weights must be either None or SparseTensor")
    sp_ids.values.get_shape().assert_is_compatible_with(
        sp_weights.values.get_shape())
    sp_ids.indices.get_shape().assert_is_compatible_with(
        sp_weights.indices.get_shape())
    sp_ids.dense_shape.get_shape().assert_is_compatible_with(
        sp_weights.dense_shape.get_shape())
    # TODO(yleon): Add enhanced node assertions to verify that sp_ids and
    # sp_weights have equal indices and shapes.
  with ops.name_scope(name, "embedding_lookup_sparse",
                      params + [sp_ids]) as name:
    # The first sparse index of each entry identifies its output row; it is
    # used as the segment id when combining per-row results below.
    segment_ids = sp_ids.indices[:, 0]
    if segment_ids.dtype != dtypes.int32:
      segment_ids = math_ops.cast(segment_ids, dtypes.int32)
    ids = sp_ids.values
    if ignore_weights:
      # Without weights, duplicate ids can share one lookup via unique().
      ids, idx = array_ops.unique(ids)
    else:
      idx = None
    weights = None if ignore_weights else sp_weights.values
    # The helper performs the per-partition gather, weighting, and the
    # "sum" part of the aggregation, colocated with each shard.
    embeddings = _embedding_lookup_with_distributed_aggregation(
        params,
        ids,
        partition_strategy=partition_strategy,
        max_norm=max_norm,
        weights=weights,
        idx=idx,
        segment_ids=segment_ids)
    # Set weights to all one if ignore weights.
    if ignore_weights:
      weights = array_ops.fill([array_ops.shape(segment_ids)[0]], 1)
    if weights.dtype != embeddings.dtype:
      weights = math_ops.cast(weights, embeddings.dtype)
    # Reshape weights so they broadcast against the embedding dimensions.
    ones = array_ops.fill(
        array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0), 1)
    bcast_weights_shape = array_ops.concat([array_ops.shape(weights), ones], 0)
    orig_weights_shape = weights.get_shape()
    weights = array_ops.reshape(weights, bcast_weights_shape)
    if embeddings.get_shape().ndims is not None:
      weights.set_shape(
          orig_weights_shape.concatenate(
              [1 for _ in range(embeddings.get_shape().ndims - 1)]))
    # The helper already produced the weighted sum; here we only apply the
    # normalization implied by the combiner ("sum" needs no division).
    if combiner == "mean":
      weight_sum = math_ops.segment_sum(weights, segment_ids)
      embeddings = math_ops.div(embeddings, weight_sum)
    elif combiner == "sqrtn":
      weights_squared = math_ops.pow(weights, 2)
      weight_sum = math_ops.segment_sum(weights_squared, segment_ids)
      weight_sum_sqrt = math_ops.sqrt(weight_sum)
      embeddings = math_ops.div(embeddings, weight_sum_sqrt)
    elif combiner != "sum":
      assert False, "Unrecognized combiner"
    return embeddings
def _do_gather(params, ids, name=None):
  """Gather `ids` from `params`, using `sparse_read` for resource variables."""
  if not isinstance(params, resource_variable_ops.ResourceVariable):
    return array_ops.gather(params, ids, name=name)
  return params.sparse_read(ids, name=name)
def _embedding_lookup_with_distributed_aggregation(params,
                                                   ids,
                                                   partition_strategy="mod",
                                                   name=None,
                                                   max_norm=None,
                                                   weights=None,
                                                   idx=None,
                                                   segment_ids=None):
  """Lookup helper for embedding_lookup_sparse_with_distributed_aggregation.

  Gathers embeddings for `ids` from (possibly sharded) `params`, applies the
  optional per-id `weights`, and performs the segment-sum aggregation on the
  device holding each shard before concatenating results across shards.
  """
  if params is None or params == []:  # pylint: disable=g-explicit-bool-comparison
    raise ValueError("Need at least one param")
  if isinstance(params, variables.PartitionedVariable):
    params = list(params)  # Iterate to get the underlying Variables.
  if not isinstance(params, list):
    params = [params]
  def maybe_normalize(x):
    # Clip each embedding (all axes except the leading id axis) to max_norm.
    if max_norm is not None:
      if x.get_shape().ndims is not None:
        ndims = x.get_shape().ndims
      else:
        ndims = array_ops.size(array_ops.shape(x))
      return clip_ops.clip_by_norm(x, max_norm, axes=list(range(1, ndims)))
    return x
  with ops.name_scope(name, "embedding_lookup_with_distributed_aggregation",
                      params + [ids]) as name:
    np = len(params)  # Number of partitions
    # Preserve the resource variable status to avoid accidental dense reads.
    if not any(
        isinstance(p, resource_variable_ops.ResourceVariable) for p in params):
      params = ops.convert_n_to_tensor_or_indexed_slices(params, name="params")
    if np == 1:
      # Single shard: gather, weight and segment-sum entirely on its device.
      with ops.colocate_with(params[0]):
        ret = maybe_normalize(_do_gather(params[0], ids))
        ignore_weights = weights is None
        if not ignore_weights:
          if weights.dtype != ret.dtype:
            weights = math_ops.cast(weights, ret.dtype)
          # Reshape to allow broadcast
          ones = array_ops.fill(
              array_ops.expand_dims(array_ops.rank(ret) - 1, 0), 1)
          bcast_weights_shape = array_ops.concat(
              [array_ops.shape(weights), ones], 0)
          orig_weights_shape = weights.get_shape()
          weights = array_ops.reshape(weights, bcast_weights_shape)
          # Set weights shape after reshape
          if ret.get_shape().ndims is not None:
            weights.set_shape(
                orig_weights_shape.concatenate(
                    [1 for _ in range(ret.get_shape().ndims - 1)]))
          ret *= weights
          return math_ops.segment_sum(ret, segment_ids, name=name)
        else:
          # Unweighted path uses the unique-id indices produced by caller.
          return math_ops.sparse_segment_sum(ret, idx, segment_ids, name=name)
    else:
      ids = ops.convert_to_tensor(ids, name="ids")
      flat_ids = array_ops.reshape(ids, [-1])
      original_indices = math_ops.range(array_ops.size(flat_ids))
      # Create p_assignments and set new_ids depending on the strategy.
      if partition_strategy == "mod":
        p_assignments = flat_ids % np
        new_ids = flat_ids // np
      elif partition_strategy == "div":
        # Compute num_total_ids as the sum of dim-0 of params, then assign to
        # partitions based on a constant number of ids per partition. Optimize
        # if we already know the full shape statically.
        dim_0_size = params[0].get_shape().dims[0]
        for p in xrange(1, np):
          dim_0_size += params[p].get_shape().dims[0]
        if dim_0_size.value:
          num_total_ids = constant_op.constant(dim_0_size, flat_ids.dtype)
        else:
          # Shape is not fully static; compute per-shard sizes at run time.
          dim_0_sizes = []
          for p in xrange(np):
            if params[p].get_shape().dims[0].value is not None:
              dim_0_sizes.append(params[p].get_shape().dims[0].value)
            else:
              with ops.colocate_with(params[p]):
                dim_0_sizes.append(array_ops.shape(params[p])[0])
          num_total_ids = math_ops.reduce_sum(
              math_ops.cast(array_ops.stack(dim_0_sizes), flat_ids.dtype))
        ids_per_partition = num_total_ids // np
        extras = num_total_ids % np
        p_assignments = math_ops.maximum(flat_ids // (ids_per_partition + 1), (
            flat_ids - extras) // ids_per_partition)
        # Emulate a conditional using a boolean indicator tensor
        is_in_first_extras_partitions = math_ops.cast(p_assignments < extras,
                                                      flat_ids.dtype)
        new_ids = (is_in_first_extras_partitions * (flat_ids %
                                                    (ids_per_partition + 1)) +
                   (1 - is_in_first_extras_partitions) * (
                       (flat_ids - extras) % ids_per_partition))
      else:
        raise ValueError("Unrecognized partition strategy: " +
                         partition_strategy)
      # Cast partition assignments to int32 for use in dynamic_partition.
      # There really should not be more than 2^32 partitions.
      p_assignments = math_ops.cast(p_assignments, dtypes.int32)
      # Partition list of ids based on assignments into np separate lists
      gather_ids = data_flow_ops.dynamic_partition(new_ids, p_assignments, np)
      # Similarly, partition the original indices.
      pindices = data_flow_ops.dynamic_partition(original_indices,
                                                 p_assignments, np)
      # Do np separate lookups, finding embeddings for plist[p] in params[p]
      partitioned_result = []
      for p in xrange(np):
        with ops.colocate_with(params[p]):
          partitioned_result.append(_do_gather(params[p], gather_ids[p]))
      ignore_weights = weights is None
      if not ignore_weights:
        # Partition weights according to pindices.
        partitioned_weight = []
        for p in xrange(np):
          partitioned_weight.append(array_ops.gather(weights, pindices[p]))
      # Reshape each partition result so its trailing (embedding) dims are
      # known; prefer the statically merged shape when fully defined.
      element_shape = params[0].get_shape()[1:]
      for p in params[1:]:
        element_shape = element_shape.merge_with(p.get_shape()[1:])
      if element_shape.is_fully_defined():
        for p in xrange(np):
          with ops.colocate_with(params[p]):
            partitioned_result[p] = array_ops.reshape(
                partitioned_result[p],
                array_ops.concat([array_ops.shape(pindices[p]), element_shape],
                                 0))
      else:
        with ops.colocate_with(params[0]):
          params_shape = array_ops.shape(params[0])
        for p in xrange(np):
          with ops.colocate_with(params[p]):
            partitioned_result[p] = array_ops.reshape(
                partitioned_result[p],
                array_ops.concat([
                    array_ops.shape(pindices[p]), array_ops.slice(
                        params_shape, [1], [-1])
                ], 0))
      # Normalize each partition result.
      for p in xrange(np):
        with ops.colocate_with(params[p]):
          partitioned_result[p] = maybe_normalize(partitioned_result[p])
      if not ignore_weights:
        # Multiply each partition result with partition weights.
        for p in xrange(np):
          with ops.colocate_with(params[p]):
            if partitioned_weight[p].dtype != partitioned_result[p].dtype:
              partitioned_weight[p] = math_ops.cast(partitioned_weight[p],
                                                    partitioned_result[p].dtype)
            # Reshape partition weights.
            ones = array_ops.fill(
                array_ops.expand_dims(
                    array_ops.rank(partitioned_result[p]) - 1, 0), 1)
            bcast_weights_shape = array_ops.concat(
                [array_ops.shape(partitioned_weight[p]), ones], 0)
            orig_weights_shape = partitioned_weight[p].get_shape()
            partitioned_weight[p] = array_ops.reshape(partitioned_weight[p],
                                                      bcast_weights_shape)
            if partitioned_result[p].get_shape().ndims is not None:
              partitioned_weight[p].set_shape(
                  orig_weights_shape.concatenate([
                      1
                      for _ in range(partitioned_result[p].get_shape().ndims -
                                     1)
                  ]))
            partitioned_result[p] *= partitioned_weight[p]
      partitioned_segment_ids = []
      for p in xrange(np):
        if not ignore_weights:
          # Partition segment_ids according to pindices.
          p_segment_ids = array_ops.gather(segment_ids, pindices[p])
          # Number the p_segment_ids to meet segment_sum's requirements. Note
          # that unique_p_segment_ids contains unique segment ids of this
          # partition and these ids' order is unchanged.
          unique_p_segment_ids, unique_p_segment_idx = array_ops.unique(
              p_segment_ids)
          partitioned_segment_ids.append(unique_p_segment_ids)
          # segment_sum this partition's result.
          with ops.colocate_with(params[p]):
            partitioned_result[p] = math_ops.segment_sum(
                partitioned_result[p], unique_p_segment_idx)
        else:
          # When ignoring weights, we need to get the indices of elements in
          # idx and segment_ids.
          _, exclude_idx = array_ops.setdiff1d(idx, pindices[p])
          all_idx = math_ops.range(array_ops.shape(idx)[0])
          _, include_idx = array_ops.setdiff1d(all_idx, exclude_idx)
          # Gather segment_ids and idx according to these indices.
          p_segment_ids = array_ops.gather(segment_ids, include_idx)
          p_idx = array_ops.gather(idx, include_idx)
          # Number the p_segment_ids, same as the weighted case above.
          unique_p_segment_ids, unique_p_segment_idx = array_ops.unique(
              p_segment_ids)
          _, unique_p_idx_idx = array_ops.unique(p_idx)
          partitioned_segment_ids.append(unique_p_segment_ids)
          with ops.colocate_with(params[p]):
            partitioned_result[p] = math_ops.sparse_segment_sum(
                partitioned_result[p], unique_p_idx_idx, unique_p_segment_idx)
      # Concat each partition's segment_ids and result for final segment_sum.
      concat_segment_ids = array_ops.concat(partitioned_segment_ids, 0)
      concat_partitioned_result = array_ops.concat(partitioned_result, 0)
      return math_ops.unsorted_segment_sum(
          concat_partitioned_result,
          concat_segment_ids,
          math_ops.reduce_max(concat_segment_ids) + 1,
          name=name)
| apache-2.0 |
Kazade/NeHe-Website | google_appengine/lib/django-1.5/django/core/management/templates.py | 102 | 12715 | import cgi
import errno
import mimetypes
import os
import posixpath
import re
import shutil
import stat
import sys
import tempfile
try:
from urllib.request import urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve
from optparse import make_option
from os import path
import django
from django.template import Template, Context
from django.utils import archive
from django.utils._os import rmtree_errorhandler
from django.core.management.base import BaseCommand, CommandError
from django.core.management.commands.makemessages import handle_extensions
_drive_re = re.compile('^([a-z]):', re.I)
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
class TemplateCommand(BaseCommand):
    """
    Copies either a Django application layout template or a Django project
    layout template into the specified directory.
    :param style: A color style object (see django.core.management.color).
    :param app_or_project: The string 'app' or 'project'.
    :param name: The name of the application or project.
    :param directory: The directory to which the template should be copied.
    :param options: The additional variables passed to project or app templates
    """
    args = "[name] [optional destination directory]"
    option_list = BaseCommand.option_list + (
        make_option('--template',
                    action='store', dest='template',
                    help='The dotted import path to load the template from.'),
        make_option('--extension', '-e', dest='extensions',
                    action='append', default=['py'],
                    help='The file extension(s) to render (default: "py"). '
                         'Separate multiple extensions with commas, or use '
                         '-e multiple times.'),
        make_option('--name', '-n', dest='files',
                    action='append', default=[],
                    help='The file name(s) to render. '
                         'Separate multiple extensions with commas, or use '
                         '-n multiple times.')
    )
    requires_model_validation = False
    # Can't import settings during this command, because they haven't
    # necessarily been created.
    can_import_settings = False
    # The supported URL schemes
    url_schemes = ['http', 'https', 'ftp']
    def handle(self, app_or_project, name, target=None, **options):
        """Validate the name/target, then copy and render the template tree."""
        self.app_or_project = app_or_project
        # Temp dirs/files created by download()/extract(), removed at the end.
        self.paths_to_remove = []
        self.verbosity = int(options.get('verbosity'))
        # If it's not a valid directory name.
        if not re.search(r'^[_a-zA-Z]\w*$', name):
            # Provide a smart error message, depending on the error.
            if not re.search(r'^[_a-zA-Z]', name):
                message = ('make sure the name begins '
                           'with a letter or underscore')
            else:
                message = 'use only numbers, letters and underscores'
            raise CommandError("%r is not a valid %s name. Please %s." %
                               (name, app_or_project, message))
        # if some directory is given, make sure it's nicely expanded
        if target is None:
            top_dir = path.join(os.getcwd(), name)
            try:
                os.makedirs(top_dir)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    message = "'%s' already exists" % top_dir
                else:
                    message = e
                raise CommandError(message)
        else:
            top_dir = os.path.abspath(path.expanduser(target))
            if not os.path.exists(top_dir):
                raise CommandError("Destination directory '%s' does not "
                                   "exist, please create it first." % top_dir)
        extensions = tuple(
            handle_extensions(options.get('extensions'), ignored=()))
        extra_files = []
        for file in options.get('files'):
            extra_files.extend(map(lambda x: x.strip(), file.split(',')))
        if self.verbosity >= 2:
            self.stdout.write("Rendering %s template files with "
                              "extensions: %s\n" %
                              (app_or_project, ', '.join(extensions)))
            self.stdout.write("Rendering %s template files with "
                              "filenames: %s\n" %
                              (app_or_project, ', '.join(extra_files)))
        # Template context: all command options plus the computed name/dir.
        base_name = '%s_name' % app_or_project
        base_subdir = '%s_template' % app_or_project
        base_directory = '%s_directory' % app_or_project
        context = Context(dict(options, **{
            base_name: name,
            base_directory: top_dir,
        }), autoescape=False)
        # Setup a stub settings environment for template rendering
        from django.conf import settings
        if not settings.configured:
            settings.configure()
        template_dir = self.handle_template(options.get('template'),
                                            base_subdir)
        prefix_length = len(template_dir) + 1
        for root, dirs, files in os.walk(template_dir):
            path_rest = root[prefix_length:]
            relative_dir = path_rest.replace(base_name, name)
            if relative_dir:
                target_dir = path.join(top_dir, relative_dir)
                if not path.exists(target_dir):
                    os.mkdir(target_dir)
            # Prune hidden and bytecode-cache dirs; iterate over a copy since
            # we mutate dirs in place to stop os.walk from descending.
            for dirname in dirs[:]:
                if dirname.startswith('.') or dirname == '__pycache__':
                    dirs.remove(dirname)
            for filename in files:
                if filename.endswith(('.pyo', '.pyc', '.py.class')):
                    # Ignore some files as they cause various breakages.
                    continue
                old_path = path.join(root, filename)
                new_path = path.join(top_dir, relative_dir,
                                     filename.replace(base_name, name))
                if path.exists(new_path):
                    raise CommandError("%s already exists, overlaying a "
                                       "project or app into an existing "
                                       "directory won't replace conflicting "
                                       "files" % new_path)
                # Only render the Python files, as we don't want to
                # accidentally render Django templates files
                with open(old_path, 'rb') as template_file:
                    content = template_file.read()
                if filename.endswith(extensions) or filename in extra_files:
                    content = content.decode('utf-8')
                    template = Template(content)
                    content = template.render(context)
                    content = content.encode('utf-8')
                with open(new_path, 'wb') as new_file:
                    new_file.write(content)
                if self.verbosity >= 2:
                    self.stdout.write("Creating %s\n" % new_path)
                try:
                    shutil.copymode(old_path, new_path)
                    self.make_writeable(new_path)
                except OSError:
                    self.stderr.write(
                        "Notice: Couldn't set permission bits on %s. You're "
                        "probably using an uncommon filesystem setup. No "
                        "problem." % new_path, self.style.NOTICE)
        if self.paths_to_remove:
            if self.verbosity >= 2:
                self.stdout.write("Cleaning up temporary files.\n")
            for path_to_remove in self.paths_to_remove:
                if path.isfile(path_to_remove):
                    os.remove(path_to_remove)
                else:
                    shutil.rmtree(path_to_remove,
                                  onerror=rmtree_errorhandler)
    def handle_template(self, template, subdir):
        """
        Determines where the app or project templates are.
        Use django.__path__[0] as the default because we don't
        know into which directory Django has been installed.
        Returns the path of a local directory containing the template,
        downloading and/or extracting archives as necessary.
        """
        if template is None:
            return path.join(django.__path__[0], 'conf', subdir)
        else:
            if template.startswith('file://'):
                template = template[7:]
            expanded_template = path.expanduser(template)
            expanded_template = path.normpath(expanded_template)
            if path.isdir(expanded_template):
                return expanded_template
            if self.is_url(template):
                # downloads the file and returns the path
                absolute_path = self.download(template)
            else:
                absolute_path = path.abspath(expanded_template)
            if path.exists(absolute_path):
                return self.extract(absolute_path)
        raise CommandError("couldn't handle %s template %s." %
                           (self.app_or_project, template))
    def download(self, url):
        """
        Downloads the given URL and returns the file name.
        """
        def cleanup_url(url):
            # Split a URL into the trailing filename and a display version.
            tmp = url.rstrip('/')
            filename = tmp.split('/')[-1]
            if url.endswith('/'):
                display_url = tmp + '/'
            else:
                display_url = url
            return filename, display_url
        prefix = 'django_%s_template_' % self.app_or_project
        tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_download')
        self.paths_to_remove.append(tempdir)
        filename, display_url = cleanup_url(url)
        if self.verbosity >= 2:
            self.stdout.write("Downloading %s\n" % display_url)
        try:
            the_path, info = urlretrieve(url, path.join(tempdir, filename))
        except IOError as e:
            raise CommandError("couldn't download URL %s to %s: %s" %
                               (url, filename, e))
        used_name = the_path.split('/')[-1]
        # Trying to get better name from response headers
        content_disposition = info.get('content-disposition')
        if content_disposition:
            _, params = cgi.parse_header(content_disposition)
            guessed_filename = params.get('filename') or used_name
        else:
            guessed_filename = used_name
        # Falling back to content type guessing
        ext = self.splitext(guessed_filename)[1]
        content_type = info.get('content-type')
        if not ext and content_type:
            ext = mimetypes.guess_extension(content_type)
            if ext:
                guessed_filename += ext
        # Move the temporary file to a filename that has better
        # chances of being recognized by the archive utils
        if used_name != guessed_filename:
            guessed_path = path.join(tempdir, guessed_filename)
            shutil.move(the_path, guessed_path)
            return guessed_path
        # Giving up
        return the_path
    def splitext(self, the_path):
        """
        Like os.path.splitext, but takes off .tar, too
        """
        base, ext = posixpath.splitext(the_path)
        if base.lower().endswith('.tar'):
            ext = base[-4:] + ext
            base = base[:-4]
        return base, ext
    def extract(self, filename):
        """
        Extracts the given file to a temporary directory and returns
        the path of the directory with the extracted content.
        """
        prefix = 'django_%s_template_' % self.app_or_project
        tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_extract')
        self.paths_to_remove.append(tempdir)
        if self.verbosity >= 2:
            self.stdout.write("Extracting %s\n" % filename)
        try:
            archive.extract(filename, tempdir)
            return tempdir
        except (archive.ArchiveException, IOError) as e:
            raise CommandError("couldn't extract file %s to %s: %s" %
                               (filename, tempdir, e))
    def is_url(self, template):
        """
        Returns True if the name looks like a URL
        """
        if ':' not in template:
            return False
        scheme = template.split(':', 1)[0].lower()
        return scheme in self.url_schemes
    def make_writeable(self, filename):
        """
        Make sure that the file is writeable.
        Useful if our source is read-only.
        """
        if sys.platform.startswith('java'):
            # On Jython there is no os.access()
            return
        if not os.access(filename, os.W_OK):
            st = os.stat(filename)
            new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
            os.chmod(filename, new_permissions)
| bsd-3-clause |
mitocw/edx-platform | lms/djangoapps/commerce/tests/__init__.py | 4 | 3085 | # -*- coding: utf-8 -*-
""" Commerce app tests package. """
import httpretty
import mock
from django.conf import settings
from django.test import TestCase
from freezegun import freeze_time
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client
from openedx.core.djangoapps.oauth_dispatch.jwt import create_jwt_for_user
from student.tests.factories import UserFactory
# Content type used when registering fake HTTP responses with httpretty.
JSON = 'application/json'
# Fixture URLs and identifiers shared by the commerce test modules.
TEST_PUBLIC_URL_ROOT = 'http://www.example.com'
TEST_API_URL = 'http://www-internal.example.com/api'
TEST_BASKET_ID = 7
TEST_ORDER_NUMBER = '100004'
# Canned payment-processor response payload used as a fixture.
TEST_PAYMENT_DATA = {
    'payment_processor_name': 'test-processor',
    'payment_form_data': {},
    'payment_page_url': 'http://example.com/pay',
}
class EdxRestApiClientTest(TestCase):
    """ Tests to ensure the client is initialized properly. """
    # JWT scopes expected to be granted when the client builds its token.
    SCOPES = [
        'user_id',
        'email',
        'profile'
    ]
    def setUp(self):
        super(EdxRestApiClientTest, self).setUp()
        self.user = UserFactory()
    @httpretty.activate
    def test_tracking_context(self):
        """
        Ensure the tracking context is set up in the api client correctly and
        automatically.
        """
        # Freeze time so the JWT's time-based claims are reproducible.
        with freeze_time('2015-7-2'):
            # fake an E-Commerce API request.
            httpretty.register_uri(
                httpretty.POST,
                '{}/baskets/1/'.format(settings.ECOMMERCE_API_URL.strip('/')),
                status=200, body='{}',
                adding_headers={'Content-Type': JSON}
            )
            mock_tracker = mock.Mock()
            mock_tracker.resolve_context = mock.Mock(return_value={'ip': '127.0.0.1'})
            with mock.patch('openedx.core.djangoapps.commerce.utils.tracker.get_tracker', return_value=mock_tracker):
                ecommerce_api_client(self.user).baskets(1).post()
            # Verify the JWT includes the tracking context for the user
            actual_header = httpretty.last_request().headers['Authorization']
            claims = {
                'tracking_context': {
                    'lms_user_id': self.user.id,
                    'lms_ip': '127.0.0.1',
                }
            }
            expected_jwt = create_jwt_for_user(self.user, additional_claims=claims, scopes=self.SCOPES)
            expected_header = u'JWT {}'.format(expected_jwt)
            self.assertEqual(actual_header, expected_header)
    @httpretty.activate
    def test_client_unicode(self):
        """
        The client should handle json responses properly when they contain
        unicode character data.
        Regression test for ECOM-1606.
        """
        expected_content = '{"result": "Préparatoire"}'
        httpretty.register_uri(
            httpretty.GET,
            '{}/baskets/1/order/'.format(settings.ECOMMERCE_API_URL.strip('/')),
            status=200, body=expected_content,
            adding_headers={'Content-Type': JSON},
        )
        actual_object = ecommerce_api_client(self.user).baskets(1).order.get()
        self.assertEqual(actual_object, {u"result": u"Préparatoire"})
| agpl-3.0 |
dbrgn/fahrplan | fahrplan/parser.py | 1 | 8199 | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import re
import logging
# Per-language vocabulary used when parsing "human readable" date and time
# expressions. The 'days' entries are regex patterns; all other entries are
# lists of literal lowercase strings matched against user input.
keywords = {
    'de': {
        'now': ['jetzt', 'sofort', 'nun'],
        'noon': ['mittag'],
        'midnight': ['mitternacht'],
        'today': ["heute"],
        'tomorrow': ["morgen"],
        'at': ['um', 'am'],
        'days': [r'in (\d+) tagen'],
        'weekdays': ["montag", "dienstag", "mittwoch", "donnerstag", "freitag", "samstag", "sonntag"],
    },
    'en': {
        'now': ['now', 'right now', 'immediately'],
        'noon': ['noon'],
        'midnight': ['midnight'],
        'today': ["today"],
        'tomorrow': ["tomorrow"],
        'at': ['at'],
        'days': [r'in (\d+) days'],
        'weekdays': ["monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"],
    },
    'fr': {
        # Accept the correct spelling "maintenant"; the historical
        # misspelling "maitenant" is kept for backward compatibility.
        'now': ['maintenant', 'maitenant'],
        'noon': ['midi'],
        'midnight': ['minuit'],
        'today': ["aujourd'hui"],
        'tomorrow': ["demain"],
        'days': [r"dans (\d+) jours"],
        'at': [],  # TODO: "à" clashes with top level keywords
        'weekdays': ["lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi", "dimanche"],
    },
}
def _process_tokens(tokens, sloppy_validation=False):
    """Parse input tokens.
    Take a list of tokens (usually ``sys.argv[1:]``) and parse the "human
    readable" input into a format suitable for machines.
    Args:
        tokens: List of tokens (usually ``sys.argv[1:]``).
        sloppy_validation: Set to True to enable less strict validation. Used
            mainly for testing, default False.
    Returns:
        A 2-tuple containing the unmapped data dictionary and the language
        string. For example:
        ({'to': 'bern', 'from': 'zürich', 'departure': '18:00'}, 'de')
    Raises:
        ValueError: If "from" or "to" arguments are missing or if both
            departure *and* arrival time are specified (as long as
            sloppy_validation is disabled).
    """
    if len(tokens) < 2:
        return {}, None
    keyword_dicts = {
        'en': {'from': 'from', 'to': 'to', 'via': 'via',
               'departure': 'departure', 'arrival': 'arrival'},
        'de': {'from': 'von', 'to': 'nach', 'via': 'via',
               'departure': 'ab', 'arrival': 'an'},
        'fr': {'from': 'de', 'to': 'à', 'via': 'via',
               'departure': 'départ', 'arrival': 'arrivée'},
    }
    # Detect language
    language = _detect_language(keyword_dicts, tokens)
    logging.info('Detected [%s] input' % language)
    # Invert the mapping: localized keyword -> canonical API key.
    keywords = {v: k for k, v in keyword_dicts[language].items()}
    logging.debug('Using keywords: ' + ', '.join(keywords.keys()))
    # Prepare variables
    data = {}
    stack = []
    def process_stack():
        """Process the stack. First item is the key, rest is value."""
        key = keywords.get(stack[0])
        value = ' '.join(stack[1:])
        data[key] = value
        stack[:] = []
    # Process tokens: a keyword starts a new (key, value...) group; tokens
    # before the first keyword are ignored.
    for token in tokens:
        if token in keywords:
            if stack:
                process_stack()
        elif not stack:
            continue
        stack.append(token)
    if not stack:
        return {}, None
    process_stack()
    # Validate data
    if not sloppy_validation:
        if not ('from' in data and 'to' in data):
            raise ValueError('"from" and "to" arguments must be present!')
        if 'departure' in data and 'arrival' in data:
            raise ValueError('You can\'t specify both departure *and* arrival time.')
    return data, language
def _detect_language(keyword_dicts, tokens):
"""Detect the language of the tokens by finding the highest intersection
with the keywords of a specific language."""
def intersection_count(a, b):
return len(set(a).intersection(b))
counts = []
for lang, keywords in keyword_dicts.items():
count = intersection_count(keywords.values(), tokens)
counts.append((lang, count))
language = max(counts, key=lambda x: x[1])[0]
return language
def _parse_date(datestring, keywords):
"""Parse date tokens.
Args:
datestring: String containing a date specification.
keywords: Language keywords
Returns:
date string.
Raises:
ValueError: If time could not be parsed.
"""
date = None
days_shift = None
# Keywords
for i, d in enumerate(["today", "tomorrow"]):
if any([t in datestring for t in keywords[d]]):
days_shift = i
# Weekdays
for i, d in enumerate(keywords["weekdays"]):
if d in datestring:
days_shift = i - datetime.now().weekday()
if days_shift <= 0:
days_shift += 7
# Shifts
if days_shift is None:
for pattern in keywords["days"]:
days_re = re.search(pattern, datestring)
if days_re:
try:
days_shift = int(days_re.group(1))
except:
pass
if days_shift is not None:
return datetime.now() + timedelta(days=days_shift)
# Regular date strings
for dateformat in [[r"(\d{2}/\d{2}/\d{4})", "%d/%m/%Y"], [r"(\d{2}/\d{2})", "%d/%m"]]:
days_re = re.search(dateformat[0], datestring)
if days_re:
try:
date = datetime.strptime(days_re.group(1), dateformat[1])
except:
continue
if date.year == 1900:
date = date.replace(year=datetime.now().year)
break
if date is not None:
return date.strftime("%Y/%m/%d")
return None
def _parse_time(timestring, keywords):
"""Parse time tokens.
Args:
timestring: String containing a time specification.
keywords: Language keywords
Returns:
Time string.
Raises:
ValueError: If time could not be parsed.
"""
# Ignore "at" keywords
if timestring.split(' ', 1)[0] in keywords['at']:
timestring = timestring.split(' ', 1)[1]
# Parse regular time strings
# regular_time_match = re.search(r'([0-2]?[0-9])[:\-\. ]([0-9]{2})', timestring)
regular_time_match = re.search(r'(?<!/)(\d{2})(?::*)(\d{2})', timestring)
if regular_time_match:
return ':'.join(regular_time_match.groups())
timestring = timestring.lower()
if timestring in keywords['now']:
return datetime.now().strftime('%H:%M')
if timestring in keywords['noon']:
return '12:00'
if timestring in keywords['midnight']:
return '23:59' # '00:00' would be the first minute of the day, not the last one.
raise ValueError('Time is missing or could not be parsed')
def parse_input(tokens):
    """Parse input tokens.
    Take a list of tokens (usually ``sys.argv[1:]``) and parse the "human
    readable" input into a format suitable for machines. The output format
    matches the format required by the Transport API.
    Args:
        tokens: List of tokens (usually ``sys.argv[1:]``).
    Returns:
        A 2-tuple containing the data dictionary and the language string. For
        example:
        ({'to': 'bern', 'from': 'zürich'}, 'de')
    Raises:
        ValueError: If "from" or "to" arguments are missing, if both
            departure *and* arrival time are specified, or if the detected
            language is unknown.
    """
    # Process tokens, get data dict and language
    data, language = _process_tokens(tokens)
    if data == {}:
        if len(tokens) == 2:
            # Fallback when e.g. doing "fahrplan zurich basel"
            return {'from': tokens[0], 'to': tokens[1]}, 'en'
        return data, language
    try:
        kws = keywords[language]
    except KeyError:  # dict lookups raise KeyError, not IndexError
        raise ValueError('Invalid language: "%s"!' % language)
    # Convert departure/arrival phrases into the API's "time"/"date" fields.
    for t in ["departure", "arrival"]:
        if t in data:
            data["time"] = _parse_time(data[t], kws)
            date = _parse_date(data[t], kws)
            if date is not None:
                data["date"] = date
            if t == "arrival":
                data['isArrivalTime'] = 1
            del data[t]
    logging.debug('Data: ' + repr(data))
    return data, language
| gpl-3.0 |
toucheJterm2015/touche | testing/tests/admin/SetupHeadersForbiddenTest.py | 1 | 8114 | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
class SetupHeadersForbiddenTest(unittest.TestCase):
    """Selenium regression test for the admin header and forbidden-word pages.

    Logs in as the admin user, exercises the "Edit" workflow on the
    setup_headers page, enables forbidden-word checking for every language,
    exercises the setup_forbidden page, then disables Java's forbidden words
    and verifies Java disappears from the language list.

    Failed soft assertions are collected in ``self.verificationErrors`` and
    reported from ``tearDown``.
    """

    def __init__(self, url):
        # NOTE(review): bypasses unittest.TestCase.__init__ and calls setUp()
        # manually with a url argument, so this class is not compatible with
        # standard unittest discovery -- confirm how it is actually invoked.
        self.setUp(url)

    def setUp(self, url):
        """Start a PhantomJS driver pointed at the given base URL."""
        self.driver = webdriver.PhantomJS("/usr/local/bin/phantomjs")
        self.driver.implicitly_wait(30)
        self.base_url = url
        self.verificationErrors = []
        self.accept_next_alert = True

    def test_setup_headers_forbidden(self):
        driver = self.driver
        #should be moved to separate function, but for now when I try to do so, it breaks.
        driver.get(self.base_url + "/admin/index.php")
        user = driver.find_element_by_name("user")
        user.clear()
        user.send_keys("admin")
        driver.find_element_by_name("password").clear()
        driver.find_element_by_name("password").send_keys("password")
        driver.find_element_by_name("submit").click()
        #end of what should be the login function
        #headers tests
        driver.get(self.base_url + "/admin/setup_headers.php")
        try: self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.col-md-5"))
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertRegexpMatches(driver.find_element_by_css_selector("div.col-md-5").text, r"^[\s\S]*C[\s][\s\S]*$")
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertRegexpMatches(driver.find_element_by_css_selector("div.col-md-5").text, r"^[\s\S]*CXX[\s\S]*$")
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertRegexpMatches(driver.find_element_by_css_selector("div.col-md-5").text, r"^[\s\S]*JAVA[\s\S]*$")
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertRegexpMatches(driver.find_element_by_css_selector("div.col-md-5").text, r"^[\s\S]*Edit[\s\S]*$")
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertRegexpMatches(driver.find_element_by_css_selector("div.col-md-6 > div.table-responsive > table.table > tbody > tr > td").text, r"^[\s\S]*Please select[\s\S]*$")
        except AssertionError as e: self.verificationErrors.append(str(e))
        driver.find_element_by_link_text("Edit").click()
        try: self.assertNotRegexpMatches(driver.find_element_by_css_selector("div.col-md-6 > div.table-responsive > table.table > tbody > tr > td").text, r"^[\s\S]*Please select[\s\S]*$")
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertRegexpMatches(driver.find_element_by_css_selector("div.col-md-6").text, r"^[\s\S]*Editing[\s\S]*$")
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertTrue(self.is_element_present(By.NAME, "submit"))
        except AssertionError as e: self.verificationErrors.append(str(e))
        driver.find_element_by_name("submit").click()
        try: self.assertFalse(self.is_element_present(By.NAME, "submit"))
        except AssertionError as e: self.verificationErrors.append(str(e))
        #forbidden tests start here and continue until the end of the def
        #first, making every language have forbidden words
        driver.get(self.base_url + "/admin/setup_contest.php")
        if not driver.find_element_by_name("forbidden_c").is_selected():
            driver.find_element_by_name("forbidden_c").click()
        if not driver.find_element_by_name("forbidden_cpp").is_selected():
            driver.find_element_by_name("forbidden_cpp").click()
        if not driver.find_element_by_name("forbidden_java").is_selected():
            driver.find_element_by_name("forbidden_java").click()
        driver.find_element_by_name("submit").click()
        #now all of the lists of forbidden words should be accessible.
        driver.get(self.base_url + "/admin/setup_forbidden.php")
        try: self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.col-md-5"))
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertRegexpMatches(driver.find_element_by_css_selector("div.col-md-5").text, r"^[\s\S]*C[\s][\s\S]*$")
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertRegexpMatches(driver.find_element_by_css_selector("div.col-md-5").text, r"^[\s\S]*CXX[\s\S]*$")
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertRegexpMatches(driver.find_element_by_css_selector("div.col-md-5").text, r"^[\s\S]*JAVA[\s\S]*$")
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertRegexpMatches(driver.find_element_by_css_selector("div.col-md-5").text, r"^[\s\S]*Edit[\s\S]*$")
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertRegexpMatches(driver.find_element_by_css_selector("div.col-md-6 > div.table-responsive > table.table > tbody > tr > td").text, r"^[\s\S]*Please select[\s\S]*$")
        except AssertionError as e: self.verificationErrors.append(str(e))
        driver.find_element_by_link_text("Edit").click()
        try: self.assertNotRegexpMatches(driver.find_element_by_css_selector("div.col-md-6 > div.table-responsive > table.table > tbody > tr > td").text, r"^[\s\S]*Please select[\s\S]*$")
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertRegexpMatches(driver.find_element_by_css_selector("div.col-md-6").text, r"^[\s\S]*Editing[\s\S]*$")
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertTrue(self.is_element_present(By.NAME, "submit"))
        except AssertionError as e: self.verificationErrors.append(str(e))
        driver.find_element_by_name("submit").click()
        try: self.assertFalse(self.is_element_present(By.NAME, "submit"))
        except AssertionError as e: self.verificationErrors.append(str(e))
        #turn off the java forbidden words (the most common to omit)
        driver.get(self.base_url + "/admin/setup_contest.php")
        if driver.find_element_by_name("forbidden_java").is_selected():
            driver.find_element_by_name("forbidden_java").click()
        driver.find_element_by_name("submit").click()
        #now make sure that java is not listed as an option
        driver.get(self.base_url + "/admin/setup_forbidden.php")
        try: self.assertRegexpMatches(driver.find_element_by_css_selector("div.col-md-5").text, r"^[\s\S]*CXX[\s\S]*$")
        except AssertionError as e: self.verificationErrors.append(str(e))#C++ should still be represented there, but Java should not.
        try: self.assertNotRegexpMatches(driver.find_element_by_css_selector("div.col-md-5").text, r"^[\s\S]*JAVA[\s\S]*$")
        except AssertionError as e: self.verificationErrors.append(str(e))

    def is_element_present(self, how, what):
        """Return True if an element located by (how, what) exists."""
        try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException: return False  # fixed Python-2-only ``except Exc, e`` syntax (name was unused)
        return True

    def is_alert_present(self):
        """Return True if a JavaScript alert is currently open."""
        try: self.driver.switch_to_alert()
        except NoAlertPresentException: return False  # fixed Python-2-only ``except Exc, e`` syntax (name was unused)
        return True

    def close_alert_and_get_its_text(self):
        """Accept or dismiss the open alert (per ``accept_next_alert``) and return its text."""
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally: self.accept_next_alert = True

    def tearDown(self):
        """Quit the browser and fail if any soft assertion was recorded."""
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
# Allow running this module directly from the command line.
# NOTE(review): SetupHeadersForbiddenTest.__init__ requires a ``url``
# argument, so standard unittest discovery may not be able to instantiate
# it -- confirm how this entry point is actually used.
if __name__ == "__main__":
    unittest.main()
| gpl-2.0 |
nick-huang-cc/GraffitiSpaceTT | UnderstandStudyPython/virtualenv_stu.py | 1 | 1524 | #!/usr/bin/env python3
# -*- coding:UTF-8 -*-
#Copyright (c) 1986 Nick Wong.
#Copyright (c) 2016-2026 TP-NEW Corp.
# License: TP-NEW (www.tp-new.com)
__author__ = "Nick Wong"
'''
virtualenv 为应用创建“隔离”环境
virtualenv为应用提供了隔离的Python运行环境,解决了不同应用间多版本的冲突问题。
python3.3以后自带venv模块支持轻量级虚拟环境,virtualenv模块仍然支持,可安装。
1.创建虚拟环境
virtualenv --no-site-packages myvenv
等价于
virtualenv myvenv (目前新版默认不使用系统环境包)
python3自带venv
python -m venv myvenv
也是默认全新干净的环境,相反可选的参数
python -m venv --system-site-packages myvenv
使虚拟环境指向系统环境包目录(非复制),在系统环境pip新安装包,在虚拟环境就可以使用。
2.激活虚拟环境
Platform Shell Command to activate virtual environment
Posix bash/zsh $ source <venv>/bin/activate
fish $ . <venv>/bin/activate.fish
csh/tcsh $ source <venv>/bin/activate.csh
Windows cmd.exe C:> <venv>\Scripts\activate.bat
PowerShell PS C:> <venv>\Scripts\Activate.ps1
3.关闭虚拟环境
<strong>deactivate</strong>
4.删除虚拟环境
删除目录即可
<strong>rd /s /q myvenv</strong> (windows cmd下)
'''
# Example usage -- these are *shell* commands, not Python. The original
# file listed them as bare statements, which made the module a SyntaxError;
# keep them as comments so the file remains importable:
#
#   pip3 install virtualenv
#   mkdir myproject
#   cd myproject
#   virtualenv --no-site-packages venv
#   source venv/bin/activate
#   deactivate   # leave the virtual environment
| agpl-3.0 |
cyanna/edx-platform | common/test/acceptance/tests/studio/test_studio_general.py | 105 | 5669 | """
Acceptance tests for Studio.
"""
from unittest import skip
from bok_choy.web_app_test import WebAppTest
from ...pages.studio.asset_index import AssetIndexPage
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.checklists import ChecklistsPage
from ...pages.studio.course_info import CourseUpdatesPage
from ...pages.studio.edit_tabs import PagesPage
from ...pages.studio.import_export import ExportCoursePage, ImportCoursePage
from ...pages.studio.howitworks import HowitworksPage
from ...pages.studio.index import DashboardPage
from ...pages.studio.login import LoginPage
from ...pages.studio.users import CourseTeamPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.studio.settings import SettingsPage
from ...pages.studio.settings_advanced import AdvancedSettingsPage
from ...pages.studio.settings_graders import GradingPage
from ...pages.studio.signup import SignupPage
from ...pages.studio.textbooks import TextbooksPage
from ...fixtures.course import XBlockFixtureDesc
from base_studio_test import StudioCourseTest
class LoggedOutTest(WebAppTest):
    """
    Smoke test for pages in Studio that are visible when logged out.
    """

    def setUp(self):
        super(LoggedOutTest, self).setUp()
        page_classes = (LoginPage, HowitworksPage, SignupPage)
        self.pages = [page_class(self.browser) for page_class in page_classes]

    def test_page_existence(self):
        """
        Make sure that all the pages are accessible.
        Rather than fire up the browser just to check each url,
        do them all sequentially in this testcase.
        """
        for page in self.pages:
            page.visit()
class LoggedInPagesTest(WebAppTest):
    """
    Tests that verify the pages in Studio that you can get to when logged
    in and do not have a course yet.
    """

    def setUp(self):
        super(LoggedInPagesTest, self).setUp()
        browser = self.browser
        # Authenticate as staff so the dashboard is reachable.
        self.auth_page = AutoAuthPage(browser, staff=True)
        self.dashboard_page = DashboardPage(browser)

    def test_dashboard_no_courses(self):
        """
        Make sure that you can get to the dashboard page without a course.
        """
        self.auth_page.visit()
        self.dashboard_page.visit()
class CoursePagesTest(StudioCourseTest):
    """
    Tests that verify the pages in Studio that you can get to when logged
    in and have a course.
    """

    COURSE_ID_SEPARATOR = "."

    def setUp(self):
        """
        Install a course with no content using a fixture.
        """
        super(CoursePagesTest, self).setUp()
        page_classes = [
            AssetIndexPage, ChecklistsPage, CourseUpdatesPage,
            PagesPage, ExportCoursePage, ImportCoursePage, CourseTeamPage,
            CourseOutlinePage, SettingsPage, AdvancedSettingsPage,
            GradingPage, TextbooksPage,
        ]
        org = self.course_info['org']
        number = self.course_info['number']
        run = self.course_info['run']
        # One instance of every course-scoped page, all pointing at the
        # fixture course created above.
        self.pages = [clz(self.browser, org, number, run) for clz in page_classes]

    def test_page_redirect(self):
        """
        /course/ is the base URL for all courses, but by itself, it should
        redirect to /home/.
        """
        self.dashboard_page = DashboardPage(self.browser)  # pylint: disable=attribute-defined-outside-init
        self.dashboard_page.visit()
        last_url_component = self.browser.current_url.strip('/').rsplit('/')[-1]
        self.assertEqual(last_url_component, 'home')

    @skip('Intermittently failing with Page not found error for Assets. TE-418')
    def test_page_existence(self):
        """
        Make sure that all these pages are accessible once you have a course.
        Rather than fire up the browser just to check each url,
        do them all sequentially in this testcase.
        """
        # In the real workflow you will be at the dashboard page
        # after you log in. This test was intermittently failing on the
        # first (asset) page load with a 404.
        # Not exactly sure why, so adding in a visit
        # to the dashboard page here to replicate the usual flow.
        self.dashboard_page = DashboardPage(self.browser)
        self.dashboard_page.visit()

        # Verify that each page is available
        for page in self.pages:
            page.visit()
class DiscussionPreviewTest(StudioCourseTest):
    """
    Tests that Inline Discussions are rendered with a custom preview in Studio
    """

    def setUp(self):
        super(DiscussionPreviewTest, self).setUp()
        outline_page = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        outline_page.visit()
        # Drill down to the unit that holds the discussion component.
        subsection = outline_page.section('Test Section').subsection('Test Subsection')
        self.unit = subsection.expand_subsection().unit('Test Unit')
        self.unit.go_to()

    def populate_course_fixture(self, course_fixture):
        """
        Return a test course fixture containing a discussion component.
        """
        discussion = XBlockFixtureDesc("discussion", "Test Discussion")
        unit = XBlockFixtureDesc("vertical", "Test Unit").add_children(discussion)
        subsection = XBlockFixtureDesc("sequential", "Test Subsection").add_children(unit)
        chapter = XBlockFixtureDesc("chapter", "Test Section").add_children(subsection)
        course_fixture.add_children(chapter)

    def test_is_preview(self):
        """
        Ensure that the preview version of the discussion is rendered.
        """
        self.assertTrue(self.unit.q(css=".discussion-preview").present)
        self.assertFalse(self.unit.q(css=".discussion-show").present)
| agpl-3.0 |
jalexvig/keras | keras/preprocessing/text.py | 61 | 5906 | # -*- coding: utf-8 -*-
'''
These preprocessing utils would greatly benefit
from a fast Cython rewrite.
'''
from __future__ import absolute_import
import string, sys
import numpy as np
from six.moves import range
from six.moves import zip
# ``string.maketrans`` was removed in Python 3 in favour of
# ``str.maketrans``.  Pick the right one once at import time so the rest
# of the module can call ``maketrans`` unconditionally.
if sys.version_info < (3,):
    maketrans = string.maketrans
else:
    maketrans = str.maketrans
def base_filter():
    """Return the default characters to strip from text.

    This is all ASCII punctuation except the single quote (kept so
    contractions survive tokenization), plus tab and newline.
    """
    filter_chars = string.punctuation.replace("'", '') + '\t\n'
    return filter_chars
def text_to_word_sequence(text, filters=base_filter(), lower=True, split=" "):
    '''Split a text into a list of word tokens.

    The previous docstring documented a nonexistent ``prune`` argument;
    the actual parameters are:

    # text: input string.
    # filters: characters to filter out; each is replaced by ``split``
    #     before splitting.
    # lower: whether to lowercase the text first.
    # split: separator used both as the replacement for filtered
    #     characters and as the split delimiter.

    Returns a list of non-empty tokens.
    '''
    if lower:
        text = text.lower()
    # Replace every filtered character with the separator, then split.
    text = text.translate(maketrans(filters, split * len(filters)))
    return [token for token in text.split(split) if token]
def one_hot(text, n, filters=base_filter(), lower=True, split=" "):
    '''Hash each word of ``text`` into the integer range [1, n).

    Collisions are possible since this is a plain hashing trick; index 0
    is never produced.
    '''
    words = text_to_word_sequence(text, filters=filters, lower=lower, split=split)
    return [abs(hash(word)) % (n - 1) + 1 for word in words]
class Tokenizer(object):
    """Vectorize a text corpus.

    Each text can be converted to a sequence of integer word indices
    (ranked by corpus-wide frequency, starting at 1; 0 is reserved) or to
    a per-text row in a matrix (binary, count, tfidf or freq encoded).
    """

    def __init__(self, nb_words=None, filters=base_filter(), lower=True, split=" "):
        """
        nb_words: if set, only word indices strictly below ``nb_words``
            are kept when converting texts/sequences.
        filters: characters stripped from texts before tokenization.
        lower: whether to lowercase texts.
        split: word separator.
        """
        self.word_counts = {}   # word -> total occurrences across the corpus
        self.word_docs = {}     # word -> number of documents containing it
        self.filters = filters
        self.split = split
        self.lower = lower
        self.nb_words = nb_words
        self.document_count = 0

    def fit_on_texts(self, texts):
        '''
        required before using texts_to_sequences or texts_to_matrix
        @param texts: can be a list or a generator (for memory-efficiency)
        '''
        self.document_count = 0
        for text in texts:
            self.document_count += 1
            seq = text_to_word_sequence(text, self.filters, self.lower, self.split)
            for w in seq:
                if w in self.word_counts:
                    self.word_counts[w] += 1
                else:
                    self.word_counts[w] = 1
            # set(seq) so each word counts once per document.
            for w in set(seq):
                if w in self.word_docs:
                    self.word_docs[w] += 1
                else:
                    self.word_docs[w] = 1

        # Rank words by frequency; most frequent word gets index 1.
        wcounts = list(self.word_counts.items())
        wcounts.sort(key=lambda x: x[1], reverse=True)
        sorted_voc = [wc[0] for wc in wcounts]
        self.word_index = dict(list(zip(sorted_voc, list(range(1, len(sorted_voc) + 1)))))

        self.index_docs = {}  # word index -> document frequency
        for w, c in list(self.word_docs.items()):
            self.index_docs[self.word_index[w]] = c

    def fit_on_sequences(self, sequences):
        '''
        required before using sequences_to_matrix
        (if fit_on_texts was never called)
        '''
        self.document_count = len(sequences)
        self.index_docs = {}
        for seq in sequences:
            seq = set(seq)  # each index counts once per sequence
            for i in seq:
                if i not in self.index_docs:
                    self.index_docs[i] = 1
                else:
                    self.index_docs[i] += 1

    def texts_to_sequences(self, texts):
        '''
        Transform each text in texts in a sequence of integers.
        Only top "nb_words" most frequent words will be taken into account.
        Only words known by the tokenizer will be taken into account.

        Returns a list of sequences.
        '''
        res = []
        for vect in self.texts_to_sequences_generator(texts):
            res.append(vect)
        return res

    def texts_to_sequences_generator(self, texts):
        '''
        Transform each text in texts in a sequence of integers.
        Only top "nb_words" most frequent words will be taken into account.
        Only words known by the tokenizer will be taken into account.

        Yields individual sequences.
        '''
        nb_words = self.nb_words
        for text in texts:
            seq = text_to_word_sequence(text, self.filters, self.lower, self.split)
            vect = []
            for w in seq:
                i = self.word_index.get(w)
                if i is None:
                    continue  # unknown word: skip
                if nb_words and i >= nb_words:
                    continue  # beyond the top-nb_words vocabulary: skip
                vect.append(i)
            yield vect

    def texts_to_matrix(self, texts, mode="binary"):
        '''
        modes: binary, count, tfidf, freq
        '''
        sequences = self.texts_to_sequences(texts)
        return self.sequences_to_matrix(sequences, mode=mode)

    def sequences_to_matrix(self, sequences, mode="binary"):
        '''
        modes: binary, count, tfidf, freq
        '''
        if not self.nb_words:
            if self.word_index:
                nb_words = len(self.word_index) + 1
            else:
                raise Exception("Specify a dimension (nb_words argument), or fit on some text data first")
        else:
            nb_words = self.nb_words

        if mode == "tfidf" and not self.document_count:
            raise Exception("Fit the Tokenizer on some data before using tfidf mode")

        X = np.zeros((len(sequences), nb_words))
        for i, seq in enumerate(sequences):
            if not seq:
                continue  # bug fix: was ``pass``, which fell through
            counts = {}
            for j in seq:
                if j >= nb_words:
                    # Bug fix: was ``pass``, which let out-of-range indices
                    # through and crashed ``X[i][j]`` with IndexError below.
                    continue
                if j not in counts:
                    counts[j] = 1.
                else:
                    counts[j] += 1
            for j, c in list(counts.items()):
                if mode == "count":
                    X[i][j] = c
                elif mode == "freq":
                    X[i][j] = c / len(seq)
                elif mode == "binary":
                    X[i][j] = 1
                elif mode == "tfidf":
                    # NOTE(review): this is the formula as originally
                    # implemented (log of the in-document frequency over a
                    # smoothed inverse document frequency); kept verbatim.
                    tf = np.log(c / len(seq))
                    df = (1 + np.log(1 + self.index_docs.get(j, 0) / (1 + self.document_count)))
                    X[i][j] = tf / df
                else:
                    raise Exception("Unknown vectorization mode: " + str(mode))
        return X
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.