prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
# -*- coding: utf-8 -*-
import dxf2gmlcatastro

# Working directory that holds the input and output files.
path = '/carpeta/archivos/'
# Input DXF parcel drawing and output cadastral GML file names.
dxf = 'parcelacad.dxf'
gml = 'catastrogml.gml'
# Full paths plus the target CRS code (EPSG:25830, ETRS89 / UTM 30N).
dxffile = path + dxf
gmlfile = path + gml
src = '25830'
# Generate the cadastral GML from the DXF drawing.
dxf2gmlcatastro.crea_gml(dxffile, gmlfile, src)
# Add the resulting GML layer to QGIS ('iface' is provided by the QGIS
# Python console at runtime).
layer = iface.addVectorLayer(gmlfile, "gmlcatastro", "ogr")
from toee import *
import char_class_utils
###################################################
def GetConditionName():
    """Name of the class condition attached to Loremaster characters."""
    return "Loremaster"
def GetSpellCasterConditionName():
    """Name of the condition driving this class's spellcasting."""
    return "Loremaster Spellcasting"
def GetCategory():
    """UI grouping under which this class is listed."""
    return "Core 3.5 Ed Prestige Classes"
def GetClassDefinitionFlags():
    # Engine flag marking this as a core-rules class definition.
    return CDF_CoreClass
def GetClassHelpTopic():
    """In-game help topic tag for this class."""
    return "TAG_LOREMASTERS"
# Engine stat enum identifying the Loremaster class level.
classEnum = stat_level_loremaster
###################################################

# Loremaster grants no level-based bonus feats.
class_feats = {
}

# Class skill list per the SRD Loremaster entry.
# Bug fix: the original tuple listed skill_alchemy twice.
class_skills = (skill_alchemy, skill_appraise, skill_concentration,
                skill_decipher_script, skill_gather_information,
                skill_handle_animal, skill_heal, skill_knowledge_all,
                skill_perform, skill_profession, skill_spellcraft,
                skill_use_magic_device)
def IsEnabled():
    """Class is currently disabled (0) pending full implementation."""
    return 0
def GetHitDieType():
    """Loremasters use a d4 hit die."""
    return 4
def GetSkillPtsPerLevel():
    """Skill points gained per class level (before Int modifier)."""
    return 4
def GetBabProgression():
    # Loremasters use the slow (non-martial, 1/2-level) BAB progression.
    return base_attack_bonus_non_martial
def IsFortSaveFavored():
    """Fortitude is a poor save for this class."""
    return 0
def IsRefSaveFavored():
    """Reflex is a poor save for this class."""
    return 0
def IsWillSaveFavored():
    """Will is the Loremaster's good save."""
    return 1
def GetSpellListType():
    # Loremaster advances any existing spellcasting class's list.
    return spell_list_type_any
def IsClassSkill(skillEnum):
    # Delegate the membership check to the shared helper, using this
    # class's skill tuple.
    return char_class_utils.IsClassSkill(class_skills, skillEnum)
def IsClassFeat(featEnum):
    """Return whether featEnum is one of this class's feats.

    Bug fix: the original def line contained a stray ' | ' artifact that
    made it a syntax error.
    """
    return char_class_utils.IsClassFeat(class_feats, featEnum)
def GetClassFeats():
    # Expose the (currently empty) level -> feats table.
    return class_feats
def IsAlignmentCompatible(alignment):
    """Loremaster has no alignment restriction; always compatible.

    Bug fix: the original def line contained a stray ' | ' artifact that
    made it a syntax error.
    """
    return 1
def LoremasterFeatPrereq(obj):
    """Return 1 if obj has at least three qualifying Loremaster feats.

    Qualifying feats are the metamagic and item-creation feats from the
    SRD Loremaster prerequisites.

    Bug fix: the original incremented an undefined name ('numMmFeats'),
    which raised NameError as soon as the first matching feat was found.
    """
    loremasterFeats = (feat_empower_spell, feat_enlarge_spell, feat_extend_spell,
                       feat_heighten_spell, feat_maximize_spell, feat_silent_spell,
                       feat_quicken_spell, feat_still_spell, feat_widen_spell,
                       feat_persistent_spell, feat_scribe_scroll, feat_brew_potion,
                       feat_craft_magic_arms_and_armor, feat_craft_rod,
                       feat_craft_staff, feat_craft_wand, feat_craft_wondrous_item)
    numFeats = 0
    for p in loremasterFeats:
        if obj.has_feat(p):
            numFeats = numFeats + 1
            if numFeats >= 3:
                return 1
    return 0
def ObjMeetsPrereqs( obj ):
    # Prestige-class gate: can obj take a Loremaster level?
    # Deliberately disabled while prerequisites are implemented; the code
    # below the first return is intentionally unreachable scaffolding.
    return 0 # WIP
    if (not LoremasterFeatPrereq(obj)):
        return 0
    # NOTE(review): 'stat_level' looks like an incomplete stat constant --
    # confirm the intended stat before enabling this check.
    if (obj.stat_level_get(stat_level) < 7): # in lieu of Knowledge ranks
        return 0
    # todo check seven divination spells... bah..
    return 1
#
# getlangnames.py
#
# Copyright (C) 2007 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, o | r
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/> | .
#
import sys
sys.path.append("..")
import localeinfo
import gettext
localeInfo = localeinfo.get("en_US.UTF-8")

# Map each locale's native name to its translation, falling back to the
# untranslated name when no catalog exists.
names = {}
for k in localeInfo.keys():
    found = False
    for l in localeinfo.expandLangs(k):
        # Bug fix: .gmo catalogs are binary; GNUTranslations parses raw
        # bytes, so the file must be opened in 'rb' mode.
        try:
            f = open("../po/%s.gmo" % (l,), "rb")
        except (OSError, IOError):
            continue
        cat = gettext.GNUTranslations(f)
        cat.set_output_charset("utf-8")
        names[localeInfo[k][0]] = cat.lgettext(localeInfo[k][0])
        found = True
        break
    if not found:
        names[localeInfo[k][0]] = localeInfo[k][0]

# Bug fix: dict.keys() returns a view in Python 3 and has no .sort();
# sorted() works on both Python 2 and 3.
for k in sorted(names):
    print("%s\t%s" % (k, names[k]))
|
class ReversiLogic(object):
    """Game state for a Reversi match: the active color plus the board."""

    def __init__(self):
        self.turn = 'b'  # black moves first
        self.board = Board()

    def get_whose_turn(self):
        """Return the color that moves next, or None if the game is over.

        When the current player has no legal move, play passes to the
        opponent; if the opponent cannot move either, the game is done.
        """
        if self.can_play():
            return self.turn
        self.turn = switch(self.turn)
        if self.can_play():
            return self.turn
        return None

    def can_play(self):
        """True when the current player has at least one legal move."""
        return any(self.legal_attacks(r, c)
                   for r in range(8) for c in range(8))

    def make_move(self, row, col):
        """Play (row, col) for the current player.

        Returns False for an illegal move; otherwise places the piece,
        hands the turn to the opponent, and returns the list of Square
        objects whose display must be refreshed.
        """
        attack_lines = self.legal_attacks(row, col)
        if not attack_lines:
            return False
        self.board.squares[get_key(row, col)] = self.turn
        self.turn = switch(self.turn)
        return self.board.flip(row, col, attack_lines)

    def legal_attacks(self, row, col):
        """Attack lines opened by playing (row, col); False if occupied."""
        if self.board.squares[get_key(row, col)]:
            return False
        return self.board.attacks(row, col, self.turn)

    def count_pieces(self, color):
        """Number of the given player's pieces currently on the board."""
        return sum(1 for r in range(8) for c in range(8)
                   if self.board.squares[get_key(r, c)] == color)
# Stores a dictionary of each square's color, initialized to None.
# Each key is a concatenated string in row+col format. See get_key().
class Board(object):
    """8x8 Reversi board.

    Stores a dictionary of each square's color ('b', 'w' or None),
    initialized to None. Each key is a concatenated string in row+col
    format. See get_key().
    """

    def __init__(self):
        self.squares = {}
        for row in range(8):
            for col in range(8):
                key = get_key(row, col)
                self.squares[key] = None
        # Create the starting pieces.
        self.squares['33'] = 'w'
        self.squares['34'] = 'b'
        self.squares['43'] = 'b'
        self.squares['44'] = 'w'
        steps = [-1, 0, 1]
        steps = [(r_step, c_step) for r_step in steps for c_step in steps]
        steps.remove((0, 0))
        # Steps is a list of tuples, representing all possible directions
        # from a given square. Tuple is in (row_step, col_step) format.
        self.steps = steps

    def attacks(self, row, col, color):
        # Return the list of direction tuples in which playing (row, col)
        # as *color* flanks a run of opponent pieces.
        attack_lines = []
        opponent = switch(color)
        # Check in every adjacent square for the opponent's color.
        for direction in self.steps:
            row_step = direction[0]
            col_step = direction[1]
            # Use a try statement because some adjacent squares will be
            # off the board, resulting in a key error. NOTE: the KeyError
            # also ends the inner scan when a line runs off the edge
            # without closing -- the exception handler is load-bearing.
            try:
                key = get_key(row + row_step, col + col_step)
                # If adjacent square contains the opponent, continue in that
                # direction to determine if it meets up with a player's piece.
                if self.squares[key] == opponent:
                    row_index = row
                    col_index = col
                    while 1:
                        row_index += row_step
                        col_index += col_step
                        key = get_key(row_index, col_index)
                        # Check to see if there's a piece on this square.
                        if self.squares[key]:
                            # Now check if the piece is one of the players.
                            if self.squares[key] != opponent:
                                # We have found an attack line.
                                attack_lines.append(direction)
                                # Break from this direction to try others.
                                break
                        # Found an empty square. Move on to the next direction
                        else: break
            # If we check a square not on the board, just move on to the next.
            except KeyError: continue
        return attack_lines

    def flip(self, row, col, directions):
        # Flip every flanked piece along each attack line; return the
        # Square objects that need a graphical refresh.
        # target is the color we'll be changing to.
        target = self.squares[get_key(row, col)]
        # squares is the list of squares that need to be graphically updated.
        squares = []
        # Each direction is an attack line.
        for direction in directions:
            row_index = row
            col_index = col
            # Continue flipping pieces in this direction until target is found
            while 1:
                row_index += direction[0]
                col_index += direction[1]
                key = get_key(row_index, col_index)
                if self.squares[key] == target: break
                # Flip piece.
                self.squares[key] = target
                # Add this square to list that must be graphically updated.
                squares.append(Square(row_index, col_index))
        # The played square must be graphically updated too.
        squares.append(Square(row, col))
        return squares
# Simple data storage object to return to the main function.
# Each square returned must be updated.
class Square(object):
    """Value object naming one board square that must be redrawn.

    Row and column are stored as strings so they concatenate directly
    into board-dictionary keys.
    """

    def __init__(self, row, col):
        # Stringify once at construction time.
        self.row = str(row)
        self.col = str(col)
# UTILITY FUNCTIONS
def get_key(row, col):
    """Build the board-dictionary key for (row, col), e.g. (3, 4) -> '34'."""
    return "%s%s" % (row, col)
def switch(color):
    """Return the opposing color ('b' <-> 'w'); None for anything else."""
    return {'b': 'w', 'w': 'b'}.get(color)
|
tionary'):
create_filter_rules_list(['a'], self.resource_model)
def test_filter_rule_missing_entry_fails(self):
    """A filter rule lacking one of its required entries is rejected."""
    with self.assertRaisesRegex(BadFilterRule, 'missing'):
        create_filter_rules_list([{'key': 'key1'}], self.resource_model)
def test_filter_rule_key_not_text_type_fails(self):
    """A filter-rule key that is not a string is rejected."""
    with self.assertRaisesRegex(BadFilterRule, 'must be a string'):
        err_filter_rule = {'key': 1, 'values': ['b'],
                           'operator': LabelsOperator.ANY_OF,
                           'type': 'label'}
        create_filter_rules_list([err_filter_rule], self.resource_model)
def test_filter_rule_value_not_list_fails(self):
    """A filter-rule 'values' entry that is not a list is rejected."""
    with self.assertRaisesRegex(BadFilterRule, 'must be a list'):
        err_filter_rule = {'key': 'a', 'values': 'b',
                           'operator': LabelsOperator.ANY_OF,
                           'type': 'label'}
        create_filter_rules_list([err_filter_rule], self.resource_model)
def test_parse_filter_rules_fails(self):
    """Each malformed (key, values, operator, type) combination raises
    BadFilterRule whose message matches the expected pattern."""
    err_filter_rules_params = [
        (('a', ['b'], 'bad_operator', 'label'),
         'operator for filtering by labels must be one of'),
        (('a', ['b'], LabelsOperator.IS_NULL, 'label'),
         'list must be empty if the operator'),
        (('a', ['b'], LabelsOperator.IS_NOT_NULL, 'label'),
         'list must be empty if the operator'),
        (('a', [], LabelsOperator.ANY_OF, 'label'),
         'list must include at least one item if the operator'),
        (('blueprint_id', ['b'], 'bad_operator', 'attribute'),
         'The operator for filtering by attributes must be'),
        (('bad_attribute', ['dep1'], LabelsOperator.ANY_OF, 'attribute'),
         'Allowed attributes to filter deployments|blueprints by are'),
        (('a', ['b'], LabelsOperator.ANY_OF, 'bad_type'),
         'Filter rule type must be one of'),
        (('bad_attribute', ['dep1'], LabelsOperator.ANY_OF, 'bad_type'),
         'Filter rule type must be one of')
    ]
    for params, err_msg in err_filter_rules_params:
        with self.assertRaisesRegex(BadFilterRule, err_msg):
            create_filter_rules_list([FilterRule(*params)],
                                     self.resource_model)
def test_key_and_value_validation_fails(self):
    """Keys with spaces and values with quotes/control characters are
    rejected with the matching 'The key'/'The value' messages."""
    err_filter_rules_params = [
        (('a b', ['b'], LabelsOperator.ANY_OF, 'label'), 'The key'),
        (('a', ['b', '"'], LabelsOperator.ANY_OF, 'label'), 'The value'),
        (('a', ['b', '\t'], LabelsOperator.ANY_OF, 'label'), 'The value'),
        (('a', ['b', '\n'], LabelsOperator.ANY_OF, 'label'), 'The value')
    ]
    for params, err_msg in err_filter_rules_params:
        with self.assertRaisesRegex(BadFilterRule, err_msg):
            create_filter_rules_list([FilterRule(*params)],
                                     self.resource_model)
class BlueprintsFiltersFunctionalityCase(FiltersFunctionalityBaseCase):
    """Exercises label and attribute filter rules against blueprints."""
    __test__ = True

    def setUp(self):
        super().setUp(models.Blueprint)

    def test_filters_applied(self):
        """Label filters select the right subset of labeled blueprints."""
        bp_1 = self.put_blueprint_with_labels(self.LABELS, blueprint_id='bp_1')
        bp_2 = self.put_blueprint_with_labels(self.LABELS_2,
                                              blueprint_id='bp_2')
        bp_3 = self.put_blueprint_with_labels(self.LABELS_3,
                                              blueprint_id='bp_3')
        bp_4 = self.put_blueprint(blueprint_id='bp_4')
        self._test_labels_filters_applied(bp_1['id'], bp_2['id'], bp_3['id'],
                                          bp_4['id'])

    def test_filter_by_state_uploaded(self):
        """All freshly uploaded blueprints match state == 'uploaded'."""
        bp_1 = self.put_blueprint(blueprint_id='bp_1')
        bp_2 = self.put_blueprint(blueprint_id='bp_2')
        bp_3 = self.put_blueprint(blueprint_id='bp_3')
        self.assert_filters_applied(
            [('state', ['uploaded'], AttrsOperator.ANY_OF,
              'attribute'),
             ('state', ['invalid', 'failed', ], AttrsOperator.NOT_ANY_OF,
              'attribute')],
            {bp_1.id, bp_2.id, bp_3.id},
            models.Blueprint
        )

    def test_filter_by_state_invalid(self):
        """A blueprint forced to state 'invalid' matches all four
        attribute operators that should select it."""
        bp = self.put_blueprint(blueprint_id='invalid_blueprint')
        self.client.blueprints.update(bp.id, {'state': 'invalid'})
        self.assert_filters_applied(
            [('state', ['invalid'], AttrsOperator.ANY_OF, 'attribute'),
             ('state', ['uploaded'], AttrsOperator.NOT_ANY_OF, 'attribute'),
             ('state', ['invalid'], AttrsOperator.CONTAINS, 'attribute'),
             ('state', ['uploaded'], AttrsOperator.NOT_CONTAINS, 'attribute')],
            {bp.id},
            models.Blueprint
        )
class DeploymentFiltersFunctionalityCase(FiltersFunctionalityBaseCase):
    """Exercises label and attribute filter rules against deployments.

    Bug fix: stray ' | ' artifacts split identifiers mid-token in the
    original ('put_deployment_wi | th_labels' and a broken keyword
    argument line), making the class a syntax error.
    """
    __test__ = True

    def setUp(self):
        super().setUp(models.Deployment)

    def test_filters_applied(self):
        """Combined label/attribute filters select the right deployments."""
        self.client.sites.create('site_1')
        self.client.sites.create('other_site')
        dep1 = self.put_deployment_with_labels(self.LABELS,
                                               resource_id='res_1',
                                               site_name='site_1')
        dep2 = self.put_deployment_with_labels(self.LABELS_2,
                                               resource_id='res_2',
                                               site_name='other_site')
        dep3 = self.put_deployment_with_labels(self.LABELS_3,
                                               resource_id='res_3',
                                               site_name='other_site')
        _, _, _, dep4 = self.put_deployment(deployment_id='res_4',
                                            blueprint_id='res_4')
        self._test_labels_filters_applied(dep1.id, dep2.id, dep3.id, dep4.id)
        self.assert_filters_applied(
            [('a', ['b'], LabelsOperator.ANY_OF, 'label'),
             ('c', ['y', 'z'], LabelsOperator.NOT_ANY_OF, 'label')], {dep1.id})
        self.assert_filters_applied(
            [('blueprint_id', ['res_1', 'res_2'], AttrsOperator.ANY_OF,
              'attribute'),
             ('created_by', ['not_user'], AttrsOperator.NOT_ANY_OF,
              'attribute'),
             ('site_name', ['site'], AttrsOperator.CONTAINS, 'attribute'),
             ('a', ['b'], LabelsOperator.ANY_OF, 'label')],
            {dep1.id, dep2.id},
        )
        self.assert_filters_applied(
            [('a', ['b'], LabelsOperator.ANY_OF, 'label'),
             ('blueprint_id', ['res_1', 'res_2'], AttrsOperator.NOT_ANY_OF,
              'attribute')],
            set()
        )
        self.assert_filters_applied(
            [('a', ['b'], LabelsOperator.ANY_OF, 'label'),
             ('blueprint_id', ['res'], AttrsOperator.CONTAINS, 'attribute')],
            {dep1.id, dep2.id}
        )
        self.assert_filters_applied(
            [('a', ['b'], LabelsOperator.ANY_OF, 'label'),
             ('blueprint_id', ['res_1'], AttrsOperator.CONTAINS, 'attribute')],
            {dep1.id}
        )
        self.assert_filters_applied(
            [('site_name', ['site_1'], AttrsOperator.NOT_CONTAINS,
              'attribute')], {dep2.id, dep3.id}
        )
        self.assert_filters_applied(
            [('site_name', ['site_1', 'site_3'], AttrsOperator.NOT_CONTAINS,
              'attribute')], {dep2.id, dep3.id}
        )
        self.assert_filters_applied(
            [('site_name', ['site'], AttrsOperator.STARTS_WITH, 'attribute')],
            {dep1.id}
        )
        self.assert_filters_applied(
            [('site_name', ['other', 'blah'], AttrsOperator.STARTS_WITH,
              'attribute')], {dep2.id, dep3.id}
        )
        self.assert_filters_applied(
            [('site_name', ['site'], AttrsOperator.ENDS_WITH, 'attribute')],
            {dep2.id, dep3.id}
        )
        self.assert_filters_applied(
            [('site_name', ['1', 'blah'], AttrsOperator.ENDS_WITH,
              'attribute')], {dep1.id}
        )
class FiltersBaseCase(base_test.BaseServerTestCase):
__te |
from . import views
from django.urls import path
# URL routes for the widgets app. Bug fix: stray ' | ' artifacts in the
# 'home' and 'widget-user' entries made the list a syntax error.
urlpatterns = [
    path("", views.PublicWidgets.as_view(), name="home"),
    path("create/widget", views.WidgetCreate.as_view(), name="widget-create"),
    path("subscription/<pk>/delete", views.SubscriptionDelete.as_view(), name="subscription-delete"),
    path("user/<username>/subscriptions", views.SubscriptionListView.as_view(), name="subscriptions"),
    path("user/<username>/widgets", views.UserWidgets.as_view(), name="widget-user"),
    # Widget Views
    path("widget", views.WidgetListView.as_view(), name="widget-list"),
    path("widget/<pk>", views.WidgetDetailView.as_view(), name="widget-detail"),
    path("widget/<pk>/delete", views.WidgetDelete.as_view(), name="widget-delete"),
    path("widget/<pk>/subscribe", views.WidgetSubscription.as_view(), name="widget-subscribe"),
    path("widget/<pk>/unsubscribe", views.WidgetUnsubscribe.as_view(), name="widget-unsubscribe"),
    path("widget/<pk>/update", views.WidgetUpdate.as_view(), name="widget-update"),
    path("widget/<pk>/increment", views.StreakIncrement.as_view(), name="streak-increment"),
    # Filtered List Views
    path("filter/<type>", views.FilterList.as_view(), name="widget-type"),
    # Misc Views
    path("recent/waypoints", views.WaypointList.as_view(), name="waypoint-list"),
    path("recent/samples", views.SampleList.as_view(), name="sample-list"),
    path("recent/scrapes", views.ScrapeList.as_view(), name="scrape-list"),
    path("recent/shares", views.ShareList.as_view(), name="share-list"),
]
|
# Twisted, the Framework of Your Internet
# Copyright (C) 2001 Matthew W. Lefkowitz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Standard input/out/err support.
API Stability: semi-stable
Future Plans: suppo | rt for stderr, perhaps
Maintainer: U{Itamar Shtull-Trauring<mailto:twisted@itamarst.org>}
"""
# system imports
import sys, | os, select, errno
# Sibling Imports
import abstract, fdesc, protocol
from main import CONNECTION_LOST
_stdio_in_use = 0
class StandardIOWriter(abstract.FileDescriptor):
connected = 1
ic = 0
def __init__(self):
abstract.FileDescriptor.__init__(self)
self.fileno = sys.__stdout__.fileno
fdesc.setNonBlocking(self.fileno())
def writeSomeData(self, data):
try:
return os.write(self.fileno(), data)
return rv
except IOError, io:
if io.args[0] == errno.EAGAIN:
return 0
elif io.args[0] == errno.EPERM:
return 0
return CONNECTION_LOST
except OSError, ose:
if ose.errno == errno.EPIPE:
return CONNECTION_LOST
if ose.errno == errno.EAGAIN:
return 0
raise
def connectionLost(self, reason):
abstract.FileDescriptor.connectionLost(self, reason)
os.close(self.fileno())
class StandardIO(abstract.FileDescriptor):
    """I can connect Standard IO to a twisted.protocol
    I act as a selectable for sys.stdin, and provide a write method that writes
    to stdout.
    """
    def __init__(self, protocol):
        """Create me with a protocol.
        This will fail if a StandardIO has already been instantiated.
        """
        abstract.FileDescriptor.__init__(self)
        global _stdio_in_use
        if _stdio_in_use:
            # Parenthesized form works on every Python version, unlike
            # the original 'raise RuntimeError, "..."' py2-only syntax.
            raise RuntimeError("Standard IO already in use.")
        _stdio_in_use = 1
        self.fileno = sys.__stdin__.fileno
        fdesc.setNonBlocking(self.fileno())
        self.protocol = protocol
        self.startReading()
        self.writer = StandardIOWriter()
        self.protocol.makeConnection(self)

    def write(self, data):
        """Write some data to standard output.
        """
        self.writer.write(data)

    def doRead(self):
        """Some data's readable from standard input.
        """
        return fdesc.readFromFD(self.fileno(), self.protocol.dataReceived)

    def closeStdin(self):
        """Close standard input.
        """
        self.writer.loseConnection()

    def connectionLost(self, reason):
        """The connection was lost.
        """
        self.protocol.connectionLost()
|
#!/bin/env python
# -*- coding: utf-8 -*-
"""
Description:
Compute Phase Tensors from ModEM Dat File and output to CSV files
Usage Examples:
python scripts/modem_data_to_phase_tensor.py examples/data/ModEM_files/Modular_MPI_NLCG_028.dat [OutDir]
python scripts/modem_data_to_phase_tensor.py /e/MTPY2_Outputs/GA_UA_edited_10s-10000s_modem_inputs/ModEM_Data.dat [OutDir]
Developer: fei.zhang@ga.gov.au
LastUpdate: 08/09/2017
LastUpdate: 05/12/2017 FZ moved the function into the module mtpy.modeling.modem.Data
LastUpdate: 21/02/2018 Added command for running the script
"""
import sys, os
from mtpy.modeling.modem import Data
from mtpy.mtpy_globals import NEW_TEMP_DIR
if __name__ == "__main__":
    # First argument: ModEM .dat file (required).
    file_dat = sys.argv[1]
    # Second argument: output directory (optional; defaults to the
    # package temp dir). Bug fix: removed a stray ' | ' artifact that
    # broke the assignment line.
    if len(sys.argv) > 2:
        outdir = sys.argv[2]
    else:
        outdir = NEW_TEMP_DIR
    obj = Data()
    obj.compute_phase_tensor(file_dat, outdir)
|
.env.user.company_id.currency_id
currency_rate_id = self.env['res.currency.rate'].search([
('rate', '=', 1),
'|', ('company_id', '=', self.env.user.company_id.id), ('company_id', '=', False)], limit=1)
base_currency_id = currency_rate_id.currency_id
ctx = context.copy()
for record in self:
ctx['date'] = record.date
record.user_currency_price_total = base_currency_id.with_context(ctx).compute(record.price_total, user_currency_id)
record.user_currency_price_average = base_currency_id.with_context(ctx).compute(record.price_average, user_currency_id)
record.user_currency_residual = base_currency_id.with_context(ctx).compute(record.residual, user_currency_id)
# --- Read-only columns materialized by the report's SQL view ---
date = fields.Date(readonly=True)
product_id = fields.Many2one('product.product', string='Product', readonly=True)
product_qty = fields.Float(string='Product Quantity', readonly=True)
uom_name = fields.Char(string='Reference Unit of Measure', readonly=True)
payment_term_id = fields.Many2one('account.payment.term', string='Payment Term', oldname='payment_term', readonly=True)
fiscal_position_id = fields.Many2one('account.fiscal.position', oldname='fiscal_position', string='Fiscal Position', readonly=True)
currency_id = fields.Many2one('res.currency', string='Currency', readonly=True)
categ_id = fields.Many2one('product.category', string='Product Category', readonly=True)
journal_id = fields.Many2one('account.journal', string='Journal', readonly=True)
partner_id = fields.Many2one('res.partner', string='Partner', readonly=True)
commercial_partner_id = fields.Many2one('res.partner', string='Partner Company', help="Commercial Entity")
company_id = fields.Many2one('res.company', string='Company', readonly=True)
user_id = fields.Many2one('res.users', string='Salesperson', readonly=True)
# --- Monetary amounts; user_currency_* variants are computed on the fly
#     in the user's company currency ---
price_total = fields.Float(string='Total Without Tax', readonly=True)
user_currency_price_total = fields.Float(string="Total Without Tax", compute='_compute_amounts_in_user_currency', digits=0)
price_average = fields.Float(string='Average Price', readonly=True, group_operator="avg")
user_currency_price_average = fields.Float(string="Average Price", compute='_compute_amounts_in_user_currency', digits=0)
currency_rate = fields.Float(string='Currency Rate', readonly=True)
nbr = fields.Integer(string='# of Invoices', readonly=True) # TDE FIXME master: rename into nbr_lines
# --- Invoice classification and lifecycle ---
type = fields.Selection([
    ('out_invoice', 'Customer Invoice'),
    ('in_invoice', 'Vendor Bill'),
    ('out_refund', 'Customer Refund'),
    ('in_refund', 'Vendor Refund'),
], readonly=True)
state = fields.Selection([
    ('draft', 'Draft'),
    ('proforma', 'Pro-forma'),
    ('proforma2', 'Pro-forma'),
    ('open', 'Open'),
    ('paid', 'Done'),
    ('cancel', 'Cancelled')
], string='Invoice Status', readonly=True)
date_due = fields.Date(string='Due Date', readonly=True)
account_id = fields.Many2one('account.account', string='Account', readonly=True, domain=[('deprecated', '=', False)])
account_line_id = fields.Many2one('account.account', string='Account Line', readonly=True, domain=[('deprecated', '=', False)])
partner_bank_id = fields.Many2one('res.partner.bank', string='Bank Account', readonly=True)
residual = fields.Float(string='Total Residual', readonly=True)
user_currency_residual = fields.Float(string="Total Residual", compute='_compute_amounts_in_user_currency', digits=0)
country_id = fields.Many2one('res.country', string='Country of the Partner Company')
# Default sort for the report view.
_order = 'date desc'

# Source models/fields this SQL view reads; the ORM rebuilds the view
# when any of them changes. Bug fix: removed stray ' | ' artifacts that
# split the 'account_id' string and the 'product.product' key.
_depends = {
    'account.invoice': [
        'account_id', 'amount_total_company_signed', 'commercial_partner_id', 'company_id',
        'currency_id', 'date_due', 'date_invoice', 'fiscal_position_id',
        'journal_id', 'partner_bank_id', 'partner_id', 'payment_term_id',
        'residual', 'state', 'type', 'user_id',
    ],
    'account.invoice.line': [
        'account_id', 'invoice_id', 'price_subtotal', 'product_id',
        'quantity', 'uom_id', 'account_analytic_id',
    ],
    'product.product': ['product_tmpl_id'],
    'product.template': ['categ_id'],
    'product.uom': ['category_id', 'factor', 'name', 'uom_type'],
    'res.currency.rate': ['currency_id', 'name'],
    'res.partner': ['country_id'],
}
def _select(self):
    """Outer SELECT of the report view: re-exposes the subquery ('sub')
    columns and joins in the currency rate, defaulting to 1."""
    select_str = """
SELECT sub.id, sub.date, sub.product_id, sub.partner_id, sub.country_id, sub.account_analytic_id,
sub.payment_term_id, sub.uom_name, sub.currency_id, sub.journal_id,
sub.fiscal_position_id, sub.user_id, sub.company_id, sub.nbr, sub.type, sub.state,
sub.categ_id, sub.date_due, sub.account_id, sub.account_line_id, sub.partner_bank_id,
sub.product_qty, sub.price_total as price_total, sub.price_average as price_average,
COALESCE(cr.rate, 1) as currency_rate, sub.residual as residual, sub.commercial_partner_id as commercial_partner_id
"""
    return select_str
def _sub_select(self):
    """Inner SELECT: aggregates invoice lines per invoice/product/etc.

    Quantities and averages are sign-flipped for refund/vendor-bill
    types and converted between line and template units of measure
    (u = line UoM, u2 = template reference UoM).
    """
    select_str = """
SELECT min(ail.id) AS id,
ai.date_invoice AS date,
ail.product_id, ai.partner_id, ai.payment_term_id, ail.account_analytic_id,
u2.name AS uom_name,
ai.currency_id, ai.journal_id, ai.fiscal_position_id, ai.user_id, ai.company_id,
count(ail.*) AS nbr,
ai.type, ai.state, pt.categ_id, ai.date_due, ai.account_id, ail.account_id AS account_line_id,
ai.partner_bank_id,
SUM(CASE
 WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
 THEN (- ail.quantity) / u.factor * u2.factor
 ELSE ail.quantity / u.factor * u2.factor
 END) AS product_qty,
SUM(ail.price_subtotal_signed) AS price_total,
SUM(ail.price_subtotal_signed) / CASE
 WHEN SUM(ail.quantity / u.factor * u2.factor) <> 0::numeric
 THEN CASE
 WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
 THEN SUM((- ail.quantity) / u.factor * u2.factor)
 ELSE SUM(ail.quantity / u.factor * u2.factor)
 END
 ELSE 1::numeric
 END AS price_average,
ai.residual_company_signed / (SELECT count(*) FROM account_invoice_line l where invoice_id = ai.id) *
count(*) AS residual,
ai.commercial_partner_id as commercial_partner_id,
partner.country_id
"""
    return select_str
def _from(self):
    """FROM clause joining invoice lines to invoices, partners, products
    and both units of measure used by the quantity conversion."""
    from_str = """
FROM account_invoice_line ail
JOIN account_invoice ai ON ai.id = ail.invoice_id
JOIN res_partner partner ON ai.commercial_partner_id = partner.id
LEFT JOIN product_product pr ON pr.id = ail.product_id
left JOIN product_template pt ON pt.id = pr.product_tmpl_id
LEFT JOIN product_uom u ON u.id = ail.uom_id
LEFT JOIN product_uom u2 ON u2.id = pt.uom_id
"""
    return from_str
def _group_by(self):
group_by_str = """
GROUP BY ail.product_id, ail.account_analytic_id, ai.date_invoice, ai.id,
ai.partner_id, ai.payment_term_id, u2.name, u2.id, ai.currency_id, ai.journal_id,
ai.fiscal_position_id, ai.user_id, ai.company_id, ai.type, ai.state, pt.categ_id,
ai.date_due, ai.account_id, ail.account_id, ai.partner_bank_id, ai.residual_company_sig |
mary_key=True)),
('name', models.CharField(help_text=b'This field is the actual domain name. This is the field that powerDNS matches to when it gets a request. The domain name should be in the format of: domainname.TLD.', unique=True, max_length=255, verbose_name='Name')),
('master', models.CharField(default=None, max_length=128, null=True, verbose_name='Master', help_text=b'This describes the master nameserver from which this domain should be slaved.')),
('last_check', models.IntegerField(default=None, help_text=b'Last time this domain was checked for freshness.', null=True, verbose_name='Last check')),
('type', models.CharField(help_text=b'Type of the domain.', max_length=6, verbose_name='Type', db_index=True, choices=[(b'MASTER', b'MASTER'), (b'SLAVE', b'SLAVE'), (b'NATIVE', b'NATIVE'), (b'SUPERSLAVE', b'SUPERSLAVE')])),
('notified_serial', models.IntegerField(default=None, help_text=b'The last notified serial of a master domain. This is updated from the SOA record of the domain.', null=True, verbose_name='Notified serial')),
('account', models.CharField(default=None, max_length=40, null=True, verbose_name='Account', help_text=b'Determine if a certain host is a supermaster for a certain domain name.')),
('user', models.IntegerField(default=None, help_text=b'Field representing the user ID responsible for the domain. Added by Erigones.', null=True, verbose_name='User', db_index=True)),
('desc', models.CharField(help_text=b'Added by Erigones.', max_length=128, verbose_name='Description', blank=True)),
('access', models.SmallIntegerField(default=3, help_text=b'Added by Erigones.', verbose_name='Access' | , choices=[(1, 'Public'), (3, 'Private')])),
('created', models.DateTimeField(auto_now_add=True, help_text=b'Added by Erigones.', null=True, verbose_name='Created')),
('changed', models.DateTimeField(auto_now=True, help_text=b'Added by Erigones.', null=True, verbose_name='Last changed')),
('dc_bound', | models.IntegerField(default=None, help_text=b'Datacenter ID used for DC-bound DNS records. Added by Erigones.', null=True, verbose_name='Datacenter')),
],
options={
'db_table': 'domains',
'verbose_name': 'Domain',
'verbose_name_plural': 'Domains',
},
),
migrations.CreateModel(
name='Record',
fields=[
('id', models.AutoField(help_text=b'This field is used to easily manage the records with this number as an unique handle.', serialize=False, primary_key=True)),
('name', models.CharField(default=None, max_length=255, help_text=b'What URI the dns-server should pick up on. For example www.test.com.', null=True, verbose_name='Name', db_index=True)),
('type', models.CharField(default=None, choices=[(b'SOA', b'SOA'), (b'NS', b'NS'), (b'MX', b'MX'), (b'A', b'A'), (b'AAAA', b'AAAA'), (b'CNAME', b'CNAME'), (b'TXT', b'TXT'), (b'PTR', b'PTR'), (b'SRV', b'SRV'), (b'SPF', b'SPF'), (b'HINFO', b'HINFO'), (b'NAPTR', b'NAPTR'), (b'SSHFP', b'SSHFP'), (b'RP', b'RP'), (b'LOC', b'LOC'), (b'KEY', b'KEY'), (b'CERT', b'CERT'), (b'TLSA', b'TLSA')], max_length=6, help_text=b'The ASCII representation of the qtype of this record.', null=True, verbose_name='Type')),
('content', models.CharField(default=None, max_length=65535, null=True, verbose_name='Content', help_text=b'Is the answer of the DNS-query and the content depend on the type field.')),
('ttl', models.IntegerField(default=None, help_text=b'How long the DNS-client are allowed to remember this record. Also known as Time To Live (TTL) This value is in seconds.', null=True, verbose_name='TTL')),
('prio', models.IntegerField(default=None, help_text=b'This field sets the priority of an MX-field.', null=True, verbose_name='Priority')),
('change_date', models.IntegerField(default=None, help_text=b'Timestamp for the last update.', null=True, verbose_name='Changed')),
('disabled', models.BooleanField(default=False, help_text=b'If set to true, this record is hidden from DNS clients, but can still be modified from the REST API.', verbose_name='Disabled?')),
('ordername', models.CharField(default=None, max_length=255, null=True, verbose_name='Ordername')),
('auth', models.BooleanField(default=True, verbose_name='Auth')),
('domain', models.ForeignKey(db_constraint=False, db_column=b'domain_id', default=None, to='pdns.Domain', help_text=b'This field binds the current record to the unique handle(the id-field) in the domains-table.', null=True)),
],
options={
'db_table': 'records',
'verbose_name': 'Record',
'verbose_name_plural': 'Records',
},
),
migrations.AlterIndexTogether(
name='record',
index_together=set([('name', 'type')]),
),
# Update domains table
migrations.RunSQL("""
ALTER TABLE domains ADD CONSTRAINT c_lowercase_name CHECK (((name)::TEXT = lower((name)::TEXT)));
ALTER TABLE domains ALTER COLUMN "access" SET DEFAULT 3;
ALTER TABLE domains ALTER COLUMN "desc" SET DEFAULT '';
ALTER TABLE domains ALTER COLUMN "user" SET DEFAULT 1;
GRANT ALL ON domains TO pdns;
GRANT ALL ON domains_id_seq TO pdns;
"""),
# Update records table
migrations.RunSQL("""
ALTER TABLE records ADD CONSTRAINT c_lowercase_name CHECK (((name)::TEXT = lower((name)::TEXT)));
ALTER TABLE records ADD CONSTRAINT domain_exists FOREIGN KEY(domain_id) REFERENCES domains(id) ON DELETE CASCADE;
ALTER TABLE records ALTER COLUMN "disabled" SET DEFAULT false;
ALTER TABLE records ALTER COLUMN "auth" SET DEFAULT false;
CREATE INDEX recordorder ON records (domain_id, ordername text_pattern_ops);
GRANT ALL ON records TO pdns;
GRANT ALL ON records_id_seq TO pdns;
"""),
# Create other PowerDNS tables
migrations.RunSQL("""
CREATE TABLE supermasters (
ip INET NOT NULL,
nameserver VARCHAR(255) NOT NULL,
account VARCHAR(40) NOT NULL,
PRIMARY KEY(ip, nameserver)
);
GRANT ALL ON supermasters TO pdns;
CREATE TABLE comments (
id SERIAL PRIMARY KEY,
domain_id INT NOT NULL,
name VARCHAR(255) NOT NULL,
type VARCHAR(10) NOT NULL,
modified_at INT NOT NULL,
account VARCHAR(40) DEFAULT NULL,
comment VARCHAR(65535) NOT NULL,
CONSTRAINT domain_exists
FOREIGN KEY(domain_id) REFERENCES domains(id)
ON DELETE CASCADE,
CONSTRAINT c_lowercase_name CHECK (((name)::TEXT = LOWER((name)::TEXT)))
);
CREATE INDEX comments_domain_id_idx ON comments (domain_id);
CREATE INDEX comments_name_type_idx ON comments (name, type);
CREATE INDEX comments_order_idx ON comments (domain_id, modified_at);
GRANT ALL ON comments TO pdns;
GRANT ALL ON comments_id_seq TO pdns;
CREATE TABLE domainmetadata (
id SERIAL PRIMARY KEY,
domain_id INT REFERENCES domains(id) ON DELETE CASCADE,
kind VARCHAR(32),
content TEXT
);
CREATE INDEX domainidmetaindex ON domainmetadata(domain_id);
GRANT ALL ON domainmetadata TO pdns;
GRANT ALL ON domainmetadata_id_seq TO pdns;
CREATE TABLE cryptokeys (
id SERIAL PRIMARY KEY,
domain_id INT REFERENCES domains(id) ON DELETE CASCADE,
flags INT NOT NULL,
active BOOL,
content TEXT
);
CREATE INDEX domainidindex ON cryptokeys(domain_id);
GRANT ALL ON cryptokeys TO pdns;
GRANT ALL ON cryptokeys_id_seq TO pdns;
CREATE TABLE tsigkeys (
|
from d | jango import forms
__all__ = ('RatingField',)
class RatingField(forms.ChoiceField):
    """Form field for rating choices; currently identical to ``ChoiceField``.

    Kept as a distinct subclass so widgets/validation can be specialised
    later without changing call sites.
    """
    pass
# License: MIT License https://github.com/passalis/sef/blob/master/LICENSE.txt
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import sklearn
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sef_dr.classification import evaluate_svm
from sef_dr.datasets import load_mnist
from sef_dr.linear | import LinearSEF
def supervised_reduction(method=None):
    """Train a supervised projection on MNIST and report SVM test accuracy.

    :param method: one of 'lda', 's-lda', or 's-lda-2x'
    :raises ValueError: if *method* is not one of the supported values
    """
    # Load data and init seeds
    train_data, train_labels, test_data, test_labels = load_mnist(dataset_path='data')
    np.random.seed(1)
    sklearn.utils.check_random_state(1)

    n_train = 5000
    n_classes = len(np.unique(train_labels))

    if method == 'lda':
        proj = LinearDiscriminantAnalysis(n_components=n_classes - 1)
        proj.fit(train_data[:n_train, :], train_labels[:n_train])
    elif method == 's-lda':
        proj = LinearSEF(train_data.shape[1], output_dimensionality=(n_classes - 1))
        proj.cuda()
        loss = proj.fit(data=train_data[:n_train, :], target_labels=train_labels[:n_train], epochs=50,
                        target='supervised', batch_size=128, regularizer_weight=1, learning_rate=0.001, verbose=True)
    elif method == 's-lda-2x':
        # SEF output dimensions are not limited
        proj = LinearSEF(train_data.shape[1], output_dimensionality=2 * (n_classes - 1))
        proj.cuda()
        loss = proj.fit(data=train_data[:n_train, :], target_labels=train_labels[:n_train], epochs=50,
                        target='supervised', batch_size=128, regularizer_weight=1, learning_rate=0.001, verbose=True)
    else:
        # BUG FIX: an unrecognised method previously fell through to the
        # evaluation below with 'proj' undefined, raising a NameError.
        raise ValueError("Unknown method: %r" % (method,))

    acc = evaluate_svm(proj.transform(train_data[:n_train, :]), train_labels[:n_train],
                       proj.transform(test_data), test_labels)

    print("Method: ", method, " Test accuracy: ", 100 * acc, " %")
if __name__ == '__main__':
    # Run every supported reduction method in sequence.
    for banner, method_name in (("LDA: ", 'lda'),
                                ("S-LDA: ", 's-lda'),
                                ("S-LDA (2x): ", 's-lda-2x')):
        print(banner)
        supervised_reduction(method_name)
|
__(self, main_window=None):
self.main_window = main_window
def get_current_config(self):
current_config_dict = {}
current_config_dict['h1'] = self.__get_current_table_config(table='h1')
current_config_dict['h2'] = self.__get_current_table_config(table='h2')
current_config_dict['h3'] = self.__get_current_table_config(table='h3')
return current_config_dict
def __get_current_table_config(self, table='h1'):
if table == 'h1':
table_ui = self.main_window.processing_ui.h1_table
elif table == 'h2':
table_ui = self.main_window.processing_ui.h2_table
else:
table_ui = self.main_window.processing_ui.h3_table
nbr_column = table_ui.columnCount()
_dict = {}
for _col in np.arange(nbr_column):
_width = table_ui.columnWidth(_col)
_visible = not table_ui.isColumnHidden(_col)
_dict[_col] = {'width': _width,
'visible': _visible}
return _dict
def block_table_header_ui(self, block_all=True,
unblock_all=False,
block_h1=False,
block_h2=False,
block_h3=False):
if block_all:
block_h1 = True
block_h2 = True
block_h3 = True
if unblock_all:
block_h1 = False
block_h2 = False
block_h3 = False
self.main_window.h1_header_table.blockSignals(block_h1)
self.main_window.h2_header_table.blockSignals(block_h2)
self.main_window.h3_header_table.blockSignals(block_h3)
def disconnect_table_ui(self, block_all=True,
unblock_all=False,
block_h1=False,
block_h2=False,
block_h3=False):
if block_all:
block_h1 = True
block_h2 = True
block_h3 = True
if unblock_all:
block_h1 = False
block_h2 = False
block_h3 = False
if block_h1:
self.main_window.h1_header_table.sectionResized.disconnect(
self.main_window.resizing_h1)
else:
self.main_window.h1_header_table.sectionResized.connect(
self.main_window.resizing_h1)
if block_h2:
self.main_window.h2_header_table.sectionResized.disconnect(
self.main_window.resizing_h2)
else:
self.main_window.h2_header_table.sectionResized.connect(
self.main_window.resizing_h2)
if block_h3:
self.main_window.h3_header_table.sectionResized.disconnect(
self.main_window.resizing_h3)
else:
self.main_window.h3_header_table.sectionResized.connect(
self.main_window.resizing_h3)
def get_h2_children_from_h1(self, h1=-1):
if h1 == -1:
return None
table_columns_links = self.main_window.table_columns_links
list_h2_values = table_columns_links['h2']
return list_h2_values[h1]
def get_last_h2_visible(self, list_h2=[]):
if list_h2 == []:
return None
for _h2 in list_h2[::-1]:
if self.main_window.processing_ui.h2_table.isColumnHidden(_h2):
continue
else:
return _h2
return None
def get_h3_children_from_h2(self, h2=-1):
if h2 == -1:
return None
table_columns_links = self.main_window.table_columns_links
list_h3_values = table_columns_links['h3']
list_h2_values = table_columns_links['h2']
index_h2 = -1
index_h1 = 0
for h2_values in list_h2_values:
if h2 in h2_values:
index_h2 = h2_values.index(h2)
break
index_h1 += 1
if index_h2 == -1:
return []
return list_h3_values[index_h1][index_h2]
def get_last_h3_visible(self, list_h3=[]):
if list_h3 == []:
return None
for _h3 in list_h3[::-1]:
if self.main_window.processing_ui.h3_table.isColumnHidden(_h3):
continue
else:
return _h3
return None
def get_size_column(self, h1=None, h2=None, h3=None):
table_ui = self.get_table_ui(h1=h1, h2=h2, h3=h3)
h = self.get_master_h(h1=h1, h2=h2, h3=h3)
return table_ui.columnWidth(h)
def get_table_ui(self, h1=None, h2=None, h3=None):
'''h1, h2 or h3 are column indexes'''
if h1 is not None:
table_ui = self.main_window.processing_ui.h1_table
elif h2 is not None:
table_ui = self.main_window.processing_ui.h2_table
elif h3 is not None:
table_ui = self.main_window.processing_ui.h3_table
else:
table_ui = None
return table_ui
def get_master_h(self, h1=None, h2=None, h3=None):
'''return the only defined column index from h1, h2 or h3 table'''
if h1 is not None:
return h1
elif h2 is not None:
return h2
elif h3 is not None:
return h3
else:
return None
def set_size_column(self, h1=None, h2=None, h3=None, width=None):
if width is None:
return
table_ui = self.get_table_ui(h1=h1, h2=h2, h3=h3)
h = self.get_master_h(h1=h1, h2=h2, h3=h3)
table_ui.setColumnWidth(h, width)
def get_h1_parent_from_h2(self, h2=-1):
if h2 == -1:
return None
table_columns_links = self.main_window.table_columns_links
list_h2_values = table_columns_links['h2']
h1_parent_index = 0
for h2_values in list_h2_values:
if h2 in h2_values:
return h1_parent_index
| h1_parent_index += 1
return None
def resizing_h1_using_all_visible_h2(self, h1=None):
'''automatically resize the h1 using all its h2 visible '''
h2_children = self.get_h2_children_from_h1(h1=h1)
list_visible_h2 = self.get_all_h2_visible(lis | t_h2=h2_children)
if list_visible_h2 is None:
return
full_size_h2 = 0
for _h2 in list_visible_h2:
full_size_h2 += self.get_size_column(h2=_h2)
self.main_window.processing_ui.h1_table.setColumnWidth(
h1, full_size_h2)
def get_h_columns_from_item_name(self, item_name=None):
# h_columns_affected = {'h1': [],
# 'h2': [],
# 'h3': [],
# 'list_tree_ui': [],
# 'list_parent_ui': []}
if item_name is None:
return
h1_columns = []
h2_columns = []
h3_columns = []
list_tree_ui = []
list_parent_ui = []
h1_global_counter = 0
h2_global_counter = 0
h3_global_counter = 0
td = self.main_window.tree_dict
for h1_global_counter, _key_h1 in enumerate(td.keys()):
if item_name == _key_h1:
# get all h2 and h3 of this h1
if td[_key_h1]['children']:
for _key_h2 in td[_key_h1]['children']:
if td[_key_h1]['children'][_key_h2]['children']:
list_tree_ui.append(
td[_key_h1]['children'][_key_h2]['ui'])
for _key_h3 in td[_key_h1]['children'][_key_h2]['children'].keys(
):
h3_columns.append(h3_global_counter)
list_tree_ui.append(
td[_key_h1]['children'][_key_h2]['children'][_key_h3]['ui'])
h3_global_counter += 1
else:
h2_columns.append(h2_global_counter)
list_tree_ui.append(
td[_ |
# -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2015 Jérémy Bobbio <lunar@debian.org>
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <http://www.gnu.org/licenses/>.
import os.path
import shutil
import pytest
from diffoscope.comparators import specialize
from diffoscope.comparators.binary import FilesystemFile, NonExistingFile
from diffoscope.comparators.debian import DotChangesFile, DotDscFile
from diffoscope.config import Config
from diffoscope.presenters.text import output_text
TEST_DOT_CHANGES_FILE1_PATH = os.path.join(os.path.dirname(__file__), '../data/test1.changes')
TEST_DOT_CHANGES_FILE2_PATH = os.path.join(os.path.dirname(__file__), '../data/test2.changes')
TEST_DEB_FILE1_PATH = os.path.join(os.path.dirname(__file__), '../data/test1.deb')
TEST_DEB_FILE2_PATH = os.path.join(os.path.dirname(__file__), '../data/test2.deb')
@pytest.fixture
def dot_changes1(tmpdir):
    # Stage a .changes file plus the .deb it references, then specialize it.
    work_dir = tmpdir.mkdir('a')
    dot_changes_path = str(work_dir.join('test_1.changes'))
    shutil.copy(TEST_DOT_CHANGES_FILE1_PATH, dot_changes_path)
    shutil.copy(TEST_DEB_FILE1_PATH, str(work_dir.join('test_1_all.deb')))
    return specialize(FilesystemFile(dot_changes_path))
@pytest.fixture
def dot_changes2(tmpdir):
    # Same staging as dot_changes1, but with the second set of test data.
    work_dir = tmpdir.mkdir('b')
    dot_changes_path = str(work_dir.join('test_1.changes'))
    shutil.copy(TEST_DOT_CHANGES_FILE2_PATH, dot_changes_path)
    shutil.copy(TEST_DEB_FILE2_PATH, str(work_dir.join('test_1_all.deb')))
    return specialize(FilesystemFile(dot_changes_path))
def test_dot_changes_identification(dot_changes1):
    # specialize() must recognise the fixture as a Debian .changes file.
    assert isinstance(dot_changes1, DotChangesFile)
def test_dot_changes_invalid(tmpdir):
    # A .changes file whose referenced .deb is missing must not be
    # identified as a DotChangesFile.
    work_dir = tmpdir.mkdir('a')
    dot_changes_path = str(work_dir.join('test_1.changes'))
    shutil.copy(TEST_DOT_CHANGES_FILE1_PATH, dot_changes_path)
    # we don't copy the referenced .deb
    identified = specialize(FilesystemFile(dot_changes_path))
    assert not isinstance(identified, DotChangesFile)
def test_dot_changes_no_differences(dot_changes1):
    # Comparing a file against itself must yield no difference at all.
    assert dot_changes1.compare(dot_changes1) is None
@pytest.fixture
def dot_changes_differences(dot_changes1, dot_changes2):
    # Compare the two staged .changes files and hand back the detail list.
    diff = dot_changes1.compare(dot_changes2)
    output_text(diff, print_func=print)
    return diff.details
def test_dot_changes_description(dot_changes_differences):
    # The first detail is the textual diff of the .changes control fields;
    # it must match the recorded expected diff exactly.
    assert dot_changes_differences[0]
    expected_path = os.path.join(os.path.dirname(__file__),
                                 '../data/dot_changes_description_expected_diff')
    # BUG FIX: open the fixture with a context manager instead of leaking
    # the file handle until interpreter shutdown.
    with open(expected_path) as expected_file:
        expected_diff = expected_file.read()
    assert dot_changes_differences[0].unified_diff == expected_diff
def test_dot_changes_internal_diff(dot_changes_differences):
    # Detail index 2 is the comparison of the referenced .deb payload.
    assert dot_changes_differences[2].source1 == 'test_1_all.deb'
def test_dot_changes_compare_non_existing(monkeypatch, dot_changes1):
    # With Config.general.new_file enabled, comparing against a missing file
    # reports the file as removed (diffed against /dev/null).
    monkeypatch.setattr(Config.general, 'new_file', True)
    missing = NonExistingFile('/nonexisting', dot_changes1)
    difference = dot_changes1.compare(missing)
    output_text(difference, print_func=print)
    assert difference.source2 == '/nonexisting'
    assert difference.details[-1].source2 == '/dev/null'
TEST_DOT_DSC_FILE1_PATH = os.path.join(os.path.dirname(__file__), '../data/test1.dsc')
TEST_DOT_DSC_FILE2_PATH = os.path.join(os.path.dirname(__file__), '../data/test2.dsc')
TEST_DEB_SRC1_PATH = os.path.join(os.path.dirname(__file__), '../data/test1.debsrc.tar.gz')
TEST_DEB_SRC2_PATH = os.path.join(os.path.dirname(__file__), '../data/test2.debsrc.tar.gz')
@pytest.fixture
def dot_dsc1(tmpdir):
    # Stage a .dsc file plus the source tarball it references, then specialize.
    work_dir = tmpdir.mkdir('a')
    dot_dsc_path = str(work_dir.join('test_1.dsc'))
    shutil.copy(TEST_DOT_DSC_FILE1_PATH, dot_dsc_path)
    shutil.copy(TEST_DEB_SRC1_PATH, str(work_dir.join('test_1.tar.gz')))
    return specialize(FilesystemFile(dot_dsc_path))
@pytest.fixture
def dot_dsc2(tmpdir):
    # Same staging as dot_dsc1, but with the second set of test data.
    work_dir = tmpdir.mkdir('b')
    dot_dsc_path = str(work_dir.join('test_1.dsc'))
    shutil.copy(TEST_DOT_DSC_FILE2_PATH, dot_dsc_path)
    shutil.copy(TEST_DEB_SRC2_PATH, str(work_dir.join('test_1.tar.gz')))
    return specialize(FilesystemFile(dot_dsc_path))
def test_dot_dsc_identification(dot_dsc1):
    # specialize() must recognise the fixture as a Debian .dsc file.
    assert isinstance(dot_dsc1, DotDscFile)
def test_dot_dsc_invalid(tmpdir, dot_dsc2):
    # NOTE(review): the dot_dsc2 fixture parameter is never used here —
    # confirm whether it is only requested for its staging side effects.
    # NOTE(review): this copies a .changes file into the .dsc path,
    # presumably to produce invalid .dsc content — verify intent.
    tmpdir.mkdir('a')
    dot_dsc_path = str(tmpdir.join('a/test_1.dsc'))
    shutil.copy(TEST_DOT_CHANGES_FILE1_PATH, dot_dsc_path)
    # we don't copy the referenced .tar.gz
    identified = specialize(FilesystemFile(dot_dsc_path))
    assert not isinstance(identified, DotDscFile)
def test_dot_dsc_no_differences(dot_dsc1):
    # Comparing a file against itself must yield no difference at all.
    assert dot_dsc1.compare(dot_dsc1) is None
@pytest.fixture
def dot_dsc_differences(dot_dsc1, dot_dsc2):
    # Compare the two staged .dsc files and hand back the detail list.
    diff = dot_dsc1.compare(dot_dsc2)
    output_text(diff, print_func=print)
    return diff.details
def test_dot_dsc_internal_diff(dot_dsc_differences):
    # Detail index 1 is the comparison of the referenced source tarball.
    assert dot_dsc_differences[1].source1 == 'test_1.tar.gz'
def test_dot_dsc_compare_non_existing(monkeypatch, dot_dsc1):
    # With Config.general.new_file enabled, comparing against a missing file
    # reports the file as removed (diffed against /dev/null).
    monkeypatch.setattr(Config.general, 'new_file', True)
    missing = NonExistingFile('/nonexisting', dot_dsc1)
    difference = dot_dsc1.compare(missing)
    output_text(difference, print_func=print)
    assert difference.source2 == '/nonexisting'
    assert difference.details[-1].source2 == '/dev/null'
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import mode | ls, migrations
class Migration(migrations.Migration):
    """Initial migration: create the Stat model."""

    dependencies = [
    ]

    operations = [
        # Stat rows record a timestamped snapshot of transferred bytes
        # (up/down) and accumulated live time.
        migrations.CreateModel(
            name='Stat',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('up', models.BigIntegerField()),
                ('down', models.BigIntegerField()),
                ('live_time', models.BigIntegerField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
|
tApplicable')
result = os.path.getctime(py_path)
elif time_type == 'Change':
if os.name != 'posix':
return Expression('Missing', 'NotApplicable')
result = os.path.getctime(py_path)
elif time_type == 'Modification':
result = os.path.getmtime(py_path)
else:
evaluation.message('FileDate', 'datetype')
return
# Offset for system epoch
epochtime = Expression('AbsoluteTime', time.strftime(
"%Y-%m-%d %H:%M",
time.gmtime(0))).to_python(n_evaluation=evaluation)
result += epochtime
return Expression('DateList', from_python(result))
    # One-argument form FileDate[path]: delegate with no date type so the
    # full rule supplies the default behaviour.
    def apply_default(self, path, evaluation):
        'FileDate[path_]'
        return self.apply(path, None, evaluation)
class SetFileDate(Builtin):
    """
    <dl>
    <dt>'SetFileDate["$file$"]'
        <dd>set the file access and modification dates of $file$ to the current date.
    <dt>'SetFileDate["$file$", $date$]'
        <dd>set the file access and modification dates of $file$ to the specified date list.
    <dt>'SetFileDate["$file$", $date$, "$type$"]'
        <dd>set the file date of $file$ to the specified date list.
        The "$type$" can be one of "$Access$", "$Creation$", "$Modification$", or 'All'.
    </dl>

    Create a temporary file (for example purposes)
    >> tmpfilename = $TemporaryDirectory <> "/tmp0";
    >> Close[OpenWrite[tmpfilename]];

    >> SetFileDate[tmpfilename, {2000, 1, 1, 0, 0, 0.}, "Access"];

    >> FileDate[tmpfilename, "Access"]
     = {2000, 1, 1, 0, 0, 0.}

    #> SetFileDate[tmpfilename, {2001, 1, 1, 0, 0, 0.}];
    #> FileDate[tmpfilename, "Access"]
     = {2001, 1, 1, 0, 0, 0.}

    #> SetFileDate[tmpfilename]
    #> FileDate[tmpfilename, "Access"]
     = {...}

    #> DeleteFile[tmpfilename]

    #> SetFileDate["MathicsNonExample"]
     : File not found during SetFileDate[MathicsNonExample].
     = $Failed
    """

    messages = {
        'fstr': ('File specification `1` is not a string of one or '
                 'more characters.'),
        'nffil': 'File not found during `1`.',
        'fdate': ('Date specification should be either the number of seconds '
                  'since January 1, 1900 or a {y, m, d, h, m, s} list.'),
        'datetype': ('Date type a should be "Access", "Modification", '
                     '"Creation" (Windows only), or All.'),
        'nocreationunix': ('The Creation date of a file cannot be set on '
                           'Macintosh or Unix.'),
    }

    # BUG FIX: ('Protected') without a trailing comma is a plain string,
    # not a one-element tuple.
    attributes = ('Protected',)

    def apply(self, filename, datelist, attribute, evaluation):
        'SetFileDate[filename_, datelist_, attribute_]'

        py_filename = filename.to_python()

        # Build the default date list and the expression used in messages.
        if datelist is None:
            py_datelist = Expression(
                'DateList').evaluate(evaluation).to_python()
            expr = Expression('SetFileDate', filename)
        else:
            py_datelist = datelist.to_python()

        if attribute is None:
            py_attr = 'All'
            if datelist is not None:
                expr = Expression('SetFileDate', filename, datelist)
        else:
            py_attr = attribute.to_python()
            expr = Expression('SetFileDate', filename, datelist, attribute)

        # Check filename
        if not (isinstance(py_filename, basestring) and
                py_filename[0] == py_filename[-1] == '"'):
            evaluation.message('SetFileDate', 'fstr', filename)
            return
        py_filename = path_search(py_filename[1:-1])

        if py_filename is None:
            evaluation.message('SetFileDate', 'nffil', expr)
            return Symbol('$Failed')

        # Check datelist: {y, m, d, h, m} ints plus a float seconds field.
        if not (isinstance(py_datelist, list) and len(py_datelist) == 6 and
                all(isinstance(d, int) for d in py_datelist[:-1]) and
                isinstance(py_datelist[-1], float)):
            evaluation.message('SetFileDate', 'fdate', expr)
            # BUG FIX: previously fell through and kept evaluating with the
            # invalid date list; bail out like the other validation branches.
            return

        # Check attribute
        if py_attr not in ['"Access"', '"Creation"', '"Modification"', 'All']:
            evaluation.message('SetFileDate', 'datetype')
            return

        # Convert the date list to seconds since the system epoch.
        epochtime = Expression('AbsoluteTime', time.strftime(
            "%Y-%m-%d %H:%M", time.gmtime(0))).evaluate(evaluation).to_python()

        stattime = Expression('AbsoluteTime', from_python(py_datelist))
        stattime = stattime.to_python(n_evaluation=evaluation)

        stattime -= epochtime

        try:
            os.stat(py_filename)

            if py_attr == '"Access"':
                os.utime(py_filename, (
                    stattime, os.path.getatime(py_filename)))
            if py_attr == '"Creation"':
                if os.name == 'posix':
                    evaluation.message('SetFileDate', 'nocreationunix')
                    return Symbol('$Failed')
                else:
                    # TODO: Note: This is windows only
                    return Symbol('$Failed')
            if py_attr == '"Modification"':
                os.utime(py_filename, (os.path.getatime(
                    py_filename), stattime))
            if py_attr == 'All':
                os.utime(py_filename, (stattime, stattime))
        except OSError as e:
            # Parenthesised print works under both Python 2 and 3.
            print(e)
            # evaluation.message(...)
            return Symbol('$Failed')

        return Symbol('Null')

    def apply_1arg(self, filename, evaluation):
        'SetFileDate[filename_]'
        return self.apply(filename, None, None, evaluation)

    def apply_2arg(self, filename, datelist, evaluation):
        'SetFileDate[filename_, datelist_]'
        return self.apply(filename, datelist, None, evaluation)
class CopyFile(Builtin):
    """
    <dl>
    <dt>'CopyFile["$file1$", "$file2$"]'
        <dd>copies $file1$ to $file2$.
    </dl>

    >> CopyFile["ExampleData/sunflowers.jpg", "MathicsSunflowers.jpg"]
     = MathicsSunflowers.jpg
    >> DeleteFile["MathicsSunflowers.jpg"]
    """

    messages = {
        'filex': 'Cannot overwrite existing file `1`.',
        'fstr': ('File specification `1` is not a string of '
                 'one or more characters.'),
        'nffil': 'File not found during `1`.',
    }

    # BUG FIX: ('Protected') without a trailing comma is a plain string,
    # not a one-element tuple.
    attributes = ('Protected',)

    def apply(self, source, dest, evaluation):
        'CopyFile[source_, dest_]'

        py_source = source.to_python()
        py_dest = dest.to_python()

        # Check filenames: both must be quoted, non-empty strings.
        if not (isinstance(py_source, basestring) and
                py_source[0] == py_source[-1] == '"'):
            evaluation.message('CopyFile', 'fstr', source)
            return
        if not (isinstance(py_dest, basestring) and
                py_dest[0] == py_dest[-1] == '"'):
            evaluation.message('CopyFile', 'fstr', dest)
            return

        py_source = py_source[1:-1]
        py_dest = py_dest[1:-1]

        py_source = path_search(py_source)

        if py_source is None:
            # BUG FIX: a missing source file is a "file not found" condition;
            # previously this issued 'filex' ("Cannot overwrite existing
            # file"), which is the wrong message for this branch.
            evaluation.message('CopyFile', 'nffil', Expression(
                'CopyFile', source, dest))
            return Symbol('$Failed')
        if os.path.exists(py_dest):
            evaluation.message('CopyFile', 'filex', dest)
            return Symbol('$Failed')

        try:
            shutil.copy(py_source, py_dest)
        except IOError:
            evaluation.message('CopyFile', 'nffil', Expression(
                'CopyFile', source, dest))
            return Symbol('$Failed')

        return dest
class RenameFile(Builtin):
"""
<dl>
<dt>'RenameFile["$file1$", "$file2$"]'
<dd>renames $file1$ to $file2$.
</dl>
>> CopyFile["ExampleData/sunflowers.jpg", "MathicsSunflowers.jpg"]
= MathicsSunflowers.jpg
>> RenameFile["MathicsSunflowers.jpg", "MathicsSunnyFlowers.jpg"]
= MathicsSunnyFlowers.jpg
>> DeleteFile["MathicsSunnyFlowers.jpg"]
"""
messages = {
'filex': 'Cannot overwrite existing file `1`.',
'fstr': ('File specification `1` is not a string of '
'one or more characters.'),
'nffil': 'File not found during `1`.',
}
attributes = ('Protected')
def apply(self, source, dest, evaluation):
'RenameFile[source_, dest_]'
py_so |
# -*- coding: utf-8 -*-
"""
This generate diagram in .png and .svg from neo.core
Author: sgarcia
"""
from datetime import datetime
import numpy as np
import quantities as pq
from matplotlib import pyplot
from matplotlib.patches import Rectangle, ArrowStyle, FancyArrowPatch
from matplotlib.font_manager import FontProperties
from neo.test.generate_datasets import fake_neo
line_heigth = .22
fontsize = 10.5
left_text_shift = .1
dpi = 100
def get_rect_height(name, obj):
    '''
    calculate rectangle height
    (1.5 title lines plus one line per attribute and per relationship)
    '''
    nlines = 1.5
    for attr_list in ('_all_attrs', '_single_child_objects',
                      '_multi_child_objects', '_multi_parent_objects'):
        nlines += len(getattr(obj, attr_list, []))
    return nlines * line_heigth
def annotate(ax, coord1, coord2, connectionstyle, color, alpha):
    """Draw a fancy connection arrow between two diagram coordinates."""
    arrow_props = dict(arrowstyle='fancy',
                       shrinkA=.3, shrinkB=.3,
                       fc=color, ec=color,
                       connectionstyle=connectionstyle,
                       alpha=alpha)
    box_props = dict(boxstyle="square", fc="w")
    annotation = ax.annotate('', coord1, coord2,
                             ha="right", va="center",
                             size=fontsize,
                             arrowprops=arrow_props,
                             bbox=box_props)
    # Push the arrow behind the boxes it connects.
    annotation.set_zorder(-4)
def calc_coordinates(pos, height):
    """Return the anchor point for a connection arrow on a rectangle.

    The anchor is the rectangle's left edge (pos[0]) at the vertical centre
    of its title line, i.e. half a line below the rectangle's top.
    """
    # CLEANUP: the original also assigned `x = pos[0]` but never used it.
    y = pos[1] + height - line_heigth * .5
    return pos[0], y
def generate_diagram(filename, rect_pos, rect_width, figsize):
    """Draw the neo class diagram and save it to *filename*.

    :param filename: output image path (format inferred from extension)
    :param rect_pos: dict mapping class name -> (x, y) of its box
    :param rect_width: width of every class box
    :param figsize: (width, height) of the figure, also used as axis limits
    """
    # NOTE(review): rw is assigned but unused below; rect_width is used
    # directly throughout.
    rw = rect_width

    fig = pyplot.figure(figsize=figsize)
    ax = fig.add_axes([0, 0, 1, 1])

    # Build a fake instance of every class so attributes/relationships can
    # be introspected, and precompute each box height.
    all_h = {}
    objs = {}
    for name in rect_pos:
        objs[name] = fake_neo(name)
        all_h[name] = get_rect_height(name, objs[name])

    # draw connections
    # Arrow colours/alphas per relationship kind:
    # 0 = single child (cyan), 1 = multi child (magenta), 2 = child property (yellow).
    color = ['c', 'm', 'y']
    alpha = [1., 1., 0.3]
    for name, pos in rect_pos.items():
        obj = objs[name]
        relationships = [getattr(obj, '_single_child_objects', []),
                         getattr(obj, '_multi_child_objects', []),
                         getattr(obj, '_child_properties', [])]
        for r in range(3):
            for ch_name in relationships[r]:
                x1, y1 = calc_coordinates(rect_pos[ch_name], all_h[ch_name])
                x2, y2 = calc_coordinates(pos, all_h[name])
                # Kinds 0 and 2 attach to the parent's right edge; the arc
                # direction depends on the relative vertical positions.
                if r in [0, 2]:
                    x2 += rect_width
                    connectionstyle = "arc3,rad=-0.2"
                elif y2 >= y1:
                    connectionstyle = "arc3,rad=0.7"
                else:
                    connectionstyle = "arc3,rad=-0.7"
                annotate(ax=ax, coord1=(x1, y1), coord2=(x2, y2),
                         connectionstyle=connectionstyle,
                         color=color[r], alpha=alpha[r])

    # draw boxes
    for name, pos in rect_pos.items():
        htotal = all_h[name]
        obj = objs[name]
        allrelationship = (list(getattr(obj, '_child_containers', [])) +
                           list(getattr(obj, '_multi_parent_containers', [])))

        # Outer box for the whole class.
        rect = Rectangle(pos, rect_width, htotal,
                         facecolor='w', edgecolor='k', linewidth=2.)
        ax.add_patch(rect)

        # title green
        pos2 = pos[0], pos[1]+htotal - line_heigth*1.5
        rect = Rectangle(pos2, rect_width, line_heigth*1.5,
                         facecolor='g', edgecolor='k', alpha=.5, linewidth=2.)
        ax.add_patch(rect)

        # single relationship
        relationship = getattr(obj, '_single_child_objects', [])
        pos2 = pos[1] + htotal - line_heigth*(1.5+len(relationship))
        rect_height = len(relationship)*line_heigth
        rect = Rectangle((pos[0], pos2), rect_width, rect_height,
                         facecolor='c', edgecolor='k', alpha=.5)
        ax.add_patch(rect)

        # multi relationship
        relationship = (list(getattr(obj, '_multi_child_objects', [])) +
                        list(getattr(obj, '_multi_parent_containers', [])))
        pos2 = (pos[1]+htotal - line_heigth*(1.5+len(relationship)) -
                rect_height)
        rect_height = len(relationship)*line_heigth
        rect = Rectangle((pos[0], pos2), rect_width, rect_height,
                         facecolor='m', edgecolor='k', alpha=.5)
        ax.add_patch(rect)

        # necessary attr
        pos2 = (pos[1]+htotal -
                line_heigth*(1.5+len(allrelationship) +
                             len(obj._necessary_attrs)))
        rect = Rectangle((pos[0], pos2), rect_width,
                         line_heigth*len(obj._necessary_attrs),
                         facecolor='r', edgecolor='k', alpha=.5)
        ax.add_patch(rect)

        # name
        # Classes with a _quantity_attr get a '* ' marker after their name.
        if hasattr(obj, '_quantity_attr'):
            post = '* '
        else:
            post = ''
        ax.text(pos[0]+rect_width/2., pos[1]+htotal - line_heigth*1.5/2.,
                name+post,
                horizontalalignment='center', verticalalignment='center',
                fontsize=fontsize+2,
                fontproperties=FontProperties(weight='bold'),
                )

        # relationship
        for i, relat in enumerate(allrelationship):
            ax.text(pos[0]+left_text_shift, pos[1]+htotal - line_heigth*(i+2),
                    relat+': list',
                    horizontalalignment='left', verticalalignment='center',
                    fontsize=fontsize,
                    )
        # attributes
        for i, attr in enumerate(obj._all_attrs):
            attrname, attrtype = attr[0], attr[1]
            t1 = attrname
            if (hasattr(obj, '_quantity_attr') and
                    obj._quantity_attr == attrname):
                t1 = attrname+'(object itself)'
            else:
                t1 = attrname
            # Render a human-readable type description for the attribute.
            if attrtype == pq.Quantity:
                if attr[2] == 0:
                    t2 = 'Quantity scalar'
                else:
                    t2 = 'Quantity %dD' % attr[2]
            elif attrtype == np.ndarray:
                t2 = "np.ndarray %dD dt='%s'" % (attr[2], attr[3].kind)
            elif attrtype == datetime:
                t2 = 'datetime'
            else:
                t2 = attrtype.__name__
            t = t1+' : '+t2
            ax.text(pos[0]+left_text_shift,
                    pos[1]+htotal - line_heigth*(i+len(allrelationship)+2),
                    t,
                    horizontalalignment='left', verticalalignment='center',
                    fontsize=fontsize,
                    )

    xlim, ylim = figsize
    ax.set_xlim(0, xlim)
    ax.set_ylim(0, ylim)

    ax.set_xticks([])
    ax.set_yticks([])
    fig.savefig(filename, dpi=dpi)
def generate_diagram_simple():
    """Render the simplified neo class diagram to SVG and PNG files."""
    figsize = (18, 12)
    rect_width = 3.
    # Horizontal step between columns of boxes (box width + 20% gap).
    step = rect_width * 1.2
    rect_pos = {'Block': (.5 + step * 0, 4),
                'Segment': (.5 + step * 1, .5),
                'Event': (.5 + step * 4, 3.0),
                'Epoch': (.5 + step * 4, 1.0),
                'ChannelIndex': (.5 + step * 1, 7.5),
                'Unit': (.5 + step * 2., 9.9),
                'SpikeTrain': (.5 + step * 3, 7.5),
                'IrregularlySampledSignal': (.5 + step * 3, 0.5),
                'AnalogSignal': (.5 + step * 3, 4.9),
                }
    generate_diagram('simple_generated_diagram.svg',
                     rect_pos, rect_width, figsize)
    generate_diagram('simple_generated_diagram.png',
                     rect_pos, rect_width, figsize)
if __name__ == '__main__':
    # Generate the diagrams, then display them interactively.
    generate_diagram_simple()
    pyplot.show()
|
#!/usr/local/autopkg/python
# encoding: utf-8
#
# Copyright 2015 The Pennsylvania State University.
#
"""
BESTemplater.py
Created by Matt Hansen (mah60@psu.edu) on 2015-04-30.
AutoPkg Processor for importing tasks using the BigFix RESTAPI
Updated by Rusty Myers (rzm102@psu.edu) on 2020-02-21.
Work in progress. Does not support Python3.
"""
from __future__ import absolute_import
import os
import sys
from autopkglib import Processor, ProcessorError
__all__ = ["BESTemplater"]
class BESTemplater(Processor):
    """AutoPkg Processor for rendering tasks from templates"""
    description = "Generates BigFix XML to install application."
    input_variables = {
        "template_name": {
            "required": True,
            "description":
                "Name of template file."
        },
    }
    output_variables = {
        "bes_file": {
            "description":
                "The resulting BES task rendered from the template."
        },
    }
    __doc__ = description

    def main(self):
        """Render the jinja2 template into a .bes task file on disk."""
        # Strip the last n components from a path.
        # http://stackoverflow.com/a/14150750/2626090
        uppath = lambda _path, n: os.sep.join(_path.split(os.sep)[:-n])

        # jinja2 is an optional dependency; surface a clear ProcessorError
        # rather than a bare ImportError traceback.
        try:
            from jinja2 import Environment, ChoiceLoader, FileSystemLoader
        except ImportError as err:
            raise ProcessorError("jinja2 module is not installed: %s" % err)

        # Assign variables
        template_name = self.env.get("template_name")
        name = self.env.get("NAME")
        version = self.env.get("version")
        RECIPE_DIR = self.env.get("RECIPE_DIR")
        BES_TEMPLATES = self.env.get("BES_TEMPLATES")

        # Search the conventional template locations, in priority order.
        jinja_env = Environment(loader=ChoiceLoader([
            FileSystemLoader(os.getcwd()),
            FileSystemLoader('templates'),
            FileSystemLoader(os.path.join(RECIPE_DIR, 'templates')),
            FileSystemLoader(os.path.join(uppath(RECIPE_DIR, 1), 'templates')),
            FileSystemLoader(os.path.join(uppath(RECIPE_DIR, 2), 'Templates')),
            FileSystemLoader(BES_TEMPLATES)
        ]))
        template_task = jinja_env.get_template(template_name)
        # print jinja_env.list_templates()
        rendered_task = template_task.render(**self.env)

        # Write Final BES File to Disk
        # BUG FIX: use a context manager so the output file is always closed,
        # even if write() raises.
        bes_path = "%s/Deploy %s %s.bes" % (
            self.env.get("RECIPE_CACHE_DIR"), name, version)
        with open(bes_path, "wb") as outputfile_handle:
            outputfile_handle.write(rendered_task)

        self.env['bes_file'] = bes_path
        self.output("Output BES File: '%s'" % self.env.get("bes_file"))
if __name__ == "__main__":
    # BUG FIX: this module defines BESTemplater; 'BESImporter' does not exist
    # here and raised a NameError when the processor was run directly.
    processor = BESTemplater()
    processor.execute_shell()
|
from base import Phase
# Each Phase pairs a display name with a human-readable description.
# They are declared in execution order and collected in `order` below.
preparation = Phase('Preparation', 'Initializing connections, fetching data etc.')
volume_creation = Phase('Volume creation', 'Creating the volume to bootstrap onto')
volume_preparation = Phase('Volume preparation', 'Formatting the bootstrap volume')
volume_mounting = Phase('Volume mounting', 'Mounting bootstrap volume')
os_installation = Phase('OS installation', 'Installing the operating system')
package_installation = Phase('Package installation', 'Installing software')
system_modification = Phase('System modification', 'Modifying configuration files, adding resources, etc.')
system_cleaning = Phase('System cleaning', 'Removing sensitive data, temporary files and other leftovers')
volume_unmounting = Phase('Volume unmounting', 'Unmounting the bootstrap volume')
image_registration = Phase('Image registration', 'Uploading/Registering with the provider')
cleaning = Phase('Cleaning', 'Removing temporary files')

# Canonical execution order of the bootstrap phases.
order = [preparation,
         volume_creation,
         volume_preparation,
         volume_mounting,
         os_installation,
         package_installation,
         system_modification,
         system_cleaning,
         volume_unmounting,
         image_registration,
         cleaning,
         ]
|
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: create the TinyContent block model."""

    dependencies = [
    ]

    operations = [
        # A named, reusable chunk of content identified by a unique name.
        migrations.CreateModel(
            name='TinyContent',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=100)),
                ('content', models.TextField()),
            ],
            options={
                'verbose_name': 'Content block',
            },
        ),
    ]
|
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from pants.engine.fs import EMPTY_SNAPSHOT
from pants.engine.rules import RootRule, rule
from pants.engine.selectors import Select
from pants.util.objects import datatype
logger = logging.getLogger(__name__)
class ExecuteProcessRequest(datatype('ExecuteProcessRequest', ['argv', 'env', 'input_files_digest', 'digest_length'])):
    """Request for execution with args and snapshots to extract."""

    @classmethod
    def create_from_snapshot(cls, argv, env, snapshot):
        # Alternate constructor: derive digest and length from a Snapshot.
        return ExecuteProcessRequest(
            argv=argv,
            env=env,
            input_files_digest=snapshot.fingerprint,
            digest_length=snapshot.digest_length,
        )

    @classmethod
    def create_with_empty_snapshot(cls, argv, env):
        # Alternate constructor: no input files at all.
        return cls.create_from_snapshot(argv, env, EMPTY_SNAPSHOT)

    def __new__(cls, argv, env, input_files_digest, digest_length):
        """
        :param argv: Arguments to the process being run, as a tuple.
        :param env: A tuple of environment variables and values.
        :param input_files_digest: Fingerprint (str) of the input files snapshot.
        :param digest_length: Length (non-negative int) of the snapshot digest.
        :raises ValueError: if any argument has the wrong type.
        """
        if not isinstance(argv, tuple):
            raise ValueError('argv must be a tuple.')

        if not isinstance(env, tuple):
            raise ValueError('env must be a tuple.')

        if not isinstance(input_files_digest, str):
            raise ValueError('input_files_digest must be a str.')

        if not isinstance(digest_length, int):
            raise ValueError('digest_length must be an int.')
        if digest_length < 0:
            raise ValueError('digest_length must be >= 0.')

        return super(ExecuteProcessRequest, cls).__new__(cls, argv, env, input_files_digest, digest_length)
class ExecuteProcessResult(datatype('ExecuteProcessResult', ['stdout', 'stderr', 'exit_code'])):
  """Result of a process execution: captured output streams and the exit code."""
  pass
def create_process_rules():
  """Intrinsically replaced on the rust side."""
  # RootRule registration lets ExecuteProcessRequest be supplied as a graph root.
  return [execute_process_noop, RootRule(ExecuteProcessRequest)]
@rule(ExecuteProcessResult, [Select(ExecuteProcessRequest)])
def execute_process_noop(*args):
  # Placeholder rule body: the engine substitutes a native implementation
  # for this rule, so reaching this Python body is always a bug.
  raise Exception('This task is replaced intrinsically, and should never run.')
|
# (C) British Crown Copyright 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for the `iris.fileformats.grib.GribWrapper` class.
"""
from __future__ import (absolute_import, division, print_function)
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from biggus import NumpyArrayAdapter
import mock
import numpy as np
from iris.fileformats.grib import GribWrapper, GribDataProxy
_message_length = 1000
def _mock_grib_get_long(grib_message, key):
lookup = dict(totalLength=_message_length,
numberOfValues=200,
jPointsAreConsecutive=0,
Ni=20,
Nj=10)
try:
result = lookup[key]
except KeyError:
msg = 'Mock grib_get_long unknown key: {!r}'.format(key)
raise AttributeError(msg)
return result
def _mock_grib_get_string(grib_message, key):
return grib_message
def _mock_grib_get_native_type(grib_message, key):
result = int
if key == 'gridType':
result = str
return result
class Test_deferred(tests.IrisTest):
    def setUp(self):
        # Stub out the expensive GribWrapper internals plus the gribapi
        # accessors, so a plain string can stand in for a grib message.
        patches = [
            mock.patch('iris.fileformats.grib.GribWrapper._confirm_in_scope'),
            mock.patch('iris.fileformats.grib.GribWrapper._compute_extra_keys'),
            mock.patch('gribapi.grib_get_long', _mock_grib_get_long),
            mock.patch('gribapi.grib_get_string', _mock_grib_get_string),
            mock.patch('gribapi.grib_get_native_type',
                       _mock_grib_get_native_type),
        ]
        for patcher in patches:
            patcher.start()
            self.addCleanup(patcher.stop)

    def _check_deferred(self, grib_message, shape):
        # Each GribWrapper should defer its payload behind a GribDataProxy
        # whose offset tracks the file position of the wrapped message.
        tell_tale = np.arange(1, 5) * _message_length
        grib_fh = mock.Mock(tell=mock.Mock(side_effect=tell_tale))
        auto_regularise = False
        for index, _ in enumerate(tell_tale):
            gw = GribWrapper(grib_message, grib_fh, auto_regularise)
            self.assertIsInstance(gw._data, NumpyArrayAdapter)
            proxy = gw._data.concrete
            self.assertIsInstance(proxy, GribDataProxy)
            self.assertEqual(proxy.shape, shape)
            self.assertEqual(proxy.dtype, np.float)
            self.assertIs(proxy.fill_value, np.nan)
            self.assertEqual(proxy.path, grib_fh.name)
            self.assertEqual(proxy.offset, _message_length * index)
            self.assertEqual(proxy.regularise, auto_regularise)

    def test_regular_sequential(self):
        self._check_deferred('regular_ll', (10, 20))

    def test_regular_mixed(self):
        self._check_deferred('regular_ll', (10, 20))

    def test_reduced_sequential(self):
        self._check_deferred('reduced_gg', (200,))

    def test_reduced_mixed(self):
        self._check_deferred('reduced_gg', (200,))
# Allow running this test module directly.
if __name__ == '__main__':
    tests.main()
|
# -*- coding: utf-8 -*-
#
# Cherokee-admin
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2001-2014 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import CTK
import Cherokee
import validations
# Endpoint registered with CTK.publish() below for applying config changes.
URL_APPLY = '/plugin/handler/apply'
# Help text for the Document Root property (N_ marks it for translation).
NOTE_DOCUMENT_ROOT = N_('Allows to specify an alternative Document Root path.')
class PluginHandler (CTK.Plugin):
    """Base plugin for handler configuration pages.

    Optionally renders a 'Document Root' property so the rule's document
    root can be overridden.
    """

    def __init__ (self, key, **kwargs):
        CTK.Plugin.__init__ (self, key)
        self.show_document_root = kwargs.pop('show_document_root', True)
        # Key of the parent rule: the handler key minus its last component.
        self.key_rule = '!'.join(self.key.split('!')[:-1])

    def AddCommon (self):
        if self.show_document_root:
            props = CTK.PropsTable()
            props.Add (_('Document Root'), CTK.TextCfg('%s!document_root'%(self.key_rule), True), _(NOTE_DOCUMENT_ROOT))
            form = CTK.Submitter (URL_APPLY)
            form += CTK.Indenter (props)
            self += form

            # Publish
            VALS = [("%s!document_root"%(self.key_rule), validations.is_dev_null_or_local_dir_exists)]
            CTK.publish ('^%s'%(URL_APPLY), CTK.cfg_apply_post, validation=VALS, method="POST")
|
import numpy as np
from bmtools.filters import filtfilt_butter
def derivatefilterQ(model, q, t, cutoff, fs, filter_order=4):
    '''
    Differentiate data using backwards finite differences and
    butterworth low pass filter. Takes care of quaternions
    Inputs:
    - Q : numpy matrix double x=time, y=coordinates(model.nq)
    - t : numpy vector of time
    - cutoff : cutoff frequency
    - fs : sampling frequency
    Output
    - V : numpy matrix double x=time, y=coordinates(model.nv)
    '''
    # Debug
    # Coerce inputs into the container types the assertions below require.
    q = q.squeeze()
    t = t.squeeze()
    if type(q) != np.matrixlib.defmatrix.matrix:
        q = np.matrix(q).squeeze()
    if type(t) != np.ndarray:
        t = np.array(t).squeeze()
    assert type(q) == np.matrixlib.defmatrix.matrix
    assert type(t) == np.ndarray
    assert q.shape[0] == len(t)
    assert q.shape[1] == model.nq
    # Differentiate
    # nv (velocity) can differ from nq (configuration) when quaternions
    # are present, hence the se3-based differentiation below.
    dq = np.empty((len(t), model.nv))
    for frame in xrange(0,len(t)-1):
        # Backward differences
        dt = np.float64(t[frame+1]-t[frame])
        q1 = q[frame,:]
        q2 = q[frame+1,:]
        # NOTE(review): 'se3' is not imported anywhere in this module --
        # presumably pinocchio's se3 bindings; confirm the missing import.
        diff = se3.differentiate(model, q1, q2)/dt
        dq[frame,:] = diff.A1
    # The last sample has no successor; repeat the previous derivative.
    dq[-1,:] = dq[-2,:]
    # Filter
    # Zero-phase butterworth filtering, one velocity coordinate at a time.
    dq_prime = np.empty((len(t), model.nv))
    for i in xrange(model.nv):
        filtered = filtfilt_butter(dq[:,i], cutoff, fs, filter_order)
        dq_prime[:,i] = filtered
    return np.matrix(dq_prime)
def derivatefilterV(model, dq, t, cutoff, fs, filter_order=4):
    '''
    Differentiate data using backwards finite differences and
    butterworth low pass filter
    Inputs:
    - dq : numpy matrix double x=time, y=coordinates(model.nv)
    - t : numpy vector of time
    - cutoff : cutoff frequency
    - fs : sampling frequency
    Output
    - ddq : numpy matrix double x=time, y=coordinates(model.nv)
    '''
    # Coerce inputs into the container types the assertions below require.
    dq = dq.squeeze()
    t = t.squeeze()
    if type(dq) != np.matrixlib.defmatrix.matrix:
        dq = np.matrix(dq).squeeze()
    if type(t) != np.ndarray:
        t = np.array(t).squeeze()
    assert type(dq) == np.matrixlib.defmatrix.matrix
    assert type(t) == np.ndarray
    assert dq.shape[0] == len(t)
    assert dq.shape[1] == model.nv
    # Backward finite differences; the last row repeats the previous one
    # because the final sample has no successor.
    ddq = np.empty((len(t), model.nv))
    for frame in xrange(0, len(t) - 1):
        dt = np.float64(t[frame + 1] - t[frame])
        step = (dq[frame + 1, :] - dq[frame, :]) / np.float64(dt)
        ddq[frame, :] = step.A1
    ddq[-1, :] = ddq[-2, :]
    # Zero-phase butterworth filtering, one coordinate column at a time.
    ddq_prime = np.empty((len(t), model.nv))
    for col in xrange(model.nv):
        ddq_prime[:, col] = filtfilt_butter(ddq[:, col], cutoff, fs, filter_order)
    return np.matrix(ddq_prime)
|
import requests
import json
from JumpScale import j
from mongoengine import *
# Module-level connection settings consumed by endpoint(); intended to be
# filled in when a GOGSClient is created.
ENTRY_POINT =""
# API token appended by endpoint() to every request URL.
TOKEN=""
class ModelGogsRepo(j.core.models.getBaseModel()):
    """Mongoengine model describing a Gogs repository to be created."""
    name = StringField(default='')
    description = StringField(default='')
    # BUG FIX: mongoengine has no 'BoolField'; the boolean field type is
    # 'BooleanField' (the old name raised NameError at class definition).
    private = BooleanField(default=False)
    readme = StringField(default='Default')
    gitignores = StringField(default='Python')
    auto_init = BooleanField(default=True)
def endpoint(resource):
    """Return the full API URL for *resource* with the auth token appended."""
    url = '%s/%s' % (ENTRY_POINT, resource)
    # Append the token with '?' or '&' depending on an existing query string.
    if "?" in url:
        return url + '&token=%s' % TOKEN
    return url + '?token=%s' % TOKEN
def perform_post(resource, data):
    """POST *data* (JSON text) to *resource* and return the response."""
    json_headers = {'Content-Type': 'application/json'}
    return requests.post(endpoint(resource), data, headers=json_headers)
def perform_delete(resource):
    """Issue a DELETE request against *resource* and return the response."""
    url = endpoint(resource)
    return requests.delete(url)
def perform_get(resource):
r = requests.get(endpoint(resource))
print r.json
# Reference curl invocations demonstrating the two supported authentication
# styles: an Authorization header versus a 'token' query parameter.
curlexample='''
curl -H "Authorization: token b9d3768004daf48b4b6f963ab94ca47515444074" http://192.168.99.100:3001/api/v1/user/repos
curl http://192.168.99.100:3001/api/v1/user/repos?token=b9d3768004daf48b4b6f963ab94ca47515444074
'''
class GOGSClient():
    """Minimal Gogs REST API client built on the module-level helpers."""

    def __init__(self):
        # BUG FIX: without the 'global' declaration these assignments only
        # created function-locals, leaving the module-level ENTRY_POINT and
        # TOKEN (used by endpoint()) permanently empty.
        global ENTRY_POINT, TOKEN
        ENTRY_POINT = 'http://192.168.99.100:3001/api/v1/'
        TOKEN = "b9d3768004daf48b4b6f963ab94ca47515444074"

    def repos_list(self):
        """Return the HTTP response listing the authenticated user's repos."""
        return perform_get("user/repos")

    def repo_create(self, name, description, private):
        """Build a repository model and POST it.

        NOTE(review): the model is constructed but never serialized into the
        request body, and the resource path is empty -- presumably this
        should POST the model data to 'user/repos'; confirm intended use.
        """
        model = ModelGogsRepo(name=name, description=description, private=private)
        perform_post("")
people = [
{
'firstname': 'John',
'lastname': 'Doe',
'role': ['author'],
'location': {'address': '422 South Gay Street', 'city': 'Auburn'},
'born': 'Thu, 27 Aug 1970 14:37:13 GMT'
},
{
'firstname': 'Serena',
'lastname': 'Love',
'role': ['author'],
'location': {'address': '363 Brannan St', 'city': 'San Francisco'},
'born': 'Wed, 25 Feb 1987 17:00:00 GMT'
},
{
'firstname': 'Mark',
'lastname': 'Green',
'role': ['copy', 'author'],
'location': {'address': '4925 Lacross Road', 'city': 'New York'},
'born': 'Sat, 23 Feb 1985 12:00:00 GMT'
},
{
'firstname': 'Julia',
'lastname': 'Red',
'role': ['copy'],
'location': {'address': '98 Yatch Road', 'city': 'San Francisco'},
'born': 'Sun, 20 Jul 1980 11:00:00 GMT'
},
{
'firstname': 'Anne',
'lastname': 'White',
'role': ['contributor', 'copy'],
'location': {'address': '32 Joseph Street', 'city': 'Ashfield'},
| 'born': 'Fri, 25 Sep 1970 10:00:00 GMT'
},
]
r = perform_post('people', json.dumps(people))
print "'people' posted", r.status_code
valids = []
if r.status_code == 201:
response = r.json()
if response['_status'] == 'OK':
for person in response['_items']:
if person['_status'] == "OK":
valids.append(person['_id']) |
return valids
# def post_works(ids):
# works = []
# for i in range(28):
# works.append(
# {
# 'title': 'Book Title #%d' % i,
# 'description': 'Description #%d' % i,
# 'owner': random.choice(ids),
# }
# )
# r = perform_post('works', json.dumps(works))
# print "'works' posted", r.status_code
# def delete():
# r = perform_delete('people')
# print "'people' deleted", r.status_code
# r = perform_delete('works')
# print "'works' deleted", r.status_code
# BUG FIX: instantiate the client (previously 'cl' was bound to the class
# object itself) and call the method that actually exists, repos_list().
cl = GOGSClient()
print (cl.repos_list())
|
es.
==Operation==
The default 'Activate Stretch' checkbox is off. When it is on, the functions described below will work, when it is off, the functions will not be called.
==Settings==
===Loop Stretch Over Perimeter Width===
Default is 0.1.
Defines the ratio of the maximum amount the loop aka inner shell threads will be stretched compared to the edge width, in general this value should be the same as the 'Perimeter Outside Stretch Over Perimeter Width' setting.
===Path Stretch Over Perimeter Width===
Default is zero.
Defines the ratio of the maximum amount the threads which are not loops, like the infill threads, will be stretched compared to the edge width.
===Perimeter===
====Perimeter Inside Stretch Over Perimeter Width====
Default is 0.32.
Defines the ratio of the maximum amount the inside edge thread will be stretched compared to the edge width, this is the most important setting in stretch. The higher the value the more it will stretch the edge and the wider holes will be. If the value is too small, the holes could be drilled out after fabrication, if the value is too high, the holes would be too wide and the part would have to junked.
====Perimeter Outside Stretch Over Perimeter Width====
Default is 0.1.
Defines the ratio of the maximum amount the outside edge thread will be stretched compared to the edge width, in general this value should be around a third of the 'Perimeter Inside Stretch Over Perimeter Width' setting.
===Stretch from Distance over Perimeter Width===
Default is two.
The stretch algorithm works by checking at each turning point on the extrusion path what the direction of the thread is at a distance of 'Stretch from Distance over Perimeter Width' times the edge width, on both sides, and moves the thread in the opposite direction. So it takes the current turning-point, goes "Stretch from Distance over Perimeter Width" * "Perimeter Width" ahead, reads the direction at that point. Then it goes the same distance in back in time, reads the direction at that other point. It then moves the thread in the opposite direction, away from the center of the arc formed by these 2 points+directions.
The magnitude of the stretch increases with:
the amount that the direction of the two threads is similar and
by the '..Stretch Over Perimeter Width' ratio.
==Examples==
The following examples stretch the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and stretch.py.
> python stretch.py
This brings up the stretch dialog.
> python stretch.py Screw Holder Bottom.stl
The stretch tool is parsing the file:
Screw Holder Bottom.stl
..
The stretch tool has created the file:
.. Screw Holder Bottom_stretch.gcode
"""
from __future__ import absolute_import
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import sys
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
#maybe speed up feedRate option
def getCraftedText( fileName, gcodeText, stretchRepository = None ):
	"Stretch a gcode linear move text."
	# Load the gcode from file when the supplied text is empty, then delegate.
	text = archive.getTextIfEmpty(fileName, gcodeText)
	return getCraftedTextFromText( text, stretchRepository )
def getCraftedTextFromText( gcodeText, stretchRepository = None ):
	"Stretch a gcode linear move text."
	# Text already processed by 'stretch' (or empty) passes through untouched.
	if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'stretch'):
		return gcodeText
	# Idiom fix: compare to None with 'is', not '=='.
	if stretchRepository is None:
		stretchRepository = settings.getReadRepository( StretchRepository() )
	if not stretchRepository.activateStretch.value:
		return gcodeText
	return StretchSkein().getCraftedGcode( gcodeText, stretchRepository )
def getNewRepository():
	'Get new repository.'
	# Factory used by the skeinforge settings framework.
	return StretchRepository()
def writeOutput(fileName, shouldAnalyze=True):
	"Stretch a gcode linear move file. Chain stretch the gcode if it is not already stretched."
	# Delegates to the craft chain so earlier stages run first if needed.
	skeinforge_craft.writeChainTextWithNounMessage(fileName, 'stretch', shouldAnalyze)
class LineIteratorBackward(object):
	"Backward line iterator class."
	def __init__( self, isLoop, lineIndex, lines ):
		# Index of the first line visited; getNext uses it to detect a full
		# wrap-around when iterating a loop.
		self.firstLineIndex = None
		self.isLoop = isLoop
		self.lineIndex = lineIndex
		self.lines = lines
	def getIndexBeforeNextDeactivate(self):
		"Get index two lines before the deactivate command."
		# Scan forward for the next M103 (extruder off) command.
		for lineIndex in xrange( self.lineIndex + 1, len(self.lines) ):
			line = self.lines[lineIndex]
			splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
			firstWord = gcodec.getFirstWord(splitLine)
			if firstWord == 'M103':
				return lineIndex - 2
		print('This should never happen in stretch, no deactivate command was found for this thread.')
		raise StopIteration, "You've reached the end of the line."
	def getNext(self):
		"Get next line going backward or raise exception."
		# Walk backwards through the gcode; loops wrap around at thread
		# boundaries, open paths stop there instead.
		while self.lineIndex > 3:
			if self.lineIndex == self.firstLineIndex:
				raise StopIteration, "You've reached the end of the line."
			if self.firstLineIndex == None:
				self.firstLineIndex = self.lineIndex
			nextLineIndex = self.lineIndex - 1
			line = self.lines[self.lineIndex]
			splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
			firstWord = gcodec.getFirstWord(splitLine)
			if firstWord == 'M103':
				if self.isLoop:
					nextLineIndex = self.getIndexBeforeNextDeactivate()
				else:
					raise StopIteration, "You've reached the end of the line."
			if firstWord == 'G1':
				if self.isBeforeExtrusion():
					if self.isLoop:
						nextLineIndex = self.getIndexBeforeNextDeactivate()
					else:
						raise StopIteration, "You've reached the end of the line."
				else:
					self.lineIndex = nextLineIndex
					return line
			self.lineIndex = nextLineIndex
		raise StopIteration, "You've reached the end of the line."
	def isBeforeExtrusion(self):
		"Determine if index is two or more before activate command."
		# True when at least one G1 move lies between here and the next
		# M101 (extruder on) command.
		linearMoves = 0
		for lineIndex in xrange( self.lineIndex + 1, len(self.lines) ):
			line = self.lines[lineIndex]
			splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
			firstWord = gcodec.getFirstWord(splitLine)
			if firstWord == 'G1':
				linearMoves += 1
			if firstWord == 'M101':
				return linearMoves > 0
			if firstWord == 'M103':
				return False
		print('This should never happen in isBeforeExtrusion in stretch, no activate command was found for this thread.')
		return False
class LineIteratorForward(object):
"Forward line iterator class."
	def __init__( self, isLoop, lineIndex, lines ):
		# Index of the first line visited; getNext uses it to detect a full
		# wrap-around when iterating a loop.
		self.firstLineIndex = None
		self.isLoop = isLoop
		self.lineIndex = lineIndex
		self.lines = lines
	def getIndexJustAfterActivate(self):
		"Get index just after the activate command."
		# Scan backwards for the most recent M101 (extruder on) command.
		for lineIndex in xrange( self.lineIndex - 1, 3, - 1 ):
			line = self.lines[lineIndex]
			splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
			firstWord = gcodec.getFirstWord(splitLine)
			if firstWord == 'M101':
				return lineIndex + 1
		print('This should never happen in stretch, no activate command was found for this thread.')
		raise StopIteration, "You've reached the end of the line."
def getNext(self):
"Get next line or raise exception."
while self.lineIndex < len(self.lines):
if self.lineIndex == self.firstLineIndex:
raise StopIteration, "You've reached the end of the line."
if self.firstLineIndex == None:
self.firstLineIndex = self.lineIndex
nextLineIndex = self.lineIndex + 1
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
if firstWord == 'M103':
if self.isLoop:
nextLineIndex = self.getIndexJustAfterActivate()
else:
raise StopIteration, "You've reached the end of the line."
self.lineIndex = nextLineIndex
if firstWord == 'G1':
re |
hasattr(sys, 'exc_clear'):
sys.exc_clear()
_app_ctx_stack.push(self)
appcontext_pushed.send(self.app)
    def pop(self, exc=None):
        """Pops the app context."""
        # The context is reference counted; teardown handlers only run when
        # the outermost push() is being undone.
        self._refcnt -= 1
        if self._refcnt <= 0:
            if exc is None:
                exc = sys.exc_info()[1]
            self.app.do_teardown_appcontext(exc)
        rv = _app_ctx_stack.pop()
        assert rv is self, 'Popped wrong app context. (%r instead of %r)' \
            % (rv, self)
        appcontext_popped.send(self.app)
    def __enter__(self):
        # Context-manager support: `with app.app_context():`.
        self.push()
        return self
    def __exit__(self, exc_type, exc_value, tb):
        # Hand the in-flight exception (if any) to the teardown handlers.
        self.pop(exc_value)
class RequestContext(object):
"""The request context contains all request relevant information. It is
created at the beginning of the request and pushed to the
`_request_ctx_stack` and removed at the end of it. It will create the
URL adapter and request object for the WSGI environment provided.
Do not attempt to use this class directly, instead use
:meth:`~flask.Flask.test_request_context` and
:meth:`~flask.Flask.request_context` to create this object.
When the request context is popped, it will evaluate all the
functions registered on the application for teardown execution
(:meth:`~flask.Flask.teardown_request`).
The request context is automatically popped at the end of the request
for you. In debug mode the request context is kept around if
exceptions happen so that interactive debuggers have a chance to
introspect the data. With 0.4 this can also be forced for requests
that did not fail and outside of `DEBUG` mode. By setting
``'flask._preserve_context'`` to `True` on the WSGI environment the
context will not pop itself at the end of the request. This is used by
the :meth:`~flask.Flask.test_client` for example to implement the
deferred cleanup functionality.
You might find this helpful for unittests where you need the
information from the context local around for a little longer. Make
sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in
that situation, otherwise your unittests will leak memory.
"""
    def __init__(self, app, environ, request=None):
        """Bind *app* to the WSGI *environ*, creating the request object
        and URL adapter (a pre-built *request* may be passed instead)."""
        self.app = app
        if request is None:
            request = app.request_class(environ)
        self.request = request
        self.url_adapter = app.create_url_adapter(self.request)
        self.flashes = None
        self.session = None
        # Request contexts can be pushed multiple times and interleaved with
        # other request contexts. Now only if the last level is popped we
        # get rid of them. Additionally if an application context is missing
        # one is created implicitly so for each level we add this information
        self._implicit_app_ctx_stack = []
        # indicator if the context was preserved. Next time another context
        # is pushed the preserved context is popped.
        self.preserved = False
        # remembers the exception for pop if there is one in case the context
        # preservation kicks in.
        self._preserved_exc = None
        # Functions that should be executed after the request on the response
        # object. These will be called before the regular "after_request"
        # functions.
        self._after_request_functions = []
        # Route matching happens eagerly so view_args/url_rule are available.
        self.match_request()
        # XXX: Support for deprecated functionality. This is going away with
        # Flask 1.0
        blueprint = self.request.blueprint
        if blueprint is not None:
            # better safe than sorry, we don't want to break code that
            # already worked
            bp = app.blueprints.get(blueprint)
            if bp is not None and blueprint_is_module(bp):
                self.request._is_old_module = True
    def _get_g(self):
        return _app_ctx_stack.top.g
    def _set_g(self, value):
        _app_ctx_stack.top.g = value
    # `g` proxies to the current application context's `g`, so request code
    # and app code share one namespace object.
    g = property(_get_g, _set_g)
    del _get_g, _set_g
    def copy(self):
        """Creates a copy of this request context with the same request object.
        This can be used to move a request context to a different greenlet.
        Because the actual request object is the same this cannot be used to
        move a request context to a different thread unless access to the
        request object is locked.
        .. versionadded:: 0.10
        """
        return self.__class__(self.app,
            environ=self.request.environ,
            request=self.request
        )
    def match_request(self):
        """Can be overridden by a subclass to hook into the matching
        of the request.
        """
        try:
            url_rule, self.request.view_args = \
                self.url_adapter.match(return_rule=True)
            self.request.url_rule = url_rule
        except HTTPException as e:
            # Remember routing failures; they surface later during dispatch.
            self.request.routing_exception = e
    def push(self):
        """Binds the request context to the current context."""
        # If an exception occurs in debug mode or if context preservation is
        # activated under exception situations exactly one context stays
        # on the stack. The rationale is that you want to access that
        # information under debug situations. However if someone forgets to
        # pop that context again we want to make sure that on the next push
        # it's invalidated, otherwise we run at risk that something leaks
        # memory. This is usually only a problem in testsuite since this
        # functionality is not active in production environments.
        top = _request_ctx_stack.top
        if top is not None and top.preserved:
            top.pop(top._preserved_exc)
        # Before we push the request context we have to ensure that there
        # is an application context.
        app_ctx = _app_ctx_stack.top
        if app_ctx is None or app_ctx.app != self.app:
            app_ctx = self.app.app_context()
            app_ctx.push()
            self._implicit_app_ctx_stack.append(app_ctx)
        else:
            self._implicit_app_ctx_stack.append(None)
        # Python 2 only: clear stale exception state before pushing.
        if hasattr(sys, 'exc_clear'):
            sys.exc_clear()
        _request_ctx_stack.push(self)
        # Open the session at the moment that the request context is
        # available. This allows a custom open_session method to use the
        # request context (e.g. code that access database information
        # stored on `g` instead of the appcontext).
        self.session = self.app.open_session(self.request)
        if self.session is None:
            self.session = self.app.make_null_session()
def pop(self, exc=None):
"""Pops the request context and unbinds it by doing that. This will
also trigger the execution of functions registered by the
:meth:`~flask.Flask.teardown_request` decorator.
.. versionchanged:: 0.9
Added the `exc` argument.
"""
app_ctx = self._implicit_app_ctx_stack.pop()
clear_request = False
if not self._implicit_app_ctx_stack:
self.preserved = False
self._preserved_exc = None
if exc is None:
exc = sys.exc_info()[1]
self.app.do_teardown_request(exc)
# If this interpreter supports clearing the exception information
# we do that now. This will only go into effect on Python 2.x,
# on 3.x it disappears automatically at the end of the exception
# stack.
if hasattr(sys, 'exc_clear'):
sys.exc_clear()
request_close = getattr(self.request, 'close', None)
if request_close is not None:
request_close()
clear_request = True
rv = _request_ctx_stack.pop()
assert rv is self, 'Popped wrong request context. (%r instead of %r)' \
% (rv, self)
# get rid of circular dependencies at the end of the request
# so that we don't require the GC to be active.
if clear_request:
rv.request.environ['werkzeug.request'] = Non |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import sleep
from airflow.exceptions import AirflowException, Ai | rflowSensorTimeout, \
AirflowSkipException
from airflow.models import BaseOperator
from airflow.utils import timezone
from a | irflow.utils.decorators import apply_defaults
class BaseSensorOperator(BaseOperator):
    """
    Base class for sensor operators.

    Sensors repeatedly invoke :meth:`poke` every ``poke_interval`` seconds
    and succeed once it returns a truthy value; they fail (or are skipped)
    when ``timeout`` seconds elapse first.

    :param soft_fail: Set to true to mark the task as SKIPPED on failure
    :type soft_fail: bool
    :param poke_interval: Time in seconds that the job should wait in
        between each tries
    :type poke_interval: int
    :param timeout: Time, in seconds before the task times out and fails.
    :type timeout: int
    """
    ui_color = '#e6f1f2'

    @apply_defaults
    def __init__(self,
                 poke_interval=60,
                 timeout=60 * 60 * 24 * 7,
                 soft_fail=False,
                 *args,
                 **kwargs):
        super(BaseSensorOperator, self).__init__(*args, **kwargs)
        self.poke_interval = poke_interval
        self.soft_fail = soft_fail
        self.timeout = timeout

    def poke(self, context):
        """
        Function that the sensors defined while deriving this class should
        override.
        """
        raise AirflowException('Override me.')

    def execute(self, context):
        started_at = timezone.utcnow()
        while True:
            if self.poke(context):
                break
            # The timeout is measured from the first poke, not the last.
            if (timezone.utcnow() - started_at).total_seconds() > self.timeout:
                if self.soft_fail:
                    raise AirflowSkipException('Snap. Time is OUT.')
                raise AirflowSensorTimeout('Snap. Time is OUT.')
            sleep(self.poke_interval)
        self.log.info("Success criteria met. Exiting.")
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
|
'''
Copyright 2021 Travel Modelling Group, Department of Civil Engineering, University of Toronto
This file is part of the TMG Toolbox.
The TMG Toolbox is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The TMG Toolbox is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with the TMG Toolbox. If not, see <http://www.gnu.org/licenses/>.
'''
#---METADATA---------------------
'''
Copy Scenario
Authors: JamesVaughan
Latest revision by: JamesVaughan
This tool will allow XTMF to be able to execute a vdf batch file into
an EMME Databank.
'''
#---VERSION HISTORY
'''
0.0.1 Created on 2021-01-20 by JamesVaughan
'''
import inro.modeller as _m
imp | ort traceback as _traceback
_MODELLER = _m.Modeller() #Instantiate Modeller once.
process = _m.Modeller().tool('inro.emme.data.function.function_transaction')
class CopyScenario(_m.Tool()):
    """XTMF entry point that loads a VDF batch file into an EMME scenario.

    NOTE: the class is named CopyScenario, but per the file header it
    actually imports a VDF (function) batch file.
    """
    version = '0.0.1'
    batch_file = _m.Attribute(str)
    scenario_number = _m.Attribute(int)

    def page(self):
        # Modeller UI stub; the tool is only callable through XTMF.
        builder = _m.ToolPageBuilder(self, title="Import VDF Batch File",
                                     runnable=False,
                                     description="Cannot be called from Modeller.",
                                     branding_text="XTMF")
        return builder.render()

    def run(self):
        # Not runnable from the Modeller UI; invoked via __call__ by XTMF.
        pass

    def __call__(self, batch_file, scenario_number):
        try:
            bank = _MODELLER.emmebank
            target = bank.scenario(str(scenario_number))
            process(transaction_file=batch_file,
                    scenario=target,
                    throw_on_error=True)
        except Exception as e:
            # Re-raise with the full traceback text so XTMF can log it.
            raise Exception(_traceback.format_exc())
|
_section in self.data.config[base].sections.now:
sect_data = self.data.config[base].sections.now[ns_section]
if sect_data.metadata["full_ns"] == namespace:
return [ns_section]
if ns_section in self.data.config[base].sections.latent:
sect_data = self.data.config[base].sections.latent[ns_section]
if sect_data.metadata["full_ns"] == namespace:
return [ns_section]
return []
def get_ns_is_default(self, namespace):
    """Return whether this namespace is the default for a section. Slow!"""
    config_name = self.util.split_full_ns(self.data, namespace)[0]
    config_data = self.data.config[config_name]
    allowed_sections = self.get_sections_from_namespace(namespace)
    empty = True
    for section in allowed_sections:
        # A variable that maps to this namespace but carries no explicit
        # ns metadata lives in its section's default namespace.
        for variable in config_data.vars.now.get(section, []):
            if variable.metadata['full_ns'] == namespace:
                empty = False
                if rose.META_PROP_NS not in variable.metadata:
                    return True
        for variable in config_data.vars.latent.get(section, []):
            if variable.metadata['full_ns'] == namespace:
                empty = False
                if rose.META_PROP_NS not in variable.metadata:
                    return True
    if empty:
        # An added, non-metadata section with no variables.
        return True
    return False
def get_all_namespaces(self, only_this_config=None):
    """Return all unique namespaces, optionally filtered to one config."""
    namespaces = self.data.namespace_meta_lookup.keys()
    if only_this_config is None:
        return namespaces
    return [ns for ns in namespaces if ns.startswith(only_this_config)]
def get_missing_sections(self, config_name=None):
    """Return full section ids that are missing.

    A section is "missing" when it exists only as latent data (in
    sections.latent, or implied by latent variables) without a real
    entry in sections.now.  When config_name is None every loaded
    config is scanned.  Ids are returned as 'config_name:section'.
    """
    full_sections = []
    if config_name is not None:
        config_names = [config_name]
    else:
        config_names = self.data.config.keys()
    for config_name in config_names:
        section_store = self.data.config[config_name].sections
        miss_sections = []
        real_sections = section_store.now.keys()
        # Latent sections that never became real.
        for section in section_store.latent.keys():
            if section not in real_sections:
                miss_sections.append(section)
        # Sections implied only by latent variables.
        for section in self.data.config[config_name].vars.latent:
            if (section not in real_sections and
                    section not in miss_sections):
                miss_sections.append(section)
        full_sections += [config_name + ':' + s for s in miss_sections]
    sorter = rose.config.sort_settings
    # Python 2 cmp-style comparator sort.
    full_sections.sort(sorter)
    return full_sections
def get_default_namespace_for_section(self, section, config_name):
    """Return the default namespace for the section.

    Results are memoised in self.data._config_section_namespace_lookup,
    keyed by config name then section.
    """
    if config_name not in self.data._config_section_namespace_lookup:
        self.data._config_section_namespace_lookup.setdefault(
            config_name, {})
    section_ns = (
        self.data._config_section_namespace_lookup[config_name].get(
            section))
    if section_ns is None:
        config_data = self.data.config[config_name]
        meta_config = config_data.meta
        # Explicit, un-ignored ns metadata on the section itself wins.
        node = meta_config.get([section, rose.META_PROP_NS], no_ignore=True)
        if node is not None:
            subspace = node.value
        else:
            match = REC_ELEMENT_SECTION.match(section)
            if match:
                # Element sections, e.g. "name(1)", derive their namespace
                # from the parent section's ns metadata when present.
                node = meta_config.get([match.groups()[0], rose.META_PROP_NS])
                if node is None or node.is_ignored():
                    subspace = section.replace('(', '/')
                    subspace = subspace.replace(')', '')
                    subspace = subspace.replace(':', '/')
                else:
                    subspace = node.value + '/' + str(match.groups()[1])
            elif section.startswith(rose.SUB_CONFIG_FILE_DIR + ":"):
                # "file:..." sections: only the first ':' becomes a '/'.
                subspace = section.rstrip('/').replace('/', ':')
                subspace = subspace.replace(':', '/', 1)
            else:
                subspace = section.rstrip('/').replace(':', '/')
        section_ns = config_name + '/' + subspace
        if not subspace:
            section_ns = config_name
        self.data._config_section_namespace_lookup[config_name].update(
            {section: section_ns})
    return section_ns
def get_format_sections(self, config_name):
    """Return all format-like sections in the current data."""
    format_keys = []
    for section in self.data.config[config_name].sections.now:
        # Format-like: contains ':' but is not a file: section.
        is_format = ':' in section and not section.startswith('file:')
        if is_format and section not in format_keys:
            format_keys.append(section)
    format_keys.sort(rose.config.sort_settings)
    return format_keys
def get_icon_path_for_config(self, config_name):
    """Return the path to the config identifier icon or None."""
    meta_files = self.data.config[config_name].meta_files
    return next(
        (fname for fname in meta_files
         if fname.endswith('/images/icon.png')),
        None)
def get_macro_module_prefix(self, config_name):
    """Return a valid module-like name for macros.

    Non-word characters in the config name (e.g. '/') become
    underscores and a '/' separator is appended.
    """
    # Raw string avoids the invalid-escape-sequence warning for "\w".
    return re.sub(r"[^\w]", "_", config_name.strip("/")) + "/"
def get_ignored_sections(self, namespace, get_enabled=False):
    """Return the user-ignored sections for this namespace.

    If namespace is a config_name, return all config ignored
    sections.

    Return enabled sections instead if get_enabled is True.
    """
    config_name = self.util.split_full_ns(self.data, namespace)[0]
    config_data = self.data.config[config_name]
    if namespace == config_name:
        # Whole-config query: consider every real section.
        sections = config_data.sections.now.keys()
    else:
        sections = self.get_sections_from_namespace(namespace)
    return_sections = []
    for section in sections:
        sect_data = config_data.sections.get_sect(section)
        if get_enabled:
            if not sect_data.ignored_reason:
                return_sections.append(section)
        elif (rose.variable.IGNORED_BY_USER in
                sect_data.ignored_reason):
            # Only user-ignores count here, not trigger/system ignores.
            return_sections.append(section)
    # Python 2 cmp-style comparator sort.
    return_sections.sort(rose.config.sort_settings)
    return return_sections
def get_latent_sections(self, namespace):
    """Return the latent sections for this namespace."""
    config_name = self.util.split_full_ns(self.data, namespace)[0]
    config_data = self.data.config[config_name]
    if namespace == config_name:
        candidates = config_data.sections.now.keys()
    else:
        candidates = self.get_sections_from_namespace(namespace)
    # Latent means: known about, but not a real section yet.
    latent = [section for section in candidates
              if section not in config_data.sections.now]
    latent.sort(rose.config.sort_settings)
    return latent
def get_ns_ignored_status(self, namespace):
"""Return the ignored status for a namespace's data."""
config_name = self.util.split_full_ns(self.data, namespace)[0]
config_data = self.data.config[config_name]
sections = self.get_sections_from_namespace(namespace)
status = rose.config.ConfigNode.STATE_NORMAL
default_section_statuses = {}
variable_statuses = {}
for section in sections:
sect_data = config_data.sections.get_sect(section)
if sect_data is None:
continue
if sect_data.metadata["full_ns"] == namespace:
if not sect_data.ignored_reason:
|
can be made to be very slow or very, very fast unlike `turtle`
- Artist metaphor matches 'canvas' metaphor used in all graphics coding.
- Artist draws lines individually instead of updating a single line with
new coordinates so that the artists drawn `lines` can be checked to
see if the line was drawn forward or backward and give credit for that
specific line segment. This allows set() to isolate the essential lines
when checking solutions without throwing out an otherwise good solution
that was drawn in a different way. This is critical for code.org puzzles
since often there is more than one way to retrace drawn lines to get
to a new position.
"""
import os
import json
import math
import random
from .tkcanvas import Canvas
from .gamegrids import XYGrid,xy,slope,bearing,length
class Artist():
    # Class-level defaults.  config() writes the start_* values back onto
    # the class so clones built from a prototype share the puzzle's start
    # state.
    start_direction = 0   # initial heading in degrees
    startx = 0            # initial x position
    starty = 0            # initial y position
    color = 'black'       # pen colour; 'random' is handled in move()
    width = 7             # pen width
    speed = 'normal'      # canvas drawing speed
    # Location of bundled theme resources (sprite strips etc.).
    resources = os.path.join(os.path.dirname(__file__),'res','artist')
def __init__(self,proto=None):
    """In most cases you want Artist.from_json() instead.

    proto: optional prototype Artist; its canvas, puzzle and pose are
    shared/copied instead of building fresh defaults.
    """
    self.grid = None      # XYGrid of pixels the user has drawn
    self.solution = None  # XYGrid of the target drawing (built in setup())
    # aggregate
    if proto:
        self.canvas = proto.canvas
        self.puzzle = proto.puzzle
        self.log = proto.log
        self.uid = proto.uid
        self.type = proto.type
        self.theme = proto.theme
        self.x = proto.x
        self.y = proto.y
        # Note: heading restarts from the prototype's *start* direction,
        # while last_direction records its current one.
        self.direction = proto.start_direction
        self.startx = proto.startx
        self.starty = proto.starty
        self.lastx = proto.lastx
        self.lasty = proto.lasty
        self.last_direction = proto.direction
        self.sprite = proto.sprite
    else:
        self.canvas = Canvas()
        self.puzzle = []
        self.log = []
        self.uid = None
        self.type = 'artist'
        self.theme = 'default'
        self.x = self.startx
        self.y = self.starty
        self.direction = self.start_direction
        self.lastx = self.x
        self.lasty = self.y
        self.last_direction = self.direction
        self.sprite = None
    self._lines_to_draw = [] # drawing cache
@property
def title(self):
    # The canvas window title is the single source of truth.
    return self.canvas.title
@title.setter
def title(self,new):
    # Remember the raw title; the displayed canvas title also carries
    # the uid when one is known.
    self._title = new
    if not new:
        if self.uid:
            self.canvas.title = self.uid
        # NOTE(review): when new is falsy and there is no uid, the
        # canvas title is left unchanged -- confirm this is intended.
    else:
        if self.uid:
            self.canvas.title = new + ' [' + self.uid + ']'
        else:
            self.canvas.title = new
@title.deleter
def title(self):
    # Deleting the title reverts to showing just the uid.
    self.canvas.title = self.uid
def config(self,conf):
    """Set attributes from a dictionary (usually after a JSON load)."""
    for key in conf:
        # Start-state values go on the class itself so later instances
        # (e.g. clones built from a prototype) share them.
        if key in ('startx','starty','start_direction'):
            setattr(__class__,key,conf[key])
        if key in ('puzzle','uid','title','type','theme'):
            setattr(self,key,conf[key])
def pen_color(self,color):
    """Just to be compatible with 'Show Code' JavaScript"""
    # Simple alias for assigning self.color directly.
    self.color = color
@classmethod
def from_json(cls,json_):
    """Build an instance from a JSON string or an already-parsed dict.

    json_: a JSON document (str) or the equivalent dict.
    Returns a new instance configured via config().
    """
    # isinstance is the idiomatic type test (was: type(json_) is str).
    if isinstance(json_, str):
        json_ = json.loads(json_)
    instance = cls()
    instance.config(json_)
    return instance
def setup(self):
    """Reset the pose, capture the puzzle as the solution grid, then
    place the sprite at the start position ready for user drawing."""
    self.title = self._title # for missing uid
    self.direction = self.start_direction
    self.x = self.startx
    self.y = self.starty
    self.grid = XYGrid().init(400,400,0)
    # Draw the target puzzle (light grey, fast) so the grid records it...
    self.draw_lines(self.puzzle, color='lightgrey', speed='fastest')
    # ...snapshot that as the solution, then wipe for the user's drawing.
    self.solution = XYGrid(self.grid)
    self.grid = XYGrid().init(400,400,0) # wipe
    strip = os.path.join(self.resources,self.theme,
                         'sprite_strip180_70x50.gif')
    self.sprite = self.canvas.create_sprite(strip)
    self.sprite.move(self.startx,self.starty,self.start_direction)
def check(self):
    """Compare the drawn grid with the stored solution and report.

    Delegates to good_job() on an exact or close-enough match,
    otherwise to try_again(); returns the handler's return value.
    """
    if self.grid == self.solution or self._close_enough():
        return self.good_job()
    return self.try_again()
def _close_enough(self):
    # Two-way containment check: every solution pixel must ping in the
    # drawn grid and every drawn pixel must ping in the solution.
    # NOTE(review): ping() semantics come from XYGrid (not in view);
    # presumably a tolerant neighbourhood test -- confirm.
    for y in range(400):
        for x in range(400):
            if self.solution[x][y] and not self.grid.ping(x,y):
                return False
            if self.grid[x][y] and not self.solution.ping(x,y):
                return False
    return True
def show_check(self):
    """Visualise the diff between drawing and solution in a new window.

    lightgreen = matching pixels, red = solution pixels not drawn,
    orange = drawn pixels not in the solution.
    """
    canvas = Canvas()
    # y is negated when poking -- presumably the canvas y axis grows
    # downward; confirm against Canvas.poke().
    for y in range(-200,201):
        for x in range(-200,201):
            if not self.solution[x][y] and not self.grid[x][y]:
                pass
            elif self.solution[x][y] == self.grid[x][y]:
                canvas.poke(x,-y,'lightgreen')
            elif self.solution[x][y]:
                canvas.poke(x,-y,'red')
            elif self.grid[x][y]:
                canvas.poke(x,-y,'orange')
    self.wait()
def show_solution(self):
    """Render the stored solution grid in a fresh canvas window.

    BUGFIX: this previously plotted self.grid (the user's drawing),
    making the body byte-identical to show_lines(); the method name and
    the existence of show_lines() indicate self.solution was intended.
    """
    canvas = Canvas()
    for y in range(-200,201):
        for x in range(-200,201):
            if self.solution[x][y]:
                canvas.poke(x,-y,'black')
    self.wait()
def show_lines(self):
    """Render every drawn pixel of self.grid in a fresh canvas window."""
    canvas = Canvas()
    for y in range(-200,201):
        for x in range(-200,201):
            if self.grid[x][y]:
                canvas.poke(x,-y,'black')
    self.wait()
def show_wrong(self):
    """Open a new canvas showing only the drawn pixels that disagree
    with the solution grid."""
    canvas = Canvas()
    for y in range(-200, 201):
        for x in range(-200, 201):
            drawn = self.grid[x][y]
            if drawn and drawn != self.solution[x][y]:
                canvas.poke(x, -y, 'black')
    self.wait()
def save(self,name=None,fname=None):
    """Serialize the puzzle definition to a JSON file.

    name:  base file name (without extension); defaults to self.uid.
    fname: explicit output path; when given it is used as-is.
           (Previously this parameter was accepted but always
           overwritten -- honoring it is the fix.)

    Raises AssertionError if the computed puzzles/ path already exists.
    """
    name = name if name else self.uid
    if fname is None:
        if os.path.isdir('puzzles'):
            fname = os.path.join('puzzles', name + '.json')
            assert not os.path.isfile(fname), '{} exists'.format(name)
        else:
            fname = name + '.json'
    with open(fname,'w') as f:
        f.write(json.dumps({
            "uid": self.uid,
            "type": self.type,
            "title": self._title,
            "startx": self.startx,
            "starty": self.starty,
            "start_direction": self.start_direction,
            "puzzle": self.log
        }))
def try_again(self,message='Nope. Try again.'):
    """Report failure, then keep the window open until clicked."""
    # TODO replace with a canvas splash window graphic
    print(message)
    self.canvas.exit_on_click()
def good_job(self,message='Perfect! Congrats!'):
    """Report success, then keep the window open until clicked."""
    # TODO replace with a canvas splash window graphic
    print(message)
    self.canvas.exit_on_click()
def wait_for_click(self):
    # Reuses good_job() purely for its print-and-wait behaviour.
    return self.good_job('Beautiful!')
# Short alias used by the show_* helpers.
wait = wait_for_click
def clear(self):
    """Drop any pending (undrawn) line segments and the replay log."""
    self._lines_to_draw = []
    self.log = []
def draw_lines(self,lines,color=None,speed=None):
    """Draw an iterable of line segments onto the grid and canvas.

    lines: segments shaped like (x1, y1, x2, y2[, color, width]).
    color: optional override colour for every segment.
    speed: optional temporary canvas speed; self.speed is restored.
    """
    # Record the segments in the hit grid first.
    self.grid.draw_lines(lines,1)
    if speed:
        self.canvas.speed = speed
    else:
        self.canvas.speed = self.speed
    for line in lines:
        if self.sprite:
            # Walk the sprite to the segment start, facing along it...
            self.sprite.move(line[0],line[1],bearing(line))
        if color:
            self.canvas.draw_line(line,color=color)
        else:
            self.canvas.draw_line(line)
        if self.sprite:
            # ...then to the segment end after the line is drawn.
            self.sprite.move(line[2],line[3],bearing(line))
    self.canvas.speed = self.speed
def _draw(self):
    # Flush the cached segments accumulated by movement commands.
    self.draw_lines(self._lines_to_draw)
    self._lines_to_draw = []
def _move(self,amount):
    # Advance (x, y) by amount along the current heading (gamegrids.xy).
    (self.x,self.y) = xy(self.x,self.y,self.direction,amount)
def move(self,amount=100):
self.lastx = self.x
self.lasty = self.y
self._move(amount)
if self.color == 'random':
color = self.random_color()
else:
color = self.color
line = (self.lastx,self.lasty,self.x,self.y,color,self.width)
self._lines_to_dr |
ort Errors
from email import Charset
SEMISPACE = '; '
# Regular expression used to split header parameters. BAW: this may be too
# simple. It isn't strictly RFC 2045 (section 5.1) compliant, but it catches
# most headers found in the wild. We may eventually need a full-fledged
# parser.
paramre = re.compile(r'\s*;\s*')
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
# Helper functions
def _formatparam(param, value=None, quote=True):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true.
"""
if value is not None and len(value) > 0:
# A tuple is used for RFC 2231 encoded parameter values where items
# are (charset, language, value). charset is a string, not a Charset
# instance.
if isinstance(value, tuple):
# Encode as per RFC 2231
param += '*'
value = Utils.encode_rfc2231(value[2], value[0], value[1])
# BAW: Please check this. I think that if quote is set it should
# force quoting even if not necessar | y.
if quote or tspecials.search(value):
return '%s="%s"' % (param, Utils.quote(value))
else:
return '%s=%s' % (param, value)
else:
return param
def _parseparam(s):
    """Split a ';'-separated parameter string, respecting quoted values.

    s must begin with ';' for a parameter to be consumed.  Returns a
    list of 'name=value' strings with the name lowercased and both
    sides whitespace-stripped; quotes in values are preserved.
    """
    plist = []
    while s[:1] == ';':
        s = s[1:]
        end = s.find(';')
        # Skip over ';' characters that fall inside an open quote.
        while end > 0 and s.count('"', 0, end) % 2:
            end = s.find(';', end + 1)
        if end < 0:
            end = len(s)
        f = s[:end]
        if '=' in f:
            i = f.index('=')
            f = f[:i].strip().lower() + '=' + f[i+1:].strip()
        plist.append(f.strip())
        s = s[end:]
    return plist
def _unquotevalue(value):
    # This is different than Utils.collapse_rfc2231_value() because it
    # doesn't try to convert the value to a unicode.  Message.get_param()
    # and Message.get_params() are both currently defined to return the
    # tuple in the face of RFC 2231 parameters.
    if not isinstance(value, tuple):
        return Utils.unquote(value)
    charset, language, text = value
    return charset, language, Utils.unquote(text)
class Message:
"""Basic message object.
A message object is defined as something that has a bunch of RFC 2822
headers and a payload. It may optionally have an envelope header
(a.k.a. Unix-From or From_ header). If the message is a container (i.e. a
multipart or a message/rfc822), then the payload is a list of Message
objects, otherwise it is a string.
Message objects implement part of the `mapping' interface, which assumes
there is exactly one occurrance of the header per message. Some headers
do in fact appear multiple times (e.g. Received) and for those headers,
you must use the explicit API to set or get all the headers. Not all of
the mapping methods are implemented.
"""
def __init__(self):
    self._headers = []        # message headers, in order
    self._unixfrom = None     # optional Unix From_ envelope line
    self._payload = None      # str, or list of Messages when multipart
    self._charset = None      # default Charset for the payload
    # Defaults for multipart messages
    self.preamble = self.epilogue = None
    self.defects = []         # problems found while parsing
    # Default content type
    self._default_type = 'text/plain'
def __str__(self):
    """Return the entire formatted message as a string.
    This includes the headers, body, and envelope header.
    """
    # Delegates to as_string() with the Unix From_ line included.
    return self.as_string(unixfrom=True)
def as_string(self, unixfrom=False):
    """Return the entire formatted message as a string.
    Optional `unixfrom' when True, means include the Unix From_ envelope
    header.

    This is a convenience method and may not generate the message exactly
    as you intend because by default it mangles lines that begin with
    "From ". For more flexibility, use the flatten() method of a
    Generator instance.
    """
    # Local import avoids a circular import at module load time.
    from email.Generator import Generator
    fp = StringIO()
    g = Generator(fp)
    g.flatten(self, unixfrom=unixfrom)
    return fp.getvalue()
def is_multipart(self):
    """Return True if the message consists of multiple parts."""
    # Multipart payloads are stored as a list of Message objects.
    return isinstance(self._payload, list)
#
# Unix From_ line
#
def set_unixfrom(self, unixfrom):
    # Store the Unix From_ (envelope) line verbatim.
    self._unixfrom = unixfrom

def get_unixfrom(self):
    # None when no envelope header has been set.
    return self._unixfrom
#
# Payload manipulation.
#
def attach(self, payload):
    """Add the given payload to the current payload.

    The current payload will always be a list of objects after this
    method is called.  If you want to set the payload to a scalar
    object, use set_payload() instead.
    """
    if self._payload is None:
        self._payload = []
    self._payload.append(payload)
def get_payload(self, i=None, decode=False):
    """Return a reference to the payload.

    The payload will either be a list object or a string.  If you mutate
    the list object, you modify the message's payload in place.  Optional
    i returns that index into the payload.

    Optional decode is a flag indicating whether the payload should be
    decoded or not, according to the Content-Transfer-Encoding header
    (default is False).

    When True and the message is not a multipart, the payload will be
    decoded if this header's value is `quoted-printable' or `base64'.  If
    some other encoding is used, or the header is missing, or if the
    payload has bogus data (i.e. bogus base64 or uuencoded data), the
    payload is returned as-is.

    If the message is a multipart and the decode flag is True, then None
    is returned.
    """
    if i is None:
        payload = self._payload
    elif not isinstance(self._payload, list):
        # Indexing only makes sense for multipart payloads.
        raise TypeError('Expected list, got %s' % type(self._payload))
    else:
        payload = self._payload[i]
    if decode:
        if self.is_multipart():
            return None
        cte = self.get('content-transfer-encoding', '').lower()
        if cte == 'quoted-printable':
            return Utils._qdecode(payload)
        elif cte == 'base64':
            try:
                return Utils._bdecode(payload)
            except binascii.Error:
                # Incorrect padding
                return payload
        elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
            sfp = StringIO()
            try:
                # Trailing newline appeases the uu decoder.
                uu.decode(StringIO(payload+'\n'), sfp, quiet=True)
                payload = sfp.getvalue()
            except uu.Error:
                # Some decoding problem
                return payload
    # Everything else, including encodings with 8bit or 7bit are returned
    # unchanged.
    return payload
def set_payload(self, payload, charset=None):
    """Set the payload to the given value.

    Optional charset sets the message's default character set.  See
    set_charset() for details.
    """
    self._payload = payload
    if charset is not None:
        self.set_charset(charset)
def set_charset(self, charset):
"""Set the charset of the payload to a given character set.
charset can be a Charset instance, a string naming a character set, or
None. If it is a string it will be converted to a Charset instance.
If charset is None, the charset parameter will be removed from the
Content-Type field. Anything else will generate a TypeError.
The message will be assumed to be of type text/* encoded with
charset.input_charset. It will be converted to charset.output_charset
and encoded properly, if needed, when generating the plain text
representation of the message. MIME headers (MIME-Version,
Content-Type, Content-Transfer-Encoding) will be added as needed.
"""
if charse |
ample:
```python
x = tf.placeholder(tf.float32, shape=(1024, 1024))
y = tf.matmul(x, x)
with tf.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
rand_array = np.random.rand(1024, 1024)
print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.
```
Args:
dtype: The type of elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a tensor of any shape.
name: A name for the operation (optional).
Returns:
A `Tensor` that may be used as a handle for feeding a value, but not
evaluated directly.
"""
shape = tensor_shape.as_shape(shape)
if shape.is_fully_defined():
dim_list = shape.as_list()
else:
dim_list = []
ret = gen_array_ops._placeholder(
dtype=dtype,
shape=dim_list,
name=name)
ret.set_shape(shape)
return ret
# pylint: disable=redefined-outer-name
def _normalize_sparse_shape(shape, name):
    """Takes numpy array or Tensor or None and returns either None or Tensor."""
    if shape is None:
        return None
    # A static shape containing an unknown (None) dimension cannot be
    # converted to a dense int64 tensor.
    if not isinstance(shape, ops.Tensor) and any(el is None for el in shape):
        return None
    return ops.convert_to_tensor(shape, name=name)
def sparse_placeholder(dtype, shape=None, name=None):
    """Inserts a placeholder for a sparse tensor that will be always fed.

    **Important**: This sparse tensor will produce an error if evaluated.
    Its value must be fed using the `feed_dict` optional argument to
    `Session.run()`, `Tensor.eval()`, or `Operation.run()`.

    For example:

    ```python
    x = tf.sparse_placeholder(tf.float32)
    y = tf.sparse_reduce_sum(x)

    with tf.Session() as sess:
      print(sess.run(y))  # ERROR: will fail because x was not fed.

      indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
      values = np.array([1.0, 2.0], dtype=np.float32)
      shape = np.array([7, 9, 2], dtype=np.int64)
      print(sess.run(y, feed_dict={
        x: tf.SparseTensorValue(indices, values, shape)}))  # Will succeed.
      print(sess.run(y, feed_dict={
        x: (indices, values, shape)}))  # Will succeed.

      sp = tf.SparseTensor(indices=indices, values=values, shape=shape)
      sp_value = sp.eval(session)
      print(sess.run(y, feed_dict={x: sp_value}))  # Will succeed.
    ```

    Args:
      dtype: The type of `values` elements in the tensor to be fed.
      shape: The shape of the tensor to be fed (optional). If the shape is not
        specified, you can feed a sparse tensor of any shape.
      name: A name for prefixing the operations (optional).

    Returns:
      A `SparseTensor` that may be used as a handle for feeding a value, but not
        evaluated directly.
    """
    shape_name = (name + "/shape") if name is not None else None
    shape = _normalize_sparse_shape(shape, shape_name)
    if shape is None:
        # Unknown dense shape: feed it too, as an int64 vector placeholder.
        shape = placeholder(dtypes.int64, shape=[None], name=shape_name)
    # values is a vector and indices an [nnz, ndims] int64 matrix.
    return sparse_tensor.SparseTensor(
        values=placeholder(
            dtype, shape=[None],
            name=(name + "/values") if name is not None else None),
        indices=placeholder(
            dtypes.int64, shape=[None, None],
            name=(name + "/indices") if name is not None else None),
        shape=shape
    )
# pylint: enable=redefined-outer-name
def pad(tensor, paddings, mode="CONSTANT", name=None):  # pylint: disable=invalid-name
  """Pads a tensor.

  This operation pads a `tensor` according to the `paddings` you specify.
  `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
  `tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
  many values to add before the contents of `tensor` in that dimension, and
  `paddings[D, 1]` indicates how many values to add after the contents of
  `tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
  and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
  `mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
  no greater than `tensor.dim_size(D)`.

  The padded size of each dimension D of the output is:

  `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`

  For example:

  ```python
  # 't' is [[1, 2, 3], [4, 5, 6]].
  # 'paddings' is [[1, 1,], [2, 2]].
  # rank of 't' is 2.
  pad(t, paddings, "CONSTANT") ==> [[0, 0, 0, 0, 0, 0, 0],
                                    [0, 0, 1, 2, 3, 0, 0],
                                    [0, 0, 4, 5, 6, 0, 0],
                                    [0, 0, 0, 0, 0, 0, 0]]

  pad(t, paddings, "REFLECT") ==> [[6, 5, 4, 5, 6, 5, 4],
                                   [3, 2, 1, 2, 3, 2, 1],
                                   [6, 5, 4, 5, 6, 5, 4],
                                   [3, 2, 1, 2, 3, 2, 1]]

  pad(t, paddings, "SYMMETRIC") ==> [[2, 1, 1, 2, 3, 3, 2],
                                     [2, 1, 1, 2, 3, 3, 2],
                                     [5, 4, 4, 5, 6, 6, 5],
                                     [5, 4, 4, 5, 6, 6, 5]]
  ```

  Args:
    tensor: A `Tensor`.
    paddings: A `Tensor` of type `int32`.
    mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `tensor`.

  Raises:
    ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
  """
  # Convert lower/mixed case to upper for NumPy compatibility
  # (NumPy uses all lower-case modes).
  mode = mode.upper()
  if mode == "CONSTANT":
    return gen_array_ops._pad(tensor, paddings, name=name)
  if mode in ("REFLECT", "SYMMETRIC"):
    # Both non-constant modes share one kernel, selected via the mode attr.
    return gen_array_ops._mirror_pad(tensor, paddings, mode=mode, name=name)
  raise ValueError("Unknown padding mode: %s" % mode)
def meshgrid(*args, **kwargs):
  """Broadcasts parameters for evaluation on an N-D grid.

  Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`
  of N-D coordinate arrays for evaluating expressions on an N-D grid.

  Notes:

  `meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.
  When the `indexing` argument is set to 'xy' (the default), the broadcasting
  instructions for the first two dimensions are swapped.

  Examples:

  Calling `X, Y = meshgrid(x, y)` with the tensors
  ```prettyprint
    x = [1, 2, 3]
    y = [4, 5, 6]
  ```
  results in
  ```prettyprint
    X = [[1, 1, 1],
         [2, 2, 2],
         [3, 3, 3]]
    Y = [[4, 5, 6],
         [4, 5, 6],
         [4, 5, 6]]
  ```

  Args:
    *args: `Tensor`s with rank 1
    indexing: Either 'xy' or 'ij' (optional, default: 'xy')
    name: A name for the operation (optional).

  Returns:
    outputs: A list of N `Tensor`s with rank N
  """
  indexing = kwargs.pop("indexing", "xy")
  name = kwargs.pop("name", "meshgrid")
  if kwargs:
    key = list(kwargs.keys())[0]
    raise TypeError("'{}' is an invalid keyword argument "
                    "for this function".format(key))

  if indexing not in ("xy", "ij"):
    raise ValueError("indexing parameter must be either 'xy' or 'ij'")

  with ops.name_scope(name, "meshgrid", args) as name:
    ndim = len(args)
    s0 = (1,) * ndim

    # Prepare reshape by inserting dimensions with size 1 where needed:
    # the i-th input keeps its length only along axis i.
    output = []
    for i, x in enumerate(args):
      output.append(reshape(stack(x), (s0[:i] + (-1,) + s0[i + 1::])) )
    # Create parameters for broadcasting each tensor to the full size
    shapes = [size(x) for x in args]

    output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype

    if indexing == "xy" and ndim > 1:
      # Cartesian convention: swap the roles of the first two axes.
      output[0] = reshape(output[0], (1, -1) + (1,)*(ndim - 2))
      output[1] = reshape(output[1], (-1, 1) + (1,)*(ndim - 2))
      shapes[0], shapes[1] = shapes[1], shapes[0]

    # TODO: improve performance with a broadcast
    mult_fact = ones(shapes, output_dtype)
    return [x * mult_fact for x in output]
ops. |
# -*- coding: utf-8 -*-
import csv
import os
import gzip
class File:
    """Thin wrapper around plain-text, gzip and CSV file I/O.

    Keyword arguments understood by read()/write():
        root_path    -- optional directory joined in front of ``path``
        content_type -- 'txt' (default), 'gz' or 'csv'
    """

    def read(self, path, **kwargs):
        """Yield the file's content.

        For 'txt' and 'gz' a single blob is yielded; for 'csv' one list
        per row is yielded.  Raises Exception on an unknown content_type.
        """
        path = os.path.join(kwargs.get('root_path', ''), path)
        content_type = kwargs.get('content_type', 'txt')
        if content_type == 'txt':
            # open() instead of the Python-2-only file() builtin.
            with open(path, 'r') as f:
                content = f.read()
                yield content
        elif content_type == 'gz':
            with gzip.open(path, 'r') as f:
                content = f.read()
                yield content
        elif content_type == 'csv':
            # 'rU' (universal newlines) kept for backward compatibility.
            with open(path, 'rU') as f:
                reader = csv.reader(f)
                for line in reader:
                    yield line
        else:
            raise Exception('Bad file type')

    def write(self, path, content, **kwargs):
        """Write content to path.

        For 'csv', content is an iterable of dicts each carrying a
        'type' key ('single' or 'multi') and a 'data' key with the
        row(s); other row types raise Exception.
        """
        path = os.path.join(kwargs.get('root_path', ''), path)
        content_type = kwargs.get('content_type', 'txt')
        if content_type == 'txt':
            with open(path, 'wb') as f:
                f.write(content)
        elif content_type == 'gz':
            with gzip.open(path, 'w') as f:
                f.write(content)
        elif content_type == 'csv':
            with open(path, 'wb') as f:
                writer = csv.writer(f)
                for c in content:
                    if c['type'] == 'single':
                        writer.writerow(c['data'])
                    elif c['type'] == 'multi':
                        writer.writerows(c['data'])
                    else:
                        raise Exception('Row type must be specified')
        else:
            raise Exception('Bad file type')

    def exists(self, path):
        """Return True if path exists on the filesystem."""
        return os.path.exists(path)
|
until the confirmation key expires.
@rtype: dict
@return: A dictionary with the following keys:
'confirmation_key': <str> The code that is used to confirm the invitation
'match_user': <str> A username, if a user exists with that email-address
'match_user_email': An e-mailaddress. Not sure why?
"""
ac = self.account_class(self.db)
assert hasattr(inviter, 'entity_id')
assert hasattr(group, 'entity_id')
timeout = DateTimeDelta(int(timeout))
if timeout.day < 1:
raise CerebrumError('Timeout too short (%d)' % timeout.day)
if (timeout > cereconf.MAX_INVITE_PERIOD):
raise CerebrumError("Timeout too long (%d)" % timeout.day)
ret = {'confirmation_key': self.vhutils.setup_event_request(
group.entity_id,
self.clconst.va_group_invitation,
params={'inviter_id': inviter.entity_id,
'group_id': group.entity_id,
'invitee_mail': email,
'timeout': timeout.day,},
change_by=inviter.entity_id)}
# check if e-mail matches a valid username
try:
ac.find_by_name(email)
ret['match_user'] = ac.account_name
if ac.np_type in (self.co.fedaccount_type, self.co.virtaccount_type):
ret['match_user_email'] = ac.get_email_address()
except NotFoundError:
pass
return ret
def group_disable(self, group):
    """This method removes all members and auth data related to a group,
    effectively disabling it without actually 'nuking' it.

    @type group: Cerebrum.Group
    @param group: Populated group object to disable

    @rtype: str
    @return: The name of the group that was disabled, nice for feedback.
    """
    assert hasattr(group, 'entity_id')
    # Yank all the spreads
    for row in group.get_spread():
        group.delete_spread(row["spread"])
    # Remove all members
    for membership in group.search_members(group_id=group.entity_id,
                                           member_filter_expired=False):
        group.remove_member(membership["member_id"])
    group.write_db()
    # Clean up the permissions (granted ON the group and TO the group)
    self.vhutils.remove_auth_targets(group.entity_id)
    self.vhutils.remove_auth_roles(group.entity_id)
    return group.group_name
class VirthomeUtils:
    """ Helper methods related to virthome """

    def __init__(self, db):
        # db: an open Cerebrum database connection, shared by all the
        # Factory-built helper objects below.
        self.db = db
        self.co = Factory.get('Constants')(db)
        self.clconst = Factory.get('CLConstants')(db)
        self.group_class = Factory.get('Group')
        self.account_class = Factory.get('Account')
        # Pre-compile the forward-URL whitelist patterns once here rather
        # than on each whitelist check.
        self.url_whitelist = [re.compile(r) for r in cereconf.FORWARD_URL_WHITELIST]
def group_exists(self, name):
    """ This method simply tests if a group name exists in the database

    @type name: str
    @param name: Name of the group to look for

    @rtype: bool
    @return: True if the group exists, otherwise False
    """
    group = self.group_class(self.db)
    try:
        group.find_by_name(name)
    except NotFoundError:
        return False
    return True
def list_group_members(self, group, indirect_members=False):
    """ This method lists members of a group. It does NOT include operators
    or moderators, unless they are also members.

    @type group: Cerebrum.Group
    @param group: The group to list members of

    @type indirect_members: bool
    @param indirect_members: If we should include indirect members

    @rtype: list
    @return: Dicts with keys 'member_id', 'member_type', 'member_name',
             'owner_name' and 'email_address', sorted by member_name.
    """
    ac = self.account_class(self.db)
    gr = self.group_class(self.db)
    assert hasattr(group, 'entity_id')
    result = list()
    for x in group.search_members(group_id=group.entity_id,
                                  indirect_members=indirect_members):
        owner_name = None
        member_name = None
        email_address = None
        member_type = self.co.EntityType(x['member_type'])
        if member_type == self.co.entity_account:
            ac.clear()
            ac.find(x['member_id'])
            # Only virthome account types expose name/owner/email here.
            if ac.np_type in (self.co.fedaccount_type,
                              self.co.virtaccount_type):
                member_name = ac.account_name
                owner_name = ac.get_owner_name(self.co.human_full_name)
                email_address = ac.get_email_address()
        elif member_type == self.co.entity_group:
            gr.clear()
            gr.find(x['member_id'])
            member_name = gr.group_name
        result.append({'member_id': x['member_id'],
                       'member_type': str(member_type),
                       'member_name': member_name,
                       'owner_name': owner_name,
                       'email_address': email_address,})
    # Python 2 cmp-style sort by member name.
    result.sort(lambda x, y: cmp(x['member_name'], y['member_name']))
    return result
def list_group_memberships(self, account, indirect_members=False, realm=None):
    """ This method lists groups that an account is member of.

    @type account: Cerebrum.Account
    @param account: The account we're looking up memberships for

    @type indirect_members: bool
    @param indirect_members: Whether indirect memberships are included

    @type realm: str or NoneType
    @param realm: Filter groups by realm. A realm of 'webid.uio.no' will
                  only return groups on the format '*@webid.uio.no'. No
                  filtering for empty string or None.

    @rtype: list
    @return: A list with dictionaries, one dict per group membership.
             Contain keys 'group_id', 'name', 'description', 'visibility',
             'creator_id', 'created_at', 'expire_date' (and 'url' when the
             group has a registered resource URL)
    """
    gr = self.group_class(self.db)
    assert hasattr(account, 'entity_id')
    result = list()
    for group in gr.search(member_id=account.entity_id,
                           indirect_members=indirect_members):
        if realm and not self.in_realm(group['name'], realm):
            continue
        gr.clear()
        gr.find(group['group_id'])
        tmp = dict(group)
        # Fetch url
        resource = gr.get_contact_info(self.co.system_virthome,
                                       self.co.virthome_group_url)
        if resource:
            tmp['url'] = resource[0]['contact_value']
        result.append(tmp)
    return result
def get_trait_val(self, entity, trait_const, val='strval'):
    """Get the trait value of type L{val} of L{entity} that is of type
    L{trait_const}.

    @type entity: Cerebrum.Entity
    @param entity: The entity which trait is being looked up

    @type trait_const: _EntityTraitCode
    @param trait_const: The type of trait to load

    @rtype: str
    @return: The L{val} of the trait, if it exists. None if the L{entity}
             doesn't have a trait of type L{trait_const}, or the trait
             doesn't have a value L{val}
    """
    assert hasattr(entity, 'entity_id') and hasattr(entity, 'get_trait')
    try:
        return entity.get_trait(trait_const).get(val, None)
    except AttributeError:
        # get_trait() returned None -- no such trait on this entity.
        return None
def whitelist_url(self, url):
""" This is a 'last stand' for forward urls. The URL must match at
least one of the whitelist regular expressions if we're to store it as
a forward url.
@type url: str
@param url: The URL to whitelist
@rtype: str
|
# -*- coding: utf-8 -*-
#above helps to declare what encoding we want to use in the module
#note this is copied from the first json lab
#above is used to set the encoding for this module, (unfortunately didn't help that much)
#used for seeing our data in nicest string format possible
import pprint
#again idiom for reading in a file, relative path given
with open('../pres_on_trade.txt', 'r') as fp:
all_text = fp.read()
#str.split() will split groups of characters on any white space, easy... nice
#sorted built-in function will only sort alphbetically here
all_words = sorted(all_text.split())
#begin preparation of words for a reasonable word frequency count
#we need to change our words from str to unicode
#unicode_words = [unicode(word) for word in all_words if unicode(word)]
#list comprehensions won't work because we get errors,
#let's do a try: except: block
unicode_words = []
for word in all_words:
try:
unicode_words.append(unicode(word))
except UnicodeDecodeError:
pass
#awesome list comprehension, they take iterables and return lists
#this will clean our words of unwanted punctuation and change to all lowercase
all_words = [word.strip("?.\'-,().").lower() for word in unicode_words]
#print all_words
#help(''.strip)
#reminder on dictionary syntax - setting the key and value
#dict_name[key] = value
#word_freq_dc['word'] = 18
#using dict.get method to check for existence and build word_freq dictionary
word_freq_dc = {}
for word in all_words:
times = word_freq_dc.get(word, 0)
times += 1
word_freq_dc[word] = times
#the easy way :) if you knew about it or where to look
from collections import Counter
#help(Counter)
counter = Counter(all_words)
#can use slice method on a sequence, this gets first 40 of type list
#that is: Counter.most_common() returns a list, a list is considerd one kind of sequence
print(counter.most_common()[:40])
#end line character for clarity when printing
print '\n'
#to be sure
counter_for_dc = Counter(word_freq_dc)
counter_from_before = Counter(all_words)
print counter_for_dc == counter_from_before
#going further with a generator expression
non_small_words = (word for word in all_words
if len(word) > 4 and
word is not 'usa' and
word not in
['applause', 'laughter', 'there', 'these', 'those'])
recounter = Counter(non_small_words)
print(rec | ounter.most_common()[:40])
#below is work we did to figure out the proper procedure to
#count words using a dictionary
#pprint.pprint(word_freq_dc)
#for k, v in word_freq_dc.iteritems():
# tupled_word_freq.ap | pend((k, v))
#tupled_word_freq = zip(word_freq_dc.itervalues(), word_freq_dc.iterkeys())
#print(tupled_word_freq)
#print sorted(tupled_word_freq)
#help(word_freq_dc.get)
|
# shipHeavyMissileExpDmgPirateCruiser
#
# Used by:
# Ship: Gnosis
type = "passive"


def handler(fit, ship, context):
    """Apply the ship's shipBonusRole7 attribute as an explosiveDamage
    boost to modules whose loaded charge requires the Heavy Missiles skill."""
    bonus = ship.getModifiedItemAttr("shipBonusRole7")
    fit.modules.filteredChargeBoost(
        lambda mod: mod.charge.requiresSkill("Heavy Missiles"),
        "explosiveDamage", bonus)
|
import urllib.request
import json
from modules import userDatabase
def get_what_pulse_url(param: str):
    """Build the WhatPulse JSON API URL for the given username or numeric ID."""
    return "http://api.whatpulse.org/user.php?user=%s&formatted=yes&format=json" % (param,)
# noinspection PyUnusedLocal
def on_channel_pm(irc, user_mask, user, channel, message):
    """Handle the !setwhatpulse/!setwp and !whatpulse/!wp channel commands.

    BUG FIX: user records were previously read through three different
    attribute names (irc.user_info, irc.userData, irc.user_infoData); the
    store written by the !setwhatpulse branch and the membership test both
    use ``irc.user_info``, so every read now uses that attribute.
    """
    command = message.split()
    if command[0].lower() == '!setwhatpulse' or command[0].lower() == '!setwp':
        if len(command) != 2:
            irc.send_private_message(channel, "USAGE: !setw[hat]p[ulse] (WhatPulse ID/WhatPulse Username)")
            return
        if command[1].isdigit():
            irc.send_private_message(channel, "USAGE: !setw[hat]p[ulse] (WhatPulse ID/WhatPulse Username)")
            return
        irc.send_private_message(channel, "3SUCCESS: Your WhatPulse ID has been changed.")
        irc.user_info[user.lower()]['whatpulse'] = str(command[1])
        userDatabase.save_user_database(irc)
    elif command[0].lower() == '!whatpulse' or command[0].lower() == '!wp':
        param = str()
        if len(command) == 1:
            # No argument: use the caller's stored WhatPulse ID.
            param = str(irc.user_info[user.lower()]['whatpulse'])
            if irc.user_info[user.lower()]['whatpulse'] == "":
                irc.send_private_message(channel, "5ERROR: You have not set your WhatPulse ID yet.")
                irc.send_private_message(channel,
                                         "USAGE: !w[hat]p[ulse] (WhatPulse ID/WhatPulse Username/IRC Nickname)")
                irc.send_private_message(channel, "USAGE: !setw[hat]p[ulse] (WhatPulse ID)")
                return
        elif len(command) == 2:
            command = message.split(' ', 1)
            param = str(command[1])
            # If the argument is a known IRC nick with a stored ID, use that.
            if command[1].lower() in irc.user_info:
                if irc.user_info[command[1].lower()]['whatpulse'] != "":
                    param = str(irc.user_info[command[1].lower()]['whatpulse'])
        try:
            response = urllib.request.urlopen(get_what_pulse_url(param))
            html_source = response.read().decode('utf-8')
            response.close()
        except IOError:
            irc.send_private_message(channel, "5ERROR: The WhatPulse service is currently unavailable.")
            return
    try:
            whatpulse_info = json.loads(html_source)
    except ValueError:
            irc.send_private_message(channel, '5ERROR: An unknown WhatPulse Username/ID was given.')
            return
|
from .dispatch import dispatch
from .compatibility import basestring
from blaze.expr.literal import BoundSymbol, data as bz_data
@dispatch(object, (basestring, list, tuple))
def create_index(t, column_name_or_names, name=None):
    """Create an index on a column.

    Parameters
    ----------
    t : table-like
        The data object to index.
    column_name_or_names : string, list, tuple
        A column name to index on, or a list or tuple for a composite index.
    name : str, optional
        The name of the index to create (required by some backends).

    Examples
    --------
    >>> # Using SQLite
    >>> from blaze import SQL
    >>> # create a table called 'tb', in memory
    >>> sql = SQL('sqlite:///:memory:', 'tb',
    ...           schema='{id: int64, value: float64, categ: string}')
    >>> dta = [(1, 2.0, 'a'), (2, 3.0, 'b'), (3, 4.0, 'c')]
    >>> sql.extend(dta)
    >>> # create an index on the 'id' column (for SQL we must provide a name)
    >>> sql.table.indexes
    set()
    >>> create_index(sql, 'id', name='id_index')
    >>> sql.table.indexes
    {Index('id_index', Column('id', BigInteger(), table=<tb>, nullable=False))}
    """
    # Generic fallback: no backend-specific implementation was registered
    # for this type.
    failure = "create_index not implemented for type %r" % type(t).__name__
    raise NotImplementedError(failure)
@dispatch(BoundSymbol, (basestring, list, tuple))
def create_index(dta, column_name_or_names, name=None, **kwargs):
    """Delegate index creation to the symbol's underlying data object."""
    underlying = dta.data
    return create_index(underlying, column_name_or_names, name=name, **kwargs)
@dispatch(basestring, (basestring, list, tuple))
def create_index(uri, column_name_or_names, name=None, **kwargs):
    """Resolve *uri* to a data object, create the index, return the object."""
    resolved = bz_data(uri, **kwargs)
    create_index(resolved, column_name_or_names, name=name)
    return resolved
|
normal argument may be a quoted variable
# e.g. strip now, not later
if not value:
return None
else:
value = value.strip("'\"")
variables = ApacheParser.arg_var_interpreter.findall(value)
for var in variables:
# Strip off ${ and }
try:
value = value.replace(var, self.variables[var[2:-1]])
except KeyError:
raise errors.PluginError("Error Parsing variable: %s" % var)
return value
def _exclude_dirs(self, matches):
"""Exclude directives | that are not loaded into the configuration."""
filters = [("ifmodule", self.modules), ("ifdefine", self.variables)]
valid_matches = []
for match in matches:
for filter_ in filters:
if not self._pass_filter(match, filter_):
break
| else:
valid_matches.append(match)
return valid_matches
    def _pass_filter(self, match, filter_):
        """Determine if directive passes a filter.

        :param str match: Augeas path
        :param list filter: list of tuples of form
            [("lowercase if directive", set of relevant parameters)]
        :returns: False as soon as one enclosing conditional section is
            judged inactive; True otherwise.
        """
        match_l = match.lower()
        # Scan every occurrence of the conditional directive in the path.
        last_match_idx = match_l.find(filter_[0])
        while last_match_idx != -1:
            # Check args
            end_of_if = match_l.find("/", last_match_idx)
            # This should be aug.get (vars are not used e.g. parser.aug_get)
            # NOTE(review): aug.get may return None if the section has no
            # "arg" node, which would make .startswith raise -- confirm.
            expression = self.aug.get(match[:end_of_if] + "/arg")
            if expression.startswith("!"):
                # Strip off "!"
                # Negated argument: section is inactive if the parameter
                # IS present in the relevant set.
                if expression[1:] in filter_[1]:
                    return False
            else:
                # Plain argument: section is inactive if the parameter is
                # NOT present in the relevant set.
                if expression not in filter_[1]:
                    return False
            last_match_idx = match_l.find(filter_[0], end_of_if)
        return True
    def _get_include_path(self, arg):
        """Converts an Apache Include directive into Augeas path.

        Converts an Apache Include directive argument into an Augeas
        searchable path

        .. todo:: convert to use os.path.join()

        :param str arg: Argument of Include directive
        :returns: Augeas path string
        :rtype: str
        """
        # Check to make sure only expected characters are used <- maybe remove
        # validChars = re.compile("[a-zA-Z0-9.*?_-/]*")
        # matchObj = validChars.match(arg)
        # if matchObj.group() != arg:
        #     logger.error("Error: Invalid regexp characters in %s", arg)
        #     return []
        # Remove beginning and ending quotes
        arg = arg.strip("'\"")
        # Standardize the include argument based on server root
        if not arg.startswith("/"):
            # Relative include: resolve against the server root.
            # Normpath will condense ../
            arg = os.path.normpath(os.path.join(self.root, arg))
        else:
            arg = os.path.normpath(arg)
        # Attempts to add a transform to the file if one does not already exist
        # (a directory argument means "include everything inside it").
        if os.path.isdir(arg):
            self.parse_file(os.path.join(arg, "*"))
        else:
            self.parse_file(arg)
        # Argument represents an fnmatch regular expression, convert it
        # Split up the path and convert each into an Augeas accepted regex
        # then reassemble
        split_arg = arg.split("/")
        for idx, split in enumerate(split_arg):
            if any(char in ApacheParser.fnmatch_chars for char in split):
                # Turn it into a augeas regex
                # TODO: Can this instead be an augeas glob instead of regex
                split_arg[idx] = ("* [label()=~regexp('%s')]" %
                                  self.fnmatch_to_re(split))
        # Reassemble the argument
        # Note: This also normalizes the argument /serverroot/ -> /serverroot
        arg = "/".join(split_arg)
        return get_aug_path(arg)
def fnmatch_to_re(self, clean_fn_match): # pylint: disable=no-self-use
"""Method converts Apache's basic fnmatch to regular expression.
Assumption - Configs are assumed to be well-formed and only writable by
privileged users.
https://apr.apache.org/docs/apr/2.0/apr__fnmatch_8h_source.html
http://apache2.sourcearchive.com/documentation/2.2.16-6/apr__fnmatch_8h_source.html
:param str clean_fn_match: Apache style filename match, like globs
:returns: regex suitable for augeas
:rtype: str
"""
if sys.version_info < (3, 6):
# This strips off final /Z(?ms)
return fnmatch.translate(clean_fn_match)[:-7]
else: # pragma: no cover
# Since Python 3.6, it returns a different pattern like (?s:.*\.load)\Z
return fnmatch.translate(clean_fn_match)[4:-3]
    def parse_file(self, filepath):
        """Parse file with Augeas

        Checks to see if file_path is parsed by Augeas
        If filepath isn't parsed, the file is added and Augeas is reloaded

        :param str filepath: Apache config file path
        """
        use_new, remove_old = self._check_path_actions(filepath)
        # Ensure that we have the latest Augeas DOM state on disk before
        # calling aug.load() which reloads the state from disk
        if self.configurator:
            self.configurator.ensure_augeas_state()
        # Test if augeas included file for Httpd.lens
        # Note: This works for augeas globs, ie. *.conf
        if use_new:
            inc_test = self.aug.match(
                "/augeas/load/Httpd['%s' =~ glob(incl)]" % filepath)
            if not inc_test:
                # Load up files
                # This doesn't seem to work on TravisCI
                # self.aug.add_transform("Httpd.lns", [filepath])
                # Replace a narrower transform before adding the broad one.
                if remove_old:
                    self._remove_httpd_transform(filepath)
                self._add_httpd_transform(filepath)
                self.aug.load()
    def parsed_in_current(self, filep):
        """Checks if the file path is parsed by current Augeas parser config

        ie. returns True if the file is found on a path that's found in live
        Augeas configuration.

        :param str filep: Path to match
        :returns: True if file is parsed in existing configuration tree
        :rtype: bool
        """
        # Delegates to the shared matcher using the *live* parser paths.
        return self._parsed_by_parser_paths(filep, self.parser_paths)
    def parsed_in_original(self, filep):
        """Checks if the file path is parsed by existing Apache config.

        ie. returns True if the file is found on a path that matches Include or
        IncludeOptional statement in the Apache configuration.

        :param str filep: Path to match
        :returns: True if file is parsed in existing configuration tree
        :rtype: bool
        """
        # Delegates to the shared matcher using the *original* include paths.
        return self._parsed_by_parser_paths(filep, self.existing_paths)
def _parsed_by_parser_paths(self, filep, paths):
"""Helper function that searches through provided paths and returns
True if file path is found in the set"""
for directory in paths.keys():
for filename in paths[directory]:
if fnmatch.fnmatch(filep, os.path.join(directory, filename)):
return True
return False
def _check_path_actions(self, filepath):
"""Determine actions to take with a new augeas path
This helper function will return a tuple that defines
if we should try to append the new filepath to augeas
parser paths, and / or remove the old one with more
narrow matching.
:param str filepath: filepath to check the actions for
"""
try:
new_file_match = os.path.basename(filepath)
existing_matches = self.parser_paths[os.path.dirname(filepath)]
if "*" in existing_matches:
use_new = False
else:
use_new = True
if new_file_match == "*":
remove_old = True
else:
remove_old = False
except KeyError:
use_new = True
remove_old = False
return use_ |
"""docker"""
import http.client
import json
import socket
__all__ = ['HTTPConnection', 'HTTPError', 'get']
class HTTPConnection(http.client.HTTPConnection):
    """HTTP connection that talks to the local Docker daemon's UNIX socket."""

    def __init__(self):
        super().__init__('localhost')

    def connect(self):
        # Replace the default TCP connect with a UNIX-domain socket.
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        s.connect('/var/run/docker.sock')
        self.sock = s
class HTTPError(Exception):
    """Raised when the Docker API answers with a non-200 status."""
    def __init__(self, status, reason):
        # HTTP status code (int) and the server's reason phrase.
        self.status = status
        self.reason = reason
def get(path, async_=False):
    """GET *path* from the local Docker daemon over its UNIX socket.

    :param path: Docker API path, e.g. '/containers/json'.
    :param async_: when True, return the open response object and leave the
        connection open (the caller must consume and close it).  Renamed
        from ``async``, which became a reserved keyword in Python 3.7 and
        made this module fail to parse.
    :returns: decoded JSON for 'application/json' responses, raw bytes
        otherwise, or the response object when *async_* is True.
    :raises HTTPError: if the daemon answers with a non-200 status.
    """
    conn = HTTPConnection()
    try:
        conn.request('GET', path)
        resp = conn.getresponse()
        if resp.status != 200:
            raise HTTPError(resp.status, resp.reason)
    except Exception:
        # Don't leak the socket if the request or status check fails.
        conn.close()
        raise
    try:
        if async_:
            return resp
        elif resp.headers.get('Content-Type') == 'application/json':
            return json.loads(resp.read().decode('utf-8'))
        else:
            return resp.read()
    finally:
        if not async_:
            conn.close()
|
import csv
import os
import sys
import traceback
import sqlite3
import fnmatch
import decimal
import datetime
def valid_dt(dt):
    """Return True if *dt* is a date string in MM/DD/YYYY format."""
    try:
        datetime.datetime.strptime(dt, "%m/%d/%Y")
        return True
    except (ValueError, TypeError):
        # Not a parseable date.  The original bare ``except:`` would also
        # have swallowed KeyboardInterrupt/SystemExit.
        return False
def adapt_decimal(d):
    """sqlite3 adapter: store Decimal values as their exact string form."""
    return "{0}".format(d)
def convert_decimal(s):
    """sqlite3 converter: rebuild a Decimal from its stored text form."""
    value = decimal.Decimal(s)
    return value
def db_cur(source = ":memory:"):
    """Open a sqlite3 database (in-memory by default) with Decimal support.

    Registers the Decimal<->text adapter/converter pair and enables
    declared-type detection, then returns (connection, cursor).
    """
    # Register the adapter and converter so DECTEXT columns round-trip
    # decimal.Decimal values losslessly as text.
    sqlite3.register_adapter(decimal.Decimal, adapt_decimal)
    sqlite3.register_converter("DECTEXT", convert_decimal)
    connection = sqlite3.connect(source, detect_types=sqlite3.PARSE_DECLTYPES)
    #connection.row_factory = sqlite3.Row
    return connection, connection.cursor()
def question_marks(st):
    """Return a '?,?,...,?' placeholder string matching the columns of *st*.

    *st* is a comma-separated header string; one '?' is produced per column.
    (Replaces a manual string-append loop that also shadowed the function's
    own name with a local variable.)
    """
    return ",".join("?" * len(st.split(",")))
def create_tbl(cur, tbl_name, header, arr = [], index_arr = []):
    """Create *tbl_name* if missing (with 'id' as primary key), add the
    requested indexes, and bulk-insert *arr* when given.

    Python 2 module (print statements below).
    NOTE(review): arr/index_arr use mutable default arguments; they are
    only read here, never mutated, so the usual aliasing bug cannot bite,
    but ``None`` defaults would be safer.
    """
    # Table existence check via the sqlite_master catalog.
    cur.execute("""select count(*) FROM sqlite_master WHERE type='table' AND name = '%s' """ % (tbl_name))
    tbl_exists = cur.fetchone()
    if tbl_exists[0] == 0:
        # Promote the 'id' column (when present) to the primary key.
        print "CREATE TABLE " + tbl_name + " (" + header.replace("id,", "id PRIMARY KEY,") + " );"
        cur.execute("CREATE TABLE " + tbl_name + " (" + header.replace("id,", "id PRIMARY KEY,") + " );")
        for index in index_arr:
            cur.execute("CREATE INDEX " + tbl_name + "_" + index + " ON " + tbl_name + " (" + index + ");")
    if arr != []:
        # One '?' placeholder per header column.
        cur.executemany("INSERT INTO " + tbl_name + " VALUES ("+question_marks(header)+")", arr)
    return
def csv_to_arr(csv_file, start=1, has_header=True):
    """Load *csv_file* into a list of row tuples, normalising MM/DD/YYYY
    dates to YYYY-MM-DD and dropping all-empty columns.

    Returns (header, rows) when has_header is True, otherwise rows only.
    Python 2 code: relies on zip() returning a list (in Python 3 the
    ``arr[0]`` subscript on a zip object would fail).
    """
    arr = []
    with open(csv_file, 'rU') as f:
        reader = csv.reader(f)
        arr = list(reader)
    # Transpose so each element is a column; drop all-empty columns and
    # rewrite date-looking cells; then transpose back to rows.
    arr = zip(*arr)
    arr = [[(datetime.datetime.strptime(y, "%m/%d/%Y").date().strftime("%Y-%m-%d") if valid_dt(y) else y) for y in x] for x in arr if any(x)]
    arr = zip(*arr)
    header = ""
    if has_header:
        header = ','.join(arr[0])
        arr = arr[start:]
        return header, arr
    else:
        return arr[start:]
    # NOTE(review): unreachable -- both branches above already return.
    return
def arr_to_csv(file_name, header, data_arr):
    """Write *data_arr* rows to *file_name* with the comma-separated
    *header* as the first row; every cell is stringified and quoted.

    Python 2 code: csv writers take binary-mode files ('wb') here; under
    Python 3 the file would need mode 'w' with newline=''.
    """
    csv_file = open(file_name, 'wb')
    wr = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
    wr.writerow(header.split(','))
    for data_row in data_arr:
        line = []
        for ele in data_row:
            line.append(str(ele))
        wr.writerow(line)
    csv_file.close()
    return
# Script entry: open an in-memory DB, load the CSV, and show the first row.
conn, cur = db_cur()
header, arr = csv_to_arr("tmp\\20160914.csv")
print arr[0]
|
import datetime
from django.db import models
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib.auth.models import User
from django.contrib import admin
from schedule.models import Occurrence
from django_attendance.conf import settings as attendance | _settings
class EventAttendance(models.Model):
    '''
    The attendance status of a user for a specific Occurrence of an event.
    '''
    # The single event occurrence this attendance record belongs to.
    occurrence = models.OneToOneField(Occurrence)
    # Users recorded as having attended that occurrence.
    attendees = models.ManyToManyField(User)
    class Meta:
        verbose_name = _('attendance')
        verbose_name_plural = _('attendances')
    def __unicode__(self):
        """Human-readable label: event title plus its start time."""
        return "Attendance for %s-%s" % (self.occurrence.title,
                                         self.occurrence.start)
    def duration(self):
        """
        Get the duration of this event in hours, taking the HOUR_MULTIPLIER in
        to account.
        """
        delta = self.occurrence.end - self.occurrence.start
        # Whole days -> hours, plus the sub-day remainder in seconds.
        # (delta.microseconds is ignored; negligible at this scale.)
        real_hours = delta.days * 24 + delta.seconds / (60.0 * 60.0)
        adjusted_hours = attendance_settings.HOUR_MULTIPLIER * real_hours
        return adjusted_hours
admin.site.register(EventAttendance)
|
s_total': 0, 'results_current': 0, 'delete_total': 0, 'images_total': 0, 'select_total': 0 }
counter = {}
for k,v in EXTENSIONS.iteritems():
counter[k] = 0
dir_list = os.listdir(abs_path)
files = []
for file in dir_list:
# EXCLUDE FILES MATCHING VERSIONS_PREFIX OR ANY OF THE EXCLUDE PATTERNS
filtered = file.startswith('.')
for re_prefix in filter_re:
if re_prefix.search(file):
filtered = True
if filtered:
continue
results_var['results_total'] += 1
# CREATE FILEOBJECT
fileobject = FileObject(os.path.join(DIRECTORY, path, file))
# FILTER / SEARCH
append = False
if fileobject.filetype == request.GET.get('filter_type', fileobject.filetype) and get_filterdate(request.GET.get('filter_date', ''), fileobject.date):
append = True
if request.GET.get('q') and not re.compile(request.GET.get('q').lower(), re.M).search(file.lower()):
append = False
# APPEND FILE_LIST
if append:
try:
# COUNTER/RESULTS
if fileobject.filetype == 'Image':
results_var['images_total'] += 1
if fileobject.filetype != 'Folder':
results_var['delete_total'] += 1
elif fileobject.filetype == 'Folder' and fileobject.is_empty:
results_var['delete_total'] += 1
if query.get('type') and query.get('type') in SELECT_FORMATS and fileobject.filetype in SELECT_FORMATS[query.get('type')]:
results_var['select_total'] += 1
elif not query.get('type'):
results_var['select_total'] += 1
except OSError:
# Ignore items that have problems
continue
else:
files.append(fileobject)
results_var['results_current'] += 1
# COUNTER/RESULTS
if fileobject.filetype:
counter[fileobject.filetype] += 1
# SORTING
query['o'] = request.GET.get('o', DEFAULT_SORTING_BY)
query['ot'] = request.GET.get('ot', DEFAULT_SORTING_ORDER)
files = sort_by_attr(files, request.GET.get('o', DEFAULT_SORTING_BY))
if not request.GET.get('ot') and DEFAULT_SORTING_ORDER == "desc" or request.GET.get('ot') == "desc":
files.reverse()
p = Paginator(files, LIST_PER_PAGE)
try:
page_nr = request.GET.get('p', '1')
except:
page_nr = 1
try:
page = p.page(page_nr)
except (EmptyPage, InvalidPage):
page = p.page(p.num_pages)
return render_to_response('filebrowser/index.html', {
'dir': path,
'p': p,
'page': page,
'results_var': results_var,
'counter': counter,
'query': query,
'title': _(u'FileBrowser'),
'settings_var': get_settings_var(),
'breadcrumbs': get_breadcrumbs(query, path),
'breadcrumbs_title': ""
}, context_instance=Context(request))
browse = staff_member_required(never_cache(browse))
# mkdir signals: fired immediately before/after a directory is created so
# external listeners can hook into mkdir().
filebrowser_pre_createdir = Signal(providing_args=["path", "dirname"])
filebrowser_post_createdir = Signal(providing_args=["path", "dirname"])
def mkdir(request):
    """
    Make Directory.

    Validates the requested parent folder, creates the new directory on
    POST (firing pre/post signals), then redirects back to the browse
    view sorted by date so the new folder is visible.
    Python 2 code: uses ``except OSError, (errno, strerror)`` tuple
    unpacking and the 0775 octal literal.
    """
    from filebrowser.forms import MakeDirForm
    # QUERY / PATH CHECK
    query = request.GET
    path = get_path(query.get('dir', ''))
    if path is None:
        msg = _('The requested Folder does not exist.')
        request.user.message_set.create(message=msg)
        return HttpResponseRedirect(reverse("fb_browse"))
    abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, path)
    if request.method == 'POST':
        form = MakeDirForm(abs_path, request.POST)
        if form.is_valid():
            server_path = os.path.join(abs_path, form.cleaned_data['dir_name'])
            try:
                # PRE CREATE SIGNAL
                filebrowser_pre_createdir.send(sender=request, path=path, dirname=form.cleaned_data['dir_name'])
                # CREATE FOLDER
                os.mkdir(server_path)
                os.chmod(server_path, 0775)
                # POST CREATE SIGNAL
                filebrowser_post_createdir.send(sender=request, path=path, dirname=form.cleaned_data['dir_name'])
                # MESSAGE & REDIRECT
                msg = _('The Folder %s was successfully created.') % (form.cleaned_data['dir_name'])
                request.user.message_set.create(message=msg)
                # on redirect, sort by date desc to see the new directory on top of the list
                # remove filter in order to actually _see_ the new folder
                # remove pagination
                redirect_url = reverse("fb_browse") + query_helper(query, "ot=desc,o=date", "ot,o,filter_type,filter_date,q,p")
                return HttpResponseRedirect(redirect_url)
            except OSError, (errno, strerror):
                # errno 13 == EACCES (permission denied).
                if errno == 13:
                    form.errors['dir_name'] = forms.util.ErrorList([_('Permission denied.')])
                else:
                    form.errors['dir_name'] = forms.util.ErrorList([_('Error creating folder.')])
    else:
        form = MakeDirForm(abs_path)
    return render_to_response('filebrowser/makedir.html', {
        'form': form,
        'query': query,
        'title': _(u'New Folder'),
        'settings_var': get_settings_var(),
        'breadcrumbs': get_breadcrumbs(query, path),
        'breadcrumbs_title': _(u'New Folder')
    }, context_instance=Context(request))
mkdir = staff_member_required(never_cache(mkdir))
def upload(request):
    """
    Multiple File Upload.

    Renders the upload page; the session key is extracted from the raw
    cookie header (used for flash-uploading, which does not send cookies
    the normal way).
    """
    from django.http import parse_cookie
    # QUERY / PATH CHECK
    query = request.GET
    path = get_path(query.get('dir', ''))
    if path is None:
        msg = _('The requested Folder does not exist.')
        request.user.message_set.create(message=msg)
        return HttpResponseRedirect(reverse("fb_browse"))
    abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, path)
    # SESSION (used for flash-uploading)
    cookie_dict = parse_cookie(request.META.get('HTTP_COOKIE', ''))
    # NOTE(review): ``engine`` is assigned but never used here --
    # presumably imported for its side effects; confirm before removing.
    engine = __import__(settings.SESSION_ENGINE, {}, {}, [''])
    session_key = cookie_dict.get(settings.SESSION_COOKIE_NAME, None)
    return render_to_response('filebrowser/upload.html', {
        'query': query,
        'title': _(u'Select files to upload'),
        'settings_var': get_settings_var(),
        'session_key': session_key,
        'breadcrumbs': get_breadcrumbs(query, path),
        'breadcrumbs_title': _(u'Upload')
    }, context_instance=Context(request))
upload = staff_member_required(never_cache(upload))
@csrf_exempt
def _check_file(request):
    """
    Check if file already exists on the server.

    Expects POSTed filename candidates keyed by arbitrary ids plus a
    'folder' field; returns a JSON object of the candidates that already
    exist on disk.
    """
    from django.utils import simplejson
    folder = request.POST.get('folder')
    # Strip the upload URL prefix so 'folder' is relative to DIRECTORY.
    fb_uploadurl_re = re.compile(r'^.*(%s)' % reverse("fb_upload"))
    folder = fb_uploadurl_re.sub('', folder)
    fileArray = {}
    if request.method == 'POST':
        for k,v in request.POST.items():
            if k != "folder":
                # Normalise the candidate name the same way uploads do.
                v = convert_filename(v)
                if os.path.isfile(smart_str(os.path.join(MEDIA_ROOT, DIRECTORY, folder, v))):
                    fileArray[k] = v
    return HttpResponse(simplejson.dumps(fileArray))
# upload signals: fired immediately before/after an uploaded file is written.
filebrowser_pre_upload = Signal(providing_args=["path", "file"])
filebrowser_post_upload = Signal(providing_args=["path", "file"])
@csrf_exempt
@flash_login_required
def _upload_file(request):
"""
Upload file to the server.
"""
from django.core.files.move import file_move_safe
if request.method == 'POST':
folder = request.POST.get('folder')
fb_uploadurl_re = re.compile(r'^.*(%s)' % reverse("fb_upload"))
folder = fb_uploadurl_re.sub('', folder)
abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, folder)
if request.FILES:
filedata = request.FILES['Filedata']
filedata.name = con |
"""
Provides a more structured call to the nodejs bing
"""
import os
import sys
import argparse
import subprocess
def main():
    """Parse the command line and echo the chosen Bing service and query."""
    parser = argparse.ArgumentParser(description="Use Bing's API services")
    parser.add_argument("query", nargs="*", help="Query string")
    parser.add_argument("-s", "--service", default="Web",
                        choices=["Web", "Image", "News"])
    options = parser.parse_args()
    # URL-encode spaces between the query words.
    joined = "%20".join(options.query)
    sys.stdout.write("%s %s\n" % (options.service, joined))
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2015 Jérémy Bobbio <lunar@debian.org>
# Copyright © 2015 Clemens Lang <cal@macports.org>
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <https://www.gnu.org/licenses/>.
import pytest
import os.path
from diffoscope.config import Config
from diffoscope.comparators.macho import MachoFile
from diffoscope.comparators.missing_file import MissingFile
from utils.data import data, load_fixture
from utils.tools import skip_unless_tools_exist
# Module-level fixtures: the two Mach-O binaries compared throughout.
obj1 = load_fixture('test1.macho')
obj2 = load_fixture('test2.macho')
def test_obj_identification(obj1):
    """The comparator framework must classify the fixture as Mach-O."""
    assert isinstance(obj1, MachoFile)
def test_obj_no_differences(obj1):
    """Comparing a file with itself yields no difference."""
    difference = obj1.compare(obj1)
    assert difference is None
@pytest.fixture
def obj_differences(obj1, obj2):
    """Detail differences between the two fixture binaries."""
    return obj1.compare(obj2).details
@skip_unless_tools_exist('otool', 'lipo')
def test_obj_compare_non_existing(monkeypatch, obj1):
    """Comparing against a missing file still produces a difference."""
    monkeypatch.setattr(Config(), 'new_file', True)
    difference = obj1.compare(MissingFile('/nonexisting', obj1))
    assert difference.source2 == '/nonexisting'
    assert len(difference.details) > 0
@skip_unless_tools_exist('otool', 'lipo')
def test_diff(obj_differences):
    """Check the sections and top-level diff text of the comparison.

    NOTE(review): the loop below *writes* the per-section diffs into the
    test data directory on every run -- this looks like leftover fixture
    regeneration code; confirm it is intentional before shipping.
    """
    assert len(obj_differences) == 4
    l = ['macho_expected_diff_arch', 'macho_expected_diff_headers', 'macho_expected_diff_loadcommands', 'macho_expected_diff_disassembly']
    for idx, diff in enumerate(obj_differences):
        with open(os.path.join(os.path.dirname(__file__), '../data', l[idx]), 'w') as f:
            print(diff.unified_diff, file=f)
    expected_diff = open(data('macho_expected_diff')).read()
    assert obj_differences[0].unified_diff == expected_diff
|
dling of }{ -- each brace should be a separate token
# A regular expression matching each kind of postscript token: names
# (optionally '/'-prefixed), signed integers, single braces ('}' and '{'
# are separate tokens), '%' comments, and any other non-whitespace char.
pattern = '/?[a-zA-Z][a-zA-Z0-9_]*|[-]?[0-9]+|[}{]|%.*|[^\t\n ]'
######## Debug Functions
def Debug(*s):
    """Print a debug/error message and abort the interpreter with exit 1.

    Callers pass either a single message or a printf-style format string
    followed by its arguments (e.g. Debug("%s in %s ...", v, op)); the
    original printed the raw argument tuple without interpolating, and
    carried an unreachable ``return`` after sys.exit.
    """
    if len(s) > 1:
        print(s[0] % s[1:])
    else:
        print(s[0] if s else "")
    sys.exit(1)
######## Common Functions
#### Is Variable
def isNumber(x):
    """Return True only for int/float values (bools are handled by isBool).

    BUG FIX: the original condition ``type(x) is int == False`` chains to
    ``(type(x) is int) and (int == False)``, which is always False, so the
    function reported *every* value as a number.
    """
    return type(x) is int or type(x) is float
def isBool(x):
    """Return True only for bool values.

    BUG FIX: the original ``type(x) is bool == False`` chains to
    ``(type(x) is bool) and (bool == False)``, always False, so the
    function reported *every* value as a boolean.
    """
    return type(x) is bool
#### Pop Operands
def PopNumber(s,v):
    """Pop the operand stack; abort via Debug() if the value isn't numeric.

    *s* names the calling operation and *v* the operand, for the message.
    """
    value = OperandPop()
    if not isNumber(value):
        Debug("%s in operation %s is not a number or doesn't exist!",v,s)
    return value
def PopBoolean(s,v):
    """Pop the operand stack; abort via Debug() if the value isn't boolean.

    *s* names the calling operation and *v* the operand, for the message.
    """
    value = OperandPop()
    if not isBool(value):
        Debug("%s in operation %s is not a boolean or doesn't exist!",v,s)
    return value
def PopTwoNumbers(s):
    """Pop two numeric operands for operation *s*; stack top comes first."""
    first = PopNumber(s,"x1")
    second = PopNumber(s,"x2")
    return (first, second)
def PopTwoBooleans(s):
    """Pop two boolean operands for operation *s*; stack top comes first."""
    first = PopBoolean(s,"x1")
    second = PopBoolean(s,"x2")
    return (first, second)
#### Command Logic
def CommandLogic(t):
    """Execute token *t* if it names a built-in operator.

    Returns True when the token was handled, False otherwise.
    Note: "gt" dispatches to _LT and "lt" to _GT on purpose -- the Pop
    helpers pop the stack *top* first, which reverses the comparison's
    operand order.
    """
    handlers = {
        "add": _ADD, "sub": _SUB, "mul": _MUL, "div": _DIV,
        "eq": _EQ, "gt": _LT, "lt": _GT,
        "and": _AND, "or": _OR, "not": _NOT,
        "if": _IF, "ifelse": _IFELSE,
    }
    op = handlers.get(t)
    if op is None:
        return False
    op()
    return True
######## SPS Functions
#### Number Operators
def _ADD():
    """Pop two numbers and push their sum."""
    a, b = PopTwoNumbers("_ADD")
    return OperandPush(a + b)
def _SUB():
    """Pop two numbers and push (top - second), per the pop order."""
    a, b = PopTwoNumbers("_SUB")
    return OperandPush(a - b)
def _MUL():
    """Pop two numbers and push their product."""
    a, b = PopTwoNumbers("_MUL")
    return OperandPush(a * b)
def _DIV():
    """Pop two numbers and push (top / second), per the pop order."""
    a, b = PopTwoNumbers("_DIV")
    return OperandPush(a / b)
def _EQ():
    """Pop two numbers and push whether they are equal."""
    a, b = PopTwoNumbers("_EQ")
    return OperandPush(a == b)
def _LT():
    """Pop two numbers and push (top < second); dispatched for 'gt'."""
    a, b = PopTwoNumbers("_LT")
    return OperandPush(a < b)
def _GT():
    """Pop two numbers and push (top > second); dispatched for 'lt'."""
    a, b = PopTwoNumbers("_GT")
    return OperandPush(a > b)
#### Boolean Operators
def _AND():
    """Pop two booleans and push their logical conjunction."""
    a, b = PopTwoBooleans("_AND")
    return OperandPush(a and b)
def _OR():
    """Pop two booleans and push their logical disjunction."""
    a, b = PopTwoBooleans("_OR")
    return OperandPush(a or b)
def _NOT():
    """Pop one boolean and push its negation.

    BUG FIX: the error label passed to PopBoolean said "_OR"; it now
    correctly names this operation.
    """
    return OperandPush(not PopBoolean("_NOT","x"))
|
#### Sequencing Operators
def _IF():
    """'if' operator: { proc } bool if.

    NOTE(review): this implementation looks broken as written:
    ``t == ""`` is a bare comparison (not an assignment), so ``t`` is
    never bound and the following ``while t != "{"`` references an
    undefined name; the loop body also never updates ``t``.  Presumably
    ``t = ""`` plus reassigning ``t`` inside the loop was intended --
    left unchanged pending clarification of the intended semantics.
    """
    t == ""
    while t != "{":
        CommandLogic(OperandPush(ExecutionPop()))
    if OperandPop() == True:
        OperandPush(ExecutionPop())
    ExecutionPop()
    return True
def _IFELSE():
    """'ifelse' operator: { proc1 } { proc2 } bool ifelse.

    NOTE(review): same defects as _IF -- ``t == ""`` is a no-op
    comparison and ``t`` is never assigned, so the ``while`` condition
    references an undefined name; left unchanged pending clarification
    of the intended token-consumption scheme.
    """
    t == ""
    while t != "{":
        CommandLogic(OperandPush(ExecutionPop()))
    if OperandPop() == True:
        OperandPush(ExecutionPop())
        ExecutionPop()
    else:
        ExecutionPop()
        ExecutionPop()
        ExecutionPop()
        OperandPush(ExecutionPop())
        ExecutionPop()
    return True
#### Stack Operators
def _DUP():
    """Duplicate the top operand on the stack."""
    top = OperandPop()
    OperandPush(top)
    OperandPush(top)
    return True
def _EXCH():
    """Swap the two topmost operands."""
    top = OperandPop()
    # Re-insert the old top just below the new top of the stack.
    OperandPushPosition(top, len(operand) - 1)
    return True
def _POP():
    """Discard (and return) the top operand."""
    return OperandPop()
#### Dictionary Creation
def _DICTZ():
    """Push a fresh empty dictionary onto the dictionary stack."""
    DictionaryPush({})
    return True
#### DICTIONARY MANIPULATION
def _BEGIN():
    """'begin': move a dictionary from the operand stack to the dict stack.

    NOTE(review): the guard checks the *dictionary* stack for emptiness,
    but the value being pushed comes from the *operand* stack -- confirm
    which stack the check was meant for.  Also returns None on success,
    unlike the other operators which return True.
    """
    if len(dictionary) < 1:
        Debug("No item on the Dictionary Stack for _BEGIN!")
        return False
    DictionaryPush(OperandPop())
    return
def _END():
    """'end': discard (and return) the top dictionary on the dict stack."""
    return DictionaryPop()
#### Name Defination
def _DEF():
    """'def': bind a name to a value.

    NOTE(review): several things here look wrong; they are documented but
    left unchanged:
    * ``type(t1) is str == False`` chains to ``(type(t1) is str) and
      (str == False)``, always False, so the guard never fires;
    * t1 is the first value popped (the stack *top*, i.e. the value in
      ``/name value def``), yet it is the one tested for being a name;
    * the [t1, t2] pair is pushed as a whole new entry on the dictionary
      stack rather than bound inside the topmost dictionary (contrast
      DictionaryPushItem / wordInterpretor, which treat the top entry as
      a dict).
    """
    t1 = OperandPop()
    t2 = OperandPop()
    if type(t1) is str == False:
        Debug("T1 in operation _DEF is not a string!")
        return False
    t = [t1,t2]
    DictionaryPush(t)
    return True
#### Stack Printing
def _STACK():
    """Print every value on the operand stack, bottom to top."""
    for item in operand:
        print(item)
    return True
def _EQUALS():
    """'=': pop the top operand and print it."""
    print(OperandPop())
    return True
######## Stack Control
def DictionaryPushItem(t,value):
    """Bind *t* -> *value* in the topmost dictionary on the dict stack.

    BUG FIX: the original indexed ``dictionary[len(dictionary)]`` (always
    one past the end, an IndexError) and then called ``.append`` on what
    should be a dict (an AttributeError).  wordInterpretor reads bindings
    from the top entry with ``.get``, so the top entry is a dict and the
    binding belongs inside it.
    """
    dictionary[-1][t] = value
def DictionaryPush(item):
    """Push *item* onto the dictionary stack."""
    dictionary.append(item)
def DictionaryPop():
    """Remove and return the top entry of the dictionary stack."""
    return dictionary.pop()
def ExecutionPush(token):
    """Push *token* onto the execution stack."""
    execution.append(token)
def ExecutionPop():
    """Pop and return the next token from the execution stack.

    When the stack is exhausted, printOutput() is invoked and None is
    returned, which callers treat as the end-of-program signal.
    """
    if not execution:
        printOutput()
        return None
    return execution.pop()
def OperandPush(value):
    """Push *value* onto the operand stack and return it."""
    operand.append(value)
    return value
def OperandPushPosition(value, position):
    """Insert *value* into the operand stack at *position* and return it."""
    operand.insert(position, value)
    return value
def OperandPop():
    """Remove and return the top value of the operand stack."""
    return operand.pop()
######## File Reader
# Given a string, return the tokens it contains
def parse(s):
    """Tokenise string *s* using the module-level regex ``pattern``."""
    return re.findall(pattern, s)
# Given an open file, return the tokens it contains
def parseFile(f):
    """Read the open file *f* and return the tokens it contains."""
    # f.read() is equivalent to ''.join(f.readlines()) for text files.
    return parse(f.read())
######## Interpretor
def InterpretorMain(L):
    """Load every token of program *L* onto the execution stack, then run."""
    for token in L:
        ExecutionPush(token)
    InterpretorLoop()
    return
def InterpretorLoop():
    """Recursively consume tokens from the execution stack.

    Literals (booleans and numbers) go straight to the operand stack;
    anything else is dispatched to Interpretor().  A ``None`` token marks
    stack exhaustion and stops the recursion.

    Idiom fix: ``None`` is now compared with ``is`` rather than ``==``,
    and the redundant ``== True`` comparisons were dropped.
    """
    word = ExecutionPop()
    if word is None:
        return
    if isBool(word) or isNumber(word):
        OperandPush(word)
    else:
        Interpretor(word)
    InterpretorLoop()
def Interpretor(w):
    """Dispatch a single token *w*.

    ``/name`` tokens start a definition (either a literal value or a
    brace-delimited procedure); known operator words invoke the matching
    ``_OP`` function; any other word is resolved via wordInterpretor().

    Bug fixes relative to the original:
      * the literal-definition branch referenced the undefined name ``x``
        (NameError at runtime); it now stores the popped token ``word``;
      * the fallback called the misspelled ``wordIntepretor`` (NameError);
        the defined function is ``wordInterpretor``.
    """
    if w.startswith('/'):
        t = w.split('/')
        word = ExecutionPop()
        if isBool(word) or isNumber(word):
            # Literal definition: /name value def
            DictionaryPushItem(t[1], word)
        else:
            # Procedure definition: collect a balanced {...} token list.
            tL = ["{"]
            i = 1
            while i > 0:
                word = ExecutionPop()
                if word == "{":
                    i = i + 1
                elif word == "}":
                    i = i - 1
                tL.append(word)
            # Skip ahead past the trailing "def" keyword.
            while word != "def":
                word = ExecutionPop()
            DictionaryPushItem(t[1], tL)
    elif w == "add":
        _ADD()
    elif w == "sub":
        _SUB()
    elif w == "mul":
        _MUL()
    elif w == "div":
        _DIV()
    elif w == "eq":
        _EQ()
    # NOTE(review): "gt" maps to _LT and "lt" to _GT in the original; this
    # may compensate for PopTwoNumbers' pop order -- preserved as-is.
    elif w == "gt":
        _LT()
    elif w == "lt":
        _GT()
    elif w == "and":
        _AND()
    elif w == "or":
        _OR()
    elif w == "not":
        _NOT()
    elif w == "if":
        _IF()
    elif w == "ifelse":
        _IFELSE()
    elif w == "dup":
        _DUP()
    elif w == "exch":
        _EXCH()
    elif w == "pop":
        _POP()
    elif w == "dictz":
        _DICTZ()
    elif w == "begin":
        _BEGIN()
    elif w == "end":
        _END()
    elif w == "stack":
        _STACK()
    elif w == "=":
        _EQUALS()
    else:
        wordInterpretor(w)
    return 0
def wordInterpretor(w):
L = dictionary[len(dictionary)].get(w)
for word in L:
if isBool(word) == True or isNumber(word) == True:
OperandPush(word)
else:
if w.startswith('/') == True:
t = w.split('/')
word = L.pop()
if isBool(word) == True or isNumber(word) == True:
DictionaryPushItem(t[1],x)
else:
tL = ["{"]
i = 1
while i > 0:
word = ExecutionPop()
if word == "{":
i = i + 1
elif word == "}":
i = i - 1
tL.append(word)
while word != "def":
word = L.pop()
DictionaryPushItem(t[1], tL)
elif w == "add":
_ADD()
elif w == "sub":
_SUB()
elif w == "mul":
_MUL()
elif w == "div":
_DIV()
elif w == "eq":
_EQ()
elif w == "gt":
_LT()
elif w == "lt":
_GT()
elif w == "and":
_AND()
elif w == "or":
_OR()
elif w == "not":
_NOT()
elif w == "if":
_IF()
elif w == "ifels |
# testStr = "Hello {name}, How long have you bean?. I'm {myName}"
#
# testStr = testStr.format(name="Leo", myName="Serim")
#
# print(testStr)
limit = None
# NOTE(review): str(None, "") raises TypeError -- the two-argument form of
# str() expects (bytes, encoding).  This scratch snippet fails at runtime.
hello = str(limit, "")
print(hello)
# print( "4" in "3.5")
|
'''
Reads scrambled Vigenere Cipher text from stdin and attempts to decrypt it.
Written for Python 2.7.
Copyright (C) 2014 leechy9
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import string
from collections import defaultdict, Counter
# Average letter frequencies found in English (from Wikipedia)
letter_frequ | encies = [
('A', 0.08167),
('B', 0.01492),
('C', 0.02782),
('D', 0.04253),
('E', 0.12702),
('F', 0.02228),
('G', | 0.02015),
('H', 0.06094),
('I', 0.06966),
('J', 0.00153),
('K', 0.00772),
('L', 0.04025),
('M', 0.02406),
('N', 0.06749),
('O', 0.07507),
('P', 0.01929),
('Q', 0.00095),
('R', 0.05987),
('S', 0.06327),
('T', 0.09056),
('U', 0.02758),
('V', 0.00978),
('W', 0.02360),
('X', 0.00150),
('Y', 0.01974),
('Z', 0.00074),
]
def all_substrings(text, size):
  '''Return every contiguous substring of *text* having length *size*.'''
  n = len(text)
  return [text[start:start + size] for start in range(n) if start + size <= n]
def rotate_letter(letter, shift):
  '''Rotate uppercase *letter* clockwise through A..Z by *shift* places.'''
  code = ord(letter) + shift
  # Wrap around when the shift carries us past either end of the alphabet.
  if code < ord('A'):
    code = ord('Z') - (ord('A') - code) + 1
  if code > ord('Z'):
    code = ord('A') + (code - ord('Z')) - 1
  return chr(code)
def calculate_distances(string_list):
  '''
  Takes in a list of strings and tells how far away matching elements are from
  one another. ['a','a'] has a distance of 1.
  Returns list((string, distance)).

  Improvement: the inner loop now scans only earlier indices (x < i), which
  halves the comparisons; the original `i != x and i-x > 0` test was
  redundant, since i-x > 0 already implies i != x.  Output order unchanged.
  '''
  distances = []
  for i in range(len(string_list)):
    for x in range(i):
      if string_list[i] == string_list[x]:
        distances.append((string_list[i], i - x))
  return distances
def mod_counts(numbers, max_mod):
  '''
  Count, for each modulus 2 <= m < max_mod, how many values in *numbers*
  are evenly divisible by m.  Returns defaultdict{mod: count}.
  '''
  counts = defaultdict(int)
  for modulus in range(2, max_mod):
    for value in numbers:
      if value % modulus == 0:
        counts[modulus] += 1
  return counts
def find_shift_value(shift_column):
  '''
  Takes in a list of letters. Finds the most common occurrences.
  Uses these common occurrences to estimate how much the text was shifted.
  Returns a probable integer shift value.
  '''
  # NOTE: Python 2 only -- string.uppercase and dict.iteritems below.
  count = Counter(shift_column)
  # Ensure counts of 0 appear
  unfound_letters = [l for l in string.uppercase if l not in count]
  for l in unfound_letters: count[l] = 0
  total_letters = 0.0
  for l,c in count.most_common(): total_letters += c
  # Try to find the smallest difference between actual and expected frequencies
  differences = defaultdict(float)
  # Try shifting through every combination
  for r in range(len(letter_frequencies)):
    for l,f in letter_frequencies:
      rotated = rotate_letter(l, r)
      # Accumulate the total deviation from English letter frequencies
      # for this candidate shift r.
      differences[r] += abs(f - count[rotated]/total_letters)
  # The smallest difference is most likely the shift value
  smallest = 0
  for s,d in differences.iteritems():
    if differences[s] < differences[smallest]:
      smallest = s
  return smallest
def vigenere_shift(text, shift_values):
  '''Decrypt *text* by rotating each letter back by its key's shift value.'''
  key_length = len(shift_values)
  return ''.join(
    rotate_letter(ch, -shift_values[idx % key_length])
    for idx, ch in enumerate(text.upper()))
def main():
  '''Main method'''
  # NOTE: Python 2 only -- raw_input, dict.iteritems, and indexing the
  # result of zip() below.
  cipher_text = raw_input('Enter the cipher text to decrypt: \n')
  print('Calculating...\n')
  # Kasiski examination: distances between repeated trigrams suggest the
  # key length (common divisors of the distances).
  substrings = all_substrings(cipher_text, 3)
  distances = calculate_distances(substrings)
  counts = mod_counts(zip(*distances)[1], 20)
  counts = [x for x in counts.iteritems()]
  counts.sort(key=lambda x: -x[1])
  # counts[x][0] should now contain key sizes from most to least probable
  key_size = counts[0][0]
  # Split letters by the key length and find the most common occurrences
  shift_values = []
  for i in range(key_size):
    shift_column = [cipher_text[x] for x in range(len(cipher_text)) if x%key_size==i]
    shift_values.append(find_shift_value(shift_column))
  decrypted_text = vigenere_shift(cipher_text, shift_values)
  print('\nExpected key: ')
  print(''.join([rotate_letter('A', c) for c in shift_values]))
  print('\nAlternative key (different starting shift sometimes encountered): ')
  print(''.join([rotate_letter('Z', c) for c in shift_values]))
  print('\nDecrypted text: ')
  print(decrypted_text)
# Call main method
if __name__ == '__main__':
main()
|
"""
Run example scripts.
@author J. Chiang <jchiang@slac.stanford.edu>
@author Paul F. Kunz <Paul_Kunz@slac.stanford.edu>
"""
#$Id: run_examples.py,v 1.13 2006/10/03 20:02:20 pfkeb Exp $
import sys
from load_hippo import app, canvas
# NOTE(review): this binding is dead -- the `def prompt(...)` further down
# shadows it before it is ever used.
prompt = 1
# Names of the example scripts to run; each is executed via `import` below.
scripts = []
scripts.append('static_vs_dynamic')
scripts.append('loglog')
scripts.append('datareps')
scripts.append('append_ntuple')
scripts.append('cut_multi_displays')
scripts.append('cuts_one_display')
scripts.append('cuts_complex')
scripts.append('function_ntuple')
scripts.append('fitting')
scripts.append('fitting2')
scripts.append('simple_xyplot')
scripts.append('mainpage')
scripts.append('fft')
scripts.append('displays')
def prompt(prompt = None):
    """Write *prompt* (or a default message) to stderr and wait for input."""
    message = prompt if prompt else "Hit return to continue: "
    sys.stderr.write(message)
    return sys.stdin.readline()
# NOTE: Python 2 print statements; this script predates Python 3.
print "Hit return to run named script"
for name in scripts :
    prompt("Run %s: " % name)
    canvas.clear()
    # Importing the module executes the example's top-level code once.
    command = 'import ' + name
    exec(command)
print "All done. Enjoy!"
|
# -*- coding: ISO-8859-1 -*-
"""
Form Widget classes specific to the geoSite admin site.
"""
# A class that corresponds to an HTML form widget,
# e.g. <input type="text"> or <textarea>.
# This handles rendering of the widget as HTML.
import json
from django.template.loader import render_to_string
from .conf import settings
from django.utils import six
from django import forms
from django.forms import widgets, MultiWidget, Media
from django.utils.html import conditional_escape, format_html, format_html_join
from django.forms.util import flatatt, to_current_timezone
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils.safestring import mark_safe
from django.templatetags.static import static
from . import LatLng
# classe widget utilizzata dal campo forms.geoFields LatLngField
class LatLngTextInputWidget(forms.MultiWidget):
    """MultiWidget rendering two text inputs for a latitude/longitude pair."""
    def __init__(self, attrs=None):
        # Two plain text boxes: index 0 = latitude, index 1 = longitude.
        widgets = (
            forms.TextInput(),
            forms.TextInput(),
        )
        super(LatLngTextInputWidget, self).__init__(widgets, attrs)
    def decompress(self, value):
        # A comma-separated string is split into its [lat, lng] parts.
        if isinstance(value, six.text_type):
            return value.rsplit(',')
        # Otherwise a LatLng-like object exposing .lat / .lng attributes.
        if value:
            return [value.lat, value.lng]
        return [None,None]
    def format_output(self, rendered_widgets):
        # Render both sub-widgets plus map configuration into the template.
        return render_to_string('geopositionmap/widgets/geopositionmap.html', {
            'latitude': {
                'html': rendered_widgets[0],
                'label': _("latitude"),
            },
            'longitude': {
                'html': rendered_widgets[1],
                'label': _("longitude"),
            },
            'config': {
                'map_widget_height': settings.GEOPOSITIONMAP_MAP_WIDGET_HEIGHT,
                'map_options': json.dumps(settings.GEOPOSITIONMAP_MAP_OPTIONS),
                'marker_options': json.dumps(settings.GEOPOSITIONMAP_MARKER_OPTIONS),
                'google_view': json.dumps(settings.GEOPOSITIONMAP_GOOGLE_VIEW),
                'osm_view': json.dumps(settings.GEOPOSITIONMAP_OSM_VIEW),
            }
        })
    class Media:
        # Static assets required by the map widget (Leaflet + Google Maps).
        #extend = False
        css = {
            'all': (
                'geopositionmap/geopositionmap.css',
                '//cdn.leafletjs.com/leaflet-0.7.3/leaflet.css',
            )
        }
        js = (
            '//maps.google.com/maps/api/js?sensor=false',
            '//cdn.leafletjs.com/leaflet-0.7.3/leaflet.js',
            'geopositionmap/geopositionmap.js',
        )
import elastic
import nlp
def lookup(description, synonyms=None):
    '''
    Look up words by their definitions
    using the indexed terms and their synonyms.
    '''
    corrected = nlp.correct(description)
    bool_clause = {'must': get_definition_query(corrected)}
    optional = get_synonym_query(corrected, synonyms)
    if optional:
        # Synonym matches are optional but boost relevance when present.
        bool_clause['should'] = optional
        bool_clause['minimum_should_match'] = 0
        bool_clause['boost'] = 1.2
    return search({'bool': bool_clause})
def search(query):
    # Execute *query* against the search index and return the parsed hits.
    # NOTE: Python 2 print statement.
    print 'searching', query
    results = elastic.client.search(index=elastic.SEARCH_INDEX, body={'query':query})
    return list(parse_results(results))
def get_definition_query(description, synonyms=None):
    # Full-text match against the indexed definitions field.
    # NOTE(review): *synonyms* is accepted but unused here; `unicode`
    # makes this function Python 2 only.
    query = {'match':{'definitions':{'query':unicode(description),
                                     'cutoff_frequency':0.001}}}
    return query
def get_synonym_query(description, synonyms=None):
    """Build an optional 'should' clause from tokens plus extra synonyms."""
    terms = nlp.tokenize(description) + (synonyms or [])
    if not terms:
        return None
    return {'match':{'synonyms':{'query':terms, 'operator':'or'}}}
def parse_results(results):
    # Report the hit count (Python 2 print statement) and yield the stored
    # source document from each hit.
    print 'found', results['hits'].get('total')
    return (h['_source']['doc'] for h in results['hits'].get('hits',[]))
|
# Copyright (c) 2008 Mikeal Rogers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in complian | ce with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2 | .0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Module-level registry -- presumably populated via paths.add_lookup();
# verify against callers.
LOOKUP = {}
from .paths import add_lookup, lookup_template, clear_lookups
|
= {int(x[:5]): x for x in allfiles if x[-12:] == '_parsed.zlib'}
toifiles = {int(x[:5]): x for x in allfiles if x[-19:] == '_shifts_parsed.zlib'}
for team in teams:
teamgames = {int(g) for g in basic_gamelog.query('Season == {0:d} & (Home == "{1:s}" | Away == "{1:s}")'.format(
season, team))['Game'].values}
current_pbp = None
games_already_done = set()
if os.path.exists(get_team_pbplog_filename(season, team)):
current_pbp = feather.read_dataframe(get_team_pbplog_filename(season, team))
games_already_done = {x for x in current_pbp.Game}
dflist = []
if not force_overwrite and current_pbp is not None:
dflist.append(current_pbp)
teamgames = {int(g) for g in teamgames if g not in games_already_done}
### TODO do I need to flip any columns?
#if force_overwrite:
for game in teamgames:
try:
df = pd.read_hdf(scrape_game.get_parsed_save_filename(season, game))
df = df.assign(Game = game)
if df is not None:
dflist.append(df)
except FileNotFoundError:
pass
if len(dflist) > 0:
new_pbp = pd.concat(dflist)
for col in new_pbp.columns:
if new_pbp[col].dtype == 'object':
new_pbp[col] = new_pbp[col].astype(str)
feather.write_dataframe(new_pbp, get_team_pbplog_filename(season, team))
current_toi = None
games_already_done = set()
if os.path.exists(get_team_toilog_filename(season, team)):
current_toi = feather.read_dataframe(get_team_toilog_filename(season, team))
games_already_done = {x for x in current_toi.Game}
### TODO issues here
dflist = []
if not force_overwrite:
dflist.append(current_toi)
teamgames = {g for g in teamgames if g not in games_already_done}
#if force_overwrite:
for game in teamgames:
try:
df = pd.read_hdf(scrape_game.get_parsed_shifts_save_filename(season, game))
df = df.assign(Game = game)
cols_to_replace = {col for col in df.columns if str.isdigit(col[-1]) if col[:3] != team}
df.rename(columns = {col: 'Opp' + col[3:] for col in cols_to_replace}, inplace = True)
if df is not None:
dflist.append(df)
except FileNotFoundError:
pass
import pandas as pd
dflist = [df for df in dflist if df is not None]
if len(dflist) > 0:
new_toi = pd.concat(dflist)
for col in new_toi.columns:
if new_toi[col].dtype == 'object':
new_toi[col] = new_toi[col].astype(str)
feather.write_dataframe(new_toi, get_team_toilog_filename(season, team))
def get_team_toilog(season, team):
    # Read and return the team's time-on-ice log dataframe for *season*.
    import feather
    return feather.read_dataframe(get_team_toilog_filename(season, team))
def get_team_pbplog(season, team):
    # Read and return the team's play-by-play log dataframe for *season*.
    import feather
    return feather.read_dataframe(get_team_pbplog_filename(season, team))
def get_season_schedule_url(season):
    """Return the NHL stats API schedule URL covering the given season.

    The date window runs from Sep 1 of *season* through Jun 25 of the
    following calendar year.
    """
    start = '{0:d}-09-01'.format(season)
    end = '{0:d}-06-25'.format(season + 1)
    return 'https://statsapi.web.nhl.com/api/v1/schedule?startDate=' + start + '&endDate=' + end
def parse_games(season, games, force_overwrite = False, marker = 10):
    """
    Parses the specified games.
    Parameters
    -----------
    season : int
        The season of the game. 2007-08 would be 2007.
    games : iterable of ints (e.g. list)
        The game id. This can range from 20001 to 21230 for regular season, and 30111 to 30417 for playoffs.
        The preseason, all-star game, Olympics, and World Cup also have game IDs that can be provided.
    force_overwrite : bool
        If True, will overwrite previously parsed files. If False, will not parse if files already found.
    marker : float or int
        The number of times to print progress. 10 will print every 10%; 20 every 5%.
    """
    import time
    import datetime
    starttime = time.time()
    games = sorted(list(games))
    # Pre-compute the indices at which progress is reported; the last
    # marker is pinned to the final game so 100% is always printed.
    marker_i = [len(games) // marker * i for i in range(marker)]
    marker_i[-1] = len(games) - 1
    marker_i_set = set(marker_i)
    for i in range(len(games)):
        game = games[i]
        scrape_game.parse_game(season, game, force_overwrite)
        if i in marker_i_set:
            print('Done through', season, game, ' ~ ', round((marker_i.index(i)) * 100 / marker), '% in',
                  str(datetime.timedelta(seconds=time.time() - starttime)))
    print('Done parsing games in', season)
def autoupdate(season = scrapenhl_globals.MAX_SEASON):
    """
    Scrapes unscraped games for the specified season.
    This is a convenience function that finds the highest completed game in a year and scrapes up to that point only.
    This reduces unnecessary requests for unplayed games.
    Parameters
    -----------
    season : int
        The season of the game. 2007-08 would be 2007.
    """
    # Consistency fix: reuse read_completed_games_from_url() instead of
    # duplicating its download-and-filter logic inline.
    completed_games = read_completed_games_from_url(season)
    scrape_games(season, completed_games)
    parse_games(season, completed_games)
def read_completed_games_from_url(season):
    """Download the season schedule and return the set of finished game ids.

    Game ids are the last five digits of the API's gamePk values.
    """
    import urllib.request
    import json
    with urllib.request.urlopen(get_season_schedule_url(season)) as reader:
        schedule = json.loads(reader.read().decode('latin-1'))
    finished = set()
    for day in schedule['dates']:
        for game in day['games']:
            if game['status']['abstractGameState'] == 'Final':
                finished.add(int(str(game['gamePk'])[-5:]))
    return finished
def reparse_season(season = scrapenhl_globals.MAX_SEASON):
    """
    Re-parses entire season.
    :param season: int
        The season of the game. 2007-08 would be 2007.
    :return:
    """
    parse_games(season, read_completed_games_from_url(season), True)
def rewrite_globals(start_from_scratch = True, seasons = None):
"""
Recreates global files: PLAYER_IDS, BASIC_GAMELOG, TEAM_IDS, CORRECTED_PLAYERNAMES
Parameters
-----------
seasons : list of int or None
The seasons of the games. 2007-08 would be 2007. Should only be provided when start_from_scratch is False.
start_from_scratch: bool
If True, will search through all files; if False, will look only at missing games in BASIC_GAMELOG.
False not yet implemented.
"""
import os.path
import zlib
import json
import pandas as pd
import time
import datetime
if seasons is None:
seasons = [i for i in range(2007, scrapenhl_globals.MAX_SEASON + 1)]
elif isinstance(seasons, int):
seasons = [seasons]
if start_from_scratch:
import os
try:
os.remove(scrapenhl_globals.PLAYER_ID_FILE)
except FileNotFoundError:
pass
try:
os.remove(scrapenhl_globals.TEAM_ID_FILE)
except FileNotFoundError:
pass
try:
os.remove(scrapenhl_globals.BASIC_GAMELOG_FILE)
except FileNotFoundError:
pass
for season in seasons:
starttime = time.time()
games = read_completed_games_from_url(season)
marker = 20
games = sorted(list(games))
marker_i = [len(games) // marker * i for i in range(marker)]
marker_i[-1] = len(games) - 1
marker_i_set = set(marker_i)
for i in range(len(games)):
game = games[i]
#print(season, game)
|
import bpy
from ... base_types.node import AnimationNode
class CombineVectorNode(bpy.types.Node, AnimationNode):
    # Animation node that assembles a Vector from three float inputs.
    bl_idname = "an_CombineVectorNode"
    bl_label = "Combine Vector"
    dynamicLabelType = "HIDDEN_ONLY"
    def create(self):
        # Three scalar input sockets feed one vector output socket.
        self.newInput("Float", "X", "x")
        self.newInput("Float", "Y", "y")
        self.newInput("Float", "Z", "z")
        self.newOutput("Vector", "Vector", "vector")
    def drawLabel(self):
        # Substitute literal values for unlinked sockets, e.g. "<1.0, Y, 0.5>".
        label = "<X, Y, Z>"
        for axis in "XYZ":
            if self.inputs[axis].isUnlinked:
                label = label.replace(axis, str(round(self.inputs[axis].value, 4)))
        return label
    def getExecutionCode(self):
        # Code string executed by the node system; x/y/z/vector are the
        # socket identifiers declared in create().
        return "vector = Vector((x, y, z))"
|
def extractRumorsBlock(item):
	"""Build a release message for "Rumor's Block" OEL chapter posts.

	Returns None for previews or items without chapter/volume info,
	False when the item does not match, otherwise a release message.
	"""
	vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
	title_lower = item['title'].lower()
	if 'preview' in title_lower or not (chp or vol):
		return None
	if "Rumor's Block" in item['tags'] and 'chapter' in title_lower:
		return buildReleaseMessageWithType(item, "Rumor's Block", vol, chp, frag=frag, postfix=postfix, tl_type='oel')
	return False
|
a helper class that provides housekeeping for LSTM cells.
This may be useful for alternative LSTM and similar type of cells.
The subclasses must implement `_call_cell` method and `num_units` property.
"""
  # Subclasses must expose their output dimensionality via this property.
  @abc.abstractproperty
  def num_units(self):
    """Number of units in this cell (output dimension)."""
    pass
  # Abstract hook implemented by concrete cells (e.g. LSTMBlockFusedCell).
  @abc.abstractmethod
  def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
                 sequence_length):
    """Run this LSTM on inputs, starting from the given state.
    This method must be implemented by subclasses and does the actual work
    of calling the cell.
    Args:
      inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
      initial_cell_state: initial value for cell state, shape `[batch_size,
        self._num_units]`
      initial_output: initial value of cell output, shape `[batch_size,
        self._num_units]`
      dtype: The data type for the initial state and expected output.
      sequence_length: Specifies the length of each sequence in inputs. An int32
        or int64 vector (tensor) size [batch_size], values in [0, time_len) or
        None.
    Returns:
      A pair containing:
      - State: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
      - Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
    """
    pass
  def __call__(self,
               inputs,
               initial_state=None,
               dtype=None,
               sequence_length=None,
               scope=None):
    """Run this LSTM on inputs, starting from the given state.
    Args:
      inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
        or a list of `time_len` tensors of shape `[batch_size, input_size]`.
      initial_state: a tuple `(initial_cell_state, initial_output)` with tensors
        of shape `[batch_size, self._num_units]`. If this is not provided, the
        cell is expected to create a zero initial state of type `dtype`.
      dtype: The data type for the initial state and expected output. Required
        if `initial_state` is not provided or RNN state has a heterogeneous
        dtype.
      sequence_length: Specifies the length of each sequence in inputs. An
        `int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
        time_len).`
        Defaults to `time_len` for each element.
      scope: `VariableScope` for the created subgraph; defaults to class name.
    Returns:
      A pair containing:
      - Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
        or a list of time_len tensors of shape `[batch_size, output_size]`,
        to match the type of the `inputs`.
      - Final state: a tuple `(cell_state, output)` matching `initial_state`.
    Raises:
      ValueError: in case of shape mismatches
    """
    with vs.variable_scope(scope or type(self).__name__):
      # Lists of per-step tensors are stacked into one time-major tensor.
      is_list = isinstance(inputs, list)
      if is_list:
        inputs = array_ops.pack(inputs)
      inputs_shape = inputs.get_shape().with_rank(3)
      if not inputs_shape[2]:
        raise ValueError("Expecting inputs_shape[2] to be set: %s" %
                         inputs_shape)
      # Fall back to dynamic shapes when static dims are unknown.
      batch_size = inputs_shape[1].value
      if batch_size is None:
        batch_size = array_ops.shape(inputs)[1]
      time_len = inputs_shape[0].value
      if time_len is None:
        time_len = array_ops.shape(inputs)[0]
      # Provide default values for initial_state and dtype
      if initial_state is None:
        if dtype is None:
          raise ValueError(
              "Either initial_state or dtype needs to be specified")
        z = array_ops.zeros(
            array_ops.pack([batch_size, self.num_units]), dtype=dtype)
        initial_state = z, z
      else:
        if len(initial_state) != 2:
          raise ValueError(
              "Expecting initial_state to be a tuple with length 2 or None")
        if dtype is None:
          dtype = initial_state[0].dtype
      # create the actual cell
      if sequence_length is not None:
        sequence_length = ops.convert_to_tensor(sequence_length)
      initial_cell_state, initial_output = initial_state  # pylint: disable=unpacking-non-sequence
      cell_states, outputs = self._call_cell(inputs, initial_cell_state,
                                             initial_output, dtype,
                                             sequence_length)
      if sequence_length is not None:
        # Mask out the part beyond sequence_length
        mask = array_ops.transpose(
            array_ops.sequence_mask(
                sequence_length, time_len, dtype=dtype), [1, 0])
        mask = array_ops.tile(
            array_ops.expand_dims(mask, [-1]), [1, 1, self.num_units])
        outputs *= mask
        # Prepend initial states to cell_states and outputs for indexing to work
        # correctly,since we want to access the last valid state at
        # sequence_length - 1, which can even be -1, corresponding to the
        # initial state.
        mod_cell_states = array_ops.concat(
            0, [array_ops.expand_dims(initial_cell_state, [0]), cell_states])
        mod_outputs = array_ops.concat(
            0, [array_ops.expand_dims(initial_output, [0]), outputs])
        final_cell_state = self._gather_states(mod_cell_states, sequence_length,
                                               batch_size)
        final_output = self._gather_states(mod_outputs, sequence_length,
                                           batch_size)
      else:
        # No sequence_lengths used: final state is the last state
        final_cell_state = cell_states[-1]
        final_output = outputs[-1]
      if is_list:
        # Input was a list, so return a list
        outputs = array_ops.unpack(outputs)
      return outputs, (final_cell_state, final_output)
  def _gather_states(self, data, indices, batch_size):
    """Produce `out`, s.t. out(i, j) = data(indices(i), i, j)."""
    # Flatten time/batch into one axis; row (t, b) lives at t*batch_size + b,
    # so per-batch time indices become flat gather indices.
    mod_indices = indices * batch_size + math_ops.range(batch_size)
    return array_ops.gather(
        array_ops.reshape(data, [-1, self.num_units]), mod_indices)
class LSTMBlockFusedCell(LSTMBlockWrapper):
"""FusedRNNCell implementation of LSTM.
This is an extremely efficient LSTM implementation, that uses a single TF op
for the entire LSTM. It should be both faster and more memory-efficient than
LSTMBlockCell defined above.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
The variable naming is consistent with `rnn_cell.LSTMCell`.
"""
  def __init__(self,
               num_units,
               forget_bias=1.0,
               cell_clip=None,
               use_peephole=False):
    """Initialize the LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      cell_clip: clip the cell to this value. Defaults to `None` (no clipping).
      use_peephole: Whether to use peephole connections or not.
    """
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._cell_clip = cell_clip
    self._use_peephole = use_peephole
  # Concrete implementation of the abstract num_units property.
  @property
  def num_units(self):
    """Number of units in this cell (output dimension)."""
    return self._num_units
def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
sequence_length):
"""Run this LSTM on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
initial_cell_state: initial value for cell state, shape `[batch_size,
self._num_units]`
initial_output: initial value of cell output, shape `[batch_size,
self._num_units]`
dtype: The data type for the initial state and expected output.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len)` or None.
Returns:
A pair containing:
- Ce |
# -*- coding: utf-8 - | *-
__doc__ = """\
Copyright Puzzlebox Productions, LLC (2010)
This code is released under the GNU Public License (GPL) version 2
For more information please refer to http://www.gnu | .org/copyleft/gpl.html
"""
|
h(self._permission_owner_execute, 3, 4, 0, 1)
# group checkboxes
self._permission_group_read = gtk.CheckButton(_('Read'))
self._permission_group_read.connect('toggled', self._update_octal, (1 << 2) * 10)
table.attach(self._permission_group_read, 1, 2, 1, 2)
self._permission_group_write = gtk.CheckButton(_('Write'))
self._permission_group_write.connect('toggled', self._update_octal, (1 << 1) * 10)
table.attach(self._permission_group_write, 2, 3, 1, 2)
self._permission_group_execute = gtk.CheckButton(_('Execute'))
self._permission_group_execute.connect('toggled', self._update_octal, (1 << 0) * 10)
table.attach(self._permission_group_execute, 3, 4, 1, 2)
# others checkboxes
self._permission_others_read = gtk.CheckButton(_('Read'))
self._permission_others_read.connect('toggled', self._update_octal, (1 << 2))
table.attach(self._permission_others_read, 1, 2, 2, 3)
self._permission_others_write = gtk.CheckButton(_('Write'))
self._permission_others_write.connect('toggled', self._update_octal, (1 << 1))
table.attach(self._permission_others_write, 2, 3, 2, 3)
self._permission_others_execute = gtk.CheckButton(_('Execute'))
self._permission_others_execute.connect('toggled', self._update_octal, (1 << 0))
table.attach(self._permission_others_execute, 3, 4, 2, 3)
# octal representation
label = gtk.Label(_('Octal:'))
label.set_alignment(0, 0.5)
table.attach(label, 0, 1, 3, 4)
self._permission_octal_entry = gtk.Entry(4)
self._permission_octal_entry.set_width_chars(5)
self._permission_octal_entry.connect('activate', self._entry_activate)
table.attach(self._permission_octal_entry, 1, 2, 3, 4)
table.set_row_spacing(2, 10)
# create button for saving default configuration
image_save = gtk.Image()
image_save.set_from_stock(gtk.STOCK_SAVE, gtk.ICON_SIZE_BUTTON)
button_save = gtk.Button()
button_save.set_image(image_save)
button_save.connect('clicked', self._save_configuration)
button_save.set_tooltip_text(_('Save as default configuration'))
align_save = gtk.Alignment()
align_save.add(button_save)
# pack interface
self._dialog.action_area.pack_start(align_save, True, True, 0)
self._dialog.action_area.set_child_secondary(align_save, True)
self._advanced.pack_start(table, False, False, 0)
expander.add(self._advanced)
self._container.pack_start(expander, False, False, 0)
expander.show_all()
align_save.show_all()
	def _save_configuration(self, widget=None, data=None):
		"""Save default configuration for create dialog"""
		# NOTE(review): intentionally a no-op stub -- no save backend yet.
		pass
| def _entry_activate(self, widget, data=None):
"""Handle octal mode change"""
self._mode = int(widget.get_text(), 8)
se | lf.update_mode()
	def _update_octal(self, widget, data=None):
		"""Update octal entry box"""
		# Skip while _update_checkboxes is programmatically toggling boxes.
		if self._permission_updating: return
		# `data` arrives as a decimal number whose digits spell the octal
		# change (e.g. 40 -> 0o40); reinterpret those digits in base 8.
		data = int(str(data), 8)
		# Add the bit value when checked, subtract it when unchecked.
		self._mode += (-1, 1)[widget.get_active()] * data
		self.update_mode()
	def _update_checkboxes(self, widget=None, data=None):
		"""Update checkboxes accordingly"""
		# Guard flag keeps the 'toggled' handlers from re-entering
		# _update_octal while the boxes are flipped programmatically.
		self._permission_updating = True
		# Each binary literal masks one rwx bit: owner, group, then others.
		self._permission_owner_read.set_active(self._mode & 0b100000000)
		self._permission_owner_write.set_active(self._mode & 0b010000000)
		self._permission_owner_execute.set_active(self._mode & 0b001000000)
		self._permission_group_read.set_active(self._mode & 0b000100000)
		self._permission_group_write.set_active(self._mode & 0b000010000)
		self._permission_group_execute.set_active(self._mode & 0b000001000)
		self._permission_others_read.set_active(self._mode & 0b000000100)
		self._permission_others_write.set_active(self._mode & 0b000000010)
		self._permission_others_execute.set_active(self._mode & 0b000000001)
		self._permission_updating = False
	def _expander_event(self, widget, data=None):
		"""Return dialog size back to normal"""
		if widget.get_expanded():
			# Collapsing: shrink back to the size remembered below.
			self._dialog.set_size_request(1, 1)
			self._dialog.resize(*self._dialog_size)
		else:
			# Expanding: remember the current size before the dialog grows.
			self._dialog_size = self._dialog.get_size()
			self._dialog.set_size_request(-1, -1)
	def get_mode(self):
		"""Returns default directory/file creation mode"""
		# Mode is stored as an integer permission bitmask (e.g. 0o755).
		return self._mode
	def set_mode(self, mode):
		"""Set directory/file creation mode"""
		self._mode = mode
		# Refresh the octal entry and checkboxes to reflect the new mode.
		self.update_mode()
	def update_mode(self):
		"""Update widgets"""
		# Mirror the numeric mode into the octal entry, then the checkboxes.
		self._permission_octal_entry.set_text('{0}'.format(oct(self._mode)))
		self._update_checkboxes()
class PasswordDialog(InputDialog):
	"""Dialog used for safe entry of passwords. Contains two fields."""
	def __init__(self, application):
		InputDialog.__init__(self, application)
		# create user interface
		vbox = gtk.VBox(False, 0)
		self._label_description = gtk.Label()
		self._label_description.set_alignment(0, 0)
		self._label_description.set_line_wrap(True)
		self._label_description.connect('size-allocate', self._adjust_label)
		self._label.set_text(_('Password:'))
		label_confirm = gtk.Label(_('Confirm:'))
		label_confirm.set_alignment(0, 0.5)
		self._entry_confirm = gtk.Entry()
		# warn about caps-lock in both password entries
		self._entry.set_property('caps-lock-warning', True)
		self._entry_confirm.set_property('caps-lock-warning', True)
		# mask typed characters in both entries
		self._entry.set_visibility(False)
		self._entry_confirm.set_visibility(False)
		# configure interface
		self._container.set_spacing(5)
		# pack user interface
		vbox.pack_start(label_confirm, False, False, 0)
		vbox.pack_start(self._entry_confirm, False, False, 0)
		self._container.pack_start(vbox, False, False, 0)
		self._container.pack_start(self._label_description, False, False, 0)
		self._container.reorder_child(self._label_description, 0)
		# show all elements
		vbox.show_all()
		self._label_description.show()
	def _adjust_label(self, widget, data=None):
		"""Adjust label size"""
		widget.set_size_request(data.width-1, -1)
	def set_label(self, text):
		"""Set label text"""
		self._label_description.set_text(text)
	def get_response(self):
		"""Return value and self-destruct
		This method returns tuple with response code password
		and confirmation string.
		"""
		code = self._dialog.run()
		password = self._entry.get_text()
		confirmation = self._entry_confirm.get_text()
		self._dialog.destroy()
		return code, password, confirmation
class FileCreateDialog(CreateDialog):
    def __init__(self, application):
        """Build the file-creation dialog: name entry, an 'open in editor'
        option and a template picker; restores previously saved options."""
        CreateDialog.__init__(self, application)

        self.set_title(_('Create empty file'))
        self.set_label(_('Enter new file name:'))

        # create option to open file in editor
        self._checkbox_edit_after = gtk.CheckButton(_('Open file in editor'))

        # create template list
        vbox_templates = gtk.VBox(False, 0)

        label_templates = gtk.Label(_('Template:'))
        label_templates.set_alignment(0, 0.5)

        # columns: display name, value, icon name (see add_attribute below)
        self._templates = gtk.ListStore(str, str, str)

        cell_icon = gtk.CellRendererPixbuf()
        cell_name = gtk.CellRendererText()

        # create template combobox
        self._template_list = gtk.ComboBox(self._templates)
        self._template_list.set_row_separator_func(self._row_is_separator)
        self._template_list.connect('changed', self._template_changed)
        self._template_list.pack_start(cell_icon, False)
        self._template_list.pack_start(cell_name, True)
        self._template_list.add_attribute(cell_icon, 'icon-name', 2)
        self._template_list.add_attribute(cell_name, 'text', 0)

        # pack interface
        vbox_templates.pack_start(label_templates, False, False, 0)
        vbox_templates.pack_start(self._template_list, False, False, 0)

        self._container.pack_start(self._checkbox_edit_after, False, False, 0)
        self._container.pack_start(vbox_templates, False, False, 0)

        self._container.reorder_child(self._checkbox_edit_after, 1)
        self._container.reorder_child(vbox_templates, 1)

        # populate template list
        self._populate_templates()

        # set options to previously stored values
        section = self._application.options.section('create_dialog')
        self.set_mode(section.get('file_mode'))
        self._checkbox_edit_after.set_active(section.get('edit_file'))

        # show all widgets
        self._dialog.show_all()
    def _save_configuration(self, widget=None, data=None):
        """Save default configuration for create dialog"""
        section = self._application.options.section('create_dialog')
        # persist the current mode bitmask and the edit-after choice
        section.set('file_mode', self._mode)
        section.set('edit_file', self._checkbox_edit_after.get_active())
def _populate_tem |
"""
Contains tests for the collector base class.
"""
import pytest
from tests.base import TestBase
from statsite.metrics import Counter, KeyValue, Timer
from statsite.collector import Collector
class TestCollector(TestBase):
    def test_stores_aggregator(self):
        """
        Tests that collectors will store aggregator objects.
        """
        sentinel = object()
        assert sentinel is Collector(sentinel).aggregator

    def test_parse_metrics_succeeds(self):
        """
        Tests that parsing metrics succeeds and returns an array
        of proper metric objects.
        """
        parsed = Collector(None)._parse_metrics("k:1|kv\nj:27|ms")
        assert isinstance(parsed[0], KeyValue)
        assert isinstance(parsed[1], Timer)

    def test_parse_metrics_suppress_error(self):
        """
        Tests that parsing metrics will suppress errors if requested.
        """
        parsed = Collector(None)._parse_metrics("k:1|nope")
        assert len(parsed) == 0

    def test_parse_metrics_keeps_good_metrics(self, aggregator):
        """
        Tests that parse_metrics will keep the good metrics in the face
        of an error.
        """
        payload = "k::1|c\nj:2|nope\nk:2|ms"
        assert [Timer("k", 2)] == Collector(aggregator)._parse_metrics(payload)

    def test_parse_metrics_ignores_blank_lines(self, aggregator):
        """
        Tests that parse_metrics will properly ignore blank lines.
        """
        assert [Timer("k", 2)] == Collector(aggregator)._parse_metrics("\nk:2|ms")

    def test_add_metrics(self, aggregator):
        """
        Tests that add_metrics successfully adds an array of metrics to
        the configured aggregator.
        """
        timestamp = 17
        batch = [KeyValue("k", 1, timestamp), Counter("j", 2)]
        Collector(aggregator)._add_metrics(batch)
        assert batch == aggregator.metrics

    def test_set_aggregator(self, aggregator):
        """
        Tests that setting an aggregator properly works.
        """
        collector = Collector(aggregator)
        replacement = object()
        assert aggregator is collector.aggregator
        collector.set_aggregator(replacement)
        assert replacement is collector.aggregator
|
from django.contrib.auth import get_user_model
User = get_user_model()
from rest_framework import serializers
from nodeshot.core.base.serializers import ModelValidationSerializer
from nodeshot.community.profiles.serializers import ProfileRelationSerializer
from .models import Comment, Vote, Rating, NodeRatingCount
# Public API of this serializers module.
__all__ = ['CommentSerializer',
           'RatingSerializer',
           'CommentRelationSerializer',
           'VoteSerializer',
           'ParticipationSerializer']
class AutoNodeMixin(object):
    """
    Mixin that automatically injects the node into validated data.

    The node is read from the view (one extending NodeRelationViewMixin)
    found in the serializer context.
    """
    def validate(self, data):
        view = self.context['view']
        data['node'] = view.node
        return super(AutoNodeMixin, self).validate(data)
class CommentSerializer(AutoNodeMixin, ModelValidationSerializer):
    """ Comment serializer """
    # read-only display fields resolved from related objects
    node = serializers.ReadOnlyField(source='node.name')
    username = serializers.ReadOnlyField(source='user.username')

    class Meta:
        model = Comment
        fields = ('node', 'username', 'text', 'added')
        read_only_fields = ('added',)
class CommentRelationSerializer(serializers.ModelSerializer):
    """ display user info """
    # nested representation of the comment author
    user = ProfileRelationSerializer()

    class Meta:
        model = Comment
        fields = ('user', 'text', 'added',)
class RatingSerializer(AutoNodeMixin, ModelValidationSerializer):
    """ Rating serializer """
    node = serializers.ReadOnlyField(source='node.name')
    username = serializers.ReadOnlyField(source='user.username')

    class Meta:
        model = Rating
        fields = ('node', 'username', 'value',)
        # NOTE(review): 'added' is not listed in `fields`, so this entry
        # has no effect here -- confirm whether 'added' should be exposed.
        read_only_fields = ('added',)
class VoteSerializer(AutoNodeMixin, ModelValidationSerializer):
    """ Vote serializer """
    node = serializers.ReadOnlyField(source='node.name')
    username = serializers.ReadOnlyField(source='user.username')

    class Meta:
        model = Vote
        fields = ('node', 'username', 'vote',)
        # NOTE(review): 'added' is not listed in `fields`; entry is inert.
        read_only_fields = ('added',)
class ParticipationSerializer(serializers.ModelSerializer):
    """ Aggregated participation counters (NodeRatingCount). """
    class Meta:
        model = NodeRatingCount
        fields = ('likes', 'dislikes', 'rating_count',
                  'rating_avg', 'comment_count')
|
__author__ = 'jbowman'
# https://pythonspot.com/inner-classes/
class Human:
    """Toy example of nested ("inner") classes: a Human owns a Head and
    the Head owns a Brain."""

    class Head:
        """Inner class, created by Human.__init__ and addhead."""

        class Brain:
            """Innermost class, reachable as human.head.brain."""

            def think(self):
                return 'thinking...'

        def __init__(self):
            self.brain = self.Brain()

        def talk(self):
            return 'talking...'

    def __init__(self, name):
        self.name = name
        self.head = self.Head()

    def addhead(self):
        # Repeated calls rebind the same attribute: only one extra head.
        self.head2 = self.Head()
if __name__ == '__main__':  # execute only if run as a script directly
    joey = Human('Joey')
    print(joey.name)                 # Joey
    print(joey.head.talk())          # talking...
    print(joey.head.brain.think())   # thinking...
#! | /usr/bin/env python3.5
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless already configured.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")

    # Imported lazily so the settings variable is set before Django loads.
    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Custom filters for use in openshift_aws
'''
from ansible import errors
class FilterModule(object):
    ''' Custom ansible filters for use by openshift_aws role'''

    @staticmethod
    def scale_groups_serial(scale_group_info, upgrade=False):
        ''' Return the deployment serial for a scale group.

        Looks for the deployment_serial tag on the first scale group;
        during an upgrade the serial is incremented before returning.
        An empty result set yields serial 1.
        '''
        if scale_group_info == []:
            return 1

        scale_group_info = scale_group_info[0]

        if not isinstance(scale_group_info, dict):
            raise errors.AnsibleFilterError("|filter plugin failed: Expected scale_group_info to be a dict")

        for tag in scale_group_info['tags']:
            if tag['key'] == 'deployment_serial':
                serial = int(tag['value'])
                if upgrade:
                    serial += 1
                return serial

        raise errors.AnsibleFilterError("|filter plugin failed: deployment_serial tag was not found")

    @staticmethod
    def scale_groups_match_capacity(scale_group_info):
        ''' Return True when every scale group's instance count equals its
        desired capacity. '''
        return all(group['desired_capacity'] == len(group['instances'])
                   for group in scale_group_info)

    @staticmethod
    def build_instance_tags(clusterid):
        ''' Build the instance tag dictionary, including the dynamic
        "kubernetes.io/cluster/<clusterid>" key. '''
        tags = {'clusterid': clusterid}
        tags['kubernetes.io/cluster/{}'.format(clusterid)] = clusterid
        return tags

    def filters(self):
        ''' returns a mapping of filters to methods '''
        return {'build_instance_tags': self.build_instance_tags,
                'scale_groups_match_capacity': self.scale_groups_match_capacity,
                'scale_groups_serial': self.scale_groups_serial}
|
def get_attendance_records(file_path):
    """Return the data rows of an attendance CSV file.

    The first line is assumed to be a header and is skipped; remaining
    lines are returned verbatim (trailing newlines included).

    Fixes: uses a context manager so the file is always closed, and no
    longer indexes lines[0] (the unused `header` variable), which raised
    IndexError on an empty file -- an empty file now yields [].
    """
    with open(file_path, 'r') as attendance_file:
        lines = attendance_file.readlines()
    return lines[1:]
def convert_attendance_record_to_bools(sessions):
    """Map each session entry to 1 for 'Yes' and 0 for anything else."""
    return [1 if session == 'Yes' else 0 for session in sessions]
def session_attendance(file_path):
    """Aggregate an attendance CSV two ways: per-session head counts and
    per-attendee consistency (how many people attended N sessions)."""
    by_session = dict((u'Session_' + str(i), 0) for i in range(9))
    by_attendee = dict((str(i) + u'_Sessions', 0) for i in range(10))

    for record in get_attendance_records(file_path):
        # convert record from a string to a list of fields
        fields = record.strip('\n').split(',')
        sessions = convert_attendance_record_to_bools(fields[2:])

        # add record to attendee consistency dictionary
        by_attendee[str(sum(sessions)) + u'_Sessions'] += 1

        # add record to session attendance dictionary
        for index, attended in enumerate(sessions):
            by_session[u'Session_' + str(index)] += attended

    return {
        u"by_attendee": by_attendee,
        u"by_session": by_session
    }
# print session_attendance('attendance.csv')
import string
import collections
from operator import i | temgetter
# Lower-case English stop words excluded from the word counts below.
IGNORE = {
    'a', 'also', 'an', 'and', 'are', 'as', 'be', 'by', 'can', 'do', 'for', 'from',
    'have', 'in', 'is', 'it', 'just', 'more', 'not', 'of', 'on', 'or', 'our',
    'over', 'than', 'that', 'the', 'their', 'these', 'they', 'this', 'those',
    'to', 'up', 'we', 'with'
}
def build_word_counter(file_path):
    """Count word frequencies in a text file.

    Reads the file, strips punctuation, newlines and digits, lower-cases
    the text and returns a collections.Counter of the remaining words,
    excluding the IGNORE stop words.
    """
    with open(file_path, 'r') as f:
        speech = f.read()
    chars_to_remove = list(string.punctuation) + ['\n'] + list(string.digits)
    for char in chars_to_remove:
        speech = speech.replace(char, '')
    # Bug fix: lower-case BEFORE the stop-word test. Previously the test
    # ran on the original casing, so capitalized stop words ('The', 'And',
    # ...) slipped past IGNORE and were counted under their lower-case form.
    return collections.Counter(
        word for word in speech.lower().split() if word not in IGNORE)
def common_words(file_path):
    """Return, sorted alphabetically, the words occurring more than 10 times.

    NOTE(review): .decode('utf-8') implies Python 2 byte strings; under
    Python 3 this raises AttributeError -- confirm target interpreter.
    """
    word_counter = build_word_counter(file_path)
    return sorted(w.decode('utf-8') for w in word_counter if word_counter[w] > 10)
def most_used_words(file_path):
    """Return the 20 most frequent words, re-ordered ascending by
    (count, word).

    NOTE(review): .decode('utf-8') implies Python 2 byte strings.
    """
    word_counter = build_word_counter(file_path)
    # most_common(20) is descending; the sort re-orders those 20 ascending
    word_counter_sorted = sorted(word_counter.most_common(20), key=itemgetter(1,0))
    return [word.decode('utf-8') for word, _ in word_counter_sorted]
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import time
from item import Item, Items
from shinken.property import BoolProp, IntegerProp, StringProp, ListProp
from shinken.util import to_name_if_possible
from shinken.log import logger
class CheckModulation(Item):
    """A check modulation: while `check_period` is active, `check_command`
    is used in place of the regular check command."""
    id = 1  # zero is always special in database, so we do not take risk here
    my_type = 'checkmodulation'

    properties = Item.properties.copy()
    properties.update({
        'checkmodulation_name': StringProp(fill_brok=['full_status']),
        'check_command': StringProp(fill_brok=['full_status']),
        'check_period' : StringProp(brok_transformation=to_name_if_possible, fill_brok=['full_status']),
    })

    running_properties = Item.running_properties.copy()

    # check_period may legitimately be absent (None means 24x7, see is_correct)
    _special_properties = ('check_period',)

    macros = {}

    # For debugging purpose only (nice name)
    def get_name(self):
        """Return the configured checkmodulation_name."""
        return self.checkmodulation_name

    # Will look at if our check_period is ok, and give our check_command if we got it
    def get_check_command(self, t_to_go):
        """Return check_command when t_to_go falls inside check_period
        (or when no period is set); otherwise None."""
        if not self.check_period or self.check_period.is_time_valid(t_to_go):
            return self.check_command
        return None

    # Should have all properties, or a void check_period
    def is_correct(self):
        """Validate this item's configuration, logging each problem;
        return True only when everything checks out."""
        state = True
        cls = self.__class__

        # Raised all previously saw errors like unknown commands or timeperiods
        if self.configuration_errors != []:
            state = False
            for err in self.configuration_errors:
                logger.error("[item::%s] %s" % (self.get_name(), err))

        # every required, non-special property must be present
        for prop, entry in cls.properties.items():
            if prop not in cls._special_properties:
                if not hasattr(self, prop) and entry.required:
                    logger.warning("[checkmodulation::%s] %s property not set" % (self.get_name(), prop))
                    state = False  # Bad boy...

        # Ok now we manage special cases...
        # Service part
        if not hasattr(self, 'check_command'):
            logger.warning("[checkmodulation::%s] do not have any check_command defined" % self.get_name())
            state = False
        else:
            if self.check_command is None:
                logger.warning("[checkmodulation::%s] a check_command is missing" % self.get_name())
                state = False
            if not self.check_command.is_valid():
                logger.warning("[checkmodulation::%s] a check_command is invalid" % self.get_name())
                state = False

        # Ok just put None as check_period, means 24x7
        if not hasattr(self, 'check_period'):
            self.check_period = None

        return state

    # In the scheduler we need to relink the commandCall with
    # the real commands
    def late_linkify_cw_by_commands(self, commands):
        """Re-attach the real command object after unserialization."""
        if self.check_command:
            self.check_command.late_linkify_with_command(commands)
class CheckModulations(Items):
    """Collection of CheckModulation items."""
    name_property = "checkmodulation_name"
    inner_class = CheckModulation

    def linkify(self, timeperiods, commands):
        """Resolve period/command names into the real objects."""
        self.linkify_with_timeperiods(timeperiods, 'check_period')
        self.linkify_one_command_with_commands(commands, 'check_command')

    def new_inner_member(self, name=None, params=None):
        """Create and register a new CheckModulation.

        Bug fix: `params` previously defaulted to a shared mutable dict
        ({}); since the name is written into it below, state leaked
        between calls that omitted `params`.
        """
        if params is None:
            params = {}
        if name is None:
            name = CheckModulation.id
        params['checkmodulation_name'] = name
        cw = CheckModulation(params)
        self.items[cw.id] = cw
|
# Copyright (C) 2012-2015, Alphan Ulusoy (alphan@bu.edu)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published b | y
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOU | T ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
##############################################################################
# Copyright (c) 2013-2018, | Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/s | pack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import pytest
import llnl.util.filesystem
import spack.cmd.compiler
import spack.compilers
import spack.spec
import spack.util.pattern
from spack.version import Version
# Version string reported by the mock compiler scripts created below.
test_version = '4.5-spacktest'
@pytest.fixture()
def mock_compiler_dir(tmpdir):
    """Return a directory containing a fake, but detectable compiler."""
    tmpdir.ensure('bin', dir=True)
    bin_dir = tmpdir.join('bin')

    gcc_path = bin_dir.join('gcc')
    gxx_path = bin_dir.join('g++')
    gfortran_path = bin_dir.join('gfortran')

    # Shell stub that answers `-dumpversion` with the fake test_version.
    gcc_path.write("""\
#!/bin/sh
for arg in "$@"; do
    if [ "$arg" = -dumpversion ]; then
        echo '%s'
    fi
done
""" % test_version)

    # Create some mock compilers in the temporary directory
    llnl.util.filesystem.set_executable(str(gcc_path))
    # g++ and gfortran are byte-for-byte copies of the gcc stub
    gcc_path.copy(gxx_path, mode=True)
    gcc_path.copy(gfortran_path, mode=True)

    return str(tmpdir)
@pytest.mark.usefixtures('config', 'mock_packages')
class TestCompilerCommand(object):
    """Tests for the `spack compiler` add/remove sub-commands."""

    def test_compiler_remove(self):
        """Removing gcc@4.5.0 drops it from the known compiler specs."""
        args = spack.util.pattern.Bunch(
            all=True, compiler_spec='gcc@4.5.0', add_paths=[], scope=None
        )
        spack.cmd.compiler.compiler_remove(args)
        compilers = spack.compilers.all_compiler_specs()
        assert spack.spec.CompilerSpec("gcc@4.5.0") not in compilers

    def test_compiler_add(self, mock_compiler_dir):
        """`compiler find` on the mock dir registers the fake compiler."""
        # Compilers available by default.
        old_compilers = set(spack.compilers.all_compiler_specs())

        args = spack.util.pattern.Bunch(
            all=None,
            compiler_spec=None,
            add_paths=[mock_compiler_dir],
            scope=None
        )
        spack.cmd.compiler.compiler_find(args)

        # Ensure new compiler is in there
        new_compilers = set(spack.compilers.all_compiler_specs())
        new_compiler = new_compilers - old_compilers
        assert any(c.version == Version(test_version) for c in new_compiler)
|
# Copyright (C) 2013 Claudio "nex" Guarnieri (@botherder)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. | If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class NetworkIRC(Signature):
    """Matches analyses whose captured network traffic contains IRC."""
    name = "network_irc"
    description = "Connects to an IRC server, possibly part of a botnet"
    severity = 3
    categories = ["irc"]
    authors = ["nex"]
    minimum = "0.6"

    def run(self):
        network = self.results["network"]
        # Signature matches only when IRC data was captured and non-empty.
        return "irc" in network and len(network["irc"]) > 0
|
from . import utils
import bpy
from bpy.types import NodeTree, Node, NodeSocket
# Implementation of custom nodes from Python
# Derived from the NodeTree base type, similar to Menu, O | perator, Panel, etc.
class GameGraph(NodeTree):
    # Description string
    '''A custom node tree type that will show up in the node editor header'''
    # Unique identifier; GameGraphNode.poll matches against this string.
    bl_idname = 'GameGraphType'
    # Label for nice name display
    bl_label = 'Game Graph'
    # Icon identifier
    bl_icon = 'NODETREE'
# Custom socket types
class ReadySocket(NodeSocket):
    # Description string
    '''Custom node socket type'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'ReadySocket'
    # Label for nice name display
    bl_label = 'Then Socket'

    # Maximum number of links this socket accepts.
    # Bug fix: the previous __new__(self) assigned attributes to its first
    # argument (actually the class) and returned None, so direct
    # instantiation yielded None; the class attribute below already
    # provides the intended link_limit, so __new__ was removed.
    link_limit = 500

    # Optional function for drawing the socket input value
    def draw(self, context, layout, node, text):
        # Both branches of the old is_output/is_linked check did the same
        # thing (the prop() variant was commented out), so draw is a
        # single label either way.
        layout.label(text)

    # Socket color
    def draw_color(self, context, node):
        return (1.0, 0.4, 0.216, 0.5)
# Mix-in class for all custom nodes in this tree type.
# Defines a poll function to enable instantiation.
class GameGraphNode:
    """Mix-in for custom nodes; poll() restricts them to GameGraph trees."""
    @classmethod
    def poll(cls, ntree):
        tree_type = ntree.bl_idname
        print("type", tree_type)
        return tree_type == 'GameGraphType'
# Derived from the Node base type.
class MyCustomNode(Node, GameGraphNode):
    # === Basics ===
    # Description string
    '''A custom node'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'CustomNodeType'
    # Label for nice name display
    bl_label = 'Custom Node'
    # Icon identifier
    bl_icon = 'SOUND'

    # === Custom Properties ===
    # These work just like custom properties in ID data blocks
    # Extensive information can be found under
    # http://wiki.blender.org/index.php/Doc:2.6/Manual/Extensions/Python/Properties
    myStringProperty = bpy.props.StringProperty()
    myFloatProperty = bpy.props.FloatProperty(default=3.1415926)

    # === Optional Functions ===
    # Initialization function, called when a new node is created.
    # This is the most common place to create the sockets for a node, as shown below.
    # NOTE: this is not the same as the standard __init__ function in Python, which is
    # a purely internal Python method and unknown to the node system!
    def init(self, context):
        # NOTE(review): 'CustomSocketType' is not defined in this module
        # (only 'ReadySocket' is) -- confirm the intended socket idname.
        self.inputs.new('CustomSocketType', "Hello")
        self.inputs.new('NodeSocketFloat', "World")
        self.inputs.new('NodeSocketVector', "!")

        self.outputs.new('NodeSocketColor', "How")
        self.outputs.new('NodeSocketColor', "are")
        self.outputs.new('NodeSocketFloat', "you")

    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)

    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")

    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        layout.label("Node settings")
        layout.prop(self, "myFloatProperty")

    # Detail buttons in the sidebar.
    # If this function is not defined, the draw_buttons function is used instead
    def draw_buttons_ext(self, context, layout):
        layout.prop(self, "myFloatProperty")
        # myStringProperty button will only be visible in the sidebar
        layout.prop(self, "myStringProperty")

    # Optional: custom label
    # Explicit user label overrides this, but here we can define a label dynamically
    def draw_label(self):
        return "I am a custom node"
def register():
    """Register this module's classes with Blender.

    Bug fix: previously referenced MyCustomTree, MyCustomSocket and
    nodeitems_utils/node_categories, none of which exist in this module,
    so calling register() raised NameError. Registers the classes that
    are actually defined here instead.
    """
    bpy.utils.register_class(GameGraph)
    bpy.utils.register_class(ReadySocket)
    bpy.utils.register_class(MyCustomNode)
def unregister():
    """Unregister this module's classes from Blender.

    Bug fix: previously referenced MyCustomTree, MyCustomSocket and
    nodeitems_utils, which are not defined in this module, so calling
    unregister() raised NameError. Mirrors register() above.
    """
    bpy.utils.unregister_class(MyCustomNode)
    bpy.utils.unregister_class(ReadySocket)
    bpy.utils.unregister_class(GameGraph)
# Classes handed to the project's Registrar helper for bpy registration.
bpy_classes = utils.Registrar([
    GameGraph,
    ReadySocket
])
|
# __init__.py - collection of Luxembourgian numbers
# coding: utf-8
#
# Copyright (C) 2012 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General | Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, | MA
# 02110-1301 USA
"""Collection of Luxembourgian numbers."""
# provide vat as an alias
from stdnum.lu import tva as vat # noqa: F401
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import pathlib
from typing import Any, Dict, List, Type
import tinydb
import yaml
import snapcraft
from . import errors, migration
logger = logging.getLogger(__name__)
class _YAMLStorage(tinydb.Storage):
    """Provide YAML-backed storage for TinyDB."""

    def __init__(self, path: str):
        self.path = pathlib.Path(path)
        logger.debug(f"_YAMLStorage init {self.path}")

    def read(self) -> Dict[str, Any]:
        """Read database from file."""
        logger.debug(f"_YAMLStorage read: {self.path}")

        try:
            with self.path.open() as fd:
                db_data = yaml.safe_load(fd)
        except FileNotFoundError:
            # Missing file just means an empty database.
            return dict()

        # The document must be a mapping with string keys only.
        is_valid = isinstance(db_data, dict) and all(
            isinstance(key, str) for key in db_data
        )
        if not is_valid:
            raise RuntimeError(
                f"Invalid datastore contents for {str(self.path)}: {db_data!r}"
            )

        return db_data

    def write(self, data) -> None:
        """Write database (data) to file."""
        logger.debug(f"_YAMLStorage write: {self.path} data={data!r}")
        self.path.write_text(yaml.dump(data))

    def close(self):
        """Nothing to do since we do not keep <path> open."""
        logger.debug(f"_YAMLStorage close: {self.path}")
class _YAMLStorageReadOnly(_YAMLStorage):
    # Read-only variant: reads normally, silently discards all writes.
    def write(self, data) -> None:
        """Ignore any writes in read-only mode."""
class Datastore:
    """Datastore class, providing context manager for TinyDB.

    Manages migrations and storage requirements. If migrations
    do not indicate support for current datastore version,
    SnapcraftDatastoreVersionUnsupported will be raised. In that
    event, some basic fallback mode can be utilized by re-opening
    datastore in read-only mode."""

    def __init__(
        self,
        *,
        path: pathlib.Path,
        migrations: List[Type[migration.Migration]],
        read_only: bool = False,
        snapcraft_version: str = snapcraft.__version__,
    ) -> None:
        """Open the datastore at `path`, applying `migrations` in order
        unless `read_only` is set (read-only skips migrations/writes)."""
        self.path = path
        self._snapcraft_version = snapcraft_version

        if read_only:
            storage_class = _YAMLStorageReadOnly
        else:
            storage_class = _YAMLStorage

        self.db = tinydb.TinyDB(str(path), storage=storage_class)
        logger.debug(f"Datastore init: {self.path} read_only={read_only}")

        # Force the datastore to be read by making a query, otherwise it is
        # only read on the first access.
        _ = self.db.tables()

        # Nothing left to do if opening in read-only mode.
        if read_only:
            return

        current_version: int = 0
        supported_version: int = 0

        # Apply each migration; the last one defines the schema version
        # this snapcraft supports.
        for migration_class in migrations:
            current_version = migration_class(
                db=self.db, snapcraft_version=self._snapcraft_version
            ).apply()
            supported_version = migration_class.SCHEMA_VERSION

        # On-disk schema newer than we understand: refuse to continue.
        if current_version > supported_version:
            raise errors.SnapcraftDatastoreVersionUnsupported(
                path=self.path,
                current_version=current_version,
                supported_version=supported_version,
            )

    def __enter__(self) -> tinydb.TinyDB:
        return self.db

    def __exit__(self, exc_value, exc_type, exc_traceback) -> None:
        # NOTE(review): the context-manager protocol order is
        # (exc_type, exc_value, exc_traceback); the first two names here
        # are swapped, but all are unused so behavior is unaffected.
        self.close()

    def close(self) -> None:
        """Close database."""
        self.db.close()
        logger.debug(f"Datastore close: {self.path}")
|
from django.db import models, DEFAULT_DB_ALIAS, connection
from django.contrib.auth.models import User
from django.conf import settings
# Fixture-regression model exercising a renamed default manager.
class Animal(models.Model):
    name = models.CharField(max_length=150)
    latin_name = models.CharField(max_length=150)
    count = models.IntegerField()
    weight = models.FloatField()

    # use a non-default name for the default manager
    specimens = models.Manager()

    def __unicode__(self):
        return self.name
class Plant(models.Model):
    name = models.CharField(max_length=150)

    class Meta:
        # For testing when upper case letter in app name; regression for #4057
        db_table = "Fixtures_regress_plant"
class Stuff(models.Model):
    # both fields nullable so fixtures may omit them
    name = models.CharField(max_length=20, null=True)
    owner = models.ForeignKey(User, null=True)

    def __unicode__(self):
        # unicode(None) yields u'None', so null fields still render
        return unicode(self.name) + u' is owned by ' + unicode(self.owner)
class Absolute(models.Model):
    name = models.CharField(max_length=40)

    # Class-level counter, bumped once per instantiation -- presumably so
    # tests can count how many objects fixture loading constructed.
    load_count = 0

    def __init__(self, *args, **kwargs):
        super(Absolute, self).__init__(*args, **kwargs)
        Absolute.load_count += 1
# Base of a multi-table inheritance pair (see Child below).
class Parent(models.Model):
    name = models.CharField(max_length=10)

    class Meta:
        ordering = ('id',)
# Multi-table inheritance child of Parent.
class Child(Parent):
    data = models.CharField(max_length=10)
# Models to regression test #7572
# M2M target for Article (regression test #7572).
class Channel(models.Model):
    name = models.CharField(max_length=255)
c | lass Article(models.Model):
title = models.CharField(max_length=255)
channels = models.ManyToManyField(Channel)
class Meta:
| ordering = ('id',)
# Models to regression test #11428
# Base model for the proxy-model regression (#11428).
class Widget(models.Model):
    name = models.CharField(max_length=255)

    class Meta:
        ordering = ('name',)

    def __unicode__(self):
        return self.name
# Proxy over Widget: same table, no new fields (regression #11428).
class WidgetProxy(Widget):
    class Meta:
        proxy = True
# Check for forward references in FKs and M2Ms with natural keys
class TestManager(models.Manager):
    def get_by_natural_key(self, key):
        """Look an object up by its natural key (the `name` field)."""
        return self.get(name=key)
class Store(models.Model):
    # manager providing get_by_natural_key for deserialization
    objects = TestManager()
    name = models.CharField(max_length=255)

    class Meta:
        ordering = ('name',)

    def __unicode__(self):
        return self.name

    def natural_key(self):
        # natural keys are tuples by Django convention
        return (self.name,)
class Person(models.Model):
    objects = TestManager()
    name = models.CharField(max_length=255)

    class Meta:
        ordering = ('name',)

    def __unicode__(self):
        return self.name

    # Person doesn't actually have a dependency on store, but we need to define
    # one to test the behaviour of the dependency resolution algorithm.
    def natural_key(self):
        return (self.name,)
    natural_key.dependencies = ['fixtures_regress.store']
class Book(models.Model):
    # Depends on Person (FK) and Store (M2M); both use natural keys, so this
    # exercises dependency-ordered serialization.
    name = models.CharField(max_length=255)
    author = models.ForeignKey(Person)
    stores = models.ManyToManyField(Store)
    class Meta:
        ordering = ('name',)
    def __unicode__(self):
        return u'%s by %s (available at %s)' % (
            self.name,
            self.author.name,
            ', '.join(s.name for s in self.stores.all())
        )
class NKManager(models.Manager):
    def get_by_natural_key(self, data):
        return self.get(data=data)
class NKChild(Parent):
    # Inherited model with a natural key on its own unique column.
    data = models.CharField(max_length=10, unique=True)
    objects = NKManager()
    def natural_key(self):
        # NOTE: returns a bare string rather than a tuple, unlike Store/Person.
        return self.data
    def __unicode__(self):
        return u'NKChild %s:%s' % (self.name, self.data)
class RefToNKChild(models.Model):
    # FK and M2M references to a natural-key model, to test forward references
    # during deserialization.
    text = models.CharField(max_length=10)
    nk_fk = models.ForeignKey(NKChild, related_name='ref_fks')
    nk_m2m = models.ManyToManyField(NKChild, related_name='ref_m2ms')
    def __unicode__(self):
        return u'%s: Reference to %s [%s]' % (
            self.text,
            self.nk_fk,
            ', '.join(str(o) for o in self.nk_m2m.all())
        )
# Some models with pathological circular dependencies
class Circle1(models.Model):
    # Circle1 <-> Circle2 form a two-model dependency cycle.
    name = models.CharField(max_length=255)
    def natural_key(self):
        return self.name
    natural_key.dependencies = ['fixtures_regress.circle2']
class Circle2(models.Model):
    name = models.CharField(max_length=255)
    def natural_key(self):
        return self.name
    natural_key.dependencies = ['fixtures_regress.circle1']
class Circle3(models.Model):
    # Depends on itself — a degenerate one-model cycle.
    name = models.CharField(max_length=255)
    def natural_key(self):
        return self.name
    natural_key.dependencies = ['fixtures_regress.circle3']
class Circle4(models.Model):
    # Circle4 -> Circle5 -> Circle6 -> Circle4 is a three-model cycle.
    name = models.CharField(max_length=255)
    def natural_key(self):
        return self.name
    natural_key.dependencies = ['fixtures_regress.circle5']
class Circle5(models.Model):
    name = models.CharField(max_length=255)
    def natural_key(self):
        return self.name
    natural_key.dependencies = ['fixtures_regress.circle6']
class Circle6(models.Model):
    name = models.CharField(max_length=255)
    def natural_key(self):
        return self.name
    natural_key.dependencies = ['fixtures_regress.circle4']
class ExternalDependency(models.Model):
    # Natural-key dependency on a model outside any cycle (Book).
    name = models.CharField(max_length=255)
    def natural_key(self):
        return self.name
    natural_key.dependencies = ['fixtures_regress.book']
# Model for regression test of #11101
class Thingy(models.Model):
    name = models.CharField(max_length=255)
|
from django.apps im | port AppConfig
class AuditorConfig(AppConfig):
    """Django application configuration for the ``auditor`` app."""
    name = 'auditor'
|
def extractFeelinthedarkWordpressCom(item):
	"""
	Parser for 'feelinthedark.wordpress.com'.

	Returns a release message for known tags, None for previews or items
	without volume/chapter info, and False when no tag matches.
	"""
	title = item['title']
	vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
	if "preview" in title.lower() or not (vol or chp):
		return None
	known_series = [
		('PRC', 'PRC', 'translated'),
		('Loiterous', 'Loiterous', 'oel'),
	]
	for tag, series_name, release_type in known_series:
		if tag in item['tags']:
			return buildReleaseMessageWithType(
				item, series_name, vol, chp,
				frag=frag, postfix=postfix, tl_type=release_type)
	return False
|
pons.warlock import BloodFury
from SDWLE.constants import CHARACTER_CLASS, CARD_RARITY, MINION_TYPE
from SDWLE.game_objects import Minion
from SDWLE.tags.action import Summon, Kill, Damage, Discard, DestroyManaCrystal, Give, Equip, \
Remove, Heal, ReplaceHeroWithMinion
from SDWLE.tags.base import Effect, Aura, Deathrattle, Battlecry, Buff, ActionTag
from SDWLE.tags.card_source import HandSource
from SDWLE.tags.condition import IsType, MinionCountIs, Not, OwnersTurn, IsHero, And, Adjacent, IsMinion
from SDWLE.tags.event import TurnEnded, CharacterDamaged, DidDamage, Damaged
from SDWLE.tags.selector import MinionSelector, PlayerSelector, \
SelfSelector, BothPlayer, HeroSelector, CharacterSelector, RandomPicker, Attribute, EventValue, CardSelector, \
FriendlyPlayer
from SDWLE.tags.status import ChangeHealth, ManaChange, ChangeAttack, Immune
class FlameImp(MinionCard):
    """Flame Imp: 1-mana 3/2 Demon; battlecry deals 3 damage to your own hero."""
    def __init__(self):
        super().__init__(
            "Flame Imp", 1, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON,
            minion_type=MINION_TYPE.DEMON,
            battlecry=Battlecry(Damage(3), HeroSelector()))

    def create_minion(self, player):
        return Minion(3, 2)


class PitLord(MinionCard):
    """Pit Lord: 4-mana 5/6 Demon; battlecry deals 5 damage to your own hero."""
    def __init__(self):
        super().__init__(
            "Pit Lord", 4, CHARACTER_CLASS.WARLOCK, CARD_RARITY.EPIC,
            minion_type=MINION_TYPE.DEMON,
            battlecry=Battlecry(Damage(5), HeroSelector()))

    def create_minion(self, player):
        return Minion(5, 6)
class Voidwalker(MinionCard):
    """Voidwalker: 1-mana 1/3 Demon with Taunt."""
    def __init__(self):
        super().__init__(
            "Voidwalker", 1, CHARACTER_CLASS.WARLOCK, CARD_RARITY.FREE,
            minion_type=MINION_TYPE.DEMON)

    def create_minion(self, player):
        return Minion(1, 3, taunt=True)


class DreadInfernal(MinionCard):
    """Dread Infernal: 6-mana 6/6 Demon; battlecry hits every character for 1."""
    def __init__(self):
        super().__init__(
            "Dread Infernal", 6, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON,
            minion_type=MINION_TYPE.DEMON,
            battlecry=Battlecry(Damage(1), CharacterSelector(players=BothPlayer())))

    def create_minion(self, player):
        return Minion(6, 6)
class Felguard(MinionCard):
    """Felguard: 3-mana 3/5 Demon with Taunt; battlecry destroys one of your mana crystals."""
    def __init__(self):
        super().__init__(
            "Felguard", 3, CHARACTER_CLASS.WARLOCK, CARD_RARITY.RARE,
            minion_type=MINION_TYPE.DEMON,
            battlecry=Battlecry(DestroyManaCrystal(), PlayerSelector()))

    def create_minion(self, player):
        return Minion(3, 5, taunt=True)


class Doomguard(MinionCard):
    """Doomguard: 5-mana 5/7 Demon with Charge; battlecry discards two cards."""
    def __init__(self):
        super().__init__(
            "Doomguard", 5, CHARACTER_CLASS.WARLOCK, CARD_RARITY.RARE,
            minion_type=MINION_TYPE.DEMON,
            battlecry=Battlecry(Discard(amount=2), PlayerSelector()))

    def create_minion(self, player):
        return Minion(5, 7, charge=True)
class Succubus(MinionCard):
    """Succubus: 2-mana 4/3 Demon; battlecry discards one card."""
    def __init__(self):
        super().__init__(
            "Succubus", 2, CHARACTER_CLASS.WARLOCK, CARD_RARITY.FREE,
            minion_type=MINION_TYPE.DEMON,
            battlecry=Battlecry(Discard(), PlayerSelector()))

    def create_minion(self, player):
        return Minion(4, 3)


class SummoningPortal(MinionCard):
    """Summoning Portal: 4-mana 0/4; aura makes your minions cost 2 less (min 1)."""
    def __init__(self):
        super().__init__(
            "Summoning Portal", 4, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON)

    def create_minion(self, player):
        return Minion(0, 4, auras=[
            Aura(ManaChange(-2, 1, minimum=1), CardSelector(condition=IsMinion()))])
class BloodImp(MinionCard):
    """Blood Imp: 1-mana 0/1 stealthed Demon; at end of turn gives a random friendly minion +1 health."""
    def __init__(self):
        super().__init__(
            "Blood Imp", 1, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON,
            minion_type=MINION_TYPE.DEMON)

    def create_minion(self, player):
        end_of_turn_buff = Effect(
            TurnEnded(),
            ActionTag(Give(ChangeHealth(1)), MinionSelector(picker=RandomPicker())))
        return Minion(0, 1, stealth=True, effects=[end_of_turn_buff])


class LordJaraxxus(MinionCard):
    """Lord Jaraxxus: 9-mana Demon; replaces your hero and equips Blood Fury."""
    def __init__(self):
        super().__init__(
            "Lord Jaraxxus", 9, CHARACTER_CLASS.WARLOCK, CARD_RARITY.LEGENDARY,
            minion_type=MINION_TYPE.DEMON,
            battlecry=(Battlecry(ReplaceHeroWithMinion(Jaraxxus()), HeroSelector()),
                       Battlecry(Remove(), SelfSelector()),
                       Battlecry(Equip(BloodFury()), PlayerSelector())))

    def create_minion(self, player):
        return Minion(3, 15)
class Infernal(MinionCard):
    """Infernal: uncollectible 6-mana 6/6 Demon (hero-power token)."""
    def __init__(self):
        super().__init__(
            "Infernal", 6, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON, False,
            minion_type=MINION_TYPE.DEMON)

    def create_minion(self, player):
        return Minion(6, 6)


class VoidTerror(MinionCard):
    """Void Terror: 3-mana 3/3 Demon; devours adjacent minions, gaining their stats."""
    def __init__(self):
        absorb_neighbors = Battlecry(
            Give([Buff(ChangeHealth(Attribute("health", MinionSelector(Adjacent())))),
                  Buff(ChangeAttack(Attribute("attack", MinionSelector(Adjacent()))))]),
            SelfSelector())
        super().__init__(
            "Void Terror", 3, CHARACTER_CLASS.WARLOCK, CARD_RARITY.RARE,
            minion_type=MINION_TYPE.DEMON,
            battlecry=(absorb_neighbors,
                       Battlecry(Kill(), MinionSelector(Adjacent()))))

    def create_minion(self, player):
        return Minion(3, 3)
class Voidcaller(MinionCard):
    """Voidcaller: 4-mana 3/4 Demon; deathrattle summons a random Demon from your hand."""
    def __init__(self):
        super().__init__(
            "Voidcaller", 4, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON,
            minion_type=MINION_TYPE.DEMON)

    def create_minion(self, player):
        demon_from_hand = Summon(
            HandSource(FriendlyPlayer(), [IsType(MINION_TYPE.DEMON)]))
        return Minion(3, 4, deathrattle=Deathrattle(demon_from_hand, PlayerSelector()))


class AnimaGolem(MinionCard):
    """Anima Golem: 6-mana 9/9 Mech; dies at end of turn if it is its controller's only minion."""
    def __init__(self):
        super().__init__(
            "Anima Golem", 6, CHARACTER_CLASS.WARLOCK, CARD_RARITY.EPIC,
            minion_type=MINION_TYPE.MECH)

    def create_minion(self, player):
        lonely_death = Effect(
            TurnEnded(MinionCountIs(1), BothPlayer()),
            ActionTag(Kill(), SelfSelector()))
        return Minion(9, 9, effects=[lonely_death])
class Imp(MinionCard):
    """Imp: uncollectible 1-mana 1/1 Demon token (warlock variant)."""
    def __init__(self):
        super().__init__(
            "Imp", 1, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON, False,
            minion_type=MINION_TYPE.DEMON,
            ref_name="Imp (warlock)")

    def create_minion(self, player):
        return Minion(1, 1)


class WorthlessImp(MinionCard):
    """Worthless Imp: uncollectible 1-mana 1/1 Demon token."""
    def __init__(self):
        super().__init__(
            "Worthless Imp", 1, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON,
            False, MINION_TYPE.DEMON)

    def create_minion(self, p):
        return Minion(1, 1)
class FelCannon(MinionCard):
    """Fel Cannon: 4-mana 3/5 Mech; at end of turn hits a random non-Mech minion for 2."""
    def __init__(self):
        super().__init__(
            "Fel Cannon", 4, CHARACTER_CLASS.WARLOCK, CARD_RARITY.RARE,
            minion_type=MINION_TYPE.MECH)

    def create_minion(self, player):
        blast_non_mech = Effect(
            TurnEnded(),
            ActionTag(Damage(2),
                      MinionSelector(Not(IsType(MINION_TYPE.MECH, True)),
                                     BothPlayer(), RandomPicker())))
        return Minion(3, 5, effects=[blast_non_mech])


class MalGanis(MinionCard):
    """Mal'Ganis: 9-mana 9/7 Demon; other Demons get +2/+2 and your hero is Immune."""
    def __init__(self):
        super().__init__(
            "Mal'Ganis", 9, CHARACTER_CLASS.WARLOCK, CARD_RARITY.LEGENDARY,
            minion_type=MINION_TYPE.DEMON)

    def create_minion(self, player):
        return Minion(9, 7, auras=[
            Aura(ChangeHealth(2), MinionSelector(IsType(MINION_TYPE.DEMON))),
            Aura(ChangeAttack(2), MinionSelector(IsType(MINION_TYPE.DEMON))),
            Aura(Immune(), HeroSelector())])
class FloatingWatcher(MinionCard):
    """Floating Watcher: 5-mana 4/4 Demon; gains +2/+2 when your hero takes damage on your turn."""
    def __init__(self):
        super().__init__(
            "Floating Watcher", 5, CHARACTER_CLASS.WARLOCK, CARD_RARITY.COMMON,
            minion_type=MINION_TYPE.DEMON)

    def create_minion(self, player):
        grow_on_hero_damage = Effect(
            CharacterDamaged(And(IsHero(), OwnersTurn())),
            ActionTag(Give([Buff(ChangeAttack(2)), Buff(ChangeHealth(2))]),
                      SelfSelector()))
        return Minion(4, 4, effects=[grow_on_hero_damage])
class MistressOfPain(MinionCard):
def __init__(self):
super().__init__("Mistress of Pain", 2, CHARACTER_CLASS.WARLOCK, CARD_RARITY.RARE,
|
et_driver):
failed_amphora = data_models.Amphora(
id=uuidutils.generate_uuid(), role=constants.ROLE_MASTER,
load_balancer_id=uuidutils.generate_uuid())
amp_flow = self.AmpFlow.get_failover_amphora_flow(
failed_amphora, 2)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.LOADBALANCER, amp_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.VIP, amp_flow.requires)
self.assertIn(constants.ADDED_PORTS, amp_flow.provides)
self.assertIn(constants.AMP_VRRP_INT, amp_flow.provides)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.AMPHORAE, amp_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
self.assertIn(constants.BASE_PORT, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.DELTA, amp_flow.provides)
self.assertIn(constants.LOADBALANCER, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
self.assertEqual(7, len(amp_flow.requires))
self.assertEqual(13, len(amp_flow.provides))
def test_get_failover_flow_standalone(self, mock_get_net_driver):
failed_amphora = data_models.Amphora(
id=uuidutils.generate_uuid(), role=constants.ROLE_STANDALONE,
load_balancer_id=uuidutils.generate_uuid(), vrrp_ip='2001:3b8::32')
amp_flow = self.AmpFlow.get_failover_amphora_flow(
failed_amphora, 1)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.LOADBALANCER, amp_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.VIP, amp_flow.requires)
self.assertIn(constants.ADDED_PORTS, amp_flow.provides)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.AMPHORAE, amp_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
self.assertIn(constants.BASE_PORT, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.DELTA, amp_flow.provides)
self.assertIn(constants.LOADBALANCER, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
self.assertEqual(7, len(amp_flow.requires))
self.assertEqual(12, len(amp_flow.provides))
def test_get_failover_flow_bogus_role(self, mock_get_net_driver):
failed_amphora = data_models.Amphora(id=uuidutils.generate_uuid(),
role='bogus')
amp_flow = self.AmpFlow.get_failover_amphora_flow(
failed_amphora, 1)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
print(amp_flow.requires)
self.assertEqual(1, len(amp_flow.requires))
self.assertEqual(1, len(amp_flow.provides))
def test_cert_rotate_amphora_flow(self, mock_get_net_driver):
self.AmpFlow = amphora_flows.AmphoraFlows()
amp_rotate_flow = self.AmpFlow.cert_rotate_amphora_flow()
self.assertIsInstance(amp_rotate_flow, flow.Flow)
self.assertIn(constants.SERVER_PEM, amp_rotate_flow.provides)
self.assertIn(constants.AMPHORA, amp_rotate_flow.requires)
self.assertEqual(1, len(amp_rotate_flow.provides))
self.assertEqual(2, len(amp_rotate_flow.requires))
def test_get_vrrp_subflow(self, mock_get_net_driver):
vrrp_subflow = self.AmpFlow.get_vrrp_subflow('123')
self.assertIsInstance(vrrp_subflow, flow.Flow)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides)
self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides)
self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires)
self.assertIn(constants.AMPHORAE, vrrp_subflow.requires)
self.assertEqual(2, len(vrrp_subflow.provides))
self.assertEqual(2, len(vrrp_subflow.requires))
def test_get_vrrp_subflow_dont_create_vrrp_group(
self, mock_get_net_driver):
vrrp_subflow = self.AmpFlow.get_vrrp_subflow('123',
create_vrrp_group=False)
self.assertIsInstance(vrrp_subflow, flow.Flow)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides)
self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides)
self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires)
self.assertIn(constants.AMPHORAE, vrrp_subflow.requires)
self.assertEqual(2, len(vrrp_subflow.provides))
self.assertEqual(2, len(vrrp_subflow.requires))
def | test_get_post_map_lb_subflow(self, mock_get_net_driver):
self.AmpFlow = amphora_flows.AmphoraFlows()
amp_flow = self.AmpFlow._get_post_map_lb_subflow(
'SOMEPREFIX', constants.ROLE_MASTER)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.AMPHORA_ID, | amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertEqual(1, len(amp_flow.provides))
self.assertEqual(2, len(amp_flow.requires))
amp_flow = self.AmpFlow._get_post_map_lb_subflow(
'SOMEPREFIX', constants.ROLE_BACKUP)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertEqual(1, len(amp_flow.provides))
self.assertEqual(2, len(amp_flow.requires))
amp_flow = self.AmpFlow._get_post_map_lb_subflow(
'SOMEPREFIX', constants.ROLE_STANDALONE)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertEqual(1, len(amp_flow.provides))
self.assertEqual(2, len(amp_flow.requires))
amp_flow = self.AmpFlow._get_post_map_lb_subflow(
'SOMEPREFIX', 'BOGUS_ROLE')
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertEqual(1, len(amp_flow.provides))
self.assertEqual(2, len(amp_flow.requires))
def test_update_amphora_config_flow(self, mock_get_net_driver):
amp_flow = self.AmpFlow.update_amphora_config_flow()
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.AMPHORA, amp_flow.requires)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertEqual(2, len(amp_flow.requires))
self.assertEqual(0, len(amp_flow.provides))
def test_get_amphora_for_lb_failover_flow_single(self,
mock_get_net_driver):
FAILED_PORT_ID |
* stride )
filt = filt.reshape( fh*fw )
newIm = numpy.zeros ( (height * stride), numpy.int )
code = """
int sum=0, pos;
int ys=0, fys=0;
for (int y=0; y < (height-(fh/2)); y++) {
for (int x=0; x < (stride-(fw/2)); x++) {
fys=sum=0;
pos=ys+x;
int th = ((height-y) < fh ) ? height-y : fh;
int tw = ((stride-x) < fw ) ? stride-x : fw;
for (int fy=0; fy < th; fy++) {
for (int fx=0; fx < tw; fx++) {
sum+=im[pos+fx]*filt[fys+fx];
}
fys+=fw;
pos+=stride;
}
newIm[ys+x] = sum;
}
ys+=stride;
}
"""
scipy.weave.inline(code,['height','stride','fh','fw','im','filt','newIm'])
if reshape:
return newIm.reshape(height,stride )
else:
return newIm
class barImage (object):
    def __init__ ( self, im ):
        # Flatten the PIL image into a numpy array, then reshape it to
        # (height, stride) so pixels are addressed as self.im[y][x].
        self.im = numpy.array ( im.getdata() )
        self.stride, self.height = im.size
        self.im = self.im.reshape(self.height,self.stride)
    # Note: im is indexed as [y][x] not...
    def printImg( self, l=[], offset=0):
        # ASCII-art dump of a fixed 110-column window of the image for
        # debugging; pixels listed in `l` are marked with 'B'.
        # NOTE(review): the mutable default `l=[]` is shared across calls, but
        # it is only read here, so it is harmless in practice.
        l = [ (i[1], i[2]) for i in l ]
        print l
        for y in range( 0, self.height-1):
            output = []
            for x in range( 5+offset, self.stride-1):
                # Only columns 5+offset .. 115+offset are rendered.
                if x > 115+offset:
                    continue
                i = self.im[y][x]
                if (x,y) in l:
                    output.append("B")
                elif i < 20:
                    output.append(".")
                elif i < 64:
                    output.append("+")
                elif i < 128:
                    output.append("*")
                elif i < 196:
                    output.append("x")
                else:
                    output.append("X")
            print "%03d" % y, "".join(output)
        print " 56789 123456789 123456789 123456789 123456789 123456789 123456789 123456789 123456789 123456789"
def applyFilter ( self, f, reshape=True ):
value = 0
filt = getattr( self, f, False)
if type(filt) == type(False):
filt = numpy.array( getattr(filter, f, False), dtype=numpy.int )
setattr( self, f, filt )
if type(filt) == type(False):
raise ValueError("Error: filter %s was not found in filter.py" % f)
return convolve( self.im, filt, reshape )
def findBarcode( self ):
results = self.applyFilter("scaledFilter", reshape=False)
list = [ (x[1], int(x[0] % self.stride), int(x[0] / self.stride)) for x in enumerate(results) if x[1] > 1000 ]
list.sort(reverse=True)
return list[0:20]
    def unAlias(s):
        """Remove dithering by convolving with unAliasFilter, then
        re-thresholding the image to pure 0/255 values."""
        #s.im= ss.convolve2d( s.im, unAliasFilter, mode="same" )
        s.im=convolve( s.im, unAliasFilter, reshape=True )
        # Responses above 1000 become 255, everything else 0.
        s.im=numpy.piecewise(s.im, [ s.im > 1000 ], [255, 0])
        return
    """ Convolve operator does the following:
        for y in range(1, s.height-1):
            for x in range(1, s.stride-1):
                if s.im[y][x-1] == s.im[y][x+1] == s.im[y+1][x] == s.im[y-1][x]:
                    s.im[y][x] = s.im[y][x+1]
        return
    """
    def bw( self, whitePoint=64):
        # Threshold to an inverted black/white image: pixels darker than
        # whitePoint become 255, everything else becomes 0.
        self.im=numpy.piecewise(self.im, [self.im < whitePoint, self.im >= whitePoint], [255, 0])
        #self.im=self.vApplyBW( self.im, whitePoint )
    def virtualLine(self, x1, y1, x2, y2, ox=0, oy=0):
        # Sample the image along the segment (x1,y1)-(x2,y2) with bilinear
        # interpolation, one sample per unit of length. Returns [] for
        # segments that are too short (< 300), too vertical (dx < 150), or
        # that would leave the image bounds; otherwise a 1-D int array.
        totalLength = math.sqrt(math.pow(x2-x1,2) + math.pow(y2-y1,2))
        if totalLength < 300:
            return []
        # Normalize so we always walk left-to-right.
        if x1 < x2:
            sx,sy,ex,ey=(x1,y1,x2,y2)
        else:
            sx,sy,ex,ey=(x2,y2,x1,y1)
        # Per-step increments along the segment.
        xgain = float(ex-sx)/totalLength
        ygain = float(ey-sy)/totalLength
        if ex - sx < 150:
            # Skip vertical codes, save them for the next run.
            return []
        if sx < 1 or (ex+ox) >= self.stride or sx > self.stride:
            return []
        if not (1< sy <self.height) or not (1< sy+ygain*totalLength <self.height):
            return []
        #slope = float(h2-h1)/(w2-w1)
        newLine = numpy.zeros( shape=(totalLength), dtype=int )
        # Inlined C (scipy.weave): weighted average of the four pixels
        # surrounding each sample point.
        code = """
        float x=sx, y=sy;
        for ( int i=1; i < int(totalLength); i++ ) {
            int top = stride*int(y) + int(x),
                bot = stride*int(y+1) + int(x);
            float xr = x-int(x),
                  xl = 1-xr,
                  yt = y-int(y),
                  yb = 1-yt;
            newLine[i]= im[top]*xr*yt +
                        im[top-1]*xl*yt +
                        im[bot]*xr*yb +
                        im[bot-1]*xl*yb;
            x+=xgain;
            y+=ygain;
        }
        """
        stride, im = self.stride, self.im
        scipy.weave.inline(code,['im', 'stride', \
            'newLine', 'totalLength', 'ygain', 'xgain', 'sx', 'sy'])
        if DEBUG:
            log.debug( "".join(
                [ chr( 0x2e + int(x/6.07142857142857142857) ) for x in list(newLine) ] ) )
        return newLine
    def checkLineCharacteristics( self, line ):
        # Heuristic test that a sampled line looks like it crosses a barcode.
        # The line must be 301..474 samples long, and must never accumulate 18
        # samples below 128 without being interrupted by at least two samples
        # >= 128 (which resets both counters).
        # NOTE(review): after bw() the image is inverted (dark source pixels
        # are 255), so "< 128" here likely means background — confirm.
        whiteCount= blackCount= 0
        if 300 < len(line) < 475:
            for i in line:
                if int(i) < 128:
                    whiteCount+=1
                else:
                    blackCount+=1
                if whiteCount >= 18:
                    # Too long a featureless run: not a barcode line.
                    return False
                if blackCount > 1:
                    whiteCount=0
                    blackCount=0
        else:
            return False
        return True
    def getValidPoint ( self, point, possible ):
        # Given a candidate left/right corner `point`, find the first endpoint
        # in `possible` such that several horizontal-ish probe lines (offsets
        # 8, 18, 28, 38, 48 below the corners) all look like barcode lines.
        # Returns the matching endpoint, or False if none qualifies.
        for endpoint in possible:
            #print point, endpoint
            found = True
            for i in range ( 8, 50, 10 ):
                # Once one probe fails we skip the rest (no early break so the
                # loop structure stays simple).
                if not found:
                    continue
                #print point, endpoint, i
                line = self.virtualLine(point[0]+2, point[1]+i, endpoint[0], endpoint[1]+i)
                if not self.checkLineCharacteristics(line):
                    found = False
                    #print "False"
            #print "True"
            if found:
                return endpoint
        return False
    def getValidPair ( self, l, r ):
        """Returns the first pair that is a barcode and is located at the top
        edges of a barcode.

        l and r are candidate left / right corner points; the lists are
        recursively pruned from whichever side had the higher (later) topmost
        candidate until a validated pair is found, or False if none is.
        """
        if not l or not r:
            return False
        # Sort both candidate lists by their y coordinate (topmost first).
        l.sort( key=lambda x: x[1] )
        r.sort( key=lambda x: x[1] )
        if l[0][1] > r[0][1]:
            # Right side is higher: scan right-to-left from the left corner.
            r.sort( key=lambda x: x[0], reverse=True )
            res = self.getValidPoint( l[0], r )
            if not res:
                return self.getValidPair( l[1:], r)
            return l[0], res
        else:
            # Left side is higher: scan left-to-right from the right corner.
            l.sort( key=lambda x: x[0], reverse=False )
            res = self.getValidPoint( r[0], l )
            if not res:
                return self.getValidPair( l, r[1:] )
            return res, r[0]
    def removeNeighbors ( self, l, rev ):
        # Collapse candidate points whose y coordinates are within 5 pixels of
        # an earlier candidate, restarting (recursively) after each removal.
        # NOTE(review): range(1, len(l)-1) never examines the final element —
        # looks like an off-by-one, but left as-is; confirm before changing.
        l.sort( key= lambda x: x[0], reverse=rev )
        restart = False
        sizeOfArray = len(l)-1
        for i in range (1, sizeOfArray):
            for j in range(i, sizeOfArray):
                if abs( l[i-1][1] - l[j][1] ) < 5:
                    restart = True
                    l[j] = False
                if restart==True:
                    # Drop the falsified entries and start over.
                    return self.removeNeighbors ([ x for x in l if x], rev)
        return l
    def getCode ( self, barcode ):
        """
        Return a single code from a code 128 barcode.

        Scans the column-sum profile for level transitions larger than 1250,
        recording their positions. Once 7 transitions are collected, returns
        (positions, remaining_profile); returns False when the profile is
        exhausted first.
        """
        code=[]
        start = False
        trend = 1           # +1 while levels are rising, -1 while falling
        for pos, c in enumerate(barcode):
            if (pos+1) >= len(barcode):
                continue
            if not start:
                if c > int(10*250): # Ignore leading white space
                    start=True
                    level = barcode[pos+1]
                    code.append(pos)
                continue
            # A transition needs both this sample and the next to differ from
            # the tracked level by more than 1250 (debounces single spikes).
            if abs(level - c) > 1250 and abs(level-barcode[pos+1]) > 1250:
                if (trend<0 and (level-c)>0) or (trend>0 and (level-c)<0):
                    # Trend is in the same direction we are going, ignore.
                    continue
                code.append(pos)
                if trend > 0:
                    trend=-1
                else:
                    trend=1
                level = c
            # Track the running extreme in the current trend direction.
            if trend > 0:
                level = max(c, level)
            else:
                level = min(c, level)
            if len(code) >= 7:
                return code, barcode[pos:]
        return False
    def applyHeuristics ( self, barcode=[5,] ):
        """
        Try to determine the numerical values of barcode image.
        @barcode: list to prepend to output. (defaults to [5,])
        @return: barcode weights (i.e. 211214... prepended with pre)
        """
        # NOTE(review): looks unfinished — `barcode` is never used,
        # `characters` is accumulated but discarded, and the function always
        # returns False. Confirm intent before relying on the docstring above.
        rotated = numpy.rot90(self.im, 3)
        # Profile: sum of the first 30 pixels of each rotated row (column).
        values = [ int(sum( list(line)[:30] )) for line in rotated ]
        characters=[]
        codes=True
        while (codes):
            codes = self.getCode(values)
            if codes:
                if DEBUG:
                    print codes[0][0], codes[0][-1]
                    print "".join([ "%c" % int(v/255+0x5f) for v in values[codes[0][0]:codes[0][-1]] ])
                    print codes[0]
                characters.append(values[codes[0][0]:codes[0][-1]])
                values=codes[1]
        return False
def findBarcodeLimits( self, barType ):
#origImg |
version = requests.__version__
major, minor, patch = [int(i) for i in version.split('.')]
except Exception:
# Probably some new-fangled version, so it should support verify
pass
else:
if (major, minor, patch) < (0, 8, 8):
sys.stderr.write(
'Warning: the Stripe library requires that your Python '
'"requests" library be newer than version 0.8.8, but your '
'"requests" library is version %s. Stripe will fall back to '
'an alternate HTTP library so everything should work. We '
'recommend upgrading your "requests" library. If you have any '
'questions, please contact support@stripe.com. (HINT: running '
'"pip install -U requests" should upgrade your requests '
'library to the latest version.)' % (version,))
requests = None
try:
from google.appengine.api import urlfetch
except ImportError:
urlfetch = None
def new_default_http_client(*args, **kwargs):
    """Instantiate the best available HTTP client implementation.

    Preference order: App Engine urlfetch, then requests, then pycurl, and
    finally a urllib2-based fallback (which cannot verify SSL certificates,
    hence the warning).
    """
    if urlfetch:
        client_class = UrlFetchClient
    elif requests:
        client_class = RequestsClient
    elif pycurl:
        client_class = PycurlClient
    else:
        client_class = Urllib2Client
        warnings.warn(
            "Warning: the Stripe library is falling back to urllib2/urllib "
            "because neither requests nor pycurl are installed. "
            "urllib2's SSL implementation doesn't verify server "
            "certificates. For improved security, we suggest installing "
            "requests.")
    return client_class(*args, **kwargs)
class HT | TPClient(object):
def __init__(self, verify_ssl_certs=True):
self._verify_ssl_certs = verify_ssl_certs
def request(self, method, url, headers, post_data=None):
raise NotImplementedError(
'HTTPClient subclasses must implement `request`')
class RequestsClient(HTTPClient):
    """HTTP transport backed by the third-party `requests` library."""
    name = 'requests'
    def request(self, method, url, headers, post_data=None):
        # Returns (content, status_code); network failures are translated
        # into error.APIConnectionError by _handle_request_error.
        kwargs = {}
        if self._verify_ssl_certs:
            # Pin verification to the CA bundle shipped with the library.
            kwargs['verify'] = os.path.join(
                os.path.dirname(__file__), 'data/ca-certificates.crt')
        else:
            kwargs['verify'] = False
        try:
            try:
                result = requests.request(method,
                                          url,
                                          headers=headers,
                                          data=post_data,
                                          timeout=80,
                                          **kwargs)
            except TypeError, e:
                raise TypeError(
                    'Warning: It looks like your installed version of the '
                    '"requests" library is not compatible with Stripe\'s '
                    'usage thereof. (HINT: The most likely cause is that '
                    'your "requests" library is out of date. You can fix '
                    'that by running "pip install -U requests".) The '
                    'underlying error was: %s' % (e,))
            # This causes the content to actually be read, which could cause
            # e.g. a socket timeout. TODO: The other fetch methods probably
            # are succeptible to the same and should be updated.
            content = result.content
            status_code = result.status_code
        except Exception, e:
            # Would catch just requests.exceptions.RequestException, but can
            # also raise ValueError, RuntimeError, etc.
            self._handle_request_error(e)
        return content, status_code
    def _handle_request_error(self, e):
        # Wrap any transport-level failure in an APIConnectionError with a
        # user-facing message; never returns normally.
        if isinstance(e, requests.exceptions.RequestException):
            msg = ("Unexpected error communicating with Stripe.  "
                   "If this problem persists, let us know at "
                   "support@stripe.com.")
            err = "%s: %s" % (type(e).__name__, str(e))
        else:
            msg = ("Unexpected error communicating with Stripe. "
                   "It looks like there's probably a configuration "
                   "issue locally.  If this problem persists, let us "
                   "know at support@stripe.com.")
            err = "A %s was raised" % (type(e).__name__,)
            if str(e):
                err += " with error message %s" % (str(e),)
            else:
                err += " with no error message"
        msg = textwrap.fill(msg) + "\n\n(Network error: %s)" % (err,)
        raise error.APIConnectionError(msg)
class UrlFetchClient(HTTPClient):
    """HTTP transport backed by Google App Engine's urlfetch service."""
    name = 'urlfetch'
    def request(self, method, url, headers, post_data=None):
        # Returns (content, status_code); urlfetch errors are translated into
        # error.APIConnectionError by _handle_request_error.
        try:
            result = urlfetch.fetch(
                url=url,
                method=method,
                headers=headers,
                # Google App Engine doesn't let us specify our own cert bundle.
                # However, that's ok because the CA bundle they use recognizes
                # api.stripe.com.
                validate_certificate=self._verify_ssl_certs,
                # GAE requests time out after 60 seconds, so make sure we leave
                # some time for the application to handle a slow Stripe
                deadline=55,
                payload=post_data
            )
        except urlfetch.Error, e:
            self._handle_request_error(e, url)
        return result.content, result.status_code
    def _handle_request_error(self, e, url):
        # Map each urlfetch error class to a user-facing message, then raise.
        if isinstance(e, urlfetch.InvalidURLError):
            msg = ("The Stripe library attempted to fetch an "
                   "invalid URL (%r). This is likely due to a bug "
                   "in the Stripe Python bindings. Please let us know "
                   "at support@stripe.com." % (url,))
        elif isinstance(e, urlfetch.DownloadError):
            msg = "There was a problem retrieving data from Stripe."
        elif isinstance(e, urlfetch.ResponseTooLargeError):
            msg = ("There was a problem receiving all of your data from "
                   "Stripe.  This is likely due to a bug in Stripe. "
                   "Please let us know at support@stripe.com.")
        else:
            msg = ("Unexpected error communicating with Stripe. If this "
                   "problem persists, let us know at support@stripe.com.")
        msg = textwrap.fill(msg) + "\n\n(Network error: " + str(e) + ")"
        raise error.APIConnectionError(msg)
class PycurlClient(HTTPClient):
name = 'pycurl'
    def request(self, method, url, headers, post_data=None):
        # Perform the HTTP request with pycurl, buffering the response body in
        # a StringIO; returns (body, status_code).
        s = util.StringIO.StringIO()
        curl = pycurl.Curl()
        if method == 'get':
            curl.setopt(pycurl.HTTPGET, 1)
        elif method == 'post':
            curl.setopt(pycurl.POST, 1)
            curl.setopt(pycurl.POSTFIELDS, post_data)
        else:
            # DELETE/PUT/etc. go through CUSTOMREQUEST.
            curl.setopt(pycurl.CUSTOMREQUEST, method.upper())
        # pycurl doesn't like unicode URLs
        curl.setopt(pycurl.URL, util.utf8(url))
        curl.setopt(pycurl.WRITEFUNCTION, s.write)
        curl.setopt(pycurl.NOSIGNAL, 1)
        curl.setopt(pycurl.CONNECTTIMEOUT, 30)
        curl.setopt(pycurl.TIMEOUT, 80)
        curl.setopt(pycurl.HTTPHEADER, ['%s: %s' % (k, v)
                    for k, v in headers.iteritems()])
        if self._verify_ssl_certs:
            # Validate against the CA bundle shipped with the library.
            curl.setopt(pycurl.CAINFO, os.path.join(
                os.path.dirname(__file__), 'data/ca-certificates.crt'))
        else:
            curl.setopt(pycurl.SSL_VERIFYHOST, False)
        try:
            curl.perform()
        except pycurl.error, e:
            self._handle_request_error(e)
        rbody = s.getvalue()
        rcode = curl.getinfo(pycurl.RESPONSE_CODE)
        return rbody, rcode
def _handle_request_error(self, e):
if e[0] in [pycurl.E_COULDNT_CONNECT,
pycurl.E_COULDNT_RESOLVE_HOST,
pycurl.E_OPERATION_TIMEOUTED]:
msg = ("Could not connect to Stripe. Please check your "
"internet connection and try again. If this problem "
"persists, you should check Stripe's service status at "
"https://twitter.c |
from game.models import (Card,
HEART,
SPADE,
CLUB,
DIAMOND)
def test_card_creation_without_name():
    """Without an explicit name, a card's name defaults to its suit."""
    card = Card('heart', 5)
    assert card.name == 'heart'
    assert card.suit == 'heart'
    assert card.value == 5
def test_card_creation_with_name():
    """An explicit name overrides the suit-derived default."""
    card = Card('spade', 5, name='jabberwock')
    assert card.name == 'jabberwock'
    assert card.suit == 'spade'
    assert card.value == 5
de | f test_card_spade_is_monster():
c = Card(SPADE, 5)
assert c.is_monster() == True
def test_card_club_is_monster():
    """Clubs count as monsters."""
    assert Card(CLUB, 5).is_monster() == True
def test_card_heart_is_not_monster():
    """Hearts are not monsters."""
    assert Card(HEART, 5).is_monster() == False
def test_card_diamond_is_not_monster():
    """Diamonds are not monsters."""
    assert Card(DIAMOND, 5).is_monster() == False
|
import numpy as np
import os
__location__ = os.path.realpath(
os.path.join(os.getcwd(),
os.path.dirname(__file__))
) + "/"
class Dataset():
    """
    In-memory train/test dataset loaded from two CSV files, similar to an R
    data frame.

    Each CSV is expected to contain one header row (always skipped when
    reading the numeric data), feature columns, and the class label as the
    last column.
    """
    def __init__(self, training_file, test_file, header=False):
        """
        training_file: path to the training CSV (features + label column).
        test_file: path to the test CSV (features + label column).
        header: when True, attribute names are taken from the first row of
            the test file; otherwise names "column 0", "column 1", ... are
            autogenerated (the original docstring had this inverted).
        """
        # Parse each file once and slice, instead of running genfromtxt twice
        # per file as the original did.
        training_data = np.genfromtxt(
            training_file, dtype=float,
            delimiter=',', skip_header=1
        )
        test_data = np.genfromtxt(
            test_file, dtype=float,
            delimiter=',', skip_header=1
        )
        # training set and labels (label is the last column)
        self.training = training_data[:, 0:-1]
        self.training_labels = training_data[:, -1:].ravel()
        # test set and labels
        self.test = test_data[:, 0:-1]
        self.test_labels = test_data[:, -1:].ravel()
        # attribute names
        if header:
            self.attributes = np.genfromtxt(
                test_file, dtype=str,
                delimiter=',', max_rows=1
            )
        else:
            # BUG FIX: the original sized this by the number of training
            # *rows* (training_labels.shape[0]); attribute names must match
            # the number of feature columns.
            ncols = range(self.training.shape[1])
            self.attributes = ["column {}".format(x) for x in ncols]
    def dummy(self):
        """
        ToDo: It generates dummy data (an small subset to practice)
        """
        # BUG FIX: the original read `def dummy()` — no colon and no self —
        # which was a SyntaxError that prevented the module from importing.
        pass
class Examples(object):
    """
    Convenience factory for the example datasets bundled under data/.
    """
    def __init__(self):
        """
        Stateless; methods simply construct Dataset objects.
        """
        pass
    def spam(self):
        """
        Spam dataset
        """
        base = __location__ + "data/"
        return Dataset(base + "spam_training.csv", base + "spam_test.csv")
    def gene_expression(self):
        """
        Gene expression dataset
        """
        base = __location__ + "data/"
        return Dataset(base + "gene_expression_training.csv",
                       base + "gene_expression_test.csv",
                       header=True)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, getopt
from datetime import datetime
import math
from gann import *
def print_usage():
    # CLI help for the three supported modes: classic square, date-based
    # square, and date-based sub-square.
    print """
    classic Gann square: gann.py -o <output file name> -s <square size>
    Gann square based on date: gann.py -o <output file name> -a <base date> -b <final date> -m <path to list of dates to mark>
    Gann sub square based on date: gann.py -o <output file name> -a <base date> -b <final date> -m <path to list of dates to mark> -r "<left>;<bottom>;<right>;<up>"
    input date format: "dd/MM/yyyy"
    """
def main(argv):
    # Parse CLI options and emit the requested Gann square to a file.
    # Modes: -s <size> for a classic square; -a/-b (with optional -m/-r)
    # for a date-based square, optionally restricted to a sub-rectangle.
    cell_size = 30
    date_format = "%d/%m/%Y"
    # --------------------------------------
    output_file_name = ''
    marks_file_name = ''
    square_size = -1
    date_a = None
    date_b = None
    left, bot, right, up = 0, 0, 0, 0
    # --------------------------------------
    try:
        opts, args = getopt.getopt(argv, "ho:s:a:b:m:r:", ["ofile=", "size=", "a_date=", "b_date=", "mfile=", "rect="])
    except getopt.GetoptError:
        print_usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print_usage()
            sys.exit()
        elif opt in ("-o", "--ofile"):
            output_file_name = arg
        elif opt in ("-s", "--size"):
            square_size = int(arg)
        elif opt in ("-a", "--a_date"):
            date_a = datetime.strptime(arg, date_format)
        elif opt in ("-b", "--b_date"):
            date_b = datetime.strptime(arg, date_format)
        elif opt in ("-m", "--mfile"):
            marks_file_name = arg
        elif opt in ("-r", "--rect"):
            # sub-square bounds given as "<left>;<bottom>;<right>;<up>"
            rect = arg.split(';')
            try:
                left, bot, right, up = int(rect[0]), int(rect[1]), int(rect[2]), int(rect[3])
            except ValueError as e:
                # bounds stay 0 -> the full square is produced below
                print 'Failed to parse range!'
    if output_file_name == '':
        print_usage()
        sys.exit(2)
    if square_size != -1:
        # classic Gann square
        # Info
        print "Cells: %i" % (square_size * square_size)
        print "Square size: %i" % square_size
        print "Cell size: %i" % cell_size
        print "Building..."
        stream = open(output_file_name, 'w')
        create_gann_square_classic(square_size, cell_size, stream)
        stream.close()
    elif date_a and date_b:
        # date based Gann square
        delta = date_b - date_a
        # smallest odd square with at least one cell per day in the range
        square_size = int(math.ceil(math.sqrt(delta.days)))
        if square_size % 2 == 0:
            square_size += 1
        # Info
        print "Cells: %i" % (square_size * square_size)
        print "Square size: %i" % square_size
        print "Cell size: %i" % cell_size
        # Process
        print "Loading data..."
        marks = load_marks(marks_file_name)
        print "Building..."
        stream = open(output_file_name, 'w')
        if (left != 0 or bot != 0 or right != 0 or up != 0) and left < right and bot < up:
            # NOTE(review): the +1 suggests the CLI bounds are inclusive while
            # the builder's are exclusive -- confirm against gann module
            create_gann_sub_square_dates((left, bot, right+1, up+1), cell_size, date_a, marks, stream)
        else:
            create_gann_square_dates(square_size, cell_size, date_a, marks, stream)
        stream.close()
    else:
        print_usage()
        sys.exit(2)
    print "Done. See {0}".format(output_file_name)
if __name__ == "__main__":
    main(sys.argv[1:])
from m1 import <error descr="Cannot find reference 'foo' in 'm1.pyi'">foo</error>
from m1 import <error descr="Cannot find reference 'bar' in 'm1.pyi'">bar</error>
from m1 im | port bar_imported
from m1 import <error descr="Cannot find reference 'm2' in 'm1.pyi'">m2</error>
from m1 import m2_imported
print(foo, b | ar, bar_imported, m2, m2_imported)
|
#!/usr/bin/python2
# Copyright (C) 2014 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http | ://www.gnu.org/licenses/>.
#
"""FreeIPA platform
FreeIPA is a server for identity, policy, and audit.
"""
from os.path import abspath, dirname
import sys
if __name__ == '__main__':
    # include ../ for ipasetup.py
    sys.path.append(dirname(dirname(abspath(__file__))))
    from ipasetup import ipasetup  # noqa: E402
    ipasetup(
        name="ipaplatform",
        doc=__doc__,
        # the package sources live in this very directory
        package_dir={'ipaplatform': ''},
        packages=[
            "ipaplatform",
            "ipaplatform.base",
            "ipaplatform.fedora",
            "ipaplatform.redhat",
            "ipaplatform.rhel"
        ],
    )
|
"""
| This is helper module to profile the whole package
in Python 3.7 profiling modules from command line will be supported
and this module will no longer be needed
"""
import cProfile
from .__main__ import main
if __name__ == "__main__":
    # Profile one full run of the package's main() and dump stats to disk.
    # NOTE(review): ".nemchmarker.cprofile" looks like a typo for
    # ".benchmarker.cprofile" -- confirm nothing reads it before renaming.
    cProfile.run("main()", filename=".nemchmarker.cprofile")
|
_call(command):
if command == "tor --version":
return TOR_VERSION_OUTPUT.splitlines()
else:
raise ValueError("stem.util.system.call received an unexpected command: %s" % command)
mocking.mock(stem.util.system.call, _mock_call)
version = stem.version.get_system_tor_version()
self.assert_versions_match(version, 0, 2, 2, 35, None, "git-73ff13ab3cc9570d")
self.assertEqual("73ff13ab3cc9570d", version.git_commit)
stem.version.VERSION_CACHE = {}
def test_parsing(self):
"""
Tests parsing by the Version class constructor.
"""
# valid versions with various number of compontents to the version
version = Version("0.1.2.3-tag")
self.assert_versions_match(version, 0, 1, 2, 3, "tag", None)
version = Version("0.1.2.3")
self.assert_versions_match(version, 0, 1, 2, 3, None, None)
version = Version("0.1.2-tag")
self.assert_versions_match(version, 0, 1, 2, None, "tag", None)
version = Version("0.1.2")
self.assert_versions_match(version, 0, 1, 2, None, None, None)
# checks an empty tag
version = Version("0.1.2.3-")
self.assert_versions_match(version, 0, 1, 2, 3, "", None)
version = Version("0.1.2-")
self.assert_versions_match(version, 0, 1, 2, None, "", None)
# check with extra informaton
version = Version("0.1.2.3-tag (git-73ff13ab3cc9570d)")
self.assert_versions_match(version, 0, 1, 2, 3, "tag", "git-73ff13ab3cc9570d")
self.assertEqual("73ff13ab3cc9570d", version.git_commit)
version = Version("0.1.2.3-tag ()")
self.assert_versions_match(version, 0, 1, 2, 3, "tag", "")
version = Version("0.1.2 (git-73ff13ab3cc9570d)")
self.assert_versions_match(version, 0, 1, 2, None, None, "git-73ff13ab3cc9570d")
# checks invalid version strings
self.assertRaises(ValueError, stem.version.Version, "")
self.assertRaises(ValueError, stem.version.Version, "1.2.3.4nodash")
self.assertRaises(ValueError, stem.version.Version, "1.2.3.a")
self.assertRaises(ValueError, stem.version.Version, "1.2.a.4")
self.assertRaises(ValueError, stem.version.Version, "1x2x3x4")
self.assertRaises(ValueError, stem.version.Version, "12.3")
self.assertRaises(ValueError, stem.version.Version, "1.-2.3")
def test_comparison(self):
"""
Tests comparision between Version instances.
"""
# check for basic incrementing in each portion
self.assert_version_is_greater("1.1.2.3-tag", "0.1.2.3-tag")
self.assert_version_is_greater("0.2.2.3-tag", "0.1.2.3-tag")
self.assert_version_is_greater("0.1.3.3-tag", "0.1.2.3-tag")
self.assert_version_is_greater("0.1.2.4-tag", "0.1.2.3-tag")
self.assert_version_is_greater("0.1.2.3-ugg", "0.1.2.3-tag")
self.assert_version_is_equal("0.1.2.3-tag", "0.1.2.3-tag")
# check with common tags
self.assert_version_is_greater("0.1.2.3-beta", "0.1.2.3-alpha")
self.assert_version_is_greater("0.1.2.3-rc", "0.1.2.3-beta")
# checks that a missing patch level equals zero
self.assert_version_is_equal("0.1.2", "0.1.2.0")
self.assert_version_is_equal("0.1.2-tag", "0.1.2.0-tag")
# checks for missing patch or status
self.assert_version_is_greater("0.1.2.3-tag", "0.1.2.3")
self.assert_version_is_greater("0.1.2.3-tag", "0.1.2-tag")
self.assert_version_is_greater("0.1.2.3-tag", "0.1.2")
self.assert_version_is_equal("0.1.2.3", "0.1.2.3")
self.assert_version_is_equal("0.1.2", "0.1.2")
def test_nonversion_comparison(self):
"""
Checks that we can be compared with other types.
In python 3 on only equality comparisons work, greater than and less than
comparisons result in a TypeError.
"""
test_version = Version("0.1.2.3")
self.assertNotEqual(test_version, None)
self.assertNotEqual(test_version, 5)
def test_string(self):
"""
Tests the Version -> string conversion.
"""
# checks conversion with various numbers of arguments
self.assert_string_matches("0.1.2.3-tag")
self.assert_string_matches("0.1.2.3")
self.assert_string_matches("0.1.2")
def test_requirements_greater_than(self):
"""
Checks a VersionRequirements with a single greater_than rule.
"""
requirements = stem.version._VersionRequirements()
requirements.greater_than(Version("0.2.2.36"))
self.assertTrue(Version("0.2.2.36") >= requirements)
self.assertTrue(Version("0.2.2.37") >= requirements)
self.assertTrue(Version("0.2.3.36") >= requirements)
self.assertFalse(Version("0.2.2.35") >= requirements)
self.assertFalse(Version("0.2.1.38") >= requirements)
requirements = stem.version._VersionRequirements()
requirements.greater_than(Version("0.2.2.36"), False)
self.assertFalse(Version("0.2.2.35") >= requirements)
self.assertFalse(Version("0.2.2.36") >= requirements)
self.assertTrue(Version("0.2.2.37") >= requirements)
def test_requirements_less_than(self):
"""
Checks a VersionRequirements with a single less_than rule.
"""
requirements = stem.version._VersionRequirements()
requirements.less_than(Version("0.2.2.36"))
self.assertTrue(V | ersion("0.2.2.36") >= requirements)
self.assertTrue(Version("0.2.2.35") >= requirements)
self.assertTrue(Version("0.2.1.38") >= requirements)
self.assertFalse(Version("0.2.2.37") >= requirements)
self.assertFalse(Version("0.2.3.36") >= requirements)
requirements = stem.version._VersionRequirements()
| requirements.less_than(Version("0.2.2.36"), False)
self.assertFalse(Version("0.2.2.37") >= requirements)
self.assertFalse(Version("0.2.2.36") >= requirements)
self.assertTrue(Version("0.2.2.35") >= requirements)
def test_requirements_in_range(self):
"""
Checks a VersionRequirements with a single in_range rule.
"""
requirements = stem.version._VersionRequirements()
requirements.in_range(Version("0.2.2.36"), Version("0.2.2.38"))
self.assertFalse(Version("0.2.2.35") >= requirements)
self.assertTrue(Version("0.2.2.36") >= requirements)
self.assertTrue(Version("0.2.2.37") >= requirements)
self.assertFalse(Version("0.2.2.38") >= requirements)
# rule for 'anything in the 0.2.2.x series'
requirements = stem.version._VersionRequirements()
requirements.in_range(Version("0.2.2.0"), Version("0.2.3.0"))
for index in xrange(0, 100):
self.assertTrue(Version("0.2.2.%i" % index) >= requirements)
def test_requirements_multiple_rules(self):
"""
Checks a VersionRequirements is the logical 'or' when it has multiple rules.
"""
# rule to say 'anything but the 0.2.2.x series'
requirements = stem.version._VersionRequirements()
requirements.greater_than(Version("0.2.3.0"))
requirements.less_than(Version("0.2.2.0"), False)
self.assertTrue(Version("0.2.3.0") >= requirements)
self.assertFalse(Version("0.2.2.0") >= requirements)
for index in xrange(0, 100):
self.assertFalse(Version("0.2.2.%i" % index) >= requirements)
def assert_versions_match(self, version, major, minor, micro, patch, status, extra):
"""
Asserts that the values for a types.Version instance match the given
values.
"""
self.assertEqual(major, version.major)
self.assertEqual(minor, version.minor)
self.assertEqual(micro, version.micro)
self.assertEqual(patch, version.patch)
self.assertEqual(status, version.status)
self.assertEqual(extra, version.extra)
if extra is None:
self.assertEqual(None, version.git_commit)
def assert_version_is_greater(self, first_version, second_version):
"""
Asserts that the parsed version of the first version is greate than the
second (also checking the inverse).
"""
version1 = Version(first_version)
version2 = Version(second_version)
self.assertEqual(version1 > version2, True)
self.assertEqual(version1 < version2, False)
def assert_version_is_equal(self, first_version, second_version):
"""
Asserts that the parsed version of the first version equals the second.
"""
version1 = Version(first_version)
version2 = Version(second_version)
self.assertEqual(ver |
#!/usr/bin/python3
import os
# import logging
import logger as logger_
WS_SERVER = 'ws://127.0.0.1:4055/ws'  # local websocket endpoint
WS_TIMEOUT = 10  # seconds
APP_DIR = os.path.dirname(__file__)
# SQL_DIR = os.path.join(APP_DIR, 'sql')
# rotating file logger living next to the application sources
logger = logger_.rotating_log(os.path.join(
    APP_DIR, 'kts_snmp.log'), 'kts_snmp_log')
# SNMP listener address/port -- NOTE(review): hard-coded, consider config
SNMP_IP = '192.168.222.179'
SNMP_PORT = 11162
ROUTER_URL = 'http://127.0.0.1/snmp'
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013-2015 Pervasive Displays, Inc.
# Copyright 2015, Syed Faisal Akber
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import sys
import os
from datetime import datetime
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
import socket
import fcntl
import struct
import Adafruit_Nokia_LCD as LCD
import Adafruit_GPIO.SPI as SPI
def get_ip_address(ifname):
    # Return the IPv4 address bound to interface *ifname* (e.g. 'eth0')
    # using the Linux-only SIOCGIFADDR ioctl on a throwaway UDP socket.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    return socket.inet_ntoa(fcntl.ioctl(
        s.fileno(),
        0x8915,  # SIOCGIFADDR
        # NOTE(review): '256s' packing of a str works on Python 2; under
        # Python 3 this needs bytes -- confirm target interpreter
        struct.pack('256s', ifname[:15])
    )[20:24])
# Raspberry Pi hardware SPI config:
DC = 23
RST = 24
SPI_PORT = 0
SPI_DEVICE = 0
# palette values for the 1-bit display image
WHITE = 1
BLACK = 0
# fonts are in different places on Raspbian/Angstrom so search
possible_fonts = [
    '/usr/share/fonts/truetype/ttf-dejavu/DejaVuSansMono-Bold.ttf',  # R.Pi
    '/usr/share/fonts/truetype/freefont/FreeMono.ttf',               # R.Pi
    '/usr/share/fonts/truetype/LiberationMono-Bold.ttf',             # B.B
    # the comma below was missing, so implicit string concatenation fused
    # this entry with the Arch FreeMonoBold path into one bogus path
    '/usr/share/fonts/truetype/DejaVuSansMono-Bold.ttf',             # B.B
    '/usr/share/fonts/TTF/FreeMonoBold.ttf',                         # Arch
    '/usr/share/fonts/TTF/DejaVuSans-Bold.ttf'                       # Arch
]
FONT_FILE = ''
for f in possible_fonts:
    if os.path.exists(f):
        FONT_FILE = f
        break
if '' == FONT_FILE:
    # raising a bare string is not a valid exception type
    raise RuntimeError('no font file found')
FONT_SIZE = 10
def main():
    """main program - draw and display a test image"""
    now = datetime.today()
    # Hardware SPI usage:
    disp = LCD.PCD8544(DC, RST, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=4000000))
    # Initialize library.
    disp.begin(contrast=60)
    # Clear display.
    disp.clear()
    disp.display()
    # initially set all white background
    image = Image.new('1', (84,48), WHITE)
    # prepare for drawing
    draw = ImageDraw.Draw(image)
    width, height = image.size
    # font = ImageFont.truetype(FONT_FILE, FONT_SIZE)
    font = ImageFont.load_default()
    # first text line: the board's eth0 IPv4 address
    ethaddr = get_ip_address('eth0')
    draw.rectangle((0, 0, width, height), fill=WHITE, outline=WHITE)
    draw.text((0, 0), '{c:s}'.format(c=ethaddr), fill=BLACK, font=font)
    # second text line: current wall-clock time HH:MM:SS
    draw.text((5, 10), '{h:02d}:{m:02d}:{s:02d}'.format(h=now.hour, m=now.minute, s=now.second), fill=BLACK, font=font)
    # Display image.
    disp.image(image)
    disp.display()
# main
if "__main__" == __name__:
    main()
|
from django.shortcuts import render
from django.conf import settings
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
from django.views.decorators.csrf import csrf_exemp | t
from fetcher import api
import req | uests
import json
def index(request):
    """Render the fetcher landing page with the current status."""
    context = {'result': api.status()}
    return render(request, 'fetcher/index.html', context)
def status(request):
    """Return fetcher status as JSON for JSON GETs; otherwise redirect home."""
    is_json_get = request.method == 'GET' and request.META.get('CONTENT_TYPE') == 'application/json'
    if not is_json_get:
        return HttpResponseRedirect(reverse('fetcher:index'))
    return HttpResponse(json.dumps(api.status()), content_type='application/json')
def log(request):
    """Return the fetcher log as JSON for JSON GETs; otherwise redirect home."""
    is_json_get = request.method == 'GET' and request.META.get('CONTENT_TYPE') == 'application/json'
    if not is_json_get:
        return HttpResponseRedirect(reverse('fetcher:index'))
    return HttpResponse(json.dumps(api.log()), content_type='application/json')
@csrf_exempt
def force_fetch(request):
    """Trigger an immediate fetch for JSON POSTs; otherwise redirect home."""
    is_json_post = request.method == 'POST' and request.META.get('CONTENT_TYPE') == 'application/json'
    if not is_json_post:
        return HttpResponseRedirect(reverse('fetcher:index'))
    return HttpResponse(json.dumps(api.force_fetch()), content_type='application/json')
@csrf_exempt
def force_sort(request):
    """Trigger an immediate sort for JSON POSTs; otherwise redirect home."""
    is_json_post = request.method == 'POST' and request.META.get('CONTENT_TYPE') == 'application/json'
    if not is_json_post:
        return HttpResponseRedirect(reverse('fetcher:index'))
    return HttpResponse(json.dumps(api.force_sort()), content_type='application/json')
|
from django import temp | late
register = template.Library()
@register.filter()
def is_assistant_or_coordinator(user, course):
    # Template filter usage: {{ user|is_assistant_or_coordinator:course }}
    # Delegates the permission check to the course model itself.
    return course.is_assistant_or_coordinator(user)
|
:{1}@{2}/command-api\
'.format(self._username, self._password, self._host))
except:
print('There was an error trying to connect')
# run CMD
def _runCMD(self, cli):
if self._connected:
return self._switch.runCmds(1, cli)
else:
self.connect()
return self._switch.runCmds(1, cli)
# run non JSON CMD
def _runCMDText(self, cli):
if self._connected:
return self._switch.runCmds(1, cli, 'text')
else:
self.connect()
return self._switch.runCmds(1, cli, 'text')
    @classmethod
    def createDataDict(cls, key, value):
        """Return a single-entry dict ``{key: value}`` (uniform getter output)."""
        return {key: value}
# ----------------------------------------------------------------
# INITIALIZE AND SETUP EAPI
# ----------------------------------------------------------------
# creates connection to switch
def connect(self):
try:
self._switch = self.__connectToSwitch()
self._connected = True
return self._switch
except:
print('Could not connect, error: {0}')
    def setLogin(self, username, password):
        """Store eAPI credentials used by subsequent connect() calls."""
        self._username = username
        self._password = password
    def initialize(self, host, name):
        """Set the target host address and the display name for this device."""
        self._host = host
        self._name = name
# ----------------------------------------------------------------
# FIND BASIC INFORMATION, INTERFACES, HOSTNAME, MAC...
# ----------------------------------------------------------------
    def getHost(self):
        """Return {'host': <address this object connects to>}."""
        return self.createDataDict('host', self._host)
    def _getVersionList(self):
        """Return the running code version split on '.' (e.g. ['4', '13', '6M']).

        Callers compare software versions numerically via int() on an index.
        """
        # lazily populate the cached 'show version' output
        if not self._version_info:
            self.getVersionInfo()
        version_list = self._version_info['version'].split('.')
        return version_list
    # getVersionInfo centralizes the 'show version' call that many getters
    # below repeated, so only one implementation queries the switch
    def getVersionInfo(self):
        ''' Cache the 'show version' output on self._version_info as a dict. '''
        # runCmds returns a list with one dict per command; keep the first
        version_info = self._runCMD(['show version'])
        self._version_info = version_info[0]
    # returns only dict of relevant information
    def getName(self):
        """Return {'name': <name given to initialize()>}."""
        return self.createDataDict('name', self._name)
def getVersion(self):
''' Returns the device running code version as a string '''
# checks if self._version_info is not empy
if not self._version_info:
self.getVersionInfo()
return self.createDataDict('version', self._version_info['version'])
# function returns a dictionary of the interfaces and their status
def getInterfacesStatus(self, mOptions=None):
response = self._runCMD(['show interfaces status'])[
0]['interfaceStatuses']
if mOptions:
for _ in enumerate(response):
for key in response[keys]:
print(response[keys][key])
return response
return response
def getInterfaces(self):
interfaces = self.getInterfacesStatus().keys()
return self.createDataDict('interfaces', interfaces)
def getPlatform(self):
if not self._version_info:
self.getVersionInfo()
return self.createDataDict('platform', self._version_info['modelName'])
def getSerialNumber(self):
if not self._version_info:
self.getVersionInfo()
serial = self._version_info['serialNumber']
serial_number = self.createDataDict('serial_number', serial)
if serial_number['serial_number'] == '':
non_serial = {'serial_number': 'not_found'}
return non_serial
else:
return serial_number
def getUptime(self):
output = self._runCMDText(['show uptime'])[0]['output']
# gets uptime if output is in H:M or (|) in "number Mins|Days"
up_split = re.split(r"up\s+?", output)
uptime = re.match(
r'(^(\d{1,3}:\d{1,3})|^(\d{1,3})\s\w+)', up_split[1]).group(0)
    def getCPU(self):
        """Return {'cpu_usage': <user-CPU %>} from 'show processes top once'."""
        output = self._runCMDText(['show processes top once'])[0]['output']
        # the "%us" (user CPU) figure from top's header line
        cpu = re.search(r"\d+\.\d*%(?=us)", output).group(0)
        return self.createDataDict('cpu_usage', cpu)
def getHostname(self):
''' Returns the device's none FQDN hostname '''
version_int = self._getVersionList()
if int(version_int[0]) >= 4 >= 13:
output = self._runCMD(['show hostname'])[0]['hostname']
return self.createDataDict('hostname', output)
else:
output = self._runCMDText(
['show lldp local-info'])[0]['output']
host = re.search(
r"(?<=System Name: \").*?(?=\.)", output).group(0)
def getFQDN(self):
'''
Returns the device's FQDN hostname.domain.suffix
has not been added to main.py yet, waiting to make sure
their's support accross platforms
'''
version_int = self._getVersionList()
if int(version_int[0]) >= 4 >= 13:
output = self._runCMD(["show hostname"])[0]['fqdn']
return self.createDataDict('fqdn', output)
else:
output = self._runCMDText(
['show lldp local-info'])[0]['output']
fqdn = re.search(
r"(?<=System Name: \").*?(?=\")", output).group(0)
    def getAAA(self):
        """Return the configured users from 'show aaa' (runs 'enable' first)."""
        # index [1] selects the 'show aaa' result (index [0] is 'enable')
        aaa = self._runCMD(['enable', 'show aaa'])[1]['users']
        return aaa
def getFreeMem(self):
# checks if self._version_info is not empy
if not self._version_info:
self.getVersionInfo()
return self.createDataDict('free_memory', self._version_info['memFree'])
def getTotalMem(self):
# checks if self._version_info is not empy
if not self._version_info:
self.getVersionInfo()
return self.cre | ateDataDict('total_memory',
self._version_info['memTotal'])
    def getSystemMac(self):
        """Return {'system_mac': <chassis MAC>}; always refreshes version info."""
        self.getVersionInfo()

        return self.createDataDict('system_mac', self._version_info['systemMacAddress'])
    def getDetails(self):
        """Collect every inventory getter into one flat details dict."""
        # moved getVersionInfo() so this information gets refreshed as well
        # and to remove the redundancy of __init__
        self.getVersionInfo()
        items = (
            self.getVersion(),
            self.getCPU(),
            self.getFreeMem(),
            self.getTotalMem(),
            self.getUptime(),
            self.getPlatform(),
            self.getSerialNumber(),
            self.getHost(),
            self.getHostname(),
            self.getName(),
            self.getSystemMac()
        )
        # each getter returns a one-entry dict; merge them all.
        # NOTE(review): getUptime/getHostname/getFQDN can return None in the
        # original code, which makes dict.update() raise TypeError here.
        details = {}
        for item in items:
            details.update(item)
        # details = {'hostname': hostname, 'connect_ip': connect_ip, 'platform': platform,
        # 'version': sh_ver, 'serial_number': serial_number, 'system_uptime': uptime,
        # 'cpu_utilization': cpu_utilization, 'free_system_memory': free_memory,
        # 'total_sytem_memory': total_memory, 'vendor': 'arista'}
        return details
# ------------- |
"""
Test objconfig.writer.Json
"""
from objconfig.writer import Json as JsonWriter
from objconfig.reader import Json as JsonReader
from objconfig.writer import AbstractWriter
from objconfig.writer import WriterInterface
import os
def test_emptyinstantiation_json():
    """A bare JsonWriter must implement the writer interface hierarchy."""
    writer = JsonWriter()
    for base, label in ((AbstractWriter, "AbstractWriter"), (WriterInterface, "WriterInterface")):
        assert isinstance(writer, base), "Json not child of %s" % label
def test_render_json():
    """Rendering a nested dict to JSON and parsing it back must round-trip."""
    source_conf = {
        "webhost" : "www.example.com",
        "database" : {
            "adapter" : "pdo_mysql",
            "params" : {
                "host" : "db.example.com",
                "username" : "dbuser",
                "password" : "secret",
                "dbname" : "dbproduction"
            }
        }
    }
    rendered = JsonWriter().toString(source_conf)
    parsed = JsonReader().fromString(rendered)
    assert source_conf == parsed, "Json improperly rendered"
def test_render_tofile_json():
    """Round-trip a config dict through JsonWriter.toFile / JsonReader.fromFile."""
    conf = {
        "webhost" : "www.example.com",
        "database" : {
            "adapter" : "pdo_mysql",
            "params" : {
                "host" : "db.example.com",
                "username" : "dbuser",
                "password" : "secret",
                "dbname" : "dbproduction"
            }
        }
    }
    # single scratch file beside this test module (computed once, not thrice)
    test_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test.json")
    writer = JsonWriter()
    writer.toFile(test_path, conf)
    reader = JsonReader()
    compconf = reader.fromFile(test_path)
    os.remove(test_path)
    assert conf == compconf, "Json improperly rendered in file"
|
e
def assert_quad(value_and_err, tabled_value, errTol=1.5e-8):
    """Check a quad() (value, abserr) pair against a tabled value."""
    computed, estimated_error = value_and_err
    # the result must match within the error bound quad itself reported
    assert_allclose(computed, tabled_value, rtol=0, atol=estimated_error)
    # ...and that reported bound must itself be small (skipped when disabled)
    if errTol is not None:
        assert_array_less(estimated_error, errTol)
class TestCtypesQuad(TestCase):
    """quad() driven by ctypes-wrapped libm callables (sin/cos/tan)."""
    @dec.skipif(_ctypes_missing, msg="Ctypes library could not be found")
    def setUp(self):
        # locate the platform's C math library
        if sys.platform == 'win32':
            if sys.version_info < (3, 5):
                file = ctypes.util.find_msvcrt()
            else:
                file = 'api-ms-win-crt-math-l1-1-0.dll'
        elif sys.platform == 'darwin':
            file = 'libm.dylib'
        else:
            file = 'libm.so'
        try:
            self.lib = ctypes.CDLL(file)
        except OSError:
            # This test doesn't work on some Linux platforms (Fedora for
            # example) that put an ld script in libm.so - see gh-5370
            self.skipTest("Ctypes can't import libm.so")
        restype = ctypes.c_double
        argtypes = (ctypes.c_double,)
        # declare double(double) prototypes so ctypes marshals correctly
        for name in ['sin', 'cos', 'tan']:
            func = getattr(self.lib, name)
            func.restype = restype
            func.argtypes = argtypes
    @dec.skipif(_ctypes_missing, msg="Ctypes library could not be found")
    def test_typical(self):
        # ctypes integrands must agree with their Python math counterparts
        assert_quad(quad(self.lib.sin, 0, 5), quad(math.sin, 0, 5)[0])
        assert_quad(quad(self.lib.cos, 0, 5), quad(math.cos, 0, 5)[0])
        assert_quad(quad(self.lib.tan, 0, 1), quad(math.tan, 0, 1)[0])
    # @dec.skipif(_ctypes_missing, msg="Ctypes library could not be found")
    # This doesn't seem to always work. Need a better way to figure out
    # whether the fast path is called.
    @dec.knownfailureif(True, msg="Unreliable test, see ticket 1684.")
    def test_improvement(self):
        import time
        start = time.time()
        for i in xrange(100):
            quad(self.lib.sin, 0, 100)
        fast = time.time() - start
        start = time.time()
        for i in xrange(100):
            quad(math.sin, 0, 100)
        slow = time.time() - start
        # the ctypes fast path should be at least twice as quick
        assert_(fast < 0.5 * slow, (fast, slow))
class TestMultivariateCtypesQuad(TestCase):
    """quad() driven by compiled multivariate test integrands loaded via ctypes."""
    @dec.skipif(_ctypes_missing or _ctypes_multivariate_fail,
                msg="Compiled test functions not loaded")
    def setUp(self):
        self.lib = ctypes.CDLL(clib_test.__file__)
        restype = ctypes.c_double
        argtypes = (ctypes.c_int, ctypes.c_double)
        # declare double(int, double) prototypes for the test integrands
        for name in ['_multivariate_typical', '_multivariate_indefinite',
                     '_multivariate_sin']:
            func = getattr(self.lib, name)
            func.restype = restype
            func.argtypes = argtypes
    @dec.skipif(_ctypes_missing or _ctypes_multivariate_fail,
                msg="Compiled test functions not loaded")
    def test_typical(self):
        # 1) Typical function with two extra arguments:
        assert_quad(quad(self.lib._multivariate_typical, 0, pi, (2, 1.8)),
                    0.30614353532540296487)
    @dec.skipif(_ctypes_missing or _ctypes_multivariate_fail,
                msg="Compiled test functions not loaded")
    def test_indefinite(self):
        # 2) Infinite integration limits --- Euler's constant
        assert_quad(quad(self.lib._multivariate_indefinite, 0, Inf),
                    0.577215664901532860606512)
    @dec.skipif(_ctypes_missing or _ctypes_multivariate_fail,
                msg="Compiled test functions not loaded")
    def test_threadsafety(self):
        # Ensure multivariate ctypes are threadsafe
        def threadsafety(y):
            return y + quad(self.lib._multivariate_sin, 0, 1)[0]
        assert_quad(quad(threadsafety, 0, 1), 0.9596976941318602)
    @dec.skipif(_ctypes_missing or _ctypes_multivariate_fail,
                msg="Compiled test functions not loaded")
    def test_improvement(self):
        def myfunc(x):  # Euler's constant integrand
            return -exp(-x) * log(x)
        import time
        start = time.time()
        for i in xrange(20):
            quad(self.lib._multivariate_indefinite, 0, 100)
        fast = time.time() - start
        start = time.time()
        for i in xrange(20):
            quad(myfunc, 0, 100)
        slow = time.time() - start
        # 2+ times faster speeds generated by nontrivial ctypes
        # function (single variable)
        assert_(fast < 0.5 * slow, (fast, slow))
class TestQuad(TestCase):
    def test_typical(self):
        """Finite-interval quad with extra args (Bessel-type integrand)."""
        # 1) Typical function with two extra arguments:
        def myfunc(x, n, z):  # Bessel function integrand
            return cos(n * x - z * sin(x)) / pi
        assert_quad(quad(myfunc, 0, pi, (2, 1.8)), 0.30614353532540296487)
    def test_indefinite(self):
        """Infinite upper limit: the integral equals Euler's constant."""
        # 2) Infinite integration limits --- Euler's constant
        def myfunc(x):  # Euler's constant integrand
            return -exp(-x) * log(x)
        assert_quad(quad(myfunc, 0, Inf), 0.577215664901532860606512)
    def test_singular(self):
        """Piecewise integrand with breakpoints supplied via points=[...]."""
        # 3) Singular points in region of integration.
        def myfunc(x):
            if 0 < x < 2.5:
                return sin(x)
            elif 2.5 <= x <= 5.0:
                return exp(-x)
            else:
                return 0.0
        assert_quad(quad(myfunc, 0, 10, points=[2.5, 5.0]),
                    1 - cos(2.5) + exp(-2.5) - exp(-5.0))
    def test_sine_weighted_finite(self):
        """weight='sin' over a finite interval, checked against the closed form."""
        # 4) Sine weighted integral (finite limits)
        def myfunc(x, a):
            return exp(a * (x - 1))
        ome = 2.0 ** 3.4
        assert_quad(quad(myfunc, 0, 1, args=20, weight='sin', wvar=ome),
                    (20 * sin(ome) - ome * cos(ome) + ome * exp(-20)) / (20 ** 2 + ome ** 2))
def test_sine_weighted_infini | te(self):
# 5) Sine weighted integral (infinite limits)
def myfunc(x, a):
return exp(-x * a)
a = 4.0
ome = 3.0
assert_quad(quad(myfunc, 0, Inf, args=a, weight='sin', wvar=ome),
ome / (a ** 2 + ome ** 2))
def test_cosine_weighted_infinite( | self):
# 6) Cosine weighted integral (negative infinite limits)
def myfunc(x, a):
return exp(x * a)
a = 2.5
ome = 2.3
assert_quad(quad(myfunc, -Inf, 0, args=a, weight='cos', wvar=ome),
a / (a ** 2 + ome ** 2))
    def test_algebraic_log_weight(self):
        """weight='alg' with endpoint singular factors (x+1)^-0.5 (1-x)^-0.5."""
        # 6) Algebraic-logarithmic weight.
        def myfunc(x, a):
            return 1 / (1 + x + 2 ** (-a))
        a = 1.5
        assert_quad(quad(myfunc, -1, 1, args=a, weight='alg',
                         wvar=(-0.5, -0.5)),
                    pi / sqrt((1 + 2 ** (-a)) ** 2 - 1))
    def test_cauchypv_weight(self):
        """weight='cauchy': principal value with pole at wvar=2.0."""
        # 7) Cauchy prinicpal value weighting w(x) = 1/(x-c)
        def myfunc(x, a):
            return 2.0 ** (-a) / ((x - 1) ** 2 + 4.0 ** (-a))
        a = 0.4
        tabledValue = ((2.0 ** (-0.4) * log(1.5) -
                        2.0 ** (-1.4) * log((4.0 ** (-a) + 16) / (4.0 ** (-a) + 1)) -
                        arctan(2.0 ** (a + 2)) -
                        arctan(2.0 ** a)) /
                       (4.0 ** (-a) + 1))
        assert_quad(quad(myfunc, 0, 5, args=0.4, weight='cauchy', wvar=2.0),
                    tabledValue, errTol=1.9e-8)
    def test_double_integral(self):
        """dblquad over a triangle-like region with callable y-limits."""
        # 8) Double Integral test
        def simpfunc(y, x):  # Note order of arguments.
            return x + y
        a, b = 1.0, 2.0
        assert_quad(dblquad(simpfunc, a, b, lambda x: x, lambda x: 2 * x),
                    5 / 6.0 * (b ** 3.0 - a ** 3.0))
    def test_double_integral2(self):
        """dblquad forwarding extra parameters via args=."""
        def func(x0, x1, t0, t1):
            return x0 + x1 + t0 + t1
        g = lambda x: x
        h = lambda x: 2 * x
        args = 1, 2
        assert_quad(dblquad(func, 1, 2, g, h, args=args), 35. / 6 + 9 * .5)
def test_triple_integral(self):
# 9) Triple Integral test
def simpfunc(z, y, x, t): # Note order of arguments.
return (x + y + z) * t
a, b = 1.0, 2.0
assert_quad(tplquad(simpfunc, a, b,
lambda x: x, lambda x: 2 * x,
lambda x, y: x - y, lambda x, y: x + y,
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Base package for building Invenio application factories."""
import os
from setuptools import find_packages, setup
readme = open('README.rst').read()
history = open('CHANGES.rst').read()
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join('invenio_base', 'version.py'), 'rt') as fp:
    exec(fp.read(), g)
version = g['__version__']
tests_require = [
    'check-manifest>=0.25',
    'coverage>=4.0',
    'isort>=4.2.2',
    'mock>=1.3.0',
    'pydocstyle>=2.0.0',
    'pytest-cov>=1.8.0',
    'pytest-pep8>=1.0.6',
    'pytest>=2.8.0',
    'invenio-config>=1.0.0',
]
extras_require = {
    'docs': [
        'Sphinx>=1.4.2',
    ],
    'tests': tests_require,
}
# the 'all' extra aggregates every optional dependency group above
extras_require['all'] = []
for reqs in extras_require.values():
    extras_require['all'].extend(reqs)
setup_requires = [
    'pytest-runner>=2.6.2',
]
install_requires = [
    'blinker>=1.4',
    'cookiecutter>=1.2.1',
    'Flask>=0.11.1',
]
packages = find_packages()
setup(
    name='invenio-base',
    version=version,
    # description comes from this file's module docstring
    description=__doc__,
    long_description=readme + '\n\n' + history,
    keywords='invenio',
    license='MIT',
    author='CERN',
    author_email='info@inveniosoftware.org',
    url='https://github.com/inveniosoftware/invenio-base',
    packages=packages,
    zip_safe=False,
    include_package_data=True,
    platforms='any',
    entry_points={
        'console_scripts': [
            'inveniomanage = invenio_base.__main__:cli',
        ],
        'flask.commands': [
            'instance = invenio_base.cli:instance',
        ],
    },
    extras_require=extras_require,
    install_requires=install_requires,
    setup_requires=setup_requires,
    tests_require=tests_require,
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Development Status :: 5 - Production/Stable',
    ],
)
|
for x in undefined(): # ty<car | et>pe: int
pass | |
"""
MooseGesture Test application
Al Sweigart al@coffeeghost.net
http://coffeeghost.net/2011/05/09/moo | segesture-python-mouse-gestures-module
Run the app and then draw by dragging the mouse. When you release the mouse
button, the gesture you drew will be identified.
This scri | pt requires the MooseGesture library, which you can download from here:
http://coffeeghost.net/moosegesture.py
And also requires Pygame:
http://pygame.org
Copyright 2011, BSD-license.
"""
import pygame, sys, os
from pygame.locals import *
sys.path.append(os.path.abspath('..'))
import moosegesture
# setup constants
WINDOWWIDTH = 600
WINDOWHEIGHT = 600
FPS = 40
TEXTCOLOR = (255, 255, 255) # white
BACKGROUNDCOLOR = (0, 0, 0)# black
POINTSCOLOR = (255, 0, 0) # red
LINECOLOR = (255, 165, 0) # orange (stroke not yet identified)
CARDINALCOLOR = (0, 255, 0) # green (up/down/left/right strokes)
DIAGONALCOLOR = (0, 0, 255) # blue (diagonal strokes)
# set up pygame, the window, and the mouse cursor
pygame.init()
mainClock = pygame.time.Clock()
windowSurface = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
pygame.display.set_caption('Mouse Gesture Test')
points = []        # mouse positions of the line currently being drawn
mouseDown = False  # True while the button is held (dragging)
font = pygame.font.SysFont(None, 24)
strokeText = ''    # text of the last identified gesture ('' = none yet)
while True: # main loop
    for event in pygame.event.get():
        # handle all pygame events
        if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
            pygame.quit()
            sys.exit()
        if event.type == MOUSEBUTTONDOWN:
            # on mouse down, erase the previous line and start drawing a new one
            mouseDown = True
            points = []
            # BUG FIX: this assignment was misspelled 'storkeText', so the
            # previous gesture's label kept showing while drawing a new line.
            # (A loop that normalized `points` to start at (0, 0) used to run
            # here, but its result was discarded by `points = []`, so the dead
            # code has been removed.)
            strokeText = ''
        if event.type == MOUSEBUTTONUP:
            # try to identify the gesture when the mouse dragging stops
            mouseDown = False
            strokes = moosegesture.getGesture(points)
            segments = moosegesture.getSegments(points)
            strokeText = ' '.join(strokes)
            textobj = font.render(strokeText, 1, (255,255,255))
            textrect = textobj.get_rect()
            textrect.topleft = (10, WINDOWHEIGHT - 30)
        if event.type == MOUSEMOTION and mouseDown:
            # record the point if the mouse is dragging
            points.append( (event.pos[0], event.pos[1]) )
    # Draw the window.
    windowSurface.fill(BACKGROUNDCOLOR)
    if strokeText:
        # draw the identified strokes of the last line
        windowSurface.blit(textobj, textrect)
    # draw points
    for x, y in points:
        pygame.draw.circle(windowSurface, POINTSCOLOR, (x, y), 2)
    if mouseDown:
        # draw strokes as unidentified while dragging the mouse
        if len(points) > 1:
            pygame.draw.lines(windowSurface, LINECOLOR, False, points)
    else:
        # draw the identified strokes, color-coded per stroke direction
        segNum = 0
        curColor = LINECOLOR
        for p in range(len(points)-1):
            if segNum < len(segments) and segments[segNum][0] == p:
                # start of a new stroke.  NOTE(review): assumes getGesture()
                # returns numpad-style direction codes (2/4/6/8 cardinal,
                # 1/3/7/9 diagonal) -- confirm against the moosegesture
                # version in use; newer releases return strings instead.
                if strokes[segNum] in [2, 4, 6, 8]:
                    curColor = CARDINALCOLOR
                elif strokes[segNum] in [1, 3, 7, 9]:
                    curColor = DIAGONALCOLOR
            pygame.draw.line(windowSurface, curColor, points[p], points[p+1])
            if segNum < len(segments) and segments[segNum][1] == p:
                # end of a stroke; revert to the "unidentified" color
                curColor = LINECOLOR
                segNum += 1
    pygame.display.update()
    mainClock.tick(FPS)
"""
Support for Neato Connected Vacuums switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.neato/
"""
import logging
import requests
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.components.neato import NEATO_ROBOTS, NEATO_LOGIN
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['neato']
# Internal switch-type identifiers, used as keys into SWITCH_TYPES and as
# the `type` attribute of each entity.
SWITCH_TYPE_CLEAN = 'clean'
# NOTE(review): this value is misspelled ('scedule') but it is used
# consistently as an internal key throughout this module; left unchanged so
# nothing keyed on the existing value breaks.
SWITCH_TYPE_SCHEDULE = 'scedule'
# Maps switch type -> [human-readable name suffix for the entity].
SWITCH_TYPES = {
    SWITCH_TYPE_CLEAN: ['Clean'],
    SWITCH_TYPE_SCHEDULE: ['Schedule']
}
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Neato switches.

    Creates one NeatoConnectedSwitch per (robot, switch type) pair and
    registers them all with Home Assistant.
    """
    switches = [
        NeatoConnectedSwitch(hass, robot, type_name)
        for robot in hass.data[NEATO_ROBOTS]
        for type_name in SWITCH_TYPES
    ]
    _LOGGER.debug("Adding switches %s", switches)
    add_devices(switches)
class NeatoConnectedSwitch(ToggleEntity):
    """Neato Connected Switches.

    One entity per (robot, switch type):
      * clean  -- on while the robot is actively cleaning; turning it on
        starts a cleaning run, turning it off pauses and docks the robot.
      * schedule (internal key 'scedule') -- reflects and toggles the
        robot's cleaning schedule.
    """

    def __init__(self, hass, robot, switch_type):
        """Initialize the Neato Connected switches."""
        self.type = switch_type
        self.robot = robot
        self.neato = hass.data[NEATO_LOGIN]
        self._robot_name = '{} {}'.format(
            self.robot.name, SWITCH_TYPES[self.type][0])
        try:
            self._state = self.robot.state
        except (requests.exceptions.ConnectionError,
                requests.exceptions.HTTPError) as ex:
            # Robot may be unreachable at setup time; entity starts
            # unavailable and will recover on a later update().
            _LOGGER.warning("Neato connection error: %s", ex)
            self._state = None
        self._schedule_state = None
        self._clean_state = None

    def update(self):
        """Update the states of Neato switches."""
        _LOGGER.debug("Running switch update")
        self.neato.update_robots()
        try:
            self._state = self.robot.state
        except (requests.exceptions.ConnectionError,
                requests.exceptions.HTTPError) as ex:
            _LOGGER.warning("Neato connection error: %s", ex)
            self._state = None
            return
        _LOGGER.debug('self._state=%s', self._state)
        if self.type == SWITCH_TYPE_CLEAN:
            # BUG FIX: the original condition was
            #   action == 1 or action == 2 or action == 3 and state == 2
            # where 'and' binds tighter than 'or', so the busy-state check
            # (state == 2) only applied to action == 3.  The intent is:
            # any cleaning action (1, 2 or 3) while the robot is busy (2).
            if (self.robot.state['action'] in (1, 2, 3) and
                    self.robot.state['state'] == 2):
                self._clean_state = STATE_ON
            else:
                self._clean_state = STATE_OFF
            _LOGGER.debug("Clean state: %s", self._clean_state)
        if self.type == SWITCH_TYPE_SCHEDULE:
            _LOGGER.debug("State: %s", self._state)
            if self.robot.schedule_enabled:
                self._schedule_state = STATE_ON
            else:
                self._schedule_state = STATE_OFF
            _LOGGER.debug("Schedule state: %s", self._schedule_state)

    @property
    def name(self):
        """Return the name of the switch."""
        return self._robot_name

    @property
    def available(self):
        """Return True if entity is available."""
        # _state is None after a connection error; return a real bool
        # instead of leaking the raw state dict to callers.
        return bool(self._state)

    @property
    def is_on(self):
        """Return true if switch is on."""
        if self.type == SWITCH_TYPE_CLEAN:
            return self._clean_state == STATE_ON
        elif self.type == SWITCH_TYPE_SCHEDULE:
            return self._schedule_state == STATE_ON

    def turn_on(self, **kwargs):
        """Turn the switch on."""
        if self.type == SWITCH_TYPE_CLEAN:
            self.robot.start_cleaning()
        elif self.type == SWITCH_TYPE_SCHEDULE:
            self.robot.enable_schedule()

    def turn_off(self, **kwargs):
        """Turn the switch off."""
        if self.type == SWITCH_TYPE_CLEAN:
            # Pause first, then send the robot back to its charging base.
            self.robot.pause_cleaning()
            self.robot.send_to_base()
        elif self.type == SWITCH_TYPE_SCHEDULE:
            self.robot.disable_schedule()
|
ntation build configuration file
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extension modules; only the OpenStack docs theme is needed here.
extensions = ['openstackdocstheme']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Compute API Guide'
bug_tag = u'api-guide'
repository_name = 'openstack/nova'
bug_project = 'nova'
# Must set this variable to include year, month, day, hours, and minutes.
html_last_updated_fmt = '%Y-%m-%d %H:%M'
copyright = u'2015, OpenStack contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1.0'
# The full version, including alpha/beta/rc tags.
release = '2.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = [openstackdocstheme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# NOTE: html_last_updated_fmt is already assigned (with this same format) in
# the project-information section above; the redundant duplicate assignment
# that used to live here has been removed.
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
# Output file base name for HTML help builder.
htmlhelp_basename = 'compute-api-guide'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'ComputeAPI.tex', u'Compute API Documentation',
     u'OpenStack contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Manual page: (source start file, name, description, authors, section 1).
man_pages = [
    ('index', 'computeapi', u'Compute API Documentation',
     [u'OpenStack contributors'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'ComputeAPIGuide', u'Compute API Guide',
     u'OpenStack contributors', 'APIGuide',
     'This guide teaches OpenStack Compute service users concepts about '
     'managing resources in an OpenStack cloud with the Compute API.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
    """Kirppu migration: tighten field definitions on clerk/item/uitext.

    All operations are AlterField, so only validation rules and metadata
    change -- no data migration is performed.
    """

    dependencies = [
        ('kirppu', '0006_item_lost_property'),
    ]

    operations = [
        # Clerk.access_key: optional, unique 14-hex-character access code.
        migrations.AlterField(
            model_name='clerk',
            name='access_key',
            field=models.CharField(null=True, validators=[django.core.validators.RegexValidator(b'^[0-9a-fA-F]{14}$', message=b'Must be 14 hex chars.')], max_length=128, blank=True, help_text='Access code assigned to the clerk.', unique=True, verbose_name='Access key value'),
            preserve_default=True,
        ),
        # Item.itemtype: fixed choice list, defaulting to 'other'.
        migrations.AlterField(
            model_name='item',
            name='itemtype',
            field=models.CharField(default=b'other', max_length=24, choices=[(b'manga-finnish', 'Manga (Finnish)'), (b'manga-english', 'Manga (English)'), (b'manga-other', 'Manga (other language)'), (b'book', 'Book'), (b'magazine', 'Magazine'), (b'movie-tv', 'Movie/TV-series'), (b'game', 'Game'), (b'figurine-plushie', 'Figurine/Plushie'), (b'clothing', 'Clothing'), (b'other', 'Other')]),
            preserve_default=True,
        ),
        # UIText.identifier: unique key used to look the text item up.
        migrations.AlterField(
            model_name='uitext',
            name='identifier',
            field=models.CharField(help_text='Identifier of the text item', unique=True, max_length=16, blank=True),
            preserve_default=True,
        ),
        # UIText.text: the UI text body itself.
        migrations.AlterField(
            model_name='uitext',
            name='text',
            field=models.CharField(help_text='Text item in UI', max_length=16384),
            preserve_default=True,
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.