repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
|---|---|---|---|---|---|
pk400/catering
|
catering/migrations/0001_initial.py
|
1
|
1048
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-22 21:44
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: creates the blog-style ``Post`` model."""

    # This is the first migration of the app.
    initial = True

    dependencies = [
        # Depends on whatever model the project configures as AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                # created_date defaults to "now" at row-creation time.
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                # published_date stays empty until the post is published.
                ('published_date', models.DateTimeField(blank=True, null=True)),
                # Deleting the author cascades to their posts.
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
mit
|
sinbazhou/odoo
|
addons/crm_profiling/crm_profiling.py
|
333
|
10527
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.osv import orm
from openerp.tools.translate import _
def _get_answers(cr, uid, ids):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm profiling’s IDs """
query = """
select distinct(answer)
from profile_question_yes_rel
where profile IN %s"""
cr.execute(query, (tuple(ids),))
ans_yes = [x[0] for x in cr.fetchall()]
query = """
select distinct(answer)
from profile_question_no_rel
where profile IN %s"""
cr.execute(query, (tuple(ids),))
ans_no = [x[0] for x in cr.fetchall()]
return [ans_yes, ans_no]
def _get_parents(cr, uid, ids):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm profiling’s IDs
@return: Get parents's Id """
ids_to_check = ids
cr.execute("""
select distinct(parent_id)
from crm_segmentation
where parent_id is not null
and id IN %s""",(tuple(ids),))
parent_ids = [x[0] for x in cr.fetchall()]
trigger = False
for x in parent_ids:
if x not in ids_to_check:
ids_to_check.append(x)
trigger = True
if trigger:
ids_to_check = _get_parents(cr, uid, ids_to_check)
return ids_to_check
def test_prof(cr, uid, seg_id, pid, answers_ids=None):
    """ return True if the partner pid fetch the segmentation rule seg_id
    @param cr: the current row, from the database cursor,
    @param uid: the current user's ID for security checks,
    @param seg_id: Segmentation's ID
    @param pid: partner's ID
    @param answers_ids: Answers's IDs (``None`` means "no answers given")
    """
    if answers_ids is None:
        # Bug fix: the original iterated over / membership-tested
        # ``answers_ids`` without guarding against the ``None`` default,
        # raising TypeError whenever the default was used.
        answers_ids = []
    # The rule matches only if the partner also matches every ancestor
    # profile, so gather the whole chain first.
    ids_to_check = _get_parents(cr, uid, [seg_id])
    [yes_answers, no_answers] = _get_answers(cr, uid, ids_to_check)
    # Every required ("yes") answer must have been given...
    for y_ans in yes_answers:
        if y_ans not in answers_ids:
            return False
    # ...and none of the excluding ("no") answers may have been given.
    for ans in answers_ids:
        if ans in no_answers:
            return False
    return True
def _recompute_categ(self, cr, uid, pid, answers_ids):
    """Recompute the category ids of a partner from the profiling rules.

    @param self: The object pointer
    @param cr: the current row, from the database cursor,
    @param uid: the current user's ID for security checks,
    @param pid: partner's ID
    @param answers_ids: Answers's IDs
    @return: list of category ids the partner should belong to
    """
    # Start from the categories the partner already has through
    # non-exclusive (or non-profiling) segmentations: those are kept.
    cr.execute('''
select r.category_id
from res_partner_res_partner_category_rel r left join crm_segmentation s on (r.category_id = s.categ_id)
where r.partner_id = %s and (s.exclusif = false or s.exclusif is null)
''', (pid,))
    ok = [row[0] for row in cr.fetchall()]
    # Consider only active profiling segmentations whose category is not
    # already retained above.
    query = '''
select id, categ_id
from crm_segmentation
where profiling_active = true'''
    if ok != []:
        # Values are ints fetched from the database, so interpolation here
        # is injection-safe.
        query = query + ''' and categ_id not in(%s)''' % ','.join([str(i) for i in ok])
    query = query + ''' order by id '''
    cr.execute(query)
    for segm_id, cat_id in cr.fetchall():
        if test_prof(cr, uid, segm_id, pid, answers_ids):
            ok.append(cat_id)
    return ok
class question(osv.osv):
    """ Question """
    # A profiling question together with the answers a partner can pick.
    _name="crm_profiling.question"
    _description= "Question"
    _columns={
        # Question label shown to the user.
        'name': fields.char("Question", required=True),
        # Possible answers; duplicated along with the question (copy=True).
        'answers_ids': fields.one2many("crm_profiling.answer", "question_id", "Available Answers", copy=True),
    }
class questionnaire(osv.osv):
    """ Questionnaire """
    # A named set of profiling questions presented together.
    _name="crm_profiling.questionnaire"
    _description= "Questionnaire"
    _columns = {
        'name': fields.char("Questionnaire", required=True),
        'description':fields.text("Description", required=True),
        # Questions included in this questionnaire (shared m2m table).
        'questions_ids': fields.many2many('crm_profiling.question','profile_questionnaire_quest_rel',\
            'questionnaire', 'question', "Questions"),
    }
class answer(osv.osv):
    # One possible answer to a profiling question.
    _name="crm_profiling.answer"
    _description="Answer"
    _columns={
        "name": fields.char("Answer", required=True),
        # The question this answer belongs to.
        "question_id": fields.many2one('crm_profiling.question',"Question"),
    }
class partner(osv.osv):
    # Extend res.partner with the profiling answers the partner gave.
    _inherit="res.partner"
    _columns={
        # Answers selected by this partner across all questionnaires.
        "answers_ids": fields.many2many("crm_profiling.answer","partner_question_rel",\
            "partner","answer","Answers"),
    }

    def _questionnaire_compute(self, cr, uid, answers, context=None):
        """
        Merge ``answers`` with the answers already stored for the active
        partner and write the union back (triggering category recompute
        through ``write`` below).

        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param answers: list of answer ids to add (appended to in place)
        @param context: A standard dictionary for contextual values """
        # NOTE(review): assumes context['active_id'] holds the partner id —
        # callers must invoke this from a partner form context.
        partner_id = context.get('active_id')
        query = "select answer from partner_question_rel where partner=%s"
        cr.execute(query, (partner_id,))
        for x in cr.fetchall():
            answers.append(x[0])
        # [6, 0, ids] replaces the whole many2many with the merged list.
        self.write(cr, uid, [partner_id], {'answers_ids': [[6, 0, answers]]}, context=context)
        return {}

    def write(self, cr, uid, ids, vals, context=None):
        """
        Intercept writes: when the profiling answers change, recompute the
        partner's categories from the segmentation rules before saving.

        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of crm profiling's IDs
        @param context: A standard dictionary for contextual values """
        if 'answers_ids' in vals:
            # NOTE(review): assumes vals['answers_ids'] is a single
            # [6, 0, ids] command and recomputes only for ids[0] — verify
            # against callers before relying on multi-id writes.
            vals['category_id']=[[6, 0, _recompute_categ(self, cr, uid, ids[0], vals['answers_ids'][0][2])]]
        return super(partner, self).write(cr, uid, ids, vals, context=context)
class crm_segmentation(osv.osv):
    """ CRM Segmentation """
    # Extends crm.segmentation with questionnaire-based profiling criteria
    # and a parent/child hierarchy of profiles.
    _inherit="crm.segmentation"
    _columns={
        # Answers a partner MUST have given to match this profile.
        "answer_yes": fields.many2many("crm_profiling.answer","profile_question_yes_rel",\
            "profile","answer","Included Answers"),
        # Answers that EXCLUDE a partner from this profile.
        "answer_no": fields.many2many("crm_profiling.answer","profile_question_no_rel",\
            "profile","answer","Excluded Answers"),
        'parent_id': fields.many2one('crm.segmentation', 'Parent Profile'),
        'child_ids': fields.one2many('crm.segmentation', 'parent_id', 'Child Profiles'),
        'profiling_active': fields.boolean('Use The Profiling Rules', help='Check\
 this box if you want to use this tab as part of the \
 segmentation rule. If not checked, the criteria beneath will be ignored')
    }
    _constraints = [
        # Prevent cycles in the parent/child profile hierarchy.
        (osv.osv._check_recursion, 'Error ! You cannot create recursive profiles.', ['parent_id'])
    ]

    def process_continue(self, cr, uid, ids, start=False):
        """
        Run (or resume) the segmentation: compute which partners match each
        segmentation's criteria and link them to its category.

        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of crm segmentation's IDs """
        partner_obj = self.pool.get('res.partner')
        categs = self.read(cr,uid,ids,['categ_id','exclusif','partner_id', \
            'sales_purchase_active', 'profiling_active'])
        for categ in categs:
            if start:
                if categ['exclusif']:
                    # Exclusive profile: wipe all existing links to the
                    # category before recomputing membership from scratch.
                    cr.execute('delete from res_partner_res_partner_category_rel where \
category_id=%s', (categ['categ_id'][0],))
                    partner_obj.invalidate_cache(cr, uid, ['category_id'])
            id = categ['id']
            # Start from every partner, then filter down.
            cr.execute('select id from res_partner order by id ')
            partners = [x[0] for x in cr.fetchall()]
            if categ['sales_purchase_active']:
                # Filter by the sales/purchase criteria lines.
                to_remove_list=[]
                cr.execute('select id from crm_segmentation_line where segmentation_id=%s', (id,))
                line_ids = [x[0] for x in cr.fetchall()]
                for pid in partners:
                    if (not self.pool.get('crm.segmentation.line').test(cr, uid, line_ids, pid)):
                        to_remove_list.append(pid)
                for pid in to_remove_list:
                    partners.remove(pid)
            if categ['profiling_active']:
                # Filter by the questionnaire answers (yes/no rules).
                to_remove_list = []
                for pid in partners:
                    cr.execute('select distinct(answer) from partner_question_rel where partner=%s',(pid,))
                    answers_ids = [x[0] for x in cr.fetchall()]
                    if (not test_prof(cr, uid, id, pid, answers_ids)):
                        to_remove_list.append(pid)
                for pid in to_remove_list:
                    partners.remove(pid)
            # Link every remaining partner to the category (skip those
            # already linked).
            for partner in partner_obj.browse(cr, uid, partners):
                category_ids = [categ_id.id for categ_id in partner.category_id]
                if categ['categ_id'][0] not in category_ids:
                    cr.execute('insert into res_partner_res_partner_category_rel (category_id,partner_id) values (%s,%s)', (categ['categ_id'][0],partner.id))
                    partner_obj.invalidate_cache(cr, uid, ['category_id'], [partner.id])
            self.write(cr, uid, [id], {'state':'not running', 'partner_id':0})
        return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
alfayez/gnuradio
|
gr-digital/python/qa_lms_equalizer.py
|
16
|
1728
|
#!/usr/bin/env python
#
# Copyright 2006,2007,2010,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import digital_swig
class test_lms_dd_equalizer(gr_unittest.TestCase):
    """QA tests for the LMS decision-directed equalizer block."""

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def transform(self, src_data, gain, const):
        # Run src_data through a 4-tap LMS DD equalizer (sps=1) and return
        # the equalized output samples.
        SRC = gr.vector_source_c(src_data, False)
        EQU = digital_swig.lms_dd_equalizer_cc(4, gain, 1, const.base())
        DST = gr.vector_sink_c()
        self.tb.connect(SRC, EQU, DST)
        self.tb.run()
        return DST.data()

    def test_001_identity(self):
        # Constant modulus signal so no adjustments
        const = digital_swig.constellation_qpsk()
        src_data = const.points()*1000
        N = 100 # settling time
        expected_data = src_data[N:]
        result = self.transform(src_data, 0.1, const)[N:]
        # Equalizing an already-clean constellation should be (nearly) the
        # identity once the filter has settled.
        self.assertComplexTuplesAlmostEqual(expected_data, result, 5)
if __name__ == "__main__":
    # Run the QA suite, emitting an XML report for GNU Radio's test harness.
    gr_unittest.run(test_lms_dd_equalizer, "test_lms_dd_equalizer.xml")
|
gpl-3.0
|
ChonchoFronto/sarah
|
lambda/numpy/tests/test_ctypeslib.py
|
87
|
4097
|
from __future__ import division, absolute_import, print_function
import sys
import numpy as np
from numpy.ctypeslib import ndpointer, load_library
from numpy.distutils.misc_util import get_shared_lib_extension
from numpy.testing import TestCase, run_module_suite, dec
# Probe once at import time whether ctypes can load numpy's multiarray
# shared library; the tests below skip themselves when it cannot.
try:
    cdll = load_library('multiarray', np.core.multiarray.__file__)
    _HAS_CTYPE = True
except ImportError:
    _HAS_CTYPE = False
class TestLoadLibrary(TestCase):
    """Tests for numpy.ctypeslib.load_library."""

    @dec.skipif(not _HAS_CTYPE,
                "ctypes not available on this python installation")
    @dec.knownfailureif(sys.platform ==
                        'cygwin', "This test is known to fail on cygwin")
    def test_basic(self):
        try:
            # Should succeed
            load_library('multiarray', np.core.multiarray.__file__)
        except ImportError as e:
            # ctypes itself may be missing on minimal builds; report and
            # treat as a skip rather than a failure.
            msg = ("ctypes is not available on this python: skipping the test"
                   " (import error was: %s)" % str(e))
            print(msg)

    @dec.skipif(not _HAS_CTYPE,
                "ctypes not available on this python installation")
    @dec.knownfailureif(sys.platform ==
                        'cygwin', "This test is known to fail on cygwin")
    def test_basic2(self):
        # Regression for #801: load_library with a full library name
        # (including extension) does not work.
        try:
            try:
                so = get_shared_lib_extension(is_python_ext=True)
                # Should succeed
                load_library('multiarray%s' % so, np.core.multiarray.__file__)
            except ImportError:
                print("No distutils available, skipping test.")
        except ImportError as e:
            msg = ("ctypes is not available on this python: skipping the test"
                   " (import error was: %s)" % str(e))
            print(msg)
class TestNdpointer(TestCase):
    """Tests for numpy.ctypeslib.ndpointer restrictions
    (dtype, ndim, shape and flags based argument validation)."""

    def test_dtype(self):
        # Exact dtype matches are accepted.
        dt = np.intc
        p = ndpointer(dtype=dt)
        self.assertTrue(p.from_param(np.array([1], dt)))
        dt = '<i4'
        p = ndpointer(dtype=dt)
        self.assertTrue(p.from_param(np.array([1], dt)))
        # A byte-swapped array of the same dtype must be rejected.
        dt = np.dtype('>i4')
        p = ndpointer(dtype=dt)
        p.from_param(np.array([1], dt))
        self.assertRaises(TypeError, p.from_param,
                          np.array([1], dt.newbyteorder('swap')))
        # Structured dtypes: equal descriptors are interchangeable.
        dtnames = ['x', 'y']
        dtformats = [np.intc, np.float64]
        dtdescr = {'names': dtnames, 'formats': dtformats}
        dt = np.dtype(dtdescr)
        p = ndpointer(dtype=dt)
        self.assertTrue(p.from_param(np.zeros((10,), dt)))
        samedt = np.dtype(dtdescr)
        p = ndpointer(dtype=samedt)
        self.assertTrue(p.from_param(np.zeros((10,), dt)))
        # An aligned variant only matches when alignment doesn't change
        # the itemsize.
        dt2 = np.dtype(dtdescr, align=True)
        if dt.itemsize != dt2.itemsize:
            self.assertRaises(TypeError, p.from_param, np.zeros((10,), dt2))
        else:
            self.assertTrue(p.from_param(np.zeros((10,), dt2)))

    def test_ndim(self):
        # ndim must match the array's rank exactly.
        p = ndpointer(ndim=0)
        self.assertTrue(p.from_param(np.array(1)))
        self.assertRaises(TypeError, p.from_param, np.array([1]))
        p = ndpointer(ndim=1)
        self.assertRaises(TypeError, p.from_param, np.array(1))
        self.assertTrue(p.from_param(np.array([1])))
        p = ndpointer(ndim=2)
        self.assertTrue(p.from_param(np.array([[1]])))

    def test_shape(self):
        # shape must match exactly, including the empty (scalar) shape.
        p = ndpointer(shape=(1, 2))
        self.assertTrue(p.from_param(np.array([[1, 2]])))
        self.assertRaises(TypeError, p.from_param, np.array([[1], [2]]))
        p = ndpointer(shape=())
        self.assertTrue(p.from_param(np.array(1)))

    def test_flags(self):
        # Memory-layout flags: a Fortran-ordered array satisfies 'FORTRAN'
        # but not 'CONTIGUOUS' (C-contiguity).
        x = np.array([[1, 2], [3, 4]], order='F')
        p = ndpointer(flags='FORTRAN')
        self.assertTrue(p.from_param(x))
        p = ndpointer(flags='CONTIGUOUS')
        self.assertRaises(TypeError, p.from_param, x)
        # Numeric flag values work the same way.
        p = ndpointer(flags=x.flags.num)
        self.assertTrue(p.from_param(x))
        self.assertRaises(TypeError, p.from_param, np.array([[1, 2], [3, 4]]))
if __name__ == "__main__":
    # Allow running this test module directly.
    run_module_suite()
|
mit
|
fusionbox/satchless
|
satchless/order/forms.py
|
1
|
2605
|
from django import forms
from django.forms.models import modelformset_factory
from django.utils.translation import ugettext_lazy as _
from . import handler
from . import models
class DeliveryMethodForm(forms.ModelForm):
    """Form letting the user pick a delivery method for one delivery group."""
    delivery_type = forms.ChoiceField(label=_('Delivery method'), choices=[])

    class Meta:
        model = models.DeliveryGroup
        fields = ('delivery_type',)

    def __init__(self, *args, **kwargs):
        super(DeliveryMethodForm, self).__init__(*args, **kwargs)
        # Choices depend on the bound group instance, so they are filled at
        # runtime from the registered delivery providers.
        types = handler.delivery_queue.as_choices(delivery_group=self.instance)
        self.fields['delivery_type'].choices = types
# One DeliveryMethodForm per existing delivery group; extra=0 so no blank
# forms are appended.
DeliveryMethodFormset = modelformset_factory(models.DeliveryGroup,
                                             form=DeliveryMethodForm, extra=0)
def get_delivery_details_forms_for_groups(groups, data):
    '''
    For each delivery group creates a (group, typ, delivery details form) tuple.
    If there is no form, the third element is None.
    '''
    return [
        (group,
         group.delivery_type,
         handler.delivery_queue.get_configuration_form(group, data))
        for group in groups
    ]
class PaymentMethodForm(forms.ModelForm):
    """Form letting the user pick a payment method for an order."""
    payment_type = forms.ChoiceField(choices=())

    class Meta:
        model = models.Order
        fields = ('payment_type',)

    def __init__(self, *args, **kwargs):
        super(PaymentMethodForm, self).__init__(*args, **kwargs)
        # Choices depend on the bound order instance, so they are filled at
        # runtime from the registered payment providers.
        types = list(handler.payment_queue.as_choices(order=self.instance))
        self.fields['payment_type'].choices = types
def get_payment_details_form(order, data):
    # Delegate to the payment queue; returns the provider's configuration
    # form for this order (or None when the provider has no form).
    return handler.payment_queue.get_configuration_form(order, data)
class BillingForm(forms.ModelForm):
    """Billing address form; a subset of the model fields is forced to be
    required on the form even though they are optional on the model."""
    REQUIRED_FIELDS = (
        'billing_first_name', 'billing_last_name',
        'billing_street_address_1', 'billing_city', 'billing_country_area',
        'billing_country', 'billing_postal_code', 'billing_phone'
    )

    class Meta:
        model = models.Order
        fields = ('billing_first_name', 'billing_last_name',
                  'billing_company_name', 'billing_street_address_1',
                  'billing_street_address_2', 'billing_city',
                  'billing_country_area', 'billing_postal_code',
                  'billing_country', 'billing_tax_id',
                  'billing_phone')

    def __init__(self, *args, **kwargs):
        super(BillingForm, self).__init__(*args, **kwargs)
        # Tighten the listed fields to required at the form level.
        for f in self.REQUIRED_FIELDS:
            self.fields[f].required = True
|
bsd-3-clause
|
Antiun/odoo
|
addons/mrp/wizard/change_production_qty.py
|
245
|
4852
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class change_production_qty(osv.osv_memory):
    """Wizard that changes the quantity to produce on a manufacturing order
    and rescales the consumed and produced stock moves accordingly."""
    _name = 'change.production.qty'
    _description = 'Change Quantity of Products'

    _columns = {
        # New quantity to produce, expressed in the production's UoM.
        'product_qty': fields.float('Product Qty', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
    }

    def default_get(self, cr, uid, fields, context=None):
        """ To get default values for the object.
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param fields: List of fields for which we want default values
        @param context: A standard dictionary
        @return: A dictionary which of fields with values.
        """
        if context is None:
            context = {}
        res = super(change_production_qty, self).default_get(cr, uid, fields, context=context)
        prod_obj = self.pool.get('mrp.production')
        # Pre-fill from the production order the wizard was launched on
        # (context['active_id']).
        prod = prod_obj.browse(cr, uid, context.get('active_id'), context=context)
        if 'product_qty' in fields:
            res.update({'product_qty': prod.product_qty})
        return res

    def _update_product_to_produce(self, cr, uid, prod, qty, context=None):
        # Rescale the production's "to produce" moves to the new quantity.
        move_lines_obj = self.pool.get('stock.move')
        for m in prod.move_created_ids:
            move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': qty})

    def change_prod_qty(self, cr, uid, ids, context=None):
        """
        Changes the Quantity of Product.
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        @return: empty dictionary (closes the wizard)
        """
        record_id = context and context.get('active_id',False)
        assert record_id, _('Active Id not found')
        prod_obj = self.pool.get('mrp.production')
        bom_obj = self.pool.get('mrp.bom')
        move_obj = self.pool.get('stock.move')
        for wiz_qty in self.browse(cr, uid, ids, context=context):
            prod = prod_obj.browse(cr, uid, record_id, context=context)
            prod_obj.write(cr, uid, [prod.id], {'product_qty': wiz_qty.product_qty})
            prod_obj.action_compute(cr, uid, [prod.id])
            # Rescale each consumption move from the (re-)exploded BoM.
            for move in prod.move_lines:
                bom_point = prod.bom_id
                bom_id = prod.bom_id.id
                if not bom_point:
                    # No BoM set on the production: look one up by product
                    # and store it back on the order.
                    bom_id = bom_obj._bom_find(cr, uid, product_id=prod.product_id.id, context=context)
                    if not bom_id:
                        raise osv.except_osv(_('Error!'), _("Cannot find bill of material for this product."))
                    prod_obj.write(cr, uid, [prod.id], {'bom_id': bom_id})
                    bom_point = bom_obj.browse(cr, uid, [bom_id])[0]
                # NOTE(review): this check duplicates the one above and
                # appears unreachable when a bom was found — confirm intent.
                if not bom_id:
                    raise osv.except_osv(_('Error!'), _("Cannot find bill of material for this product."))
                # Conversion of the production qty into the BoM's UoM.
                factor = prod.product_qty * prod.product_uom.factor / bom_point.product_uom.factor
                product_details, workcenter_details = \
                    bom_obj._bom_explode(cr, uid, bom_point, prod.product_id, factor / bom_point.product_qty, [], context=context)
                for r in product_details:
                    if r['product_id'] == move.product_id.id:
                        move_obj.write(cr, uid, [move.id], {'product_uom_qty': r['product_qty']})
            if prod.move_prod_id:
                # Also rescale the move pushing the finished product onward.
                move_obj.write(cr, uid, [prod.move_prod_id.id], {'product_uom_qty' : wiz_qty.product_qty})
            self._update_product_to_produce(cr, uid, prod, wiz_qty.product_qty, context=context)
        return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
adoosii/edx-platform
|
lms/djangoapps/dashboard/views.py
|
131
|
3337
|
"""View functions for the LMS Student dashboard"""
from django.http import Http404
from edxmako.shortcuts import render_to_response
from django.db import connection
from student.models import CourseEnrollment
from django.contrib.auth.models import User
def dictfetchall(cursor):
    """Return all rows from a cursor as a table: a list of lists whose first
    element is the list of column names, followed by one list per row.

    (Despite the historical name, the result is a table, not a dict — the
    original docstring claimed a "column: result dict", which was wrong.)
    Borrowed from Django documentation.
    """
    header = [col[0] for col in cursor.description]
    # ensure response from db is a list, not a tuple (which is returned
    # by MySQL backed django instances)
    rows_from_cursor = cursor.fetchall()
    return [header] + [list(row) for row in rows_from_cursor]
def SQL_query_to_list(cursor, query_string):  # pylint: disable=invalid-name
    """Returns the raw result of the query"""
    # Execute and convert straight into the header-plus-rows table shape
    # produced by dictfetchall.
    cursor.execute(query_string)
    return dictfetchall(cursor)
def dashboard(request):
    """
    Slightly less hackish hack to show staff enrollment numbers and other
    simple queries.

    All queries here should be indexed and simple. Mostly, this means don't
    touch courseware_studentmodule, as tempting as it may be.
    """
    # Staff-only view.
    if not request.user.is_staff:
        raise Http404

    # results are passed to the template. The template knows how to render
    # two types of results: scalars and tables. Scalars should be represented
    # as "Visible Title": Value and tables should be lists of lists where each
    # inner list represents a single row of the table
    results = {"scalars": {}, "tables": {}}

    # count how many users we have
    results["scalars"]["Unique Usernames"] = User.objects.filter().count()
    results["scalars"]["Activated Usernames"] = User.objects.filter(is_active=1).count()

    # count how many enrollments we have
    results["scalars"]["Total Enrollments Across All Courses"] = CourseEnrollment.objects.filter(is_active=1).count()

    # establish a direct connection to the database (for executing raw SQL)
    cursor = connection.cursor()

    # define the queries that will generate our user-facing tables
    # table queries need not take the form of raw SQL, but do in this case since
    # the MySQL backend for django isn't very friendly with group by or distinct
    table_queries = {}
    table_queries["course registrations (current enrollments)"] = """
select
course_id as Course,
count(user_id) as Students
from student_courseenrollment
where is_active=1
group by course_id
order by students desc;"""
    table_queries["number of students in each number of classes"] = """
select registrations as 'Registered for __ Classes' ,
count(registrations) as Users
from (select count(user_id) as registrations
from student_courseenrollment
where is_active=1
group by user_id) as registrations_per_user
group by registrations;"""

    # add the result for each of the table_queries to the results object
    for query in table_queries.keys():
        cursor.execute(table_queries[query])
        results["tables"][query] = SQL_query_to_list(cursor, table_queries[query])

    context = {"results": results}
    return render_to_response("admin_dashboard.html", context)
|
agpl-3.0
|
lerouxb/ni
|
core/tokenizer.py
|
1
|
7558
|
import os
import bisect
from pygments.token import Token
from pygments.lexers import get_lexer_by_name, get_lexer_for_filename, \
ClassNotFound
# TODO: these really have to be moved to another file and it should be made
# to be pluggable
def isbacktracetoken_default(ttype, tvalue):
    # By default, any non-literal token is considered a safe point to
    # restart lexing from.
    return ttype not in Token.Literal
def isbacktracetoken_html(ttype, tvalue):
    """Return True when the token is a complete HTML tag token — a safe
    point to restart lexing from.

    Bug fixes vs. the original: the function name now matches the lookup in
    ``Tokenizer.update`` (it was misspelled ``isbackspacetoken_html``, so
    the lookup raised NameError), and the undefined ``rvalue`` is corrected
    to ``tvalue``.
    """
    if not tvalue:
        return False
    if ttype in Token.Name.Tag:
        # a bare '>' or a full '<...>' tag both close an element
        if tvalue == '>' or (tvalue[0] == '<' and tvalue[-1] == '>'):
            return True
    return False

# Backward-compatible alias preserving the original (misspelled) name.
isbackspacetoken_html = isbacktracetoken_html
def isbacktracetoken_css(ttype, tvalue):
    # Only a closing brace ends a CSS rule, so that's where re-lexing may
    # safely resume.
    is_closing_brace = (tvalue == '}')
    return is_closing_brace and ttype in Token.Punctuation
class Tokenizer(object):
    """
    Wraps a lexer and caches tokens and token offsets.

    ``self.tokens[i]`` is a (tokentype, value) pair and ``self.offsets[i]``
    is the document offset where that token starts; ``self.end`` is the
    offset just past the last lexed token.
    """
    def __init__(self, document):
        self.document = document
        self.tokens = []
        self.offsets = []
        self.end = 0  # up to where we lexed last
        if document.location:
            filename = document.location
            try:
                # HACK! overrides should come from settings...
                if os.path.splitext(filename)[1] == '.html':
                    # assume django template
                    self.lexer = get_lexer_by_name('html+django',
                                                   stripnl=False,
                                                   encoding='utf8')
                elif os.path.splitext(filename)[1] == '.py':
                    # otherwise we end up with the annoying NumPy lexer..
                    self.lexer = get_lexer_by_name('python',
                                                   stripnl=False,
                                                   encoding='utf8')
                else:
                    self.lexer = get_lexer_for_filename(filename,
                                                        stripnl=False,
                                                        encoding='utf8')
            except ClassNotFound:
                # Unknown file type: fall back to plain text (no lexer).
                self.lexer = None
        else:
            self.lexer = None

    def update(self, from_offset=None, to_offset=None):
        """
        Update the tokens and offsets from from_offset to to_offset.
        This should only ever get used from inside Document.
        """
        content = self.document.content
        if not self.lexer:
            # No lexer: the whole document is one plain-text token.
            self.tokens = [(Token.Text, content)]
            self.offsets = [0]
            return
        # default to_offset to the end of the content
        if not to_offset:
            to_offset = len(content)
        if not self.tokens:
            # if we haven't lexed before, make sure we take the long path
            from_offset = None
        if from_offset == None:
            # lex everything
            from_offset = 0
            self.end = 0
            self.tokens = []
            self.offsets = []
            code = content
        else:
            # make sure from_offset is actually inside the bit that we already
            # have cached
            if self.offsets:
                last_offset = self.offsets[-1]
            else:
                last_offset = 0
            from_offset = min(last_offset, from_offset)
            # HACK!
            # Try and "snap" to a token (previous non-literal token, at least
            # two back)
            if self.lexer.name in ('HTML', 'HTML+Django/Jinja'):
                isbacktracetoken = isbacktracetoken_html
            elif self.lexer.name == 'CSS':
                isbacktracetoken = isbacktracetoken_css
            else:
                isbacktracetoken = isbacktracetoken_default
            # Find the cached token containing from_offset, then walk back
            # to a safe restart point.
            index = bisect.bisect_left(self.offsets, from_offset)
            index -= 2
            if index < 0:
                index = 0
            else:
                while index:
                    tokentype, value = self.tokens[index]
                    if isbacktracetoken(tokentype, value):
                        break
                    index -= 1
            # set self.end, self.tokens and self.offsets (drop the bits from
            # the cache that we are replacing, basically)
            from_offset = self.offsets[index]
            self.end = self.offsets[index]
            self.tokens = self.tokens[:index]
            self.offsets = self.offsets[:index]
            # sanity
            if to_offset < from_offset:
                to_offset = from_offset
            # cut the visible portion
            code = content[from_offset:to_offset+1]
        # lex the code fragment and add the tokens and their corresponding
        # starting offsets (while caching the end position)
        for tokentype, value in self.lexer.get_tokens(code):
            # hack for python: give 'self' its own token type for styling
            if tokentype is Token.Name.Builtin.Pseudo and value == 'self':
                tokentype = Token.Name.Builtin.Pseudo.Self
            self.tokens.append((tokentype, value))
            self.offsets.append(self.end)
            # Offsets are in display columns: tabs count as tab_size cells.
            self.end += len(value.replace('\t', ' '*self.document.tab_size))

    def get_normalised_tokens(self, from_line, to_line):
        """
        Return tokens for the region extending from from_line to to_line and
        the first and last tokens chopped if they extend out of the specified
        section.
        It typically gets used in drawing routines and should only be called
        after update(), because it doesn't update the tokens itself - it just
        reads from the cached tokens and offsets.
        """
        tokens = self.tokens
        offsets = self.offsets
        sy = from_line
        start_offset = self.document.cursor_pos_to_offset((sy, 0))
        ey = to_line+1
        end_offset = self.document.cursor_pos_to_offset((ey, 0))
        if self.document.offset_to_cursor_pos(end_offset)[0] == ey:
            # it didn't get adjusted, so we're not at the end of the file,
            # therefore we should go to the start of the previous line
            end_offset -= 1
        # get the token index that contains the start offset
        # NOTE(review): 'and' binds tighter than 'or' here, so this reads as
        # (start_index and start_index >= len(offsets)) or
        # offsets[start_index] > start_offset — confirm that is the intent.
        start_index = bisect.bisect_left(offsets, start_offset)
        if start_index and \
           start_index >= len(offsets) or offsets[start_index] > start_offset:
            start_index -= 1
        # get the token index that contains the end offset
        end_index = bisect.bisect_left(offsets, end_offset)
        if end_index and \
           end_index >= len(offsets) or offsets[end_index] > end_offset:
            end_index -= 1
        # chop the tokens to only include the ones we're interested in
        ntokens = tokens[start_index:end_index+1]
        # chop the first token if it extends out of the screen
        if offsets[start_index] < start_offset:
            start_skip = start_offset - offsets[start_index]
            ttype, tvalue = ntokens[0]
            ntokens[0] = (ttype, tvalue[start_skip:])
        # chop the last token if it extends out of the screen
        if offsets[end_index] > end_offset:
            end_skip = offsets[end_index] - end_offset
            ttype, tvalue = ntokens[-1]
            ntokens[-1] = (ttype, tvalue[:-end_skip])
        # NOTE: I'm pretty sure there's an edge case here involving very long
        # tokens, but I can't recreate it right now..
        return ntokens
|
mit
|
mozilla/kuma
|
kuma/api/management/commands/unpublish.py
|
1
|
1264
|
"""
Manually schedule the removal of one or more documents from the document API.
"""
from collections import namedtuple
from django.core.management.base import BaseCommand, CommandError
from kuma.api.tasks import unpublish
class Command(BaseCommand):
    """Management command: remove documents from the document API."""
    args = '<document_path document_path ...>'
    help = 'Remove one or more documents from the document API'

    def add_arguments(self, parser):
        # Zero or more positional document paths.
        parser.add_argument(
            'paths',
            help='Path to document(s), like /en-US/docs/Web',
            nargs='*',
            metavar='path')

    def handle(self, *args, **options):
        # Adapt the command's stdout/stderr to the logger-like object the
        # unpublish task expects (info/error callables).
        Logger = namedtuple('Logger', 'info, error')
        log = Logger(info=self.stdout.write, error=self.stderr.write)
        paths = options['paths']
        if not paths:
            raise CommandError('Need at least one document path to remove')
        doc_locale_slug_pairs = []
        for path in paths:
            # Normalize "/<locale>/docs/<slug>" into a (locale, slug) pair.
            if path.startswith('/'):
                path = path[1:]
            locale, sep, slug = path.partition('/')
            head, sep, tail = slug.partition('/')
            if head == 'docs':
                # Strip the "docs/" prefix when present.
                slug = tail
            doc_locale_slug_pairs.append((locale, slug))
        unpublish(doc_locale_slug_pairs, log=log)
|
mpl-2.0
|
landism/pants
|
tests/python/pants_test/backend/python/test_interpreter_cache.py
|
5
|
5652
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from contextlib import contextmanager
import mock
from pex.package import EggPackage, Package, SourcePackage
from pex.resolver import Unsatisfiable, resolve
from pants.backend.python.interpreter_cache import PythonInterpreter, PythonInterpreterCache
from pants.backend.python.subsystems.python_setup import PythonSetup
from pants.python.python_repos import PythonRepos
from pants.util.contextutil import temporary_dir
from pants_test.base_test import BaseTest
class TestInterpreterCache(BaseTest):
  """Unit tests for PythonInterpreterCache interpreter discovery and filtering."""

  @staticmethod
  def _make_bad_requirement(requirement):
    """Turns a requirement that passes into one we know will fail.

    E.g. 'CPython==2.7.5' becomes 'CPython==99.7.5'
    """
    return str(requirement).replace('==2.', '==99.')

  def setUp(self):
    super(TestInterpreterCache, self).setUp()
    # The interpreter currently running the tests; the only interpreter the
    # mocked cache will ever "discover".
    self._interpreter = PythonInterpreter.get()

  @contextmanager
  def _setup_test(self, constraints=None):
    """Yield (cache, cache_dir) with discovery stubbed to self._interpreter."""
    # PythonSetup stand-in whose interpreter_constraints property returns
    # `constraints`, backed by a temporary interpreter cache dir.
    mock_setup = mock.MagicMock().return_value
    type(mock_setup).interpreter_constraints = mock.PropertyMock(return_value=constraints)
    with temporary_dir() as path:
      mock_setup.interpreter_cache_dir = path
      cache = PythonInterpreterCache(mock_setup, mock.MagicMock())
      def set_interpreters(_):
        cache._interpreters.add(self._interpreter)
      # Short-circuit real interpreter discovery: always "find" exactly
      # self._interpreter regardless of what paths are scanned.
      cache._setup_cached = mock.Mock(side_effect=set_interpreters)
      cache._setup_paths = mock.Mock()
      yield cache, path

  def _do_test(self, constraints, filters, expected):
    with self._setup_test(constraints) as (cache, _):
      self.assertEqual(cache.setup(filters=filters), expected)

  def test_cache_setup_with_no_filters_uses_repo_default_excluded(self):
    # A constraint that cannot match excludes the only available interpreter.
    self._do_test([self._make_bad_requirement(self._interpreter.identity.requirement)], [], [])

  def test_cache_setup_with_no_filters_uses_repo_default(self):
    self._do_test((b'',), [], [self._interpreter])

  def test_cache_setup_with_filter_overrides_repo_default(self):
    # An explicit filter matching the current interpreter wins over a
    # repo-default constraint that would otherwise exclude it.
    self._do_test([self._make_bad_requirement(self._interpreter.identity.requirement)],
                  (str(self._interpreter.identity.requirement), ),
                  [self._interpreter])

  def test_setup_using_eggs(self):
    def link_egg(repo_root, requirement):
      # Symlink an .egg for `requirement` into repo_root; return its version.
      existing_dist_location = self._interpreter.get_location(requirement)
      if existing_dist_location is not None:
        # Pin to the already-installed version so resolve() can find an egg.
        existing_dist = Package.from_href(existing_dist_location)
        requirement = '{}=={}'.format(existing_dist.name, existing_dist.raw_version)
      distributions = resolve([requirement],
                              interpreter=self._interpreter,
                              precedence=(EggPackage, SourcePackage))
      self.assertEqual(1, len(distributions))
      dist_location = distributions[0].location
      self.assertRegexpMatches(dist_location, r'\.egg$')
      os.symlink(dist_location, os.path.join(repo_root, os.path.basename(dist_location)))
      return Package.from_href(dist_location).raw_version

    with temporary_dir() as root:
      egg_dir = os.path.join(root, 'eggs')
      os.makedirs(egg_dir)
      setuptools_version = link_egg(egg_dir, 'setuptools')
      wheel_version = link_egg(egg_dir, 'wheel')
      interpreter_requirement = self._interpreter.identity.requirement
      # Point the subsystems at our local egg-only repo (no indexes).
      self.context(for_subsystems=[PythonSetup, PythonRepos], options={
        PythonSetup.options_scope: {
          'interpreter_cache_dir': None,
          'pants_workdir': os.path.join(root, 'workdir'),
          'constraints': [interpreter_requirement],
          'setuptools_version': setuptools_version,
          'wheel_version': wheel_version,
        },
        PythonRepos.options_scope: {
          'indexes': [],
          'repos': [egg_dir],
        }
      })
      cache = PythonInterpreterCache(PythonSetup.global_instance(), PythonRepos.global_instance())
      interpereters = cache.setup(paths=[os.path.dirname(self._interpreter.binary)],
                                  filters=[str(interpreter_requirement)])
      self.assertGreater(len(interpereters), 0)

      def assert_egg_extra(interpreter, name, version):
        # The interpreter's extras must have been satisfied from our egg repo.
        location = interpreter.get_location('{}=={}'.format(name, version))
        self.assertIsNotNone(location)
        self.assertIsInstance(Package.from_href(location), EggPackage)

      for interpreter in interpereters:
        assert_egg_extra(interpreter, 'setuptools', setuptools_version)
        assert_egg_extra(interpreter, 'wheel', wheel_version)

  def test_setup_resolve_failure_cleanup(self):
    """Simulates a resolution failure during interpreter setup to avoid partial interpreter caching.

    See https://github.com/pantsbuild/pants/issues/2038 for more info.
    """
    with mock.patch.object(PythonInterpreterCache, '_resolve') as mock_resolve, \
         self._setup_test() as (cache, cache_path):
      mock_resolve.side_effect = Unsatisfiable('nope')
      with self.assertRaises(Unsatisfiable):
        cache._setup_interpreter(self._interpreter, os.path.join(cache_path, 'CPython-2.7.11'))
      # Before the bugfix, the above call would leave behind paths in the tmpdir that looked like:
      #
      # /tmp/tmpUrCSzk/CPython-2.7.11.tmp.a167fc50834a4f00aa280780c3e1ba21
      #
      self.assertFalse('.tmp.' in ' '.join(os.listdir(cache_path)),
                       'interpreter cache path contains tmp dirs!')
|
apache-2.0
|
roessland/PRST
|
setup.py
|
1
|
1197
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup, Command, find_packages
class CleanCommand(Command):
    """Custom clean command to tidy up project root."""
    # http://stackoverflow.com/questions/3779915/
    # distutils Command subclasses must define these three hooks even if unused.
    user_options = []
    def initialize_options(self):
        # No options to initialize.
        pass
    def finalize_options(self):
        # No options to validate.
        pass
    def run(self):
        # Shells out to remove build artifacts, caches and misc cruft from the
        # project root. POSIX-only (relies on `rm` and shell globbing).
        os.system("rm -vrf ./build ./dist ./*.pyc ./*.egg-info MANIFEST coverage.xml Thumbs.db")
def read(fname):
    """Return the text contents of *fname*, resolved relative to this file's directory.

    Used to embed README.md as the long_description without depending on the
    current working directory setup.py happens to be run from.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    # Context manager guarantees the handle is closed (the original leaked it).
    with open(path) as f:
        return f.read()
# Package metadata; executed at import time by pip/setuptools.
setup(
    name = "PRST",
    #packages = ["prst"],
    packages = find_packages(),
    version = "0.0.3",
    description = "Python Reservoir Simulation Toolbox",
    # README.md doubles as the PyPI long description.
    long_description = read("README.md"),
    license = "GPLv3",
    author = "Andreas Røssland",
    author_email = "andreas.roessland@gmail.com",
    url = "https://github.com/roessland/PRST",
    # NOTE(review): tarball tag must be kept in sync with `version` above.
    download_url = "https://github.com/roessland/PRST/tarball/0.0.3",
    keywords = ["MRST", "reservoir", "simulation", "PDEs"],
    classifiers = [
        "Development Status :: 2 - Pre-Alpha",
    ],
    cmdclass = {
        # `python setup.py clean` -> CleanCommand.run() above.
        "clean": CleanCommand,
    }
)
|
gpl-3.0
|
ritchyteam/odoo
|
addons/account_test/account_test.py
|
342
|
2169
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2005-2006 TINY SPRL. (http://tiny.be) All Rights Reserved.
#
# $Id: product_expiry.py 4304 2006-10-25 09:54:51Z ged $
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.osv import fields, osv
CODE_EXEC_DEFAULT = '''\
res = []
cr.execute("select id, code from account_journal")
for record in cr.dictfetchall():
res.append(record['code'])
result = res
'''
class accounting_assert_test(osv.osv):
    # Stores user-defined accounting sanity checks: each record holds a python
    # snippet that the test engine executes; the snippet must leave its
    # outcome in a `result` variable (see CODE_EXEC_DEFAULT).
    _name = "accounting.assert.test"
    _order = "sequence"
    _columns = {
        'name': fields.char('Test Name', required=True, select=True, translate=True),
        'desc': fields.text('Test Description', select=True, translate=True),
        # Python source executed against the accounting data.
        'code_exec': fields.text('Python code', required=True),
        'active': fields.boolean('Active'),
        # Display/run order of the tests.
        'sequence': fields.integer('Sequence'),
    }
    _defaults = {
        'code_exec': CODE_EXEC_DEFAULT,
        'active': True,
        'sequence': 10,
    }
|
agpl-3.0
|
mars-knowsnothing/amos-bot
|
src/Lib/site-packages/pip/_vendor/requests/api.py
|
362
|
5794
|
# -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
    """Build a :class:`Request <Request>`, send it, and return the
    :class:`Response <Response>`.

    :param method: HTTP method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) query-string mapping or bytes.
    :param data: (optional) body as dict, bytes, or file-like object.
    :param json: (optional) json data to send in the body.
    :param headers: (optional) mapping of HTTP headers.
    :param cookies: (optional) dict or CookieJar sent with the request.
    :param files: (optional) mapping of ``'name': file-like-objects`` (or
        ``{'name': file-tuple}``) for multipart encoding upload; tuples may be
        ``('filename', fileobj)``, ``('filename', fileobj, 'content_type')`` or
        ``('filename', fileobj, 'content_type', custom_headers)``.
    :param auth: (optional) auth tuple for Basic/Digest/Custom HTTP auth.
    :param timeout: (optional) how long to wait for the server to send data,
        as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) set True to follow POST/PUT/DELETE redirects.
    :type allow_redirects: bool
    :param proxies: (optional) mapping of protocol to the proxy URL.
    :param verify: (optional) verify the SSL cert, or a CA_BUNDLE path.
        Defaults to ``True``.
    :param stream: (optional) if ``False``, download the body immediately.
    :param cert: (optional) ssl client cert path (.pem), or ('cert', 'key') pair.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'http://httpbin.org/get')
      <Response [200]>
    """
    # Use a fresh, scoped Session per call: the 'with' guarantees the session
    # (and its sockets) are closed, avoiding ResourceWarnings that can look
    # like memory leaks in some environments.
    session = sessions.Session()
    with session:
        return session.request(method=method, url=url, **kwargs)
def get(url, params=None, **kwargs):
    """Send a GET request.

    :param url: URL for the new :class:`Request` object.
    :param params: (optional) query-string mapping or bytes.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # GETs follow redirects unless the caller explicitly opted out.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('get', url, params=params, **kwargs)
def options(url, **kwargs):
    """Send an OPTIONS request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Redirects are followed by default, as for GET.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('options', url, **kwargs)
def head(url, **kwargs):
    """Send a HEAD request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Unlike GET, HEAD does NOT follow redirects by default.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = False
    return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
    """Send a POST request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) body as dict, bytes, or file-like object.
    :param json: (optional) json data to send in the body.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Redirect policy is left to request()'s default for POST.
    return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
    """Send a PUT request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) body as dict, bytes, or file-like object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
    """Send a PATCH request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) body as dict, bytes, or file-like object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
    """Send a DELETE request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('delete', url, **kwargs)
|
gpl-3.0
|
rdeheele/odoo
|
addons/account_followup/account_followup.py
|
93
|
28777
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api
from openerp.osv import fields, osv
from lxml import etree
from openerp.tools.translate import _
class followup(osv.osv):
    # A follow-up plan: the ordered set of reminder levels of one company.
    # The SQL constraint below enforces at most one plan per company.
    _name = 'account_followup.followup'
    _description = 'Account Follow-up'
    _rec_name = 'name'
    _columns = {
        # Reminder levels; duplicated along with the plan (copy=True).
        'followup_line': fields.one2many('account_followup.followup.line', 'followup_id', 'Follow-up', copy=True),
        'company_id': fields.many2one('res.company', 'Company', required=True),
        # Display name mirrors the owning company's name.
        'name': fields.related('company_id', 'name', string = "Name", readonly=True, type="char"),
    }
    _defaults = {
        # Default to the current user's company.
        'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'account_followup.followup', context=c),
    }
    _sql_constraints = [('company_uniq', 'unique(company_id)', 'Only one follow-up per company is allowed')]
class followup_line(osv.osv):
    """One reminder level of a follow-up plan: what to do N days past due."""

    def _get_default_template(self, cr, uid, ids, context=None):
        # Default to the module's stock reminder email template, if installed.
        try:
            return self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account_followup', 'email_template_account_followup_default')[1]
        except ValueError:
            return False

    _name = 'account_followup.followup.line'
    _description = 'Follow-up Criteria'
    _columns = {
        'name': fields.char('Follow-Up Action', required=True),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of follow-up lines."),
        'delay': fields.integer('Due Days', help="The number of days after the due date of the invoice to wait before sending the reminder. Could be negative if you want to send a polite alert beforehand.", required=True),
        'followup_id': fields.many2one('account_followup.followup', 'Follow Ups', required=True, ondelete="cascade"),
        # %-style placeholders allowed; validated by _check_description below.
        'description': fields.text('Printed Message', translate=True),
        'send_email':fields.boolean('Send an Email', help="When processing, it will send an email"),
        'send_letter':fields.boolean('Send a Letter', help="When processing, it will print a letter"),
        'manual_action':fields.boolean('Manual Action', help="When processing, it will set the manual action to be taken for that customer. "),
        'manual_action_note':fields.text('Action To Do', placeholder="e.g. Give a phone call, check with others , ..."),
        'manual_action_responsible_id':fields.many2one('res.users', 'Assign a Responsible', ondelete='set null'),
        'email_template_id':fields.many2one('email.template', 'Email Template', ondelete='set null'),
    }
    _order = 'delay'
    _sql_constraints = [('days_uniq', 'unique(followup_id, delay)', 'Days of the follow-up levels must be different')]
    _defaults = {
        'send_email': True,
        'send_letter': True,
        'manual_action':False,
        'description': """
        Dear %(partner_name)s,
Exception made if there was a mistake of ours, it seems that the following amount stays unpaid. Please, take appropriate measures in order to carry out this payment in the next 8 days.
Would your payment have been carried out after this mail was sent, please ignore this message. Do not hesitate to contact our accounting department.
Best Regards,
""",
        'email_template_id': _get_default_template,
    }

    def _check_description(self, cr, uid, ids, context=None):
        """Constraint: the description's %-placeholders must be renderable
        with the legend keys used by the report (partner_name, date, ...)."""
        for line in self.browse(cr, uid, ids, context=context):
            if line.description:
                try:
                    line.description % {'partner_name': '', 'date':'', 'user_signature': '', 'company_name': ''}
                # Only catch the errors %-formatting can raise (unknown key,
                # bad conversion char, wrong type); the original bare `except:`
                # also swallowed KeyboardInterrupt/SystemExit.
                except (KeyError, ValueError, TypeError):
                    return False
        return True

    _constraints = [
        (_check_description, 'Your description is invalid, use the right legend or %% if you want to use the percent character.', ['description']),
    ]
class account_move_line(osv.osv):
    _inherit = 'account.move.line'

    def _get_result(self, cr, uid, ids, name, arg, context=None):
        """Functional field: signed balance (debit - credit) per move line."""
        return {
            line.id: line.debit - line.credit
            for line in self.browse(cr, uid, ids, context=context)
        }

    _columns = {
        'followup_line_id': fields.many2one('account_followup.followup.line', 'Follow-up Level',
                                        ondelete='restrict'), #restrict deletion of the followup line
        'followup_date': fields.date('Latest Follow-up', select=True),
        'result':fields.function(_get_result, type='float', method=True,
                                string="Balance") #'balance' field is not the same
    }
class res_partner(osv.osv):
    def fields_view_get(self, cr, uid, view_id=None, view_type=None, context=None, toolbar=False, submenu=False):
        # Override: when the partner form is opened from the follow-up screens
        # (context key 'Followupfirst'), move the follow-up notebook page to
        # the front of the form arch.
        res = super(res_partner, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context,
                                                       toolbar=toolbar, submenu=submenu)
        context = context or {}
        if view_type == 'form' and context.get('Followupfirst'):
            doc = etree.XML(res['arch'], parser=None, base_url=None)
            first_node = doc.xpath("//page[@name='followup_tab']")
            root = first_node[0].getparent()
            # Re-insert the follow-up page as the first child of the notebook.
            root.insert(0, first_node[0])
            res['arch'] = etree.tostring(doc, encoding="utf-8")
        return res
    def _get_latest(self, cr, uid, ids, names, arg, context=None, company_id=None):
        """Functional field: per partner, the latest follow-up date and the
        highest (largest delay) follow-up level reached, with and without
        litigation/blocked lines, restricted to one company's move lines."""
        res={}
        if company_id == None:
            company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
        else:
            company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
        for partner in self.browse(cr, uid, ids, context=context):
            amls = partner.unreconciled_aml_ids
            latest_date = False
            latest_level = False
            latest_days = False
            latest_level_without_lit = False
            latest_days_without_lit = False
            for aml in amls:
                # Highest level overall (largest delay wins).
                if (aml.company_id == company) and (aml.followup_line_id != False) and (not latest_days or latest_days < aml.followup_line_id.delay):
                    latest_days = aml.followup_line_id.delay
                    latest_level = aml.followup_line_id.id
                # Most recent follow-up date across all of this company's lines.
                if (aml.company_id == company) and (not latest_date or latest_date < aml.followup_date):
                    latest_date = aml.followup_date
                # Highest level ignoring blocked ("litigation") lines.
                if (aml.company_id == company) and (aml.blocked == False) and (aml.followup_line_id != False and
                            (not latest_days_without_lit or latest_days_without_lit < aml.followup_line_id.delay)):
                    latest_days_without_lit = aml.followup_line_id.delay
                    latest_level_without_lit = aml.followup_line_id.id
            res[partner.id] = {'latest_followup_date': latest_date,
                               'latest_followup_level_id': latest_level,
                               'latest_followup_level_id_without_lit': latest_level_without_lit}
        return res
    @api.cr_uid_ids_context
    def do_partner_manual_action(self, cr, uid, partner_ids, context=None):
        """Record the manual action of each partner's current follow-up level:
        append the level's note to the next action, schedule it, and assign a
        responsible - each only when not already set by hand."""
        #partner_ids -> res.partner
        for partner in self.browse(cr, uid, partner_ids, context=context):
            #Check action: check if the action was not empty, if not add
            action_text= ""
            if partner.payment_next_action:
                action_text = (partner.payment_next_action or '') + "\n" + (partner.latest_followup_level_id_without_lit.manual_action_note or '')
            else:
                action_text = partner.latest_followup_level_id_without_lit.manual_action_note or ''
            #Check date: only change when it did not exist already
            action_date = partner.payment_next_action_date or fields.date.context_today(self, cr, uid, context=context)
            # Check responsible: if partner has not got a responsible already, take from follow-up
            responsible_id = False
            if partner.payment_responsible_id:
                responsible_id = partner.payment_responsible_id.id
            else:
                p = partner.latest_followup_level_id_without_lit.manual_action_responsible_id
                responsible_id = p and p.id or False
            self.write(cr, uid, [partner.id], {'payment_next_action_date': action_date,
                                               'payment_next_action': action_text,
                                               'payment_responsible_id': responsible_id})
def do_partner_print(self, cr, uid, wizard_partner_ids, data, context=None):
#wizard_partner_ids are ids from special view, not from res.partner
if not wizard_partner_ids:
return {}
data['partner_ids'] = wizard_partner_ids
datas = {
'ids': wizard_partner_ids,
'model': 'account_followup.followup',
'form': data
}
return self.pool['report'].get_action(cr, uid, [], 'account_followup.report_followup', data=datas, context=context)
    @api.cr_uid_ids_context
    def do_partner_mail(self, cr, uid, partner_ids, context=None):
        """Email each partner its follow-up reminder.

        Uses the email template of the partner's current follow-up level when
        set, else the module's default template.  Partners without an email
        address are flagged through 'payment_next_action' instead.

        :return: the number of partners that could not be emailed
        """
        if context is None:
            context = {}
        ctx = context.copy()
        ctx['followup'] = True
        #partner_ids are res.partner ids
        # If not defined by latest follow-up level, it will be the default template if it can find it
        mtp = self.pool.get('email.template')
        unknown_mails = 0
        for partner in self.browse(cr, uid, partner_ids, context=ctx):
            if partner.email and partner.email.strip():
                level = partner.latest_followup_level_id_without_lit
                if level and level.send_email and level.email_template_id and level.email_template_id.id:
                    mtp.send_mail(cr, uid, level.email_template_id.id, partner.id, context=ctx)
                else:
                    # Fall back to the module's stock template.
                    mail_template_id = self.pool.get('ir.model.data').get_object_reference(cr, uid,
                                                    'account_followup', 'email_template_account_followup_default')
                    mtp.send_mail(cr, uid, mail_template_id[1], partner.id, context=ctx)
            else:
                unknown_mails = unknown_mails + 1
                action_text = _("Email not sent because of email address of partner not filled in")
                # Schedule the manual action at the earliest of today and any
                # already-planned action date.
                if partner.payment_next_action_date:
                    payment_action_date = min(fields.date.context_today(self, cr, uid, context=ctx), partner.payment_next_action_date)
                else:
                    payment_action_date = fields.date.context_today(self, cr, uid, context=ctx)
                if partner.payment_next_action:
                    payment_next_action = partner.payment_next_action + " \n " + action_text
                else:
                    payment_next_action = action_text
                self.write(cr, uid, [partner.id], {'payment_next_action_date': payment_action_date,
                                                   'payment_next_action': payment_next_action}, context=ctx)
        return unknown_mails
def get_followup_table_html(self, cr, uid, ids, context=None):
""" Build the html tables to be included in emails send to partners,
when reminding them their overdue invoices.
:param ids: [id] of the partner for whom we are building the tables
:rtype: string
"""
from report import account_followup_print
assert len(ids) == 1
if context is None:
context = {}
partner = self.browse(cr, uid, ids[0], context=context)
#copy the context to not change global context. Overwrite it because _() looks for the lang in local variable 'context'.
#Set the language to use = the partner language
context = dict(context, lang=partner.lang)
followup_table = ''
if partner.unreconciled_aml_ids:
company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
current_date = fields.date.context_today(self, cr, uid, context=context)
rml_parse = account_followup_print.report_rappel(cr, uid, "followup_rml_parser")
final_res = rml_parse._lines_get_with_partner(partner, company.id)
for currency_dict in final_res:
currency = currency_dict.get('line', [{'currency_id': company.currency_id}])[0]['currency_id']
followup_table += '''
<table border="2" width=100%%>
<tr>
<td>''' + _("Invoice Date") + '''</td>
<td>''' + _("Description") + '''</td>
<td>''' + _("Reference") + '''</td>
<td>''' + _("Due Date") + '''</td>
<td>''' + _("Amount") + " (%s)" % (currency.symbol) + '''</td>
<td>''' + _("Lit.") + '''</td>
</tr>
'''
total = 0
for aml in currency_dict['line']:
block = aml['blocked'] and 'X' or ' '
total += aml['balance']
strbegin = "<TD>"
strend = "</TD>"
date = aml['date_maturity'] or aml['date']
if date <= current_date and aml['balance'] > 0:
strbegin = "<TD><B>"
strend = "</B></TD>"
followup_table +="<TR>" + strbegin + str(aml['date']) + strend + strbegin + aml['name'] + strend + strbegin + (aml['ref'] or '') + strend + strbegin + str(date) + strend + strbegin + str(aml['balance']) + strend + strbegin + block + strend + "</TR>"
total = reduce(lambda x, y: x+y['balance'], currency_dict['line'], 0.00)
total = rml_parse.formatLang(total, dp='Account', currency_obj=currency)
followup_table += '''<tr> </tr>
</table>
<center>''' + _("Amount due") + ''' : %s </center>''' % (total)
return followup_table
def write(self, cr, uid, ids, vals, context=None):
if vals.get("payment_responsible_id", False):
for part in self.browse(cr, uid, ids, context=context):
if part.payment_responsible_id <> vals["payment_responsible_id"]:
#Find partner_id of user put as responsible
responsible_partner_id = self.pool.get("res.users").browse(cr, uid, vals['payment_responsible_id'], context=context).partner_id.id
self.pool.get("mail.thread").message_post(cr, uid, 0,
body = _("You became responsible to do the next action for the payment follow-up of") + " <b><a href='#id=" + str(part.id) + "&view_type=form&model=res.partner'> " + part.name + " </a></b>",
type = 'comment',
subtype = "mail.mt_comment", context = context,
model = 'res.partner', res_id = part.id,
partner_ids = [responsible_partner_id])
return super(res_partner, self).write(cr, uid, ids, vals, context=context)
def action_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'payment_next_action_date': False, 'payment_next_action':'', 'payment_responsible_id': False}, context=context)
    def do_button_print(self, cr, uid, ids, context=None):
        """Print the overdue-payments report for a single partner.

        Raises a user error when the partner has no printable receivable
        entries or when no follow-up plan exists for the current company.
        """
        assert(len(ids) == 1)
        company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
        #search if the partner has accounting entries to print. If not, it may not be present in the
        #psql view the report is based on, so we need to stop the user here.
        if not self.pool.get('account.move.line').search(cr, uid, [
                                                         ('partner_id', '=', ids[0]),
                                                         ('account_id.type', '=', 'receivable'),
                                                         ('reconcile_id', '=', False),
                                                         ('state', '!=', 'draft'),
                                                         ('company_id', '=', company_id),
                                                         ], context=context):
            raise osv.except_osv(_('Error!'),_("The partner does not have any accounting entries to print in the overdue report for the current company."))
        self.message_post(cr, uid, [ids[0]], body=_('Printed overdue payments report'), context=context)
        #build the id of this partner in the psql view. Could be replaced by a search with [('company_id', '=', company_id),('partner_id', '=', ids[0])]
        wizard_partner_ids = [ids[0] * 10000 + company_id]
        followup_ids = self.pool.get('account_followup.followup').search(cr, uid, [('company_id', '=', company_id)], context=context)
        if not followup_ids:
            raise osv.except_osv(_('Error!'),_("There is no followup plan defined for the current company."))
        data = {
            'date': fields.date.today(),
            'followup_id': followup_ids[0],
        }
        #call the print overdue report on this partner
        return self.do_partner_print(cr, uid, wizard_partner_ids, data, context=context)
    def _get_amounts_and_date(self, cr, uid, ids, name, arg, context=None):
        '''
        Function that computes values for the followup functional fields. Note that 'payment_amount_due'
        is similar to 'credit' field on res.partner except it filters on user's company.
        '''
        res = {}
        company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
        current_date = fields.date.context_today(self, cr, uid, context=context)
        for partner in self.browse(cr, uid, ids, context=context):
            worst_due_date = False
            amount_due = amount_overdue = 0.0
            # Only this company's unreconciled receivable lines count.
            for aml in partner.unreconciled_aml_ids:
                if (aml.company_id == company):
                    date_maturity = aml.date_maturity or aml.date
                    # Track the earliest (worst) due date.
                    if not worst_due_date or date_maturity < worst_due_date:
                        worst_due_date = date_maturity
                    amount_due += aml.result
                    # Overdue = due date is today or already past.
                    if (date_maturity <= current_date):
                        amount_overdue += aml.result
            res[partner.id] = {'payment_amount_due': amount_due,
                               'payment_amount_overdue': amount_overdue,
                               'payment_earliest_due_date': worst_due_date}
        return res
    def _get_followup_overdue_query(self, cr, uid, args, overdue_only=False, context=None):
        '''
        This function is used to build the query and arguments to use when making a search on functional fields
            * payment_amount_due
            * payment_amount_overdue
        Basically, the query is exactly the same except that for overdue there is an extra clause in the WHERE.

        :param args: arguments given to the search in the usual domain notation (list of tuples)
        :param overdue_only: option to add the extra argument to filter on overdue accounting entries or not
        :returns: a tuple with
            * the query to execute as first element
            * the arguments for the execution of this query
        :rtype: (string, [])
        '''
        company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
        # Each domain tuple (field, operator, value) becomes one
        # "(SUM(bal2) <operator> %s)" clause; values are passed as parameters.
        having_where_clause = ' AND '.join(map(lambda x: '(SUM(bal2) %s %%s)' % (x[1]), args))
        having_values = [x[2] for x in args]
        query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)
        overdue_only_str = overdue_only and 'AND date_maturity <= NOW()' or ''
        return ('''SELECT pid AS partner_id, SUM(bal2) FROM
                    (SELECT CASE WHEN bal IS NOT NULL THEN bal
                    ELSE 0.0 END AS bal2, p.id as pid FROM
                    (SELECT (debit-credit) AS bal, partner_id
                    FROM account_move_line l
                    WHERE account_id IN
                            (SELECT id FROM account_account
                            WHERE type=\'receivable\' AND active)
                    ''' + overdue_only_str + '''
                    AND reconcile_id IS NULL
                    AND company_id = %s
                    AND ''' + query + ''') AS l
                    RIGHT JOIN res_partner p
                    ON p.id = partner_id ) AS pl
                    GROUP BY pid HAVING ''' + having_where_clause, [company_id] + having_values)
def _payment_overdue_search(self, cr, uid, obj, name, args, context=None):
if not args:
return []
query, query_args = self._get_followup_overdue_query(cr, uid, args, overdue_only=True, context=context)
cr.execute(query, query_args)
res = cr.fetchall()
if not res:
return [('id','=','0')]
return [('id','in', [x[0] for x in res])]
    def _payment_earliest_date_search(self, cr, uid, obj, name, args, context=None):
        """fnct_search for 'payment_earliest_due_date': partners whose
        earliest receivable due date matches the given domain."""
        if not args:
            return []
        company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
        # One "(MIN(l.date_maturity) <operator> %s)" clause per domain tuple.
        having_where_clause = ' AND '.join(map(lambda x: '(MIN(l.date_maturity) %s %%s)' % (x[1]), args))
        having_values = [x[2] for x in args]
        query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)
        cr.execute('SELECT partner_id FROM account_move_line l '\
                    'WHERE account_id IN '\
                        '(SELECT id FROM account_account '\
                        'WHERE type=\'receivable\' AND active) '\
                    'AND l.company_id = %s '
                    'AND reconcile_id IS NULL '\
                    'AND '+query+' '\
                    'AND partner_id IS NOT NULL '\
                    'GROUP BY partner_id HAVING '+ having_where_clause,
                     [company_id] + having_values)
        res = cr.fetchall()
        if not res:
            # No match: return a domain that selects nothing.
            return [('id','=','0')]
        return [('id','in', [x[0] for x in res])]
def _payment_due_search(self, cr, uid, obj, name, args, context=None):
if not args:
return []
query, query_args = self._get_followup_overdue_query(cr, uid, args, overdue_only=False, context=context)
cr.execute(query, query_args)
res = cr.fetchall()
if not res:
return [('id','=','0')]
return [('id','in', [x[0] for x in res])]
def _get_partners(self, cr, uid, ids, context=None):
#this function search for the partners linked to all account.move.line 'ids' that have been changed
partners = set()
for aml in self.browse(cr, uid, ids, context=context):
if aml.partner_id:
partners.add(aml.partner_id.id)
return list(partners)
_inherit = "res.partner"
_columns = {
'payment_responsible_id':fields.many2one('res.users', ondelete='set null', string='Follow-up Responsible',
help="Optionally you can assign a user to this field, which will make him responsible for the action.",
track_visibility="onchange", copy=False),
'payment_note':fields.text('Customer Payment Promise', help="Payment Note", track_visibility="onchange", copy=False),
'payment_next_action':fields.text('Next Action', copy=False,
help="This is the next action to be taken. It will automatically be set when the partner gets a follow-up level that requires a manual action. ",
track_visibility="onchange"),
'payment_next_action_date': fields.date('Next Action Date', copy=False,
help="This is when the manual follow-up is needed. "
"The date will be set to the current date when the partner "
"gets a follow-up level that requires a manual action. "
"Can be practical to set manually e.g. to see if he keeps "
"his promises."),
'unreconciled_aml_ids':fields.one2many('account.move.line', 'partner_id', domain=['&', ('reconcile_id', '=', False), '&',
('account_id.active','=', True), '&', ('account_id.type', '=', 'receivable'), ('state', '!=', 'draft')]),
'latest_followup_date':fields.function(_get_latest, method=True, type='date', string="Latest Follow-up Date",
help="Latest date that the follow-up level of the partner was changed",
store=False, multi="latest"),
'latest_followup_level_id':fields.function(_get_latest, method=True,
type='many2one', relation='account_followup.followup.line', string="Latest Follow-up Level",
help="The maximum follow-up level",
store={
'res.partner': (lambda self, cr, uid, ids, c: ids,[],10),
'account.move.line': (_get_partners, ['followup_line_id'], 10),
},
multi="latest"),
'latest_followup_level_id_without_lit':fields.function(_get_latest, method=True,
type='many2one', relation='account_followup.followup.line', string="Latest Follow-up Level without litigation",
help="The maximum follow-up level without taking into account the account move lines with litigation",
store={
'res.partner': (lambda self, cr, uid, ids, c: ids,[],10),
'account.move.line': (_get_partners, ['followup_line_id'], 10),
},
multi="latest"),
'payment_amount_due':fields.function(_get_amounts_and_date,
type='float', string="Amount Due",
store = False, multi="followup",
fnct_search=_payment_due_search),
'payment_amount_overdue':fields.function(_get_amounts_and_date,
type='float', string="Amount Overdue",
store = False, multi="followup",
fnct_search = _payment_overdue_search),
'payment_earliest_due_date':fields.function(_get_amounts_and_date,
type='date',
string = "Worst Due Date",
multi="followup",
fnct_search=_payment_earliest_date_search),
}
class account_config_settings(osv.TransientModel):
    """Accounting settings extension adding a shortcut to the payment
    follow-up level configuration."""
    _name = 'account.config.settings'
    _inherit = 'account.config.settings'

    def open_followup_level_form(self, cr, uid, ids, context=None):
        """Return an action opening the first follow-up definition in form view."""
        followup_obj = self.pool.get('account_followup.followup')
        followup_ids = followup_obj.search(cr, uid, [], context=context)
        action = {
            'type': 'ir.actions.act_window',
            'name': 'Payment Follow-ups',
            'res_model': 'account_followup.followup',
            'view_mode': 'form,tree',
        }
        # Point the action at the first existing follow-up record, if any.
        action['res_id'] = followup_ids[0] if followup_ids else False
        return action
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
alu042/edx-platform
|
openedx/core/djangoapps/content/course_overviews/management/commands/generate_course_overview.py
|
7
|
1608
|
"""
Command to load course overviews.
"""
import logging
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.django import modulestore
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
log = logging.getLogger(__name__)
class Command(BaseCommand):
    """
    Example usage:
        $ ./manage.py lms generate_course_overview --all --settings=devstack
        $ ./manage.py lms generate_course_overview 'edX/DemoX/Demo_Course' --settings=devstack
    """
    # NOTE(review): positional course ids are read from handle(*args), which
    # relies on Django's legacy ``args`` attribute support rather than an
    # add_arguments() positional -- confirm against the Django version in use.
    args = '<course_id course_id ...>'
    help = 'Generates and stores course overview for one or more courses.'
    def add_arguments(self, parser):
        """
        Add arguments to the command parser.
        """
        parser.add_argument(
            '--all',
            action='store_true',
            dest='all',
            default=False,
            help='Generate course overview for all courses.',
        )
    def handle(self, *args, **options):
        """Generate and store course overviews for the requested courses."""
        if options['all']:
            # Enumerate every course known to the modulestore.
            course_keys = [course.id for course in modulestore().get_course_summaries()]
        else:
            if len(args) < 1:
                raise CommandError('At least one course or --all must be specified.')
            try:
                course_keys = [CourseKey.from_string(arg) for arg in args]
            except InvalidKeyError:
                raise CommandError('Invalid key specified.')
        # Fetching the overviews creates/caches them as a side effect.
        CourseOverview.get_select_courses(course_keys)
|
agpl-3.0
|
woobe/h2o
|
py/test_junit.py
|
2
|
1458
|
import unittest, time, sys
import h2o
class TestJUnit(unittest.TestCase):
def test_A_all_junit(self):
try:
h2o.build_cloud(node_count=2, java_heap_GB=3)
# we don't have the port or ip configuration here
# that util/h2o.py does? Keep this in synch with spawn_h2o there.
# also don't have --nosigar here?
(ps, stdout, stderr) = h2o.spawn_cmd('junit', [
'java',
'-Xms3G',
'-Xmx3G',
'-Dh2o.arg.ice_root='+h2o.tmp_dir('ice.'),
'-Dh2o.arg.name='+h2o.cloud_name(),
'-Dh2o.arg.ip='+h2o.get_ip_address(),
'-Dh2o.arg.port=54666',
'-ea', '-jar', h2o.find_file('target/h2o.jar'),
'-mainClass', 'org.junit.runner.JUnitCore',
# The all test suite
'water.suites.AllTestsSuite'
])
rc = ps.wait(None)
out = file(stdout).read()
err = file(stderr).read()
if rc is None:
ps.terminate()
raise Exception("junit timed out.\nstdout:\n%s\n\nstderr:\n%s" % (out, err))
elif rc != 0:
raise Exception("junit failed.\nstdout:\n%s\n\nstderr:\n%s" % (out, err))
finally:
h2o.tear_down_cloud()
if __name__ == '__main__':
h2o.unit_main()
|
apache-2.0
|
MoritzS/django
|
tests/auth_tests/client.py
|
85
|
1414
|
import re
from django.contrib.auth.views import (
INTERNAL_RESET_SESSION_TOKEN, INTERNAL_RESET_URL_TOKEN,
)
from django.test import Client
def extract_token_from_url(url):
    """Return the token segment of a password-reset URL, or None."""
    match = re.search(r'/reset/.*/(.+?)/', url)
    return match.group(1) if match else None
class PasswordResetConfirmClient(Client):
    """
    Test client emulating PasswordResetConfirmView's redirect: the reset
    token from the URL is stashed in the session and the request is issued
    against the token-placeholder URL instead. This request puts 'my-token'
    in the session and redirects to '/reset/bla/set-password/':

    >>> client = PasswordResetConfirmClient()
    >>> client.get('/reset/bla/my-token/')
    """
    def _get_password_reset_confirm_redirect_url(self, url):
        token = extract_token_from_url(url)
        if not token:
            return url
        # Mimic the view: persist the real token in the session, then hit
        # the placeholder URL in its place.
        session = self.session
        session[INTERNAL_RESET_SESSION_TOKEN] = token
        session.save()
        return url.replace(token, INTERNAL_RESET_URL_TOKEN)

    def get(self, path, *args, **kwargs):
        return super().get(
            self._get_password_reset_confirm_redirect_url(path), *args, **kwargs)

    def post(self, path, *args, **kwargs):
        return super().post(
            self._get_password_reset_confirm_redirect_url(path), *args, **kwargs)
|
bsd-3-clause
|
TangHao1987/intellij-community
|
python/lib/Lib/quopri.py
|
424
|
6969
|
#! /usr/bin/env python
"""Conversions to/from quoted-printable transport encoding as per RFC 1521."""
# (Dec 1991 version).
__all__ = ["encode", "decode", "encodestring", "decodestring"]
ESCAPE = '='
MAXLINESIZE = 76
HEX = '0123456789ABCDEF'
EMPTYSTRING = ''

try:
    # Prefer the C-accelerated codec when available.
    from binascii import a2b_qp, b2a_qp
except ImportError:
    a2b_qp = None
    b2a_qp = None


def needsquoting(c, quotetabs, header):
    """Decide whether a particular character needs to be quoted.

    The 'quotetabs' flag indicates whether embedded tabs and spaces should be
    quoted.  Note that line-ending tabs and spaces are always encoded, as per
    RFC 1521.
    """
    # Spaces and tabs are quoted only when the caller asks for it.
    if c in ' \t':
        return quotetabs
    # In header mode '_' must be escaped because '_' encodes a space.
    if c == '_':
        return header
    # Always quote the escape character itself.
    if c == ESCAPE:
        return True
    # Quote anything outside printable ASCII.
    return not ' ' <= c <= '~'
def quote(c):
    """Quote a single character as '=' followed by two uppercase hex digits."""
    # Assumes an 8-bit character, as elsewhere in this module.
    return '=%02X' % ord(c)
def encode(input, output, quotetabs, header = 0):
"""Read 'input', apply quoted-printable encoding, and write to 'output'.
'input' and 'output' are files with readline() and write() methods.
The 'quotetabs' flag indicates whether embedded tabs and spaces should be
quoted. Note that line-ending tabs and spaces are always encoded, as per
RFC 1521.
The 'header' flag indicates whether we are encoding spaces as _ as per
RFC 1522.
"""
if b2a_qp is not None:
data = input.read()
odata = b2a_qp(data, quotetabs = quotetabs, header = header)
output.write(odata)
return
def write(s, output=output, lineEnd='\n'):
# RFC 1521 requires that the line ending in a space or tab must have
# that trailing character encoded.
if s and s[-1:] in ' \t':
output.write(s[:-1] + quote(s[-1]) + lineEnd)
elif s == '.':
output.write(quote(s) + lineEnd)
else:
output.write(s + lineEnd)
prevline = None
while 1:
line = input.readline()
if not line:
break
outline = []
# Strip off any readline induced trailing newline
stripped = ''
if line[-1:] == '\n':
line = line[:-1]
stripped = '\n'
# Calculate the un-length-limited encoded line
for c in line:
if needsquoting(c, quotetabs, header):
c = quote(c)
if header and c == ' ':
outline.append('_')
else:
outline.append(c)
# First, write out the previous line
if prevline is not None:
write(prevline)
# Now see if we need any soft line breaks because of RFC-imposed
# length limitations. Then do the thisline->prevline dance.
thisline = EMPTYSTRING.join(outline)
while len(thisline) > MAXLINESIZE:
# Don't forget to include the soft line break `=' sign in the
# length calculation!
write(thisline[:MAXLINESIZE-1], lineEnd='=\n')
thisline = thisline[MAXLINESIZE-1:]
# Write out the current line
prevline = thisline
# Write out the last line, without a trailing newline
if prevline is not None:
write(prevline, lineEnd=stripped)
def encodestring(s, quotetabs = 0, header = 0):
    """Encode the string 's' with quoted-printable and return the result.

    Uses the C implementation (binascii.b2a_qp) when available; otherwise
    falls back to the pure-Python encode() via in-memory files.
    """
    if b2a_qp is not None:
        return b2a_qp(s, quotetabs = quotetabs, header = header)
    from cStringIO import StringIO
    infp = StringIO(s)
    outfp = StringIO()
    encode(infp, outfp, quotetabs, header)
    return outfp.getvalue()
def decode(input, output, header = 0):
"""Read 'input', apply quoted-printable decoding, and write to 'output'.
'input' and 'output' are files with readline() and write() methods.
If 'header' is true, decode underscore as space (per RFC 1522)."""
if a2b_qp is not None:
data = input.read()
odata = a2b_qp(data, header = header)
output.write(odata)
return
new = ''
while 1:
line = input.readline()
if not line: break
i, n = 0, len(line)
if n > 0 and line[n-1] == '\n':
partial = 0; n = n-1
# Strip trailing whitespace
while n > 0 and line[n-1] in " \t\r":
n = n-1
else:
partial = 1
while i < n:
c = line[i]
if c == '_' and header:
new = new + ' '; i = i+1
elif c != ESCAPE:
new = new + c; i = i+1
elif i+1 == n and not partial:
partial = 1; break
elif i+1 < n and line[i+1] == ESCAPE:
new = new + ESCAPE; i = i+2
elif i+2 < n and ishex(line[i+1]) and ishex(line[i+2]):
new = new + chr(unhex(line[i+1:i+3])); i = i+3
else: # Bad escape sequence -- leave it in
new = new + c; i = i+1
if not partial:
output.write(new + '\n')
new = ''
if new:
output.write(new)
def decodestring(s, header = 0):
    """Decode the quoted-printable string 's' and return the result.

    Uses the C implementation (binascii.a2b_qp) when available; otherwise
    falls back to the pure-Python decode() via in-memory files.
    """
    if a2b_qp is not None:
        return a2b_qp(s, header = header)
    from cStringIO import StringIO
    infp = StringIO(s)
    outfp = StringIO()
    decode(infp, outfp, header = header)
    return outfp.getvalue()
# Other helper functions
def ishex(c):
    """Return true if the character 'c' is a hexadecimal digit."""
    # Membership test over all hex digits, both cases.
    return c in '0123456789abcdefABCDEF'
def unhex(s):
    """Get the integer value of a hexadecimal number."""
    value = 0
    for ch in s:
        if '0' <= ch <= '9':
            base = ord('0')
        elif 'a' <= ch <= 'f':
            base = ord('a') - 10
        elif 'A' <= ch <= 'F':
            base = ord('A') - 10
        else:
            # Stop at the first non-hex character (historical behaviour:
            # the partial value accumulated so far is returned silently).
            break
        value = value * 16 + (ord(ch) - base)
    return value
def main():
import sys
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'td')
except getopt.error, msg:
sys.stdout = sys.stderr
print msg
print "usage: quopri [-t | -d] [file] ..."
print "-t: quote tabs"
print "-d: decode; default encode"
sys.exit(2)
deco = 0
tabs = 0
for o, a in opts:
if o == '-t': tabs = 1
if o == '-d': deco = 1
if tabs and deco:
sys.stdout = sys.stderr
print "-t and -d are mutually exclusive"
sys.exit(2)
if not args: args = ['-']
sts = 0
for file in args:
if file == '-':
fp = sys.stdin
else:
try:
fp = open(file)
except IOError, msg:
sys.stderr.write("%s: can't open (%s)\n" % (file, msg))
sts = 1
continue
if deco:
decode(fp, sys.stdout)
else:
encode(fp, sys.stdout, tabs)
if fp is not sys.stdin:
fp.close()
if sts:
sys.exit(sts)
if __name__ == '__main__':
main()
|
apache-2.0
|
samfoo/servo
|
tests/wpt/css-tests/tools/wptserve/wptserve/pipes.py
|
180
|
13830
|
from cgi import escape
import gzip as gzip_module
import re
import time
import types
import uuid
from cStringIO import StringIO
def resolve_content(response):
    """Flatten response.iter_content() into a single byte string.

    Unicode content is encoded with the response's declared encoding so the
    pipes below always operate on bytes (Python 2 str).
    """
    rv = "".join(item for item in response.iter_content())
    if type(rv) == unicode:
        rv = rv.encode(response.encoding)
    return rv
class Pipeline(object):
    """A parsed chain of response-transforming pipe functions.

    ``pipes`` maps pipe names to callables; it is populated by the @pipe
    decorator defined below.
    """
    pipes = {}

    def __init__(self, pipe_string):
        self.pipe_functions = self.parse(pipe_string)

    def parse(self, pipe_string):
        """Parse e.g. "status(202)|gzip" into a list of (func, [args])."""
        functions = []
        for item in PipeTokenizer().tokenize(pipe_string):
            if not item:
                # The tokenizer terminates its stream with None.
                break
            if item[0] == "function":
                functions.append((self.pipes[item[1]], []))
            elif item[0] == "argument":
                # Arguments attach to the most recently seen function.
                functions[-1][1].append(item[1])
        return functions

    def __call__(self, request, response):
        # Apply each pipe in order, threading the response through.
        for func, args in self.pipe_functions:
            response = func(request, response, *args)
        return response
class PipeTokenizer(object):
    """Tokenizer for pipe strings like ``f1(arg1,arg2)|f2``.

    ``tokenize`` yields ("function", name) and ("argument", value) tuples
    and terminates the stream with a sentinel None.
    """
    def __init__(self):
        #This whole class can likely be replaced by some regexps
        self.state = None

    def tokenize(self, string):
        """Yield tokens for *string*, ending with a sentinel None."""
        self.string = string
        self.state = self.func_name_state
        self._index = 0
        while self.state:
            yield self.state()
        yield None

    def get_char(self):
        # Return the next character, or None once the input is exhausted.
        if self._index >= len(self.string):
            return None
        rv = self.string[self._index]
        self._index += 1
        return rv

    def func_name_state(self):
        rv = ""
        while True:
            char = self.get_char()
            if char is None:
                self.state = None
                if rv:
                    return ("function", rv)
                else:
                    return None
            elif char == "(":
                self.state = self.argument_state
                return ("function", rv)
            elif char == "|":
                if rv:
                    return ("function", rv)
            else:
                rv += char

    def argument_state(self):
        rv = ""
        while True:
            char = self.get_char()
            if char is None:
                self.state = None
                return ("argument", rv)
            elif char == "\\":
                # BUGFIX: a trailing backslash means get_escape() returns
                # None (no character left to escape); previously this was
                # appended with ``rv +=`` and raised TypeError before the
                # dead ``if rv is None`` check could run. End the argument
                # instead.
                escaped = self.get_escape()
                if escaped is None:
                    #This should perhaps be an error instead
                    self.state = None
                    return ("argument", rv)
                rv += escaped
            elif char == ",":
                return ("argument", rv)
            elif char == ")":
                self.state = self.func_name_state
                return ("argument", rv)
            else:
                rv += char

    def get_escape(self):
        # Map \n, \r, \t to their control characters; any other character
        # escapes to itself. Returns None at end of input.
        char = self.get_char()
        escapes = {"n": "\n",
                   "r": "\r",
                   "t": "\t"}
        return escapes.get(char, char)
class pipe(object):
    """Decorator registering a function as a named pipe in Pipeline.pipes.

    The positional arguments are converter callables applied to the raw
    string arguments from the pipe expression; wrap a converter in opt()
    to mark it (and everything after it) as optional.
    """
    def __init__(self, *arg_converters):
        self.arg_converters = arg_converters
        self.max_args = len(self.arg_converters)
        self.min_args = 0
        opt_seen = False
        for item in self.arg_converters:
            if not opt_seen:
                if isinstance(item, opt):
                    opt_seen = True
                else:
                    self.min_args += 1
            else:
                # Once an optional converter appears, all later converters
                # must be optional too.
                if not isinstance(item, opt):
                    raise ValueError("Non-optional argument cannot follow optional argument")

    def __call__(self, f):
        def inner(request, response, *args):
            if not (self.min_args <= len(args) <= self.max_args):
                raise ValueError("Expected between %d and %d args, got %d" %
                                 (self.min_args, self.max_args, len(args)))
            # Convert each raw string argument with its matching converter.
            # (The 'f' in this genexpr is scoped to the genexpr and does not
            # shadow the decorated function used on the next line.)
            arg_values = tuple(f(x) for f, x in zip(self.arg_converters, args))
            return f(request, response, *arg_values)
        Pipeline.pipes[f.__name__] = inner
        #We actually want the undecorated function in the main namespace
        return f
class opt(object):
    """Marker wrapper flagging a pipe argument converter as optional.

    Delegates conversion to the wrapped callable unchanged.
    """

    def __init__(self, f):
        self.f = f

    def __call__(self, arg):
        converter = self.f
        return converter(arg)
def nullable(func):
    """Wrap *func* so the literal string "null" (any case) maps to None."""
    def wrapper(arg):
        return None if arg.lower() == "null" else func(arg)
    return wrapper
def boolean(arg):
    """Convert "true"/"1" to True and "false"/"0" to False, case-insensitively.

    Raises ValueError for anything else.
    """
    lowered = arg.lower()
    if lowered in ("true", "1"):
        return True
    if lowered in ("false", "0"):
        return False
    raise ValueError
@pipe(int)
def status(request, response, code):
    """Alter the status code.

    :param code: Status code to use for the response."""
    # 'code' arrives already converted to int by the @pipe(int) decorator.
    response.status = code
    return response
@pipe(str, str, opt(boolean))
def header(request, response, name, value, append=False):
"""Set a HTTP header.
Replaces any existing HTTP header of the same name unless
append is set, in which case the header is appended without
replacement.
:param name: Name of the header to set.
:param value: Value to use for the header.
:param append: True if existing headers should not be replaced
"""
if not append:
response.headers.set(name, value)
else:
response.headers.append(name, value)
return response
@pipe(str)
def trickle(request, response, delays):
"""Send the response in parts, with time delays.
:param delays: A string of delays and amounts, in bytes, of the
response to send. Each component is separated by
a colon. Amounts in bytes are plain integers, whilst
delays are floats prefixed with a single d e.g.
d1:100:d2
Would cause a 1 second delay, would then send 100 bytes
of the file, and then cause a 2 second delay, before sending
the remainder of the file.
If the last token is of the form rN, instead of sending the
remainder of the file, the previous N instructions will be
repeated until the whole file has been sent e.g.
d1:100:d2:r2
Causes a delay of 1s, then 100 bytes to be sent, then a 2s delay
and then a further 100 bytes followed by a two second delay
until the response has been fully sent.
"""
def parse_delays():
parts = delays.split(":")
rv = []
for item in parts:
if item.startswith("d"):
item_type = "delay"
item = item[1:]
value = float(item)
elif item.startswith("r"):
item_type = "repeat"
value = int(item[1:])
if not value % 2 == 0:
raise ValueError
else:
item_type = "bytes"
value = int(item)
if len(rv) and rv[-1][0] == item_type:
rv[-1][1] += value
else:
rv.append((item_type, value))
return rv
delays = parse_delays()
if not delays:
return response
content = resolve_content(response)
modified_content = []
offset = [0]
def sleep(seconds):
def inner():
time.sleep(seconds)
return ""
return inner
def add_content(delays, repeat=False):
for i, (item_type, value) in enumerate(delays):
if item_type == "bytes":
modified_content.append(content[offset[0]:offset[0] + value])
offset[0] += value
elif item_type == "delay":
modified_content.append(sleep(value))
elif item_type == "repeat":
assert i == len(delays) - 1
while offset[0] < len(content):
add_content(delays[-(value + 1):-1], True)
if not repeat and offset[0] < len(content):
modified_content.append(content[offset[0]:])
add_content(delays)
response.content = modified_content
return response
@pipe(nullable(int), opt(nullable(int)))
def slice(request, response, start, end=None):
    """Send a byte range of the response body

    :param start: The starting offset. Follows python semantics including
                  negative numbers.

    :param end: The ending offset, again with python semantics and None
                (spelled "null" in a query string) to indicate the end of
                the file.
    """
    # Materialise the whole body once, then apply ordinary Python slicing.
    content = resolve_content(response)
    response.content = content[start:end]
    return response
class ReplacementTokenizer(object):
def ident(scanner, token):
return ("ident", token)
def index(scanner, token):
token = token[1:-1]
try:
token = int(token)
except ValueError:
token = unicode(token, "utf8")
return ("index", token)
def var(scanner, token):
token = token[:-1]
return ("var", token)
def tokenize(self, string):
return self.scanner.scan(string)[0]
scanner = re.Scanner([(r"\$\w+:", var),
(r"\$?\w+(?:\(\))?", ident),
(r"\[[^\]]*\]", index)])
class FirstWrapper(object):
    """Dict-like view over request parameters exposing only first values.

    Missing keys yield the empty string instead of raising KeyError.
    """

    def __init__(self, params):
        self.params = params

    def __getitem__(self, key):
        try:
            value = self.params.first(key)
        except KeyError:
            value = ""
        return value
@pipe()
def sub(request, response):
"""Substitute environment information about the server and request into the script.
The format is a very limited template language. Substitutions are
enclosed by {{ and }}. There are several avaliable substitutions:
host
A simple string value and represents the primary host from which the
tests are being run.
domains
A dictionary of available domains indexed by subdomain name.
ports
A dictionary of lists of ports indexed by protocol.
location
A dictionary of parts of the request URL. Valid keys are
'server, 'scheme', 'host', 'hostname', 'port', 'path' and 'query'.
'server' is scheme://host:port, 'host' is hostname:port, and query
includes the leading '?', but other delimiters are omitted.
headers
A dictionary of HTTP headers in the request.
GET
A dictionary of query parameters supplied with the request.
uuid()
A pesudo-random UUID suitable for usage with stash
So for example in a setup running on localhost with a www
subdomain and a http server on ports 80 and 81::
{{host}} => localhost
{{domains[www]}} => www.localhost
{{ports[http][1]}} => 81
It is also possible to assign a value to a variable name, which must start with
the $ character, using the ":" syntax e.g.
{{$id:uuid()}
Later substitutions in the same file may then refer to the variable
by name e.g.
{{$id}}
"""
content = resolve_content(response)
new_content = template(request, content)
response.content = new_content
return response
def template(request, content):
#TODO: There basically isn't any error handling here
tokenizer = ReplacementTokenizer()
variables = {}
def config_replacement(match):
content, = match.groups()
tokens = tokenizer.tokenize(content)
if tokens[0][0] == "var":
variable = tokens[0][1]
tokens = tokens[1:]
else:
variable = None
assert tokens[0][0] == "ident" and all(item[0] == "index" for item in tokens[1:]), tokens
field = tokens[0][1]
if field in variables:
value = variables[field]
elif field == "headers":
value = request.headers
elif field == "GET":
value = FirstWrapper(request.GET)
elif field in request.server.config:
value = request.server.config[tokens[0][1]]
elif field == "location":
value = {"server": "%s://%s:%s" % (request.url_parts.scheme,
request.url_parts.hostname,
request.url_parts.port),
"scheme": request.url_parts.scheme,
"host": "%s:%s" % (request.url_parts.hostname,
request.url_parts.port),
"hostname": request.url_parts.hostname,
"port": request.url_parts.port,
"path": request.url_parts.path,
"query": "?%s" % request.url_parts.query}
elif field == "uuid()":
value = str(uuid.uuid4())
else:
raise Exception("Undefined template variable %s" % field)
for item in tokens[1:]:
value = value[item[1]]
assert isinstance(value, (int,) + types.StringTypes), tokens
if variable is not None:
variables[variable] = value
#Should possibly support escaping for other contexts e.g. script
#TODO: read the encoding of the response
return escape(unicode(value)).encode("utf-8")
template_regexp = re.compile(r"{{([^}]*)}}")
new_content, count = template_regexp.subn(config_replacement, content)
return new_content
@pipe()
def gzip(request, response):
"""This pipe gzip-encodes response data.
It sets (or overwrites) these HTTP headers:
Content-Encoding is set to gzip
Content-Length is set to the length of the compressed content
"""
content = resolve_content(response)
response.headers.set("Content-Encoding", "gzip")
out = StringIO()
with gzip_module.GzipFile(fileobj=out, mode="w") as f:
f.write(content)
response.content = out.getvalue()
response.headers.set("Content-Length", len(response.content))
return response
|
mpl-2.0
|
alexrao/YouCompleteMe
|
third_party/ycmd/third_party/requests/requests/packages/urllib3/contrib/ntlmpool.py
|
1010
|
4507
|
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
(self.num_connections, self.host, self.authurl))
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % reshdr)
log.debug('Response data: %s [...]' % res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % dict(res.getheaders()))
log.debug('Response data: %s [...]' % res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
|
gpl-3.0
|
benanne/theano-tutorial
|
3_logistic_regression.py
|
2
|
1394
|
import theano
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
import load
# load data
x_train, t_train, x_test, t_test = load.cifar10(dtype=theano.config.floatX)
labels_test = np.argmax(t_test, axis=1)
# visualize data
plt.imshow(x_train[0].reshape(32, 32), cmap=plt.cm.gray)
# define symbolic Theano variables
x = T.matrix()
t = T.matrix()
# define model: logistic regression
def floatX(x):
return np.asarray(x, dtype=theano.config.floatX)
def init_weights(shape):
return theano.shared(floatX(np.random.randn(*shape) * 0.1))
def model(x, w):
return T.nnet.softmax(T.dot(x, w))
w = init_weights((32 * 32, 10))
p_y_given_x = model(x, w)
y = T.argmax(p_y_given_x, axis=1)
cost = T.mean(T.nnet.categorical_crossentropy(p_y_given_x, t))
g = T.grad(cost, w)
updates = [(w, w - g * 0.001)]
# compile theano functions
train = theano.function([x, t], cost, updates=updates)
predict = theano.function([x], y)
# train model
batch_size = 50
for i in range(100):
print "iteration %d" % (i + 1)
for start in range(0, len(x_train), batch_size):
x_batch = x_train[start:start + batch_size]
t_batch = t_train[start:start + batch_size]
cost = train(x_batch, t_batch)
predictions_test = predict(x_test)
accuracy = np.mean(predictions_test == labels_test)
print "accuracy: %.5f" % accuracy
print
|
mit
|
ateoto/django-recipebook
|
recipebook/admin.py
|
1
|
1161
|
from django.contrib import admin
from django.contrib.contenttypes.models import ContentType
from recipebook.models import (
Ingredient, Recipe, RecipeIngredient, IngredientLine
)
class IngredientLineInline(admin.TabularInline):
    """Inline editor for IngredientLine rows on the Recipe admin page."""
    model = IngredientLine
    # BUGFIX: previously only assigned in get_formset(), so
    # formfield_for_foreignkey() raised AttributeError when it ran first.
    # NOTE(review): storing per-request state on the (shared) admin instance
    # is not thread-safe -- confirm acceptable for this deployment.
    parent_obj = None

    def get_formset(self, request, obj=None, **kwargs):
        # Remember the object being edited so the FK queryset below can
        # exclude self-referencing ingredient lines.
        self.parent_obj = obj
        return super(IngredientLineInline, self).get_formset(request, obj, **kwargs)

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        if self.parent_obj:
            parent_ct = ContentType.objects.get_for_model(self.parent_obj.__class__)
            recipe_ct = ContentType.objects.get_for_model(Recipe)
            if parent_ct == recipe_ct:
                if db_field.name == 'ingredient':
                    # Prevent a recipe from listing itself as one of its
                    # own ingredients.
                    kwargs['queryset'] = RecipeIngredient.objects.all().exclude(content_type=recipe_ct, object_id=self.parent_obj.id)
        return super(IngredientLineInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class RecipeAdmin(admin.ModelAdmin):
    """Admin for Recipe with its ingredient lines editable inline."""
    inlines = (IngredientLineInline,)
admin.site.register(Ingredient)
admin.site.register(Recipe, RecipeAdmin)
|
mit
|
xuleiboy1234/autoTitle
|
tensorflow/tensorflow/contrib/saved_model/python/saved_model/signature_def_utils.py
|
113
|
1664
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SignatureDef utility functions implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def get_signature_def_by_key(meta_graph_def, signature_def_key):
  """Utility function to get a SignatureDef protocol buffer by its key.

  Args:
    meta_graph_def: MetaGraphDef protocol buffer with the SignatureDefMap to
      look up.
    signature_def_key: Key of the SignatureDef protocol buffer to find in the
      SignatureDefMap.

  Returns:
    A SignatureDef protocol buffer corresponding to the supplied key, if it
    exists.

  Raises:
    ValueError: If no entry corresponding to the supplied key is found in the
      SignatureDefMap of the MetaGraphDef.
  """
  signature_def_map = meta_graph_def.signature_def
  if signature_def_key not in signature_def_map:
    raise ValueError("No SignatureDef with key '%s' found in MetaGraphDef." %
                     signature_def_key)
  return signature_def_map[signature_def_key]
|
mit
|
henryfjordan/django
|
django/contrib/flatpages/models.py
|
318
|
1556
|
from __future__ import unicode_literals
from django.contrib.sites.models import Site
from django.core.urlresolvers import get_script_prefix
from django.db import models
from django.utils.encoding import iri_to_uri, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class FlatPage(models.Model):
    """A simple CMS page identified by its URL and attached to one or more sites."""
    url = models.CharField(_('URL'), max_length=100, db_index=True)
    title = models.CharField(_('title'), max_length=200)
    content = models.TextField(_('content'), blank=True)
    enable_comments = models.BooleanField(_('enable comments'), default=False)
    # Optional override of the template used to render this page.
    template_name = models.CharField(_('template name'), max_length=70, blank=True,
        help_text=_(
            "Example: 'flatpages/contact_page.html'. If this isn't provided, "
            "the system will use 'flatpages/default.html'."
        ),
    )
    registration_required = models.BooleanField(_('registration required'),
        help_text=_("If this is checked, only logged-in users will be able to view the page."),
        default=False)
    sites = models.ManyToManyField(Site, verbose_name=_('sites'))
    class Meta:
        db_table = 'django_flatpage'
        verbose_name = _('flat page')
        verbose_name_plural = _('flat pages')
        ordering = ('url',)
    def __str__(self):
        return "%s -- %s" % (self.url, self.title)
    def get_absolute_url(self):
        # Handle script prefix manually because we bypass reverse()
        return iri_to_uri(get_script_prefix().rstrip('/') + self.url)
|
bsd-3-clause
|
sanchezfauste/bPortal
|
portal/module_definitions/calls.py
|
1
|
1413
|
#######################################################################
# bPortal is a SuiteCRM portal written using django project.
# Copyright (C) 2017-2018 BTACTIC, SCCL
# Copyright (C) 2017-2018 Marc Sanchez Fauste
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#######################################################################
from .module_definition import ModuleDefinition
from .link_type import LinkType
class Calls(ModuleDefinition):
    """Definition of the SuiteCRM ``Calls`` module for the portal."""

    @property
    def name(self):
        """SuiteCRM name of this module."""
        return 'Calls'

    @property
    def contacts_link_type(self):
        """Contacts are joined to calls via a relationship link."""
        return LinkType.RELATIONSHIP

    @property
    def accounts_link_type(self):
        """Accounts are joined to calls via a relationship link."""
        return LinkType.RELATIONSHIP

    @property
    def contacts_link_name(self):
        """Relationship name used when traversing from a contact."""
        return 'calls'

    @property
    def accounts_link_name(self):
        """Relationship name used when traversing from an account."""
        return 'calls'
|
gpl-3.0
|
pvagner/orca
|
src/orca/scripts/apps/notify-osd/script.py
|
4
|
3108
|
# Orca
#
# Copyright 2009 Eitan Isaacson
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
""" Custom script for The notify-osd"""
__id__ = ""
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2009 Eitan Isaacson"
__license__ = "LGPL"
import orca.messages as messages
import orca.scripts.default as default
import orca.settings as settings
import orca.speech as speech
import orca.notification_messages as notification_messages
########################################################################
# #
# The notify-osd script class. #
# #
########################################################################
class Script(default.Script):
    """Custom orca script for notify-osd notification bubbles."""
    def onValueChanged(self, event):
        """Callback for accessible-value change events.

        Speaks and brailles the integer value of a gauge-style
        notification (e.g. a volume popup) when the source exposes one.
        """
        try:
            ivalue = event.source.queryValue()
            value = int(ivalue.currentValue)
        except NotImplementedError:
            # Source exposes no value interface; nothing to announce.
            value = -1
        if value >= 0:
            speech.speak(str(value), None, True)
            self.displayBrailleMessage("%s" % value,
                                       flashTime=settings.brailleFlashTime)
    def onNameChanged(self, event):
        """Callback for object:property-change:accessible-name events."""
        try:
            ivalue = event.source.queryValue()
            value = ivalue.currentValue
        except NotImplementedError:
            # No value interface: treat this as a plain text notification.
            value = -1
        utterances = []
        message = ""
        if value < 0:
            # Plain notification: announce a prefix in the system voice,
            # then the bubble's name + description in the default voice.
            utterances.append(messages.NOTIFICATION)
            utterances.append(self.voices.get(settings.SYSTEM_VOICE))
            message = '%s %s' % (event.source.name, event.source.description)
            utterances.append(message)
            utterances.append(self.voices.get(settings.DEFAULT_VOICE))
        else:
            # A gauge notification, e.g. the Ubuntu volume notification that
            # appears when you press the multimedia keys.
            #
            message = '%s %d' % (event.source.name, value)
            utterances.append(message)
            utterances.append(self.voices.get(settings.SYSTEM_VOICE))
        speech.speak(utterances, None, True)
        self.displayBrailleMessage(message, flashTime=settings.brailleFlashTime)
        notification_messages.saveMessage(message)
|
lgpl-2.1
|
Foxfanmedium/python_training
|
OnlineCoursera/mail_ru/Python_1/Week_3/playground/env/Lib/site-packages/pip/index.py
|
336
|
39950
|
"""Routines related to PyPI, indexes"""
from __future__ import absolute_import
import logging
import cgi
from collections import namedtuple
import itertools
import sys
import os
import re
import mimetypes
import posixpath
import warnings
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.compat import ipaddress
from pip.utils import (
cached_property, splitext, normalize_path,
ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS,
)
from pip.utils.deprecation import RemovedInPip10Warning
from pip.utils.logging import indent_log
from pip.utils.packaging import check_requires_python
from pip.exceptions import (
DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename,
UnsupportedWheel,
)
from pip.download import HAS_TLS, is_url, path_to_url, url_to_path
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import get_supported
from pip._vendor import html5lib, requests, six
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.packaging import specifiers
from pip._vendor.requests.exceptions import SSLError
from pip._vendor.distlib.compat import unescape
__all__ = ['FormatControl', 'fmt_ctl_handle_mutual_exclude', 'PackageFinder']
SECURE_ORIGINS = [
# protocol, hostname, port
# Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
("https", "*", "*"),
("*", "localhost", "*"),
("*", "127.0.0.0/8", "*"),
("*", "::1/128", "*"),
("file", "*", None),
# ssh is always secure.
("ssh", "*", "*"),
]
logger = logging.getLogger(__name__)
class InstallationCandidate(object):
    """A single installable (project, version, location) found on an index."""

    def __init__(self, project, version, location):
        self.project = project
        self.version = parse_version(version)
        self.location = location
        # Single tuple backing __hash__ and every rich comparison below.
        self._key = (self.project, self.version, self.location)

    def __repr__(self):
        return "<InstallationCandidate({0!r}, {1!r}, {2!r})>".format(
            self.project, self.version, self.location,
        )

    def __hash__(self):
        return hash(self._key)

    def _compare(self, other, method):
        # Comparisons are only meaningful against other candidates;
        # defer to the other operand for anything else.
        if not isinstance(other, InstallationCandidate):
            return NotImplemented
        return method(self._key, other._key)

    def __eq__(self, other):
        return self._compare(other, lambda s, o: s == o)

    def __ne__(self, other):
        return self._compare(other, lambda s, o: s != o)

    def __lt__(self, other):
        return self._compare(other, lambda s, o: s < o)

    def __le__(self, other):
        return self._compare(other, lambda s, o: s <= o)

    def __gt__(self, other):
        return self._compare(other, lambda s, o: s > o)

    def __ge__(self, other):
        return self._compare(other, lambda s, o: s >= o)
class PackageFinder(object):
    """This finds packages.
    This is meant to match easy_install's technique for looking for
    packages, by reading pages and looking for appropriate links.
    """
    def __init__(self, find_links, index_urls, allow_all_prereleases=False,
                 trusted_hosts=None, process_dependency_links=False,
                 session=None, format_control=None, platform=None,
                 versions=None, abi=None, implementation=None):
        """Create a PackageFinder.
        :param format_control: A FormatControl object or None. Used to control
            the selection of source packages / binary packages when consulting
            the index and links.
        :param platform: A string or None. If None, searches for packages
            that are supported by the current system. Otherwise, will find
            packages that can be built on the platform passed in. These
            packages will only be downloaded for distribution: they will
            not be built locally.
        :param versions: A list of strings or None. This is passed directly
            to pep425tags.py in the get_supported() method.
        :param abi: A string or None. This is passed directly
            to pep425tags.py in the get_supported() method.
        :param implementation: A string or None. This is passed directly
            to pep425tags.py in the get_supported() method.
        """
        if session is None:
            raise TypeError(
                "PackageFinder() missing 1 required keyword argument: "
                "'session'"
            )
        # Build find_links. If an argument starts with ~, it may be
        # a local file relative to a home directory. So try normalizing
        # it and if it exists, use the normalized version.
        # This is deliberately conservative - it might be fine just to
        # blindly normalize anything starting with a ~...
        self.find_links = []
        for link in find_links:
            if link.startswith('~'):
                new_link = normalize_path(link)
                if os.path.exists(new_link):
                    link = new_link
            self.find_links.append(link)
        self.index_urls = index_urls
        self.dependency_links = []
        # These are boring links that have already been logged somehow:
        self.logged_links = set()
        self.format_control = format_control or FormatControl(set(), set())
        # Domains that we won't emit warnings for when not using HTTPS
        self.secure_origins = [
            ("*", host, "*")
            for host in (trusted_hosts if trusted_hosts else [])
        ]
        # Do we want to allow _all_ pre-releases?
        self.allow_all_prereleases = allow_all_prereleases
        # Do we process dependency links?
        self.process_dependency_links = process_dependency_links
        # The Session we'll use to make requests
        self.session = session
        # The valid tags to check potential found wheel candidates against
        self.valid_tags = get_supported(
            versions=versions,
            platform=platform,
            abi=abi,
            impl=implementation,
        )
        # If we don't have TLS enabled, then WARN if anyplace we're looking
        # relies on TLS.
        if not HAS_TLS:
            for link in itertools.chain(self.index_urls, self.find_links):
                parsed = urllib_parse.urlparse(link)
                if parsed.scheme == "https":
                    logger.warning(
                        "pip is configured with locations that require "
                        "TLS/SSL, however the ssl module in Python is not "
                        "available."
                    )
                    break
    def add_dependency_links(self, links):
        """Record dependency links for later searching, if enabled."""
        # # FIXME: this shouldn't be global list this, it should only
        # # apply to requirements of the package that specifies the
        # # dependency_links value
        # # FIXME: also, we should track comes_from (i.e., use Link)
        if self.process_dependency_links:
            warnings.warn(
                "Dependency Links processing has been deprecated and will be "
                "removed in a future release.",
                RemovedInPip10Warning,
            )
            self.dependency_links.extend(links)
    @staticmethod
    def _sort_locations(locations, expand_dir=False):
        """
        Sort locations into "files" (archives) and "urls", and return
        a pair of lists (files,urls)
        """
        files = []
        urls = []
        # puts the url for the given file path into the appropriate list
        def sort_path(path):
            url = path_to_url(path)
            if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
                urls.append(url)
            else:
                files.append(url)
        for url in locations:
            is_local_path = os.path.exists(url)
            is_file_url = url.startswith('file:')
            if is_local_path or is_file_url:
                if is_local_path:
                    path = url
                else:
                    path = url_to_path(url)
                if os.path.isdir(path):
                    if expand_dir:
                        path = os.path.realpath(path)
                        for item in os.listdir(path):
                            sort_path(os.path.join(path, item))
                    elif is_file_url:
                        urls.append(url)
                elif os.path.isfile(path):
                    sort_path(path)
                else:
                    logger.warning(
                        "Url '%s' is ignored: it is neither a file "
                        "nor a directory.", url)
            elif is_url(url):
                # Only add url with clear scheme
                urls.append(url)
            else:
                logger.warning(
                    "Url '%s' is ignored. It is either a non-existing "
                    "path or lacks a specific scheme.", url)
        return files, urls
    def _candidate_sort_key(self, candidate):
        """
        Function used to generate link sort key for link tuples.
        The greater the return value, the more preferred it is.
        If not finding wheels, then sorted by version only.
        If finding wheels, then the sort order is by version, then:
        1. existing installs
        2. wheels ordered via Wheel.support_index_min(self.valid_tags)
        3. source archives
        Note: it was considered to embed this logic into the Link
        comparison operators, but then different sdist links
        with the same version, would have to be considered equal
        """
        support_num = len(self.valid_tags)
        if candidate.location.is_wheel:
            # can raise InvalidWheelFilename
            wheel = Wheel(candidate.location.filename)
            if not wheel.supported(self.valid_tags):
                raise UnsupportedWheel(
                    "%s is not a supported wheel for this platform. It "
                    "can't be sorted." % wheel.filename
                )
            pri = -(wheel.support_index_min(self.valid_tags))
        else:  # sdist
            pri = -(support_num)
        return (candidate.version, pri)
    def _validate_secure_origin(self, logger, location):
        """Return True if `location` has a secure/trusted origin, else warn."""
        # Determine if this url used a secure transport mechanism
        parsed = urllib_parse.urlparse(str(location))
        origin = (parsed.scheme, parsed.hostname, parsed.port)
        # The protocol to use to see if the protocol matches.
        # Don't count the repository type as part of the protocol: in
        # cases such as "git+ssh", only use "ssh". (I.e., Only verify against
        # the last scheme.)
        protocol = origin[0].rsplit('+', 1)[-1]
        # Determine if our origin is a secure origin by looking through our
        # hardcoded list of secure origins, as well as any additional ones
        # configured on this PackageFinder instance.
        for secure_origin in (SECURE_ORIGINS + self.secure_origins):
            if protocol != secure_origin[0] and secure_origin[0] != "*":
                continue
            try:
                # We need to do this decode dance to ensure that we have a
                # unicode object, even on Python 2.x.
                addr = ipaddress.ip_address(
                    origin[1]
                    if (
                        isinstance(origin[1], six.text_type) or
                        origin[1] is None
                    )
                    else origin[1].decode("utf8")
                )
                network = ipaddress.ip_network(
                    secure_origin[1]
                    if isinstance(secure_origin[1], six.text_type)
                    else secure_origin[1].decode("utf8")
                )
            except ValueError:
                # We don't have both a valid address or a valid network, so
                # we'll check this origin against hostnames.
                if (origin[1] and
                        origin[1].lower() != secure_origin[1].lower() and
                        secure_origin[1] != "*"):
                    continue
            else:
                # We have a valid address and network, so see if the address
                # is contained within the network.
                if addr not in network:
                    continue
            # Check to see if the port patches
            if (origin[2] != secure_origin[2] and
                    secure_origin[2] != "*" and
                    secure_origin[2] is not None):
                continue
            # If we've gotten here, then this origin matches the current
            # secure origin and we should return True
            return True
        # If we've gotten to this point, then the origin isn't secure and we
        # will not accept it as a valid location to search. We will however
        # log a warning that we are ignoring it.
        logger.warning(
            "The repository located at %s is not a trusted or secure host and "
            "is being ignored. If this repository is available via HTTPS it "
            "is recommended to use HTTPS instead, otherwise you may silence "
            "this warning and allow it anyways with '--trusted-host %s'.",
            parsed.hostname,
            parsed.hostname,
        )
        return False
    def _get_index_urls_locations(self, project_name):
        """Returns the locations found via self.index_urls
        Checks the url_name on the main (first in the list) index and
        use this url_name to produce all locations
        """
        def mkurl_pypi_url(url):
            loc = posixpath.join(
                url,
                urllib_parse.quote(canonicalize_name(project_name)))
            # For maximum compatibility with easy_install, ensure the path
            # ends in a trailing slash.  Although this isn't in the spec
            # (and PyPI can handle it without the slash) some other index
            # implementations might break if they relied on easy_install's
            # behavior.
            if not loc.endswith('/'):
                loc = loc + '/'
            return loc
        return [mkurl_pypi_url(url) for url in self.index_urls]
    def find_all_candidates(self, project_name):
        """Find all available InstallationCandidate for project_name
        This checks index_urls, find_links and dependency_links.
        All versions found are returned as an InstallationCandidate list.
        See _link_package_versions for details on which files are accepted
        """
        index_locations = self._get_index_urls_locations(project_name)
        index_file_loc, index_url_loc = self._sort_locations(index_locations)
        fl_file_loc, fl_url_loc = self._sort_locations(
            self.find_links, expand_dir=True)
        dep_file_loc, dep_url_loc = self._sort_locations(self.dependency_links)
        file_locations = (
            Link(url) for url in itertools.chain(
                index_file_loc, fl_file_loc, dep_file_loc)
        )
        # We trust every url that the user has given us whether it was given
        # via --index-url or --find-links
        # We explicitly do not trust links that came from dependency_links
        # We want to filter out any thing which does not have a secure origin.
        url_locations = [
            link for link in itertools.chain(
                (Link(url) for url in index_url_loc),
                (Link(url) for url in fl_url_loc),
                (Link(url) for url in dep_url_loc),
            )
            if self._validate_secure_origin(logger, link)
        ]
        logger.debug('%d location(s) to search for versions of %s:',
                     len(url_locations), project_name)
        for location in url_locations:
            logger.debug('* %s', location)
        canonical_name = canonicalize_name(project_name)
        formats = fmt_ctl_formats(self.format_control, canonical_name)
        search = Search(project_name, canonical_name, formats)
        find_links_versions = self._package_versions(
            # We trust every directly linked archive in find_links
            (Link(url, '-f') for url in self.find_links),
            search
        )
        page_versions = []
        for page in self._get_pages(url_locations, project_name):
            logger.debug('Analyzing links from page %s', page.url)
            with indent_log():
                page_versions.extend(
                    self._package_versions(page.links, search)
                )
        dependency_versions = self._package_versions(
            (Link(url) for url in self.dependency_links), search
        )
        if dependency_versions:
            logger.debug(
                'dependency_links found: %s',
                ', '.join([
                    version.location.url for version in dependency_versions
                ])
            )
        file_versions = self._package_versions(file_locations, search)
        if file_versions:
            file_versions.sort(reverse=True)
            logger.debug(
                'Local files found: %s',
                ', '.join([
                    url_to_path(candidate.location.url)
                    for candidate in file_versions
                ])
            )
        # This is an intentional priority ordering
        return (
            file_versions + find_links_versions + page_versions +
            dependency_versions
        )
    def find_requirement(self, req, upgrade):
        """Try to find a Link matching req
        Expects req, an InstallRequirement and upgrade, a boolean
        Returns a Link if found,
        Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
        """
        all_candidates = self.find_all_candidates(req.name)
        # Filter out anything which doesn't match our specifier
        compatible_versions = set(
            req.specifier.filter(
                # We turn the version object into a str here because otherwise
                # when we're debundled but setuptools isn't, Python will see
                # packaging.version.Version and
                # pkg_resources._vendor.packaging.version.Version as different
                # types. This way we'll use a str as a common data interchange
                # format. If we stop using the pkg_resources provided specifier
                # and start using our own, we can drop the cast to str().
                [str(c.version) for c in all_candidates],
                prereleases=(
                    self.allow_all_prereleases
                    if self.allow_all_prereleases else None
                ),
            )
        )
        applicable_candidates = [
            # Again, converting to str to deal with debundling.
            c for c in all_candidates if str(c.version) in compatible_versions
        ]
        if applicable_candidates:
            best_candidate = max(applicable_candidates,
                                 key=self._candidate_sort_key)
        else:
            best_candidate = None
        if req.satisfied_by is not None:
            installed_version = parse_version(req.satisfied_by.version)
        else:
            installed_version = None
        if installed_version is None and best_candidate is None:
            logger.critical(
                'Could not find a version that satisfies the requirement %s '
                '(from versions: %s)',
                req,
                ', '.join(
                    sorted(
                        set(str(c.version) for c in all_candidates),
                        key=parse_version,
                    )
                )
            )
            raise DistributionNotFound(
                'No matching distribution found for %s' % req
            )
        best_installed = False
        if installed_version and (
                best_candidate is None or
                best_candidate.version <= installed_version):
            best_installed = True
        if not upgrade and installed_version is not None:
            if best_installed:
                logger.debug(
                    'Existing installed version (%s) is most up-to-date and '
                    'satisfies requirement',
                    installed_version,
                )
            else:
                logger.debug(
                    'Existing installed version (%s) satisfies requirement '
                    '(most up-to-date version is %s)',
                    installed_version,
                    best_candidate.version,
                )
            return None
        if best_installed:
            # We have an existing version, and its the best version
            logger.debug(
                'Installed version (%s) is most up-to-date (past versions: '
                '%s)',
                installed_version,
                ', '.join(sorted(compatible_versions, key=parse_version)) or
                "none",
            )
            raise BestVersionAlreadyInstalled
        logger.debug(
            'Using version %s (newest of versions: %s)',
            best_candidate.version,
            ', '.join(sorted(compatible_versions, key=parse_version))
        )
        return best_candidate.location
    def _get_pages(self, locations, project_name):
        """
        Yields (page, page_url) from the given locations, skipping
        locations that have errors.
        """
        seen = set()
        for location in locations:
            if location in seen:
                continue
            seen.add(location)
            page = self._get_page(location)
            if page is None:
                continue
            yield page
    # Matches a trailing "-pyX.Y" marker on a version string.
    _py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
    def _sort_links(self, links):
        """
        Returns elements of links in order, non-egg links first, egg links
        second, while eliminating duplicates
        """
        eggs, no_eggs = [], []
        seen = set()
        for link in links:
            if link not in seen:
                seen.add(link)
                if link.egg_fragment:
                    eggs.append(link)
                else:
                    no_eggs.append(link)
        return no_eggs + eggs
    def _package_versions(self, links, search):
        """Evaluate each link and collect the resulting candidates."""
        result = []
        for link in self._sort_links(links):
            v = self._link_package_versions(link, search)
            if v is not None:
                result.append(v)
        return result
    def _log_skipped_link(self, link, reason):
        """Log a skipped link once; remember it to avoid duplicate output."""
        if link not in self.logged_links:
            logger.debug('Skipping link %s; %s', link, reason)
            self.logged_links.add(link)
    def _link_package_versions(self, link, search):
        """Return an InstallationCandidate or None"""
        version = None
        if link.egg_fragment:
            egg_info = link.egg_fragment
            ext = link.ext
        else:
            egg_info, ext = link.splitext()
            if not ext:
                self._log_skipped_link(link, 'not a file')
                return
            if ext not in SUPPORTED_EXTENSIONS:
                self._log_skipped_link(
                    link, 'unsupported archive format: %s' % ext)
                return
            if "binary" not in search.formats and ext == wheel_ext:
                self._log_skipped_link(
                    link, 'No binaries permitted for %s' % search.supplied)
                return
            if "macosx10" in link.path and ext == '.zip':
                self._log_skipped_link(link, 'macosx10 one')
                return
            if ext == wheel_ext:
                try:
                    wheel = Wheel(link.filename)
                except InvalidWheelFilename:
                    self._log_skipped_link(link, 'invalid wheel filename')
                    return
                if canonicalize_name(wheel.name) != search.canonical:
                    self._log_skipped_link(
                        link, 'wrong project name (not %s)' % search.supplied)
                    return
                if not wheel.supported(self.valid_tags):
                    self._log_skipped_link(
                        link, 'it is not compatible with this Python')
                    return
                version = wheel.version
        # This should be up by the search.ok_binary check, but see issue 2700.
        if "source" not in search.formats and ext != wheel_ext:
            self._log_skipped_link(
                link, 'No sources permitted for %s' % search.supplied)
            return
        if not version:
            version = egg_info_matches(egg_info, search.supplied, link)
        if version is None:
            self._log_skipped_link(
                link, 'wrong project name (not %s)' % search.supplied)
            return
        match = self._py_version_re.search(version)
        if match:
            version = version[:match.start()]
            py_version = match.group(1)
            if py_version != sys.version[:3]:
                self._log_skipped_link(
                    link, 'Python version is incorrect')
                return
        try:
            support_this_python = check_requires_python(link.requires_python)
        except specifiers.InvalidSpecifier:
            logger.debug("Package %s has an invalid Requires-Python entry: %s",
                         link.filename, link.requires_python)
            support_this_python = True
        if not support_this_python:
            logger.debug("The package %s is incompatible with the python"
                         "version in use. Acceptable python versions are:%s",
                         link, link.requires_python)
            return
        logger.debug('Found link %s, version: %s', link, version)
        return InstallationCandidate(search.supplied, version, link)
    def _get_page(self, link):
        """Fetch and parse `link` as an HTML page (None on failure)."""
        return HTMLPage.get_page(link, session=self.session)
def egg_info_matches(
        egg_info, search_name, link,
        _egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)):
    """Pull the version part out of a string.
    :param egg_info: The string to parse. E.g. foo-2.1
    :param search_name: The name of the package this belongs to. None to
        infer the name. Note that this cannot unambiguously parse strings
        like foo-2-2 which might be foo, 2-2 or foo-2, 2.
    :param link: The link the string came from, for logging on failure.
    """
    m = _egg_info_re.search(egg_info)
    if m is None:
        logger.debug('Could not parse version from link: %s', link)
        return None
    matched = m.group(0)
    if search_name is None:
        # No project name supplied: everything from the first dash on is
        # treated as the version part.
        return matched[matched.index('-'):]
    # pkg_resources "safe" names use dashes, so normalize before comparing.
    normalized = matched.lower().replace('_', '-')
    prefix = search_name.lower() + "-"
    if normalized.startswith(prefix):
        return matched[len(prefix):]
    return None
class HTMLPage(object):
    """Represents one page, along with its URL"""
    def __init__(self, content, url, headers=None):
        """Parse `content` with html5lib, honoring any declared charset."""
        # Determine if we have any encoding information in our headers
        encoding = None
        if headers and "Content-Type" in headers:
            content_type, params = cgi.parse_header(headers["Content-Type"])
            if "charset" in params:
                encoding = params['charset']
        self.content = content
        self.parsed = html5lib.parse(
            self.content,
            transport_encoding=encoding,
            namespaceHTMLElements=False,
        )
        self.url = url
        self.headers = headers
    def __str__(self):
        return self.url
    @classmethod
    def get_page(cls, link, skip_archives=True, session=None):
        """Fetch `link` and return an HTMLPage, or None if not fetchable HTML."""
        if session is None:
            raise TypeError(
                "get_page() missing 1 required keyword argument: 'session'"
            )
        url = link.url
        url = url.split('#', 1)[0]
        # Check for VCS schemes that do not support lookup as web pages.
        from pip.vcs import VcsSupport
        for scheme in VcsSupport.schemes:
            if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
                logger.debug('Cannot look at %s URL %s', scheme, link)
                return None
        try:
            if skip_archives:
                filename = link.filename
                for bad_ext in ARCHIVE_EXTENSIONS:
                    if filename.endswith(bad_ext):
                        content_type = cls._get_content_type(
                            url, session=session,
                        )
                        if content_type.lower().startswith('text/html'):
                            break
                        else:
                            logger.debug(
                                'Skipping page %s because of Content-Type: %s',
                                link,
                                content_type,
                            )
                            return
            logger.debug('Getting page %s', url)
            # Tack index.html onto file:// URLs that point to directories
            (scheme, netloc, path, params, query, fragment) = \
                urllib_parse.urlparse(url)
            if (scheme == 'file' and
                    os.path.isdir(urllib_request.url2pathname(path))):
                # add trailing slash if not present so urljoin doesn't trim
                # final segment
                if not url.endswith('/'):
                    url += '/'
                url = urllib_parse.urljoin(url, 'index.html')
                logger.debug(' file: URL is directory, getting %s', url)
            resp = session.get(
                url,
                headers={
                    "Accept": "text/html",
                    "Cache-Control": "max-age=600",
                },
            )
            resp.raise_for_status()
            # The check for archives above only works if the url ends with
            # something that looks like an archive. However that is not a
            # requirement of an url. Unless we issue a HEAD request on every
            # url we cannot know ahead of time for sure if something is HTML
            # or not. However we can check after we've downloaded it.
            content_type = resp.headers.get('Content-Type', 'unknown')
            if not content_type.lower().startswith("text/html"):
                logger.debug(
                    'Skipping page %s because of Content-Type: %s',
                    link,
                    content_type,
                )
                return
            inst = cls(resp.content, resp.url, resp.headers)
        except requests.HTTPError as exc:
            cls._handle_fail(link, exc, url)
        except SSLError as exc:
            reason = ("There was a problem confirming the ssl certificate: "
                      "%s" % exc)
            cls._handle_fail(link, reason, url, meth=logger.info)
        except requests.ConnectionError as exc:
            cls._handle_fail(link, "connection error: %s" % exc, url)
        except requests.Timeout:
            cls._handle_fail(link, "timed out", url)
        else:
            return inst
    @staticmethod
    def _handle_fail(link, reason, url, meth=None):
        """Log a fetch failure at the given log method (default: debug)."""
        if meth is None:
            meth = logger.debug
        meth("Could not fetch URL %s: %s - skipping", link, reason)
    @staticmethod
    def _get_content_type(url, session):
        """Get the Content-Type of the given url, using a HEAD request"""
        scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
        if scheme not in ('http', 'https'):
            # FIXME: some warning or something?
            # assertion error?
            return ''
        resp = session.head(url, allow_redirects=True)
        resp.raise_for_status()
        return resp.headers.get("Content-Type", "")
    @cached_property
    def base_url(self):
        """Effective base URL: the page's <base href> if present, else its URL."""
        bases = [
            x for x in self.parsed.findall(".//base")
            if x.get("href") is not None
        ]
        if bases and bases[0].get("href"):
            return bases[0].get("href")
        else:
            return self.url
    @property
    def links(self):
        """Yields all links in the page"""
        for anchor in self.parsed.findall(".//a"):
            if anchor.get("href"):
                href = anchor.get("href")
                url = self.clean_link(
                    urllib_parse.urljoin(self.base_url, href)
                )
                pyrequire = anchor.get('data-requires-python')
                pyrequire = unescape(pyrequire) if pyrequire else None
                yield Link(url, self, requires_python=pyrequire)
    _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
    def clean_link(self, url):
        """Makes sure a link is fully encoded. That is, if a ' ' shows up in
        the link, it will be rewritten to %20 (while not over-quoting
        % or other characters)."""
        return self._clean_re.sub(
            lambda match: '%%%2x' % ord(match.group(0)), url)
class Link(object):
def __init__(self, url, comes_from=None, requires_python=None):
"""
Object representing a parsed link from https://pypi.python.org/simple/*
url:
url of the resource pointed to (href of the link)
comes_from:
instance of HTMLPage where the link was found, or string.
requires_python:
String containing the `Requires-Python` metadata field, specified
in PEP 345. This may be specified by a data-requires-python
attribute in the HTML link tag, as described in PEP 503.
"""
# url can be a UNC windows share
if url.startswith('\\\\'):
url = path_to_url(url)
self.url = url
self.comes_from = comes_from
self.requires_python = requires_python if requires_python else None
def __str__(self):
if self.requires_python:
rp = ' (requires-python:%s)' % self.requires_python
else:
rp = ''
if self.comes_from:
return '%s (from %s)%s' % (self.url, self.comes_from, rp)
else:
return str(self.url)
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url == other.url
def __ne__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url != other.url
def __lt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url < other.url
def __le__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url <= other.url
def __gt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url > other.url
def __ge__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url >= other.url
def __hash__(self):
return hash(self.url)
@property
def filename(self):
_, netloc, path, _, _ = urllib_parse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
name = urllib_parse.unquote(name)
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urllib_parse.urlsplit(self.url)[0]
@property
def netloc(self):
return urllib_parse.urlsplit(self.url)[1]
@property
def path(self):
return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2])
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def ext(self):
return self.splitext()[1]
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url)
return urllib_parse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'[#&]egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)')
@property
def subdirectory_fragment(self):
match = self._subdirectory_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_hash_re = re.compile(
r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
)
@property
def hash(self):
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
    @property
    def is_wheel(self):
        # True when the extension equals the module-level ``wheel_ext``
        # constant (defined elsewhere in this file; presumably ".whl").
        return self.ext == wheel_ext
@property
def is_artifact(self):
"""
Determines if this points to an actual artifact (e.g. a tarball) or if
it points to an "abstract" thing like a path or a VCS location.
"""
from pip.vcs import vcs
if self.scheme in vcs.all_schemes:
return False
return True
# Pair of package-name sets governing which distribution formats pip may
# use; see the docstring below for the mutual-exclusion rules.
FormatControl = namedtuple('FormatControl', 'no_binary only_binary')
"""This object has two fields, no_binary and only_binary.
If a field is falsy, it isn't set. If it is {':all:'}, it should match all
packages except those listed in the other field. Only one field can be set
to {':all:'} at a time. The rest of the time exact package name matches
are listed, with any given package only showing up in one field at a time.
"""
def fmt_ctl_handle_mutual_exclude(value, target, other):
    """Merge comma-separated ``value`` into ``target`` while keeping it
    mutually exclusive with ``other``.

    ``:all:`` clears both sets and claims everything for ``target``;
    a later ``:none:`` resets ``target`` again.  Plain names are
    canonicalized, removed from ``other`` and added to ``target``.
    """
    names = value.split(',')
    while ':all:' in names:
        other.clear()
        target.clear()
        target.add(':all:')
        # Drop everything up to and including the first ':all:'.
        del names[:names.index(':all:') + 1]
        if ':none:' not in names:
            # Without a none, we want to discard everything as :all: covers it
            return
    for name in names:
        if name == ':none:':
            target.clear()
            continue
        canonical = canonicalize_name(name)
        other.discard(canonical)
        target.add(canonical)
def fmt_ctl_formats(fmt_ctl, canonical_name):
    """Return the frozenset of formats ('binary'/'source') allowed for
    ``canonical_name`` under ``fmt_ctl``; exact-name entries take
    precedence over ':all:' wildcards."""
    allowed = {"binary", "source"}
    if canonical_name in fmt_ctl.only_binary:
        allowed.discard('source')
    elif canonical_name in fmt_ctl.no_binary:
        allowed.discard('binary')
    elif ':all:' in fmt_ctl.only_binary:
        allowed.discard('source')
    elif ':all:' in fmt_ctl.no_binary:
        allowed.discard('binary')
    return frozenset(allowed)
def fmt_ctl_no_binary(fmt_ctl):
    # Equivalent to ``--no-binary :all:``: forbid binaries for every
    # package, clearing any previous only-binary selections.
    fmt_ctl_handle_mutual_exclude(
        ':all:', fmt_ctl.no_binary, fmt_ctl.only_binary)
def fmt_ctl_no_use_wheel(fmt_ctl):
    # Legacy handler for the deprecated --no-use-wheel flag: identical to
    # --no-binary :all:, plus a deprecation warning attributed to the
    # caller's caller (stacklevel=2).
    fmt_ctl_no_binary(fmt_ctl)
    warnings.warn(
        '--no-use-wheel is deprecated and will be removed in the future. '
        ' Please use --no-binary :all: instead.', RemovedInPip10Warning,
        stacklevel=2)
Search = namedtuple('Search', 'supplied canonical formats')
"""Capture key aspects of a search.
:attribute supplied: The user supplied package.
:attribute canonical: The canonical package name.
:attribute formats: The formats allowed for this package. Should be a set
with 'binary' or 'source' or both in it.
"""
|
apache-2.0
|
joebowen/LogMyRocket_API
|
LogMyRocket/libraries/sys_packages/requests/requests/packages/chardet/hebrewprober.py
|
2929
|
13359
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contain special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letters scores maintained and Both
# model probers scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
# windows-1255 / ISO-8859-8 code points of interest
# Final/normal pairs for the five Hebrew letters that have a distinct
# word-final form; both charsets map these letters identically.
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
# Charset names reported for the two directionality variants.
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
    """Decide between Logical (windows-1255) and Visual (ISO-8859-8) Hebrew.

    This prober never identifies a charset by itself; it accumulates
    final-letter position evidence while the two Win1255 model probers
    (attached via set_model_probers) score the text, and combines both
    kinds of evidence in get_charset_name().  See the long module
    comment above for the full rationale.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        # The logical/visual model probers are attached later by the
        # group prober via set_model_probers().
        self._mLogicalProber = None
        self._mVisualProber = None
        self.reset()

    def reset(self):
        """Clear accumulated scores and the two-character lookbehind."""
        self._mFinalCharLogicalScore = 0
        self._mFinalCharVisualScore = 0
        # The two last characters seen in the previous buffer,
        # mPrev and mBeforePrev are initialized to space in order to simulate
        # a word delimiter at the beginning of the data
        self._mPrev = ' '
        self._mBeforePrev = ' '

    # These probers are owned by the group prober.
    def set_model_probers(self, logicalProber, visualProber):
        """Attach the logical and visual Win1255 model probers."""
        self._mLogicalProber = logicalProber
        self._mVisualProber = visualProber

    def is_final(self, c):
        """True when ``c`` is one of the five Hebrew final-form letters."""
        return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
                               FINAL_TSADI]

    def is_non_final(self, c):
        """True when ``c`` is a normal-form letter usable as evidence
        (Tsadi is deliberately excluded; see below)."""
        # The normal Tsadi is not a good Non-Final letter due to words like
        # 'lechotet' (to chat) containing an apostrophe after the tsadi. This
        # apostrophe is converted to a space in FilterWithoutEnglishLetters
        # causing the Non-Final tsadi to appear at an end of a word even
        # though this is not the case in the original text.
        # The letters Pe and Kaf rarely display a related behavior of not being
        # a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak'
        # for example legally end with a Non-Final Pe or Kaf. However, the
        # benefit of these letters as Non-Final letters outweighs the damage
        # since these words are quite rare.
        return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]

    def feed(self, aBuf):
        """Accumulate final-letter evidence from ``aBuf`` and return the
        detection state (eDetecting, or eNotMe once both model probers
        have given up)."""
        # Final letter analysis for logical-visual decision.
        # Look for evidence that the received buffer is either logical Hebrew
        # or visual Hebrew.
        # The following cases are checked:
        # 1) A word longer than 1 letter, ending with a final letter. This is
        #    an indication that the text is laid out "naturally" since the
        #    final letter really appears at the end. +1 for logical score.
        # 2) A word longer than 1 letter, ending with a Non-Final letter. In
        #    normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
        #    should not end with the Non-Final form of that letter. Exceptions
        #    to this rule are mentioned above in isNonFinal(). This is an
        #    indication that the text is laid out backwards. +1 for visual
        #    score
        # 3) A word longer than 1 letter, starting with a final letter. Final
        #    letters should not appear at the beginning of a word. This is an
        #    indication that the text is laid out backwards. +1 for visual
        #    score.
        #
        # The visual score and logical score are accumulated throughout the
        # text and are finally checked against each other in GetCharSetName().
        # No checking for final letters in the middle of words is done since
        # that case is not an indication for either Logical or Visual text.
        #
        # We automatically filter out all 7-bit characters (replace them with
        # spaces) so the word boundary detection works properly. [MAP]
        if self.get_state() == eNotMe:
            # Both model probers say it's not them. No reason to continue.
            return eNotMe

        aBuf = self.filter_high_bit_only(aBuf)

        for cur in aBuf:
            if cur == ' ':
                # We stand on a space - a word just ended
                if self._mBeforePrev != ' ':
                    # next-to-last char was not a space so self._mPrev is not a
                    # 1 letter word
                    if self.is_final(self._mPrev):
                        # case (1) [-2:not space][-1:final letter][cur:space]
                        self._mFinalCharLogicalScore += 1
                    elif self.is_non_final(self._mPrev):
                        # case (2) [-2:not space][-1:Non-Final letter][
                        #  cur:space]
                        self._mFinalCharVisualScore += 1
            else:
                # Not standing on a space
                if ((self._mBeforePrev == ' ') and
                        (self.is_final(self._mPrev)) and (cur != ' ')):
                    # case (3) [-2:space][-1:final letter][cur:not space]
                    self._mFinalCharVisualScore += 1
            self._mBeforePrev = self._mPrev
            self._mPrev = cur

        # Forever detecting, till the end or until both model probers return
        # eNotMe (handled above)
        return eDetecting

    def get_charset_name(self):
        """Combine final-letter and model-prober evidence into a final
        charset name (Logical wins ties)."""
        # Make the decision: is it Logical or Visual?
        # If the final letter score distance is dominant enough, rely on it.
        finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
        if finalsub >= MIN_FINAL_CHAR_DISTANCE:
            return LOGICAL_HEBREW_NAME
        if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
            return VISUAL_HEBREW_NAME

        # It's not dominant enough, try to rely on the model scores instead.
        modelsub = (self._mLogicalProber.get_confidence()
                    - self._mVisualProber.get_confidence())
        if modelsub > MIN_MODEL_DISTANCE:
            return LOGICAL_HEBREW_NAME
        if modelsub < -MIN_MODEL_DISTANCE:
            return VISUAL_HEBREW_NAME

        # Still no good, back to final letter distance, maybe it'll save the
        # day.
        if finalsub < 0.0:
            return VISUAL_HEBREW_NAME

        # (finalsub > 0 - Logical) or (don't know what to do) default to
        # Logical.
        return LOGICAL_HEBREW_NAME

    def get_state(self):
        """eNotMe only when both attached model probers have given up."""
        # Remain active as long as any of the model probers are active.
        if (self._mLogicalProber.get_state() == eNotMe) and \
           (self._mVisualProber.get_state() == eNotMe):
            return eNotMe
        return eDetecting
|
gpl-3.0
|
hungtt57/matchmaker
|
lib/python2.7/site-packages/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py
|
2360
|
3778
|
"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
__version__ = '3.4.0.2'
class CertificateError(ValueError):
    """Raised when a certificate does not match the expected hostname."""
    pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
    rules are followed, but IP addresses are not accepted for *hostname*.

    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate")

    dnsnames = []
    for key, value in cert.get('subjectAltName', ()):
        if key == 'DNS':
            if _dnsname_match(value, hostname):
                return
            dnsnames.append(value)

    if not dnsnames:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_match(value, hostname):
                        return
                    dnsnames.append(value)

    if len(dnsnames) > 1:
        raise CertificateError("hostname %r "
                               "doesn't match either of %s"
                               % (hostname, ', '.join(map(repr, dnsnames))))
    if len(dnsnames) == 1:
        raise CertificateError("hostname %r "
                               "doesn't match %r"
                               % (hostname, dnsnames[0]))
    raise CertificateError("no appropriate commonName or "
                           "subjectAltName fields were found")
|
mit
|
rpatel3001/RU-Interested
|
serve.py
|
1
|
6781
|
from flask import *
from flask_wtf import FlaskForm
from flask_bootstrap import Bootstrap
from wtforms import validators, TextField, IntegerField, SubmitField, SelectField, SelectMultipleField
from datetime import datetime
import psycopg2
import os
import urllib.parse as urlparse
import subprocess
import time
from math import floor, ceil
daysOfWeek = ["M","M","T","W","TH","F","S"]
#init postgresql table
if os.environ.get('HEROKU'):
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(os.environ["DATABASE_URL"])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
else:
conn = psycopg2.connect("dbname=database user=postgres")
app = Flask(__name__)
app.secret_key = "dsvasdvavasverbijbiujrenv0982ygf7328ibh"
Bootstrap(app)
cur = conn.cursor()
cur.execute("SELECT * FROM rooms")
temp = cur.fetchall()
rooms = [dict(zip(("building","campus", "buildingcode", "roomnum", "capacity"),t)) for t in temp]
buildings = set([(x["building"], x["buildingcode"]) for x in rooms])
buildings = [{"name":x[0], "code":x[1]} for x in buildings]
cur.execute("SELECT * FROM departments")
temp = cur.fetchall()
departments = [dict(zip(("name","code"),t)) for t in temp]
def get_classes(campus, day, start, end, reqb=[''], reqs=['']):
c = conn.cursor()
c.execute("SELECT * FROM courses WHERE starttime BETWEEN %s AND %s AND campus = %s AND day = %s ", (start, end, campus, day))
temp = c.fetchall()
classes = [dict(zip(("title","room","department","day","startTime","endTime", "building","deptcode","coursecode","campus"),r)) for r in temp]
classes = [x for x in classes if (reqs == [''] or x["deptcode"] in reqs) and (reqb == [''] or x["building"] in reqb)]
for c in classes:
if c["startTime"] > 1300:
c["startTime"] = str(c["startTime"]-1200)
c["startTime"] = c["startTime"][:-2] + ":" + c["startTime"][-2:] + " PM"
else:
c["startTime"] = str(c["startTime"])
c["startTime"] = c["startTime"][:-2] + ":" + c["startTime"][-2:] + " AM"
if c["endTime"] > 1300:
c["endTime"] = str(c["endTime"]-1200)
c["endTime"] = c["endTime"][:-2] + ":" + c["endTime"][-2:] + " PM"
else:
c["endTime"] = str(c["endTime"])
c["endTime"] = c["endTime"][:-2] + ":" + c["endTime"][-2:] + " AM"
return classes
def get_buildings():
    # Buildings loaded from the DB at import time (module-level cache).
    return buildings

def get_departments():
    # Departments loaded from the DB at import time (module-level cache).
    return departments
@app.route('/api')
def info():
    # Static API documentation page.
    return render_template("api_info.html")

@app.route('/api/buildings')
def send_buildings():
    # JSON list of {"name", "code"} buildings cached at startup.
    return jsonify(get_buildings())

@app.route('/api/departments')
def send_departments():
    # JSON list of {"name", "code"} departments cached at startup.
    return jsonify(get_departments())
@app.route('/api/classes/<string:campus>/<string:day>/<int:start>/<int:end>', methods=['GET'])
def send_classes(campus, day, start, end):
    # JSON classes for the given window, filtered by optional
    # comma-separated "buildings"/"departments" query parameters (an
    # absent parameter splits to [''], the "no filter" sentinel).
    return jsonify(get_classes(campus, day, start, end, request.args.get('buildings', default="").split(','), request.args.get('departments', default="").split(',')))
@app.route('/', methods=['GET', 'POST'])
def submit():
    # Main page: render the search form plus the classes matching it.
    form = SpecifierForm()
    # Empty multi-selects are normalized to [''], the "no filter"
    # sentinel understood by get_classes() and the filters below.
    if not form.building.data:
        form.building.data = ['']
    if not form.department.data:
        form.department.data = ['']
    classes = get_classes(form.campus.data, form.day.data, form.startTime.data, form.endTime.data)
    # Narrow the department choices to departments that actually have
    # matching classes, then filter the classes by the user's selection.
    form.department.choices = [(x["code"], x["name"]) for x in departments if x["code"] in [y["deptcode"] for y in classes]]
    classes = [x for x in classes if form.department.data == [''] or x["deptcode"] in form.department.data]
    # Same narrowing for buildings, applied after the department filter.
    form.building.choices = [(b["code"], b["name"]) for b in buildings if b["code"] in [x["building"] for x in classes]]
    classes = [x for x in classes if form.building.data == [''] or x["building"] in form.building.data]
    return render_template("main.html", form=form, results=classes)
class SpecifierForm(FlaskForm):
    # Search form: campus, day, and a start-time window.
    # NOTE(review): every default below is evaluated ONCE at import time,
    # so currDay/currTime are frozen at server start rather than at
    # request time — confirm this is intended.
    campus = SelectField('campus', choices=[("CAC", "College Avenue"), ("BUS", "Busch"), ("LIV", "Livingston"), ("CD", "Cook/Douglas")], default='CAC')
    # Choices for these two are populated per-request in submit().
    building = SelectMultipleField("building")
    department = SelectMultipleField("department")
    # (value, label) pairs in HHMM-integer form covering 8 AM - 10:30 PM.
    times = [(800,"8:00 AM"),(830,"8:30 AM"),(900,"9:00 AM"),(930,"9:30 AM"),(1000,"10:00 AM"),(1030,"10:30 AM"),
             (1100,"11:00 AM"),(1130,"11:30 AM"),(1200,"12:00 PM"),(1230,"12:30 PM"),(1300,"1:00 PM"),(1330,"1:30 PM"),
             (1400,"2:00 PM"),(1430,"2:30 PM"),(1500,"3:00 PM"),(1530,"3:30 PM"),(1600,"4:00 PM"),(1630,"4:30 PM"),
             (1700,"5:00 PM"),(1730,"5:30 PM"),(1800,"6:00 PM"),(1830,"6:30 PM"),(1900,"7:00 PM"),(1930,"7:30 PM"),
             (2000,"8:00 PM"),(2030,"8:30 PM"),(2100,"9:00 PM"),(2130,"9:30 PM"),(2200,"10:00 PM"),(2230,"10:30 PM")]
    # Current weekday/hour, floored to the hour, used as form defaults.
    # NOTE(review): currTime+130 can fall outside the choices list late
    # in the day (e.g. 2200+130=2330) — confirm how the widget handles
    # an out-of-range default.
    currDay = daysOfWeek[int(datetime.now().strftime("%w"))]
    currTime = int(int(datetime.now().strftime("%H%M")) / 100) * 100
    day = SelectField('startTime', choices=[("M", "Monday"),("T", "Tuesday"),("W", "Wednesday"),("TH", "Thursday"),("F", "Friday"),("S","Saturday")], default=currDay);
    startTime = SelectField('startTime', choices=times, default=currTime);
    endTime = SelectField('endTime', choices=times, default=currTime+130);
@app.route('/api/esp_report', methods=['POST'])
def esp_report():
    """Store one temperature/humidity/pressure sample posted by the ESP
    sensor node; returns {"success": true} after commit."""
    c = conn.cursor()
    data = (float(request.values['temp']),
            float(request.values['hum']),
            float(request.values['pres']))
    # Use psycopg2 parameter binding rather than %-interpolating values
    # into the SQL string (the float() casts made injection unlikely, but
    # binding is the correct idiom and handles quoting consistently).
    c.execute("INSERT INTO esp_data (temp, hum, pres) VALUES (%s, %s, %s)",
              data)
    conn.commit()
    return jsonify(success=True)
def rnd(x, base):
    """Return [lo, hi]: min(x) rounded down and max(x) rounded up to the
    nearest multiple of ``base``."""
    lo = base * floor(float(min(x)) / base)
    hi = base * ceil(float(max(x)) / base)
    return [lo, hi]
@app.route('/api/esp_data')
def esp_data():
    """Return the last ``interval`` hours of sensor samples (oldest
    first) plus rounded axis bounds for plotting."""
    c = conn.cursor()
    interval = int(request.values.get('interval', 2))
    # Bind the interval as a parameter instead of %-formatting it into
    # the SQL string (the int() cast made injection unlikely, but
    # parameter binding is the correct idiom).
    c.execute("SELECT * FROM esp_data "
              "WHERE time > (NOW() - %s * INTERVAL '1 HOUR') "
              "ORDER BY id DESC", (interval,))
    # Rows come back newest-first; reverse so charts read left-to-right.
    rows = [dict(zip(['id', 'time', 'temp', 'hum', 'pres'], r))
            for r in c.fetchall()][::-1]
    temps = [r['temp'] for r in rows]
    hums = [r['hum'] for r in rows]
    press = [r['pres'] for r in rows]
    times = [r['time'].strftime("%Y-%m-%dT%H:%M:%S") for r in rows]
    # Per-series axis bounds: temps to 10s, humidity to 2s, pressure to 0.5s.
    axes = rnd(temps, 10) + rnd(hums, 2) + rnd(press, .5)
    return jsonify(times=times, temps=temps, hums=hums, press=press,
                   axes=axes, extra=None)
@app.route('/api/esp_view')
def esp_view():
    # Dashboard page charting the /api/esp_data feed.
    return render_template('esp_view.html')
@app.route('/api/updateblog', methods=['POST'])
def updateblog():
    """Pull the blog repository and regenerate it with pelican, returning
    pelican's output (or its captured error output on failure)."""
    subprocess.call(['rm', '-rf', 'output'], cwd='/opt/blog')
    subprocess.call(['git', 'pull'], cwd='/opt/blog')
    subprocess.call(['sudo', 'chown', '-R', 'rajan:rajan', '.'], cwd='/opt/blog')
    try:
        res = subprocess.check_output(['/usr/local/bin/pelican', '-vD', 'content'],
                                      cwd='/opt/blog')
        return res
    except subprocess.CalledProcessError as e:
        # BUG FIX: the original caught bare ``CalledProcessError``, which
        # is a NameError here (only ``import subprocess`` is in scope),
        # so a pelican failure crashed the handler instead of returning
        # its output.
        return e.output
def flash_errors(form):
    """Flash every validation error on ``form`` as a user-visible message."""
    for field_name, messages in form.errors.items():
        label = getattr(form, field_name).label.text
        for message in messages:
            flash(u"%s error - %s" % (label, message))
|
mit
|
simongoffin/website_version
|
openerp/report/printscreen/__init__.py
|
381
|
1203
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ps_list
import ps_form
""" A special report, that is automatically formatted to look like the
screen contents of Form/List Views.
"""
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
epssy/hue
|
apps/filebrowser/src/filebrowser/lib/xxd_test.py
|
29
|
2104
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import logging
import StringIO
import random
import xxd
from subprocess import Popen, PIPE
# Module-level logger named after this test module.
logger = logging.getLogger(__name__)

LENGTH = 1024*10  # 10KB of random input for the xxd comparison test
class XxdTest(unittest.TestCase):
    """Tests for the xxd module, including a byte-for-byte comparison
    against the system ``xxd`` binary on random input."""

    def test_mask_not_alphanumeric(self):
        self.assertEquals((1, ". X"), xxd.mask_not_alphanumeric("\n X"))

    def test_mask_not_printable(self):
        # NOTE(review): despite the name this calls mask_not_alphanumeric;
        # presumably xxd exposes no separate mask_not_printable helper —
        # confirm against the xxd module.
        self.assertEquals((2, "..@"), xxd.mask_not_alphanumeric("\xff\x90\x40"))

    def test_compare_to_xxd(self):
        """
        Runs xxd on some random text, and compares output with our xxd.

        It's conceivable that this isn't portable: xxd may have different
        default options.

        To be honest, this test was written after this was working.
        I tested using a temporary file and a side-by-side diff tool (vimdiff).
        """
        # /dev/random tends to hang on Linux, so we use python instead.
        # It's inefficient, but it's not terrible.
        random_text = "".join(chr(random.getrandbits(8)) for _ in range(LENGTH))
        # BUG FIX: the original passed shell=True together with an argv
        # list, which only worked by accident on POSIX (the list's first
        # element became the shell's command).  An argv list belongs with
        # shell=False.
        p = Popen(["xxd"], shell=False, stdin=PIPE, stdout=PIPE, close_fds=True)
        # communicate() returns (stdout, stderr); the original misnamed
        # the first element "stdin".
        (stdout, stderr) = p.communicate(random_text)
        self.assertFalse(stderr)

        output = StringIO.StringIO()
        xxd.main(StringIO.StringIO(random_text), output)
        self.assertEquals(stdout, output.getvalue())
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
Andypsamp/CODjunit
|
beetsplug/filefilter.py
|
25
|
2794
|
# This file is part of beets.
# Copyright 2015, Malte Ried.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Filter imported files using a regular expression.
"""
import re
from beets import config
from beets.plugins import BeetsPlugin
from beets.importer import SingletonImportTask
class FileFilterPlugin(BeetsPlugin):
    """Beets plugin that drops import items whose paths do not match a
    configured regular expression (``path``, optionally overridden by
    ``album_path`` / ``singleton_path``)."""

    def __init__(self):
        super(FileFilterPlugin, self).__init__()
        self.register_listener('import_task_created',
                               self.import_task_created_event)
        # Default pattern matches everything.
        self.config.add({
            'path': '.*'
        })

        # ``path`` serves as the shared fallback for both modes; the
        # mode-specific keys below override it when configured.
        self.path_album_regex = \
            self.path_singleton_regex = \
            re.compile(self.config['path'].get())

        if 'album_path' in self.config:
            self.path_album_regex = re.compile(self.config['album_path'].get())

        if 'singleton_path' in self.config:
            self.path_singleton_regex = re.compile(
                self.config['singleton_path'].get())

    def import_task_created_event(self, session, task):
        """Filter the items of a freshly created import task.

        Returns [] to drop the task entirely, or [task] (possibly with a
        reduced item list) to keep it in the importer pipeline.
        """
        if task.items and len(task.items) > 0:
            items_to_import = []
            for item in task.items:
                if self.file_filter(item['path']):
                    items_to_import.append(item)
            if len(items_to_import) > 0:
                task.items = items_to_import
            else:
                # Returning an empty list of tasks from the handler
                # drops the task from the rest of the importer pipeline.
                return []
        elif isinstance(task, SingletonImportTask):
            if not self.file_filter(task.item['path']):
                return []

        # If not filtered, return the original task unchanged.
        return [task]

    def file_filter(self, full_path):
        """Checks if the configured regular expressions allow the import
        of the file given in full_path.
        """
        # The global import config decides which pattern applies: album
        # mode unless 'singletons' is set and truthy.
        import_config = dict(config['import'])
        if 'singletons' not in import_config or not import_config[
                'singletons']:
            # Album
            return self.path_album_regex.match(full_path) is not None
        else:
            # Singleton
            return self.path_singleton_regex.match(full_path) is not None
|
mit
|
mozilla/inventory
|
vendor-local/src/django-tastytools/tastytools/test/definitions/fields.py
|
5
|
8851
|
""" Defines the generator function for field test cases """
from django.test import TestCase
from tastytools.test.client import Client, MultiTestCase, create_multi_meta
from datetime import datetime
from helpers import prepare_test_post_data
import random
class FieldNotSupportedException(Exception):
    """Raised when test-data generation has no rule for a field class."""
    pass
def generate(api, setUp=None):
    """Generate a TestCase with per-field tests for every resource in *api*.

    Each ``multi_*`` static method below is a test prototype; the
    MultiTestCase metaclass expands it into one concrete test per
    (resource, field) pair produced by generate_arguments().

    :param api: tastypie Api whose ``_registry`` is scanned.
    :param setUp: optional extra setUp hook run after the default setUp.
    :returns: the generated TestCase subclass.
    """
    if setUp is None:
        # No user hook supplied: install a no-op so setUp() below can call
        # it unconditionally.
        def user_setUp(*args, **kwargs):
            return
    else:
        user_setUp = setUp

    class UnderResourceFields(MultiTestCase):
        """ Prototype class for the generation of tests cases for the fields
        of every Resource
        """

        @staticmethod
        def generate_field_test_data(field):
            """Return a sample value appropriate for *field*'s class.

            Related fields get the URI of a freshly created test resource.
            Raises FieldNotSupportedException for unhandled field classes.
            """
            field_classname = field.__class__.__name__
            if field_classname == 'CharField':
                bad_value = "abcd"
            elif field_classname == "IntegerField":
                bad_value = 12345
            elif field_classname == "ToManyField":
                (uri, res) = field.to_class().create_test_resource()
                return [uri]
            elif field_classname == "ToOneField" or field_classname == "ForeignKey":
                (uri, res) = field.to_class().create_test_resource()
                return [uri]
            elif field_classname == "DateField":
                return datetime.now()
            elif field_classname == "DictField":
                return {}
            else:
                raise FieldNotSupportedException(field_classname)
            return bad_value

        @staticmethod
        def multi_post_missing_fields_nice_response(
                self, resource_name, resource, field_name, field):
            """ For each field in the resource, removes it from the resource
            and post a request with the missing field, then it verifies that
            no ugly response is given, like a 500 status code, or a non-json
            response
            """
            if resource.can_create():
                post_data = prepare_test_post_data(self, resource)
                try:
                    del post_data[field_name]
                except:
                    # NOTE(review): bare except — presumably only KeyError is
                    # expected here (field absent from the POST data).
                    return
                response = self.client.post(resource.get_resource_list_uri(),
                                            post_data)
                for code in [401, 404, 500]:
                    msg = "%s returns a %s response when issuing a POST with" \
                        " missing %s - %s"
                    msg %= (resource_name, code, field_name, response.content)
                    self.assertNotEqual(code, response.status_code, msg)
                # Django's internal response._headers maps to (name, value).
                header, content_type = response._headers['content-type']
                if len(response.content) > 0:
                    msg = "Bad content type when POSTing a %s with missing %s:" \
                        "%s (%s)=> %s"
                    msg %= (resource_name, field_name, content_type,
                            response.status_code, response.content)
                    self.assertTrue(
                        content_type.startswith('application/json'), msg)

        @staticmethod
        def multi_help(self, resource_name, resource, field_name, field):
            """ Verifies that every field has a help_text set """
            if field_name == 'id':
                # auto-generated primary key: no help text required
                return
            if field.help_text == field.__class__.help_text:
                msg = "Missing help text for %s.%s resource field."
                msg %= (resource_name, field_name)
                self.assertTrue(False, msg)

        @staticmethod
        def multi_readonly_post(self, resource_name, resource, field_name,
                field):
            """ for every read only field, tries to change it's value through
            POST and verifies it didn't
            """
            if field.readonly and resource.can_create():
                post_data = resource.get_test_post_data()
                try:
                    bad_value = UnderResourceFields.generate_field_test_data(field)
                except FieldNotSupportedException:
                    # No sample value available for this field type.
                    return
                post_data[field_name] = bad_value
                post_response = self.client.post(
                    resource.get_resource_list_uri(),
                    post_data, parse='json')
                if post_response.status_code == 201:
                    location = post_response['Location']
                    get_response = self.client.get(location, parse='json')
                    msg = "Could not read posted resource (%d)\n%s"
                    msg %= (get_response.status_code, get_response.content)
                    self.assertEqual(get_response.status_code, 200, msg)
                    msg = "%s.%s can be set by a POST request even though"\
                        " it's readonly!."
                    msg %= (resource_name, field_name)
                    self.assertNotEqual(get_response.get(field_name, ''),
                                        bad_value, msg)

        @staticmethod
        def multi_max_length_post(self, resource_name, resource, field_name,
                field):
            """POST a value longer than max_length; expect a 400 response
            that reports a MaxLengthExceeded error."""
            max_length = getattr(field, "max_length", None)
            if max_length is not None and resource.can_create():
                request_data = resource.get_test_post_data()
                request_data[field_name] = \
                    UnderResourceFields.generate_string_by_length(
                        field.max_length + 1)
                msg = "%s.%s max length exceeded, and did not return"\
                    " a bad request error"
                msg %= (resource_name, field_name)
                post_response = self.client.post(
                    resource.get_resource_list_uri(),
                    data=request_data, parse='json')
                self.assertEqual(post_response.status_code, 400, msg)
                data = post_response.data['errors']
                has_error = False
                for error in data:
                    if error['name'] == "MaxLengthExceeded":
                        has_error = True
                if has_error is False:
                    msg = '%s.%s max length exceeded, but MaxLengthExceeded error is not being reported'
                    msg %= (resource_name, field_name)
                    self.assertTrue(False, msg)

        @staticmethod
        def multi_readonly_patch(self, resource_name, resource, field_name,
                field):
            """ for every read only field, tries to change it's value through
            PATCH and verifies it didn't
            """
            client = Client()
            if field.readonly and resource.can_patch():
                #Create a resource to modify it
                (location, obj) = resource.create_test_resource()
                bad_value = UnderResourceFields.generate_field_test_data(field)
                #attempt to PATCH
                patch_data = {}
                patch_data[field_name] = bad_value
                self.client.patch(location, patch_data, parse='json')
                # Re-read with a separate client to observe the stored value.
                get_response = client.get(location, parse='json')
                msg = "%s.%s can be changed by a PATCH and it's readonly!\n%s"
                msg %= (resource_name, field_name, get_response)
                self.assertTrue(get_response.data is not None,
                                "No response data from %s \nWith data: %s" %
                                (location, patch_data))
                self.assertNotEqual(get_response.data.get(field_name, None),
                                    bad_value, msg)

        @staticmethod
        def generate_arguments():
            """Return one (resource_name, resource, field_name, field) tuple
            per field of every resource that declares test data."""
            args = []
            for resource_name, resource in api._registry.items():
                if hasattr(resource._meta, "testdata"):
                    for field_name, field in resource.fields.items():
                        args.append((resource_name, resource, field_name,
                                     field))
            return args

        @staticmethod
        def generate_test_name(resource_name, resource, field_name, field):
            """Name each generated test '<resource>_<field>'."""
            return "_".join([resource_name, field_name])

        @staticmethod
        def generate_string_by_length(length):
            """Return a random alphanumeric (0-9, A-Z) string of *length*."""
            string = ""
            for i in range(length):
                rand_num = random.randint(0, 35)
                if rand_num < 10:
                    char = str(rand_num)
                else:
                    # 55 + 10..35 maps onto ASCII 'A'..'Z'
                    char = chr(55 + rand_num)
                string += char
            return string

        @staticmethod
        def setUp(self, *args, **kwargs):
            # Fresh test client per test, then the user-supplied hook.
            self.client = Client()
            user_setUp(self, *args, **kwargs)

    class TestResourceFields(TestCase):
        __metaclass__ = create_multi_meta(UnderResourceFields)

    return TestResourceFields
|
bsd-3-clause
|
hail-is/hail
|
hail/python/hailtop/hailctl/dataproc/resources/init_notebook.py
|
1
|
6766
|
#!/opt/conda/default/bin/python3
import json
import os
import subprocess as sp
import sys
import errno
from subprocess import check_output
# This init action requires Python 3.
assert sys.version_info > (3, 0), sys.version_info

if sys.version_info >= (3, 7):
    def safe_call(*args, **kwargs):
        # subprocess.run grew capture_output in 3.7; check=True raises
        # CalledProcessError on a non-zero exit status.
        sp.run(args, capture_output=True, check=True, **kwargs)
else:
    def safe_call(*args, **kwargs):
        # Pre-3.7 fallback: capture combined stdout/stderr, print it on
        # failure so the Dataproc init log shows why the command failed.
        try:
            sp.check_output(args, stderr=sp.STDOUT, **kwargs)
        except sp.CalledProcessError as e:
            print(e.output.decode())
            raise e
def get_metadata(key):
    """Return a GCE instance metadata attribute, decoded to str."""
    attribute = 'attributes/{}'.format(key)
    raw = check_output(['/usr/share/google/get_metadata_value', attribute])
    return raw.decode()
def mkdir_if_not_exists(path):
    """Create *path* (including parents) if it does not already exist.

    Uses ``os.makedirs(exist_ok=True)`` (the modern idiom; this file
    already requires Python 3.6+ via its f-strings), which ignores an
    already-existing directory but still raises if *path* exists as a
    non-directory or on any other OS error.
    """
    os.makedirs(path, exist_ok=True)
# get role of machine (master or worker)
role = get_metadata('dataproc-role')

# All notebook setup happens on the master only; workers need nothing here.
if role == 'Master':
    # additional packages to install
    pip_pkgs = [
        'setuptools',
        'mkl<2020',
        'lxml<5',
        'google-cloud-storage==1.25.*',
        'https://github.com/hail-is/jgscm/archive/v0.1.12+hail.zip',
        'ipykernel==4.10.*',
        'ipywidgets==7.4.*',
        'jupyter-console==6.0.*',
        'nbconvert==5.5.*',
        'notebook==5.7.*',
        'qtconsole==4.5.*'
    ]

    # add user-requested packages ('|'-separated list in the PKGS
    # metadata attribute; absent attribute is simply skipped)
    try:
        user_pkgs = get_metadata('PKGS')
    except Exception:
        pass
    else:
        pip_pkgs.extend(user_pkgs.split('|'))

    print('pip packages are {}'.format(pip_pkgs))
    command = ['pip', 'install']
    command.extend(pip_pkgs)
    safe_call(*command)

    print('getting metadata')

    # The Hail wheel to install is passed as a GCS path in metadata.
    wheel_path = get_metadata('WHEEL')
    wheel_name = wheel_path.split('/')[-1]

    print('copying wheel')
    safe_call('gsutil', 'cp', wheel_path, f'/home/hail/{wheel_name}')

    # --no-dependencies: deps were already pinned and installed above
    safe_call('pip', 'install', '--no-dependencies', f'/home/hail/{wheel_name}')

    print('setting environment')

    # Put Spark's bundled zips (py4j, pyspark) on PYTHONPATH.
    spark_lib_base = '/usr/lib/spark/python/lib/'
    files_to_add = [os.path.join(spark_lib_base, x) for x in os.listdir(spark_lib_base) if x.endswith('.zip')]

    env_to_set = {
        'PYTHONHASHSEED': '0',
        'PYTHONPATH': ':'.join(files_to_add),
        'SPARK_HOME': '/usr/lib/spark/',
        'PYSPARK_PYTHON': '/opt/conda/default/bin/python',
        'PYSPARK_DRIVER_PYTHON': '/opt/conda/default/bin/python',
    }

    # VEP ENV (optional, only when a VEP config is provided via metadata)
    try:
        vep_config_uri = get_metadata('VEP_CONFIG_URI')
    except Exception:
        pass
    else:
        env_to_set["VEP_CONFIG_URI"] = vep_config_uri

    print('setting environment')

    # Persist the variables system-wide and for Spark daemons.
    for e, value in env_to_set.items():
        safe_call('/bin/sh', '-c',
                  'set -ex; echo "export {}={}" | tee -a /etc/environment /usr/lib/spark/conf/spark-env.sh'.format(e, value))

    # Locate the installed hail package to find its bundled Spark jar.
    hail_jar = sp.check_output([
        '/bin/sh', '-c',
        'set -ex; python3 -m pip show hail | grep Location | sed "s/Location: //"'
    ]).decode('ascii').strip() + '/hail/backend/hail-all-spark.jar'

    conf_to_set = [
        'spark.executorEnv.PYTHONHASHSEED=0',
        'spark.app.name=Hail',
        # the below are necessary to make 'submit' work
        'spark.jars={}'.format(hail_jar),
        'spark.driver.extraClassPath={}'.format(hail_jar),
        'spark.executor.extraClassPath=./hail-all-spark.jar',
    ]

    print('setting spark-defaults.conf')

    with open('/etc/spark/conf/spark-defaults.conf', 'a') as out:
        out.write('\n')
        for c in conf_to_set:
            out.write(c)
            out.write('\n')

    # create Jupyter kernel spec file
    kernel = {
        'argv': [
            '/opt/conda/default/bin/python',
            '-m',
            'ipykernel',
            '-f',
            '{connection_file}'
        ],
        'display_name': 'Hail',
        'language': 'python',
        'env': {
            **env_to_set,
            'HAIL_SPARK_MONITOR': '1',
            'SPARK_MONITOR_UI': 'http://localhost:8088/proxy/%APP_ID%',
        }
    }

    # write kernel spec file to default Jupyter kernel directory
    mkdir_if_not_exists('/opt/conda/default/share/jupyter/kernels/hail/')
    with open('/opt/conda/default/share/jupyter/kernels/hail/kernel.json', 'w') as f:
        json.dump(kernel, f)

    # create Jupyter configuration file
    mkdir_if_not_exists('/opt/conda/default/etc/jupyter/')
    with open('/opt/conda/default/etc/jupyter/jupyter_notebook_config.py', 'w') as f:
        opts = [
            'c.Application.log_level = "DEBUG"',
            'c.NotebookApp.ip = "127.0.0.1"',
            'c.NotebookApp.open_browser = False',
            'c.NotebookApp.port = 8123',
            'c.NotebookApp.token = ""',
            'c.NotebookApp.contents_manager_class = "jgscm.GoogleStorageContentManager"'
        ]
        f.write('\n'.join(opts) + '\n')

    print('copying spark monitor')
    spark_monitor_gs = 'gs://hail-common/sparkmonitor-3b2bc8c22921f5c920fc7370f3a160d820db1f51/sparkmonitor-0.0.11-py3-none-any.whl'
    spark_monitor_wheel = '/home/hail/' + spark_monitor_gs.split('/')[-1]
    safe_call('gsutil', 'cp', spark_monitor_gs, spark_monitor_wheel)
    safe_call('pip', 'install', spark_monitor_wheel)

    # setup jupyter-spark extension
    safe_call('/opt/conda/default/bin/jupyter', 'serverextension', 'enable', '--user', '--py', 'sparkmonitor')
    safe_call('/opt/conda/default/bin/jupyter', 'nbextension', 'install', '--user', '--py', 'sparkmonitor')
    safe_call('/opt/conda/default/bin/jupyter', 'nbextension', 'enable', '--user', '--py', 'sparkmonitor')
    safe_call('/opt/conda/default/bin/jupyter', 'nbextension', 'enable', '--user', '--py', 'widgetsnbextension')
    # shell=True is forwarded through safe_call's **kwargs so the && and
    # $() constructs in this command line are interpreted by the shell.
    safe_call("""ipython profile create && echo "c.InteractiveShellApp.extensions.append('sparkmonitor.kernelextension')" >> $(ipython profile locate default)/ipython_kernel_config.py""", shell=True)

    # create systemd service file for Jupyter notebook server process
    with open('/lib/systemd/system/jupyter.service', 'w') as f:
        opts = [
            '[Unit]',
            'Description=Jupyter Notebook',
            'After=hadoop-yarn-resourcemanager.service',
            '[Service]',
            'Type=simple',
            'User=root',
            'Group=root',
            'WorkingDirectory=/home/hail/',
            'ExecStart=/opt/conda/default/bin/python /opt/conda/default/bin/jupyter notebook --allow-root',
            'Restart=always',
            'RestartSec=1',
            '[Install]',
            'WantedBy=multi-user.target'
        ]
        f.write('\n'.join(opts) + '\n')

    # add Jupyter service to autorun and start it
    safe_call('systemctl', 'daemon-reload')
    safe_call('systemctl', 'enable', 'jupyter')
    safe_call('service', 'jupyter', 'start')
|
mit
|
GoogleCloudPlatform/training-data-analyst
|
courses/machine_learning/deepdive/08_image/flowersmodeltpu/trainer/preprocess.py
|
4
|
8254
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Beam pipeline to create TFRecord files from JPEG files stored on GCS.
These are the TFRecord format expected by the resnet and amoebanet models.
Example usage:
python -m preprocess.py \
--train_csv gs://cloud-ml-data/img/flower_photos/train_set.csv \
--validation_csv gs://cloud-ml-data/img/flower_photos/eval_set.csv \
--labels_file /tmp/labels.txt \
--project_id $PROJECT \
--output_dir gs://${BUCKET}/tpu/imgclass/data
The format of the CSV files is:
URL-of-image,label
And the format of the labels_file is simply a list of strings one-per-line.
"""
from __future__ import print_function
import argparse
import datetime
import os
import shutil
import subprocess
import sys
import apache_beam as beam
import tensorflow as tf
def _int64_feature(value):
  """Wrapper for inserting int64 features into Example proto."""
  values = value if isinstance(value, list) else [value]
  return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _bytes_feature(value):
  """Wrapper for inserting bytes features into Example proto."""
  wrapped = tf.train.BytesList(value=[value])
  return tf.train.Feature(bytes_list=wrapped)
def _convert_to_example(filename, image_buffer, label_int, label_str, height,
                        width):
  """Build an Example proto for an example.

  Args:
    filename: string, path to an image file, e.g., '/path/to/example.JPG'
    image_buffer: string, JPEG encoding of RGB image
    label_int: integer, identifier for ground truth (0-based)
    label_str: string, identifier for ground truth, e.g., 'daisy'
    height: integer, image height in pixels
    width: integer, image width in pixels
  Returns:
    Example proto
  """
  # NOTE(review): the string features below rely on Python 2 str == bytes;
  # under Python 3 they would need explicit .encode() — confirm runtime.
  colorspace = 'RGB'
  channels = 3
  image_format = 'JPEG'

  example = tf.train.Example(
      features=tf.train.Features(
          feature={
              'image/height': _int64_feature(height),
              'image/width': _int64_feature(width),
              'image/colorspace': _bytes_feature(colorspace),
              'image/channels': _int64_feature(channels),
              'image/class/label': _int64_feature(label_int +
                                                  1),  # model expects 1-based
              'image/class/synset': _bytes_feature(label_str),
              'image/format': _bytes_feature(image_format),
              'image/filename': _bytes_feature(os.path.basename(filename)),
              'image/encoded': _bytes_feature(image_buffer)
          }))
  return example
class ImageCoder(object):
  """Helper class that provides TensorFlow image coding utilities."""

  def __init__(self):
    # Create a single Session to run all image coding calls.
    self._sess = tf.Session()

    # Initializes function that decodes RGB JPEG data.
    self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
    self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)

  def decode_jpeg(self, image_data):
    """Decode a JPEG byte string into a height x width x 3 RGB array."""
    image = self._sess.run(
        self._decode_jpeg, feed_dict={self._decode_jpeg_data: image_data})
    # Sanity-check shape: rank 3 with exactly 3 channels.
    assert len(image.shape) == 3
    assert image.shape[2] == 3
    return image

  def __del__(self):
    # Release the TF session when the coder is garbage-collected.
    self._sess.close()
def _get_image_data(filename, coder):
  """Process a single image file.

  Args:
    filename: string, path to an image file e.g., '/path/to/example.JPG'.
    coder: instance of ImageCoder to provide TensorFlow image coding utils.
  Returns:
    image_buffer: string, JPEG encoding of RGB image.
    height: integer, image height in pixels.
    width: integer, image width in pixels.
  """
  # Read the image file.
  # NOTE(review): opened in text mode 'r' — fine on Python 2 where str is
  # bytes; Python 3 would require 'rb' here.
  with tf.gfile.FastGFile(filename, 'r') as ifp:
    image_data = ifp.read()

  # Decode the RGB JPEG (only to validate and measure it; the raw JPEG
  # bytes are what get stored in the record).
  image = coder.decode_jpeg(image_data)

  # Check that image converted to RGB
  assert len(image.shape) == 3
  height = image.shape[0]
  width = image.shape[1]
  assert image.shape[2] == 3

  return image_data, height, width
def convert_to_example(csvline, categories):
  """Parse a line of CSV file and convert to TF Record.

  Args:
    csvline: line from input CSV file
    categories: list of labels
  Yields:
    serialized TF example if the label is in categories
  """
  # NOTE(review): .encode('ascii', 'ignore') assumes Python 2 semantics
  # (returns str); on Python 3 it yields bytes and split(',') would fail.
  filename, label = csvline.encode('ascii', 'ignore').split(',')
  if label in categories:
    # ignore labels not in categories list
    # A fresh ImageCoder (and TF session) per element — deleted promptly
    # so the session is closed before yielding.
    coder = ImageCoder()
    image_buffer, height, width = _get_image_data(filename, coder)
    del coder
    example = _convert_to_example(filename, image_buffer,
                                  categories.index(label), label, height, width)
    yield example.SerializeToString()
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--train_csv',
      # pylint: disable=line-too-long
      help=
      'Path to input. Each line of input has two fields image-file-name and label separated by a comma',
      required=True)
  parser.add_argument(
      '--validation_csv',
      # pylint: disable=line-too-long
      help=
      'Path to input. Each line of input has two fields image-file-name and label separated by a comma',
      required=True)
  parser.add_argument(
      '--labels_file',
      help='Path to file containing list of labels, one per line',
      required=True)
  parser.add_argument(
      '--project_id',
      help='ID (not name) of your project. Ignored by DirectRunner',
      required=True)
  parser.add_argument(
      '--runner',
      help='If omitted, uses DataFlowRunner if output_dir starts with gs://',
      default=None)
  parser.add_argument(
      '--output_dir', help='Top-level directory for TF Records', required=True)
  args = parser.parse_args()
  arguments = args.__dict__

  # Unique job name so concurrent/preceding runs don't collide.
  JOBNAME = (
      'preprocess-images-' + datetime.datetime.now().strftime('%y%m%d-%H%M%S'))

  PROJECT = arguments['project_id']
  OUTPUT_DIR = arguments['output_dir']

  # set RUNNER using command-line arg or based on output_dir path
  on_cloud = OUTPUT_DIR.startswith('gs://')
  if arguments['runner']:
    RUNNER = arguments['runner']
  else:
    RUNNER = 'DataflowRunner' if on_cloud else 'DirectRunner'

  # clean-up output directory since Beam will name files 0000-of-0004 etc.
  # and this could cause confusion if earlier run has 0000-of-0005, for eg
  if on_cloud:
    try:
      subprocess.check_call('gsutil -m rm -r {}'.format(OUTPUT_DIR).split())
    except subprocess.CalledProcessError:
      # Nothing to delete (e.g. first run) — not an error.
      pass
  else:
    shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
    os.makedirs(OUTPUT_DIR)

  # read list of labels
  with tf.gfile.FastGFile(arguments['labels_file'], 'r') as f:
    LABELS = [line.rstrip() for line in f]
  print('Read in {} labels, from {} to {}'.format(
      len(LABELS), LABELS[0], LABELS[-1]))
  if len(LABELS) < 2:
    print('Require at least two labels')
    sys.exit(-1)

  # set up Beam pipeline to convert images to TF Records
  options = {
      'staging_location': os.path.join(OUTPUT_DIR, 'tmp', 'staging'),
      'temp_location': os.path.join(OUTPUT_DIR, 'tmp'),
      'job_name': JOBNAME,
      'project': PROJECT,
      'teardown_policy': 'TEARDOWN_ALWAYS',
      'save_main_session': True
  }
  opts = beam.pipeline.PipelineOptions(flags=[], **options)
  with beam.Pipeline(RUNNER, options=opts) as p:
    # BEAM tasks: one read/convert/write branch per dataset split.
    for step in ['train', 'validation']:
      _ = (
          p
          | '{}_read_csv'.format(step) >> beam.io.ReadFromText(
              arguments['{}_csv'.format(step)])
          | '{}_convert'.format(step) >>
          beam.FlatMap(lambda line: convert_to_example(line, LABELS))
          | '{}_write_tfr'.format(step) >> beam.io.tfrecordio.WriteToTFRecord(
              os.path.join(OUTPUT_DIR, step)))
|
apache-2.0
|
pedro2d10/SickRage-FR
|
lib/requests/packages/chardet/sbcharsetprober.py
|
2927
|
4793
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
from .compat import wrap_ord
# Model parameters (see SingleByteCharSetProber.feed / get_confidence):
SAMPLE_SIZE = 64  # orders below this are "frequent" chars; the precedence
                  # matrix is SAMPLE_SIZE x SAMPLE_SIZE, flattened
SB_ENOUGH_REL_THRESHOLD = 1024  # min sequences before shortcut decisions
POSITIVE_SHORTCUT_THRESHOLD = 0.95  # confidence above this => eFoundIt
NEGATIVE_SHORTCUT_THRESHOLD = 0.05  # confidence below this => eNotMe
SYMBOL_CAT_ORDER = 250  # orders below this count toward the char total
NUMBER_OF_SEQ_CAT = 4   # number of 2-char sequence likelihood categories
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1  # index of the most-likely category
#NEGATIVE_CAT = 0
class SingleByteCharSetProber(CharSetProber):
    """Prober for single-byte charsets, driven by a per-charset model.

    The model supplies a char-to-order map (frequency rank per byte value)
    and a flattened SAMPLE_SIZE x SAMPLE_SIZE precedence matrix assigning
    each 2-character sequence to a likelihood category; confidence is
    derived from the share of sequences in the most-positive category.
    """

    def __init__(self, model, reversed=False, nameProber=None):
        CharSetProber.__init__(self)
        self._mModel = model
        # TRUE if we need to reverse every pair in the model lookup
        self._mReversed = reversed
        # Optional auxiliary prober for name decision
        self._mNameProber = nameProber
        self.reset()

    def reset(self):
        """Reset all sequence/character counters to their initial state."""
        CharSetProber.reset(self)
        # char order of last character (255 = sentinel: no previous char)
        self._mLastOrder = 255
        self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
        self._mTotalSeqs = 0
        self._mTotalChar = 0
        # characters that fall in our sampling range
        self._mFreqChar = 0

    def get_charset_name(self):
        # Delegate to the name prober when one was supplied (used when a
        # single model serves several charset names).
        if self._mNameProber:
            return self._mNameProber.get_charset_name()
        else:
            return self._mModel['charsetName']

    def feed(self, aBuf):
        """Consume a byte buffer, updating sequence statistics.

        Returns the prober state; may shortcut to eFoundIt or eNotMe once
        enough sequences have been observed.
        """
        if not self._mModel['keepEnglishLetter']:
            aBuf = self.filter_without_english_letters(aBuf)
        aLen = len(aBuf)
        if not aLen:
            return self.get_state()
        for c in aBuf:
            order = self._mModel['charToOrderMap'][wrap_ord(c)]
            if order < SYMBOL_CAT_ORDER:
                self._mTotalChar += 1
            if order < SAMPLE_SIZE:
                self._mFreqChar += 1
                # Only pairs of two frequent characters feed the matrix.
                if self._mLastOrder < SAMPLE_SIZE:
                    self._mTotalSeqs += 1
                    if not self._mReversed:
                        i = (self._mLastOrder * SAMPLE_SIZE) + order
                        model = self._mModel['precedenceMatrix'][i]
                    else:  # reverse the order of the letters in the lookup
                        i = (order * SAMPLE_SIZE) + self._mLastOrder
                        model = self._mModel['precedenceMatrix'][i]
                    self._mSeqCounters[model] += 1
            self._mLastOrder = order

        if self.get_state() == constants.eDetecting:
            if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
                cf = self.get_confidence()
                if cf > POSITIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, we have a'
                                         'winner\n' %
                                         (self._mModel['charsetName'], cf))
                    self._mState = constants.eFoundIt
                elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, below negative'
                                         'shortcut threshhold %s\n' %
                                         (self._mModel['charsetName'], cf,
                                          NEGATIVE_SHORTCUT_THRESHOLD))
                    self._mState = constants.eNotMe

        return self.get_state()

    def get_confidence(self):
        """Return confidence in [0.01, 0.99].

        Ratio of positive-category sequences relative to the model's
        typical positive ratio, scaled by the fraction of frequent
        characters observed; floored at 0.01 and capped at 0.99.
        """
        r = 0.01
        if self._mTotalSeqs > 0:
            r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
                 / self._mModel['mTypicalPositiveRatio'])
            r = r * self._mFreqChar / self._mTotalChar
        if r >= 1.0:
            r = 0.99
        return r
|
gpl-3.0
|
simobasso/ansible
|
lib/ansible/inventory/group.py
|
66
|
4669
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.utils.debug import debug
class Group:
    ''' a group of ansible hosts '''

    #__slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]

    def __init__(self, name=None):
        self.depth = 0               # distance from the root of the group tree
        self.name = name
        self.hosts = []              # hosts directly attached to this group
        self.vars = {}
        self.child_groups = []
        self.parent_groups = []
        self._hosts_cache = None     # memoized result of get_hosts()

        #self.clear_hosts_cache()
        #if self.name is None:
        #    raise Exception("group name is required")

    def __repr__(self):
        return self.get_name()

    def __getstate__(self):
        return self.serialize()

    def __setstate__(self, data):
        return self.deserialize(data)

    def serialize(self):
        """Return a plain-dict form of this group.

        Hosts and child groups are omitted; parent groups are serialized
        recursively.
        """
        parent_groups = []
        for parent in self.parent_groups:
            parent_groups.append(parent.serialize())

        result = dict(
            name=self.name,
            vars=self.vars.copy(),
            parent_groups=parent_groups,
            depth=self.depth,
        )

        return result

    def deserialize(self, data):
        """Rebuild this group (and fresh parent Group objects) from the
        dict produced by serialize()."""
        self.__init__()
        self.name = data.get('name')
        self.vars = data.get('vars', dict())
        self.depth = data.get('depth', 0)

        parent_groups = data.get('parent_groups', [])
        for parent_data in parent_groups:
            g = Group()
            g.deserialize(parent_data)
            self.parent_groups.append(g)

    def get_name(self):
        return self.name

    def add_child_group(self, group):
        """Attach *group* as a child, updating depths, the child's parent
        list, and host caches."""
        if self == group:
            raise Exception("can't add group to itself")

        # don't add if it's already there
        if not group in self.child_groups:
            self.child_groups.append(group)

            # update the depth of the child
            group.depth = max([self.depth+1, group.depth])

            # update the depth of the grandchildren
            group._check_children_depth()

            # now add self to child's parent_groups list, but only if there
            # isn't already a group with the same name
            if not self.name in [g.name for g in group.parent_groups]:
                group.parent_groups.append(self)

            self.clear_hosts_cache()

    def _check_children_depth(self):
        # Propagate depth increases down the tree; hitting Python's
        # recursion limit (RuntimeError) indicates a group cycle.
        try:
            for group in self.child_groups:
                group.depth = max([self.depth+1, group.depth])
                group._check_children_depth()
        except RuntimeError:
            raise AnsibleError("The group named '%s' has a recursive dependency loop." % self.name)

    def add_host(self, host):
        self.hosts.append(host)
        host.add_group(self)
        self.clear_hosts_cache()

    def set_variable(self, key, value):
        self.vars[key] = value

    def clear_hosts_cache(self):
        # Invalidate this group's cache and, transitively, every
        # ancestor's (their get_hosts() results include ours).
        self._hosts_cache = None
        for g in self.parent_groups:
            g.clear_hosts_cache()

    def get_hosts(self):
        """Return all hosts in this group and its descendants (cached)."""
        if self._hosts_cache is None:
            self._hosts_cache = self._get_hosts()
        return self._hosts_cache

    def _get_hosts(self):
        # Depth-first collection, de-duplicated while preserving order:
        # child groups' hosts first, then this group's direct hosts.
        hosts = []
        seen = {}
        for kid in self.child_groups:
            kid_hosts = kid.get_hosts()
            for kk in kid_hosts:
                if kk not in seen:
                    seen[kk] = 1
                    hosts.append(kk)
        for mine in self.hosts:
            if mine not in seen:
                seen[mine] = 1
                hosts.append(mine)
        return hosts

    def get_vars(self):
        return self.vars.copy()

    def _get_ancestors(self):
        # Map of name -> Group for every ancestor, collected recursively.
        results = {}
        for g in self.parent_groups:
            results[g.name] = g
            results.update(g._get_ancestors())
        return results

    def get_ancestors(self):
        return self._get_ancestors().values()
|
gpl-3.0
|
p4datasystems/CarnotKEdist
|
dist/Lib/ensurepip/__init__.py
|
7
|
6655
|
#!/usr/bin/env python2
from __future__ import print_function
import os
import os.path
import pkgutil
import shutil
import sys
import tempfile
__all__ = ["version", "bootstrap"]
_SETUPTOOLS_VERSION = "18.4"
_PIP_VERSION = "7.1.2"
# pip currently requires ssl support, so we try to provide a nicer
# error message when that is missing (http://bugs.python.org/issue19744)
_MISSING_SSL_MESSAGE = ("pip {} requires SSL/TLS".format(_PIP_VERSION))
try:
import ssl
except ImportError:
ssl = None
def _require_ssl_for_pip():
raise RuntimeError(_MISSING_SSL_MESSAGE)
else:
def _require_ssl_for_pip():
pass
_PROJECTS = [
("setuptools", _SETUPTOOLS_VERSION),
("pip", _PIP_VERSION),
]
def _run_pip(args, additional_paths=None):
    """Run the bundled pip with *args*.

    When *additional_paths* is given, those entries are prepended to
    sys.path first so the bundled wheels are importable.
    """
    if additional_paths is not None:
        sys.path = additional_paths + sys.path

    # Import pip only after sys.path is set up.
    import pip
    pip.main(args)
def version():
    """Return the version string of the pip bundled with ensurepip."""
    return _PIP_VERSION
def _disable_pip_configuration_settings():
# We deliberately ignore all pip environment variables
# when invoking pip
# See http://bugs.python.org/issue19734 for details
keys_to_remove = [k for k in os.environ if k.startswith("PIP_")]
for k in keys_to_remove:
del os.environ[k]
# We also ignore the settings in the default pip configuration file
# See http://bugs.python.org/issue20053 for details
os.environ['PIP_CONFIG_FILE'] = os.devnull
def bootstrap(root=None, upgrade=False, user=False,
              altinstall=False, default_pip=True,
              verbosity=0):
    """
    Bootstrap pip into the current Python installation (or the given root
    directory).

    Note that calling this function will alter both sys.path and os.environ.

    :param root: install relative to this alternate root directory.
    :param upgrade: pass --upgrade to pip.
    :param user: install into the per-user site-packages.
    :param altinstall: install only fully-versioned scripts (pipX.Y);
        mutually exclusive with default_pip.
    :param default_pip: also install the unversioned 'pip' script.
    :param verbosity: 0-3, mapped to pip's -v/-vv/-vvv.
    """
    if altinstall and default_pip:
        raise ValueError("Cannot use altinstall and default_pip together")

    _require_ssl_for_pip()
    _disable_pip_configuration_settings()

    # By default, installing pip and setuptools installs all of the
    # following scripts (X.Y == running Python version):
    #
    #   pip, pipX, pipX.Y, easy_install, easy_install-X.Y
    #
    # pip 1.5+ allows ensurepip to request that some of those be left out
    if altinstall:
        # omit pip, pipX and easy_install
        os.environ["ENSUREPIP_OPTIONS"] = "altinstall"
    elif not default_pip:
        # omit pip and easy_install
        os.environ["ENSUREPIP_OPTIONS"] = "install"

    tmpdir = tempfile.mkdtemp()
    try:
        # Put our bundled wheels into a temporary directory and construct the
        # additional paths that need added to sys.path
        additional_paths = []
        # NOTE: the loop variable 'version' shadows the module-level
        # version() function inside this function body.
        for project, version in _PROJECTS:
            wheel_name = "{}-{}-py2.py3-none-any.whl".format(project, version)
            whl = pkgutil.get_data(
                "ensurepip",
                "_bundled/{}".format(wheel_name),
            )
            with open(os.path.join(tmpdir, wheel_name), "wb") as fp:
                fp.write(whl)

            additional_paths.append(os.path.join(tmpdir, wheel_name))

        # Construct the arguments to be passed to the pip command
        args = ["install", "--no-index", "--find-links", tmpdir]
        if root:
            args += ["--root", root]
        if upgrade:
            args += ["--upgrade"]
        if user:
            args += ["--user"]
        if verbosity:
            args += ["-" + "v" * verbosity]

        _run_pip(args + [p[0] for p in _PROJECTS], additional_paths)
    finally:
        # Always clean up the unpacked wheels, even if pip failed.
        shutil.rmtree(tmpdir, ignore_errors=True)
def _uninstall_helper(verbosity=0):
    """Helper to support a clean default uninstall process on Windows

    Note that calling this function may alter os.environ.
    """
    # Nothing to do if pip was never installed, or has been removed
    try:
        import pip
    except ImportError:
        return

    # If the pip version doesn't match the bundled one, leave it alone
    if pip.__version__ != _PIP_VERSION:
        msg = ("ensurepip will only uninstall a matching version "
               "({!r} installed, {!r} bundled)")
        print(msg.format(pip.__version__, _PIP_VERSION), file=sys.stderr)
        return

    _require_ssl_for_pip()
    _disable_pip_configuration_settings()

    # Construct the arguments to be passed to the pip command
    args = ["uninstall", "-y"]
    if verbosity:
        args += ["-" + "v" * verbosity]

    # Uninstall in reverse install order: pip before setuptools.
    _run_pip(args + [p[0] for p in reversed(_PROJECTS)])
def _main(argv=None):
    """Command-line entry point for ``python -m ensurepip``."""
    if ssl is None:
        # Without ssl, pip could not work anyway; report and return
        # without failing so builds lacking ssl don't error out here.
        print("Ignoring ensurepip failure: {}".format(_MISSING_SSL_MESSAGE),
              file=sys.stderr)
        return

    import argparse
    parser = argparse.ArgumentParser(prog="python -m ensurepip")
    parser.add_argument(
        "--version",
        action="version",
        version="pip {}".format(version()),
        help="Show the version of pip that is bundled with this Python.",
    )
    parser.add_argument(
        "-v", "--verbose",
        action="count",
        default=0,
        dest="verbosity",
        help=("Give more output. Option is additive, and can be used up to 3 "
              "times."),
    )
    parser.add_argument(
        "-U", "--upgrade",
        action="store_true",
        default=False,
        help="Upgrade pip and dependencies, even if already installed.",
    )
    parser.add_argument(
        "--user",
        action="store_true",
        default=False,
        help="Install using the user scheme.",
    )
    parser.add_argument(
        "--root",
        default=None,
        help="Install everything relative to this alternate root directory.",
    )
    parser.add_argument(
        "--altinstall",
        action="store_true",
        default=False,
        help=("Make an alternate install, installing only the X.Y versioned"
              "scripts (Default: pipX, pipX.Y, easy_install-X.Y)"),
    )
    # --default-pip / --no-default-pip share the same dest; the hidden
    # --default-pip option exists only to restore the default explicitly.
    parser.add_argument(
        "--default-pip",
        action="store_true",
        default=True,
        dest="default_pip",
        help=argparse.SUPPRESS,
    )
    parser.add_argument(
        "--no-default-pip",
        action="store_false",
        dest="default_pip",
        help=("Make a non default install, installing only the X and X.Y "
              "versioned scripts."),
    )

    args = parser.parse_args(argv)

    bootstrap(
        root=args.root,
        upgrade=args.upgrade,
        user=args.user,
        verbosity=args.verbosity,
        altinstall=args.altinstall,
        default_pip=args.default_pip,
    )
|
apache-2.0
|
cfriedt/gnuradio
|
gr-digital/python/digital/qa_map.py
|
57
|
1778
|
#!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, digital, blocks
class test_map(gr_unittest.TestCase):
    """QA for digital.map_bb: maps each input byte through a symbol table."""

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def helper(self, symbols):
        """Run a fixed byte stream through map_bb(symbols) and compare
        against a direct table lookup."""
        src_data = [0, 1, 2, 3, 0, 1, 2, 3]
        # Use a list comprehension instead of map(): under Python 3 map()
        # returns an iterator, so comparing it to result_data (a list)
        # with assertEqual would always fail. Identical result on Python 2.
        expected_data = [symbols[x] for x in src_data]
        src = blocks.vector_source_b(src_data)
        op = digital.map_bb(symbols)
        dst = blocks.vector_sink_b()
        self.tb.connect(src, op, dst)
        self.tb.run()
        result_data = list(dst.data())
        self.assertEqual(expected_data, result_data)

    def test_001(self):
        # Degenerate table: every input maps to 0.
        symbols = [0, 0, 0, 0]
        self.helper(symbols)

    def test_002(self):
        # Reversed table.
        symbols = [3, 2, 1, 0]
        self.helper(symbols)

    def test_003(self):
        # Values spanning the unsigned-byte range (7, 31, 128, 255).
        symbols = [8-1, 32-1, 128, 256-1]
        self.helper(symbols)
if __name__ == '__main__':
    # Run through GNU Radio's unittest wrapper; the second argument names
    # the XML results file it writes.
    gr_unittest.run(test_map, "test_map.xml")
|
gpl-3.0
|
tovmeod/anaf
|
anaf/sales/api/urls.py
|
1
|
2683
|
# -*- coding: utf-8 -*-
from anaf.sales.api import handlers
from django.conf.urls import url, patterns
from anaf.core.api.auth import auth_engine
from anaf.core.api.doc import documentation_view
from anaf.core.api.resource import CsrfExemptResource
# Shared keyword arguments: every resource authenticates via auth_engine.
ad = {'authentication': auth_engine}

# sales resources
# CSRF is exempted because these endpoints are hit by API clients that
# authenticate through auth_engine rather than session cookies.
saleStatusResource = CsrfExemptResource(
    handler=handlers.SaleStatusHandler, **ad)
productResource = CsrfExemptResource(handler=handlers.ProductHandler, **ad)
sourceResource = CsrfExemptResource(handler=handlers.SaleSourceHandler, **ad)
leadResource = CsrfExemptResource(handler=handlers.LeadHandler, **ad)
opportunityResource = CsrfExemptResource(
    handler=handlers.OpportunityHandler, **ad)
orderResource = CsrfExemptResource(handler=handlers.SaleOrderHandler, **ad)
subscriptionResource = CsrfExemptResource(
    handler=handlers.SubscriptionHandler, **ad)
orderedProductResource = CsrfExemptResource(
    handler=handlers.OrderedProductHandler, **ad)

# URL table: one collection pattern plus one detail pattern per resource.
# NOTE(review): each list/detail pair reuses the same URL name (e.g.
# "api_sales_status"); reverse() will resolve to the later (detail) pattern.
urlpatterns = patterns('',
                       # Sales
                       url(r'^doc$', documentation_view, kwargs={'module': handlers}, name="api_sales_doc"),
                       url(r'^statuses$', saleStatusResource, name="api_sales_status"),
                       url(r'^status/(?P<object_ptr>\d+)', saleStatusResource, name="api_sales_status"),
                       url(r'^products$', productResource, name="api_sales_products"),
                       url(r'^product/(?P<object_ptr>\d+)', productResource, name="api_sales_products"),
                       url(r'^sources$', sourceResource, name="api_sales_sources"),
                       url(r'^source/(?P<object_ptr>\d+)', sourceResource, name="api_sales_sources"),
                       url(r'^leads$', leadResource, name="api_sales_leads"),
                       url(r'^lead/(?P<object_ptr>\d+)', leadResource, name="api_sales_leads"),
                       url(r'^opportunities$', opportunityResource, name="api_sales_opportunities"),
                       url(r'^opportunity/(?P<object_ptr>\d+)', opportunityResource, name="api_sales_opportunities"),
                       url(r'^orders$', orderResource, name="api_sales_orders"),
                       url(r'^order/(?P<object_ptr>\d+)', orderResource, name="api_sales_orders"),
                       url(r'^subscriptions$', subscriptionResource, name="api_sales_subscriptions"),
                       url(r'^subscription/(?P<object_ptr>\d+)', subscriptionResource, name="api_sales_subscriptions"),
                       url(r'^ordered_product/(?P<object_ptr>\d+)', orderedProductResource,
                           name="api_sales_ordered_products"),
                       )
|
bsd-3-clause
|
amanharitsh123/zulip
|
zerver/webhooks/airbrake/view.py
|
3
|
1508
|
# Webhooks for external integrations.
from typing import Dict, Any, Text
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.lib.actions import check_send_stream_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import UserProfile
# Message templates: the topic is just the project name; the body is a
# markdown link from the error class to its Airbrake error page.
AIRBRAKE_SUBJECT_TEMPLATE = '{project_name}'
AIRBRAKE_MESSAGE_TEMPLATE = '[{error_class}]({error_url}): "{error_message}" occurred.'
@api_key_only_webhook_view('Airbrake')
@has_request_variables
def api_airbrake_webhook(request, user_profile,
                         payload=REQ(argument_type='body'),
                         stream=REQ(default='airbrake')):
    # type: (HttpRequest, UserProfile, Dict[str, Any], Text) -> HttpResponse
    """Handle an Airbrake error webhook: format the payload and post it
    to the configured stream (default 'airbrake')."""
    check_send_stream_message(user_profile, request.client, stream,
                              get_subject(payload), get_body(payload))
    return json_success()
def get_subject(payload):
    # type: (Dict[str, Any]) -> str
    """Message topic: the Airbrake project's name."""
    project_name = payload['error']['project']['name']
    return AIRBRAKE_SUBJECT_TEMPLATE.format(project_name=project_name)
def get_body(payload):
    # type: (Dict[str, Any]) -> str
    """Message body: markdown link from error class to its Airbrake page."""
    error = payload['error']
    return AIRBRAKE_MESSAGE_TEMPLATE.format(
        error_url=payload['airbrake_error_url'],
        error_class=error['error_class'],
        error_message=error['error_message'],
    )
|
apache-2.0
|
marc-sensenich/ansible
|
test/units/module_utils/basic/test_filesystem.py
|
113
|
5190
|
# -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from units.mock.procenv import ModuleTestCase
from units.compat.mock import patch, MagicMock
from ansible.module_utils.six.moves import builtins
realimport = builtins.__import__
class TestOtherFilesystem(ModuleTestCase):
    """Tests for AnsibleModule filesystem helpers: user_and_group(),
    find_mount_point(), set_owner_if_different() and
    set_group_if_different()."""

    def test_module_utils_basic_ansible_module_user_and_group(self):
        """user_and_group() should return the (uid, gid) of os.lstat."""
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None

        am = basic.AnsibleModule(
            argument_spec=dict(),
        )

        mock_stat = MagicMock()
        mock_stat.st_uid = 0
        mock_stat.st_gid = 0

        with patch('os.lstat', return_value=mock_stat):
            self.assertEqual(am.user_and_group('/path/to/file'), (0, 0))

    def test_module_utils_basic_ansible_module_find_mount_point(self):
        """find_mount_point() should return the nearest mounted ancestor."""
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None

        am = basic.AnsibleModule(
            argument_spec=dict(),
        )

        def _mock_ismount(path):
            # Only the root is a mount point in this scenario.
            if path == b'/':
                return True
            return False

        with patch('os.path.ismount', side_effect=_mock_ismount):
            self.assertEqual(am.find_mount_point('/root/fs/../mounted/path/to/whatever'), '/')

        def _mock_ismount(path):
            # Both / and /subdir/mount are mounts; the nearest one must win.
            if path == b'/subdir/mount':
                return True
            if path == b'/':
                return True
            return False

        with patch('os.path.ismount', side_effect=_mock_ismount):
            self.assertEqual(am.find_mount_point('/subdir/mount/path/to/whatever'), '/subdir/mount')

    def test_module_utils_basic_ansible_module_set_owner_if_different(self):
        """set_owner_if_different() behavior for None, uid, name, check mode
        and lchown failure."""
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None

        am = basic.AnsibleModule(
            argument_spec=dict(),
        )

        # owner=None: nothing to change; the `changed` flag passes through.
        self.assertEqual(am.set_owner_if_different('/path/to/file', None, True), True)
        self.assertEqual(am.set_owner_if_different('/path/to/file', None, False), False)

        # Pretend the file is currently owned by uid/gid 500.
        am.user_and_group = MagicMock(return_value=(500, 500))

        with patch('os.lchown', return_value=None) as m:
            # Numeric uid differing from current -> lchown(path, uid, -1).
            self.assertEqual(am.set_owner_if_different('/path/to/file', 0, False), True)
            m.assert_called_with(b'/path/to/file', 0, -1)

            def _mock_getpwnam(*args, **kwargs):
                mock_pw = MagicMock()
                mock_pw.pw_uid = 0
                return mock_pw

            m.reset_mock()
            # Owner given by name is resolved through pwd.getpwnam.
            with patch('pwd.getpwnam', side_effect=_mock_getpwnam):
                self.assertEqual(am.set_owner_if_different('/path/to/file', 'root', False), True)
                m.assert_called_with(b'/path/to/file', 0, -1)

            # Unknown user name -> module fail_json -> SystemExit.
            with patch('pwd.getpwnam', side_effect=KeyError):
                self.assertRaises(SystemExit, am.set_owner_if_different, '/path/to/file', 'root', False)

            m.reset_mock()
            # Check mode reports the change but must not call lchown.
            am.check_mode = True
            self.assertEqual(am.set_owner_if_different('/path/to/file', 0, False), True)
            self.assertEqual(m.called, False)
            am.check_mode = False

        # lchown failing -> module fail_json -> SystemExit.
        with patch('os.lchown', side_effect=OSError) as m:
            self.assertRaises(SystemExit, am.set_owner_if_different, '/path/to/file', 'root', False)

    def test_module_utils_basic_ansible_module_set_group_if_different(self):
        """Mirror of the owner test for group changes (grp.getgrnam)."""
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None

        am = basic.AnsibleModule(
            argument_spec=dict(),
        )

        # group=None: nothing to change; the `changed` flag passes through.
        self.assertEqual(am.set_group_if_different('/path/to/file', None, True), True)
        self.assertEqual(am.set_group_if_different('/path/to/file', None, False), False)

        am.user_and_group = MagicMock(return_value=(500, 500))

        with patch('os.lchown', return_value=None) as m:
            # Numeric gid differing from current -> lchown(path, -1, gid).
            self.assertEqual(am.set_group_if_different('/path/to/file', 0, False), True)
            m.assert_called_with(b'/path/to/file', -1, 0)

            def _mock_getgrnam(*args, **kwargs):
                mock_gr = MagicMock()
                mock_gr.gr_gid = 0
                return mock_gr

            m.reset_mock()
            # Group given by name is resolved through grp.getgrnam.
            with patch('grp.getgrnam', side_effect=_mock_getgrnam):
                self.assertEqual(am.set_group_if_different('/path/to/file', 'root', False), True)
                m.assert_called_with(b'/path/to/file', -1, 0)

            # Unknown group name -> module fail_json -> SystemExit.
            with patch('grp.getgrnam', side_effect=KeyError):
                self.assertRaises(SystemExit, am.set_group_if_different, '/path/to/file', 'root', False)

            m.reset_mock()
            # Check mode reports the change but must not call lchown.
            am.check_mode = True
            self.assertEqual(am.set_group_if_different('/path/to/file', 0, False), True)
            self.assertEqual(m.called, False)
            am.check_mode = False

        # lchown failing -> module fail_json -> SystemExit.
        with patch('os.lchown', side_effect=OSError) as m:
            self.assertRaises(SystemExit, am.set_group_if_different, '/path/to/file', 'root', False)
|
gpl-3.0
|
jpabferreira/linux-pcsws
|
tools/perf/scripts/python/syscall-counts.py
|
11181
|
1522
|
# system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
# Usage string handed to sys.exit() when too many arguments are given.
usage = "perf script -s syscall-counts.py [comm]\n";

# Optional single argument restricts counting to one command name.
for_comm = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    for_comm = sys.argv[1]

# syscall id -> count; autodict comes from perf's Core helper module.
syscalls = autodict()
def trace_begin():
    # Called once by perf before events arrive. Python-2-only print
    # statement syntax, as used throughout this script.
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called once by perf when the trace ends; emit the summary table.
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	"""Per-event hook called by perf for every raw sys_enter tracepoint.

	Increments the per-syscall counter, optionally filtered to one comm.
	"""
	# Skip events from other commands when a filter was given.
	if for_comm is not None and common_comm != for_comm:
		return
	# First hit for this id leaves an empty autodict node behind, which
	# makes the += raise TypeError; seed the counter with 1 in that case.
	try:
		syscalls[id] += 1
	except TypeError:
		syscalls[id] = 1
def print_syscall_totals():
    # Header reflects whether output was filtered to a single comm.
    # NOTE: Python-2-only constructs below (print statements with trailing
    # commas, dict.iteritems, tuple-unpacking lambda parameters).
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events:\n\n",
    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
                                 "-----------"),
    # Sort by count (descending), breaking ties on syscall id.
    for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
        print "%-40s %10d\n" % (syscall_name(id), val),
|
gpl-2.0
|
mattvonrocketstein/ymir
|
docs/fabfile.py
|
1
|
4347
|
# -*- coding: utf-8 -*-
from fabric import api
import os
import shutil
from fabric import colors
# Port used by the local preview server (`fab serve`).
PORT = 8000
PROJECT_NAME = 'ymir'
# Layout: this fabfile lives in docs/; the project root is one level up.
DOC_ROOT = os.path.dirname(__file__)
SRC_ROOT = os.path.dirname(DOC_ROOT)
# Pelican writes the generated site into docs/ymir/.
GEN_PATH = os.path.join(DOC_ROOT, 'ymir')
# Local checkout of the github-pages repository that `push` publishes into.
DEPLOY_PATH = "~/code/ghio/{0}".format(PROJECT_NAME)
DEPLOY_PATH = os.path.expanduser(DEPLOY_PATH)
def check_links_prod():
    """Link-check the published site rather than the local dev server."""
    return check_links(
        # proto='https',
        base_domain='mattvonrocketstein.github.io')
def check_links(url='/ymir', proto='http', base_domain="localhost"):
    """Crawl the site with webcheck and open the broken-link report."""
    # Local dev server runs on PORT; any other host is assumed on port 80.
    port = PORT if base_domain == 'localhost' else 80
    target = '{1}://{0}:'.format(base_domain, proto) + str(port) + url
    api.local(
        "webcheck --force "
        "--ignore-robots --avoid-external "
        "--output webcheck " + target)
    import webbrowser
    report_dir = os.path.join(os.path.dirname(__file__), 'webcheck/')
    webbrowser.open("file://{0}/badlinks.html".format(report_dir))
    return
def parse_lines(lines):
    # For each broken link reported by wget, grep the markdown sources for
    # any mention of it so the author can locate the reference.
    # NOTE(review): `url` is a free variable here — this function appears to
    # have been factored out of check_links and still relies on its local
    # `url`; calling it at module scope would raise NameError. Confirm.
    print colors.red('broken links:')
    links = [x.replace(url, '')[1:] for x in lines]
    for link in links:
        print colors.red(link)
        with api.quiet():  # (hide="warn_only=True):
            z = api.local(
                "find {0} -name *.md|xargs grep '{1}'".format(DOC_ROOT, link), capture=True)
        if z.succeeded:
            print str(z)
        else:
            print "could not find any mention"
        print
# NOTE(review): the statements below reference `base_domain` and `url`,
# which are not defined at module scope — as written this would raise
# NameError on import. It looks like orphaned code that originally lived
# inside check_links (after its early `return`); verify and re-nest or
# delete.
# fab run should already be started
logfile = "link_check.log"
base_url = 'http://{0}:'.format(base_domain)
port = str((PORT if base_domain == 'localhost' else 80))
url = base_url + port + url
wipe_logfile = lambda: api.local('rm -f "{0}"'.format(logfile))
wipe_logfile()
# Spider the site with wget; non-zero exit is expected on broken links.
with api.settings(warn_only=True):
    api.local(
        ("wget -e robots=off --spider -r -nd "
         "-nv -o {1} {0}").format(url, logfile))
with open(logfile, 'r') as fhandle:
    lines = [x.strip() for x in fhandle.readlines()]
# Extract the section of the log between the "Found N broken links."
# marker and the "FINISHED" footer.
start = end = None
for line in lines:
    if line.startswith('Found') and line.endswith(" broken links."):
        start = lines.index(line)
    if line.startswith('FINISHED') and line.endswith('--'):
        end = lines.index(line)
if start is not None and end is not None:
    lines = lines[start + 2:end - 1]
    parse_lines(lines)
else:
    print "no broken links found"
def add_coverage(_dir=GEN_PATH):
    """Copy the htmlcov coverage report from the source root into the
    generated site, if one exists."""
    print colors.red("adding coverage data")
    cdir = os.path.join(SRC_ROOT, 'htmlcov')
    if os.path.exists(cdir):
        api.local("cp -r {0} {1}".format(cdir, _dir))
def clean():
    """Drop and recreate the generated-site output directory."""
    target = GEN_PATH
    if os.path.isdir(target):
        shutil.rmtree(target)
    os.makedirs(target)
def build(conf='pelicanconf.py'):
    """Build local version of site with pelican into GEN_PATH."""
    here = os.path.dirname(__file__)
    with api.lcd(here):
        api.local('pelican -s {0} -o {1}'.format(conf, GEN_PATH))
def rebuild():
    """`clean` then `build`"""
    clean()
    build()
    # Fold the coverage report into the freshly generated site.
    add_coverage(GEN_PATH)
def regenerate():
    """Automatically regenerate site upon file modification"""
    # pelican -r watches the sources and rebuilds on change.
    with api.lcd(os.path.dirname(__file__)):
        api.local('pelican -r -s pelicanconf.py -o {0}'.format(GEN_PATH))
def serve():
    """Serve site at http://localhost:8000/"""
    # Static file server via twisted's `twistd web`.
    with api.lcd(os.path.dirname(GEN_PATH)):
        api.local("twistd -n web -p {0} --path .".format(PORT))
def push():
    """Copy the generated site into the gh-pages checkout, commit, push."""
    if os.path.exists(DEPLOY_PATH):
        with api.lcd(DEPLOY_PATH):
            # Remove everything previously published so deleted pages do
            # not linger in the deployed site.
            api.local("find . -type f|xargs git rm -f")
    api.local("mkdir -p {0}".format(DEPLOY_PATH))
    api.local(
        "cp -rfv {0} {1}".format(
            os.path.join(GEN_PATH, '*'),
            DEPLOY_PATH))
    with api.lcd(DEPLOY_PATH):
        api.local("find . -type f|xargs git add")
        api.local("git commit . -m'publishing {0}'".format(PROJECT_NAME))
        api.local("git push")
def publish():
    # Production build followed by a push to the gh-pages checkout.
    build_prod()
    push()
def build_prod():
    # Same as rebuild(), but with the publish-specific pelican settings.
    clean()
    build("pelican_publish.py")
    add_coverage(GEN_PATH)
def run():
    """Run the auto-regenerating build and the dev server in parallel.

    Requires the third-party `littleworkers` package.
    """
    from littleworkers import Pool
    commands = [
        'fab regenerate',
        'fab serve'
    ]
    lil = Pool(workers=2)
    lil.run(commands)
|
mit
|
ycaihua/scikit-learn
|
sklearn/tests/test_pipeline.py
|
17
|
12512
|
"""
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import BaseEstimator, clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
# Tiny text corpus used by the FeatureUnion/CountVectorizer tests below.
JUNK_FOOD_DOCS = (
    "the pizza pizza beer copyright",
    "the pizza burger beer copyright",
    "the the pizza beer beer copyright",
    "the burger beer beer copyright",
    "the coke burger coke copyright",
    "the coke burger burger",
)
class IncorrectT(BaseEstimator):
    """Small class to test parameter dispatching.

    Deliberately lacks a ``fit`` method, so Pipeline must reject it.
    """
    def __init__(self, a=None, b=None):
        self.a = a
        self.b = b
class T(IncorrectT):
    # Adds the fit() method IncorrectT lacks, making it a minimal estimator.
    def fit(self, X, y):
        return self
class TransfT(T):
    # Identity transformer: passes X through unchanged (no fit_transform,
    # which several tests below rely on).
    def transform(self, X, y=None):
        return X
class FitParamT(BaseEstimator):
    """Mock classifier recording whether fit() received its fit-parameter.

    ``predict`` echoes back the value passed to ``fit`` via
    ``should_succeed``, letting tests verify fit-parameter routing.
    """
    def __init__(self):
        self.successful = False

    def fit(self, X, y, should_succeed=False):
        self.successful = should_succeed
        # Return self per the scikit-learn estimator convention so the mock
        # supports chained calls like a real estimator. (Also removed a
        # stray dead `pass` statement from __init__.)
        return self

    def predict(self, X):
        return self.successful
def test_pipeline_init():
    """ Test the various init parameters of the pipeline.
    """
    assert_raises(TypeError, Pipeline)
    # Check that we can't instantiate pipelines with objects without fit
    # method
    # NOTE(review): assert_raises' return value is not a pipeline; `pipe`
    # is rebound below before it is used.
    pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
    # Smoke test with only an estimator
    clf = T()
    pipe = Pipeline([('svc', clf)])
    assert_equal(pipe.get_params(deep=True),
                 dict(svc__a=None, svc__b=None, svc=clf))
    # Check that params are set
    pipe.set_params(svc__a=0.1)
    assert_equal(clf.a, 0.1)
    # Smoke test the repr:
    repr(pipe)
    # Test with two objects
    clf = SVC()
    filter1 = SelectKBest(f_classif)
    pipe = Pipeline([('anova', filter1), ('svc', clf)])
    # Check that we can't use the same stage name twice
    assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
    # Check that params are set
    pipe.set_params(svc__C=0.1)
    assert_equal(clf.C, 0.1)
    # Smoke test the repr:
    repr(pipe)
    # Check that params are not set when naming them wrong
    assert_raises(ValueError, pipe.set_params, anova__C=0.1)
    # Test clone
    pipe2 = clone(pipe)
    assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
    # Check that apart from estimators, the parameters are the same
    params = pipe.get_params()
    params2 = pipe2.get_params()
    # Remove estimators that where copied
    params.pop('svc')
    params.pop('anova')
    params2.pop('svc')
    params2.pop('anova')
    assert_equal(params, params2)
def test_pipeline_methods_anova():
    """ Test the various methods of the pipeline (anova).
    """
    iris = load_iris()
    X = iris.data
    y = iris.target
    # Test with Anova + LogisticRegression
    clf = LogisticRegression()
    filter1 = SelectKBest(f_classif, k=2)
    pipe = Pipeline([('anova', filter1), ('logistic', clf)])
    # Smoke-test fit plus every prediction-related method end to end.
    pipe.fit(X, y)
    pipe.predict(X)
    pipe.predict_proba(X)
    pipe.predict_log_proba(X)
    pipe.score(X, y)
def test_pipeline_fit_params():
    """Test that the pipeline can take fit parameters
    """
    pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
    # clf__should_succeed must be routed to FitParamT.fit() only.
    pipe.fit(X=None, y=None, clf__should_succeed=True)
    # classifier should return True
    assert_true(pipe.predict(None))
    # and transformer params should not be changed
    assert_true(pipe.named_steps['transf'].a is None)
    assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_methods_pca_svm():
    """Test the various methods of the pipeline (pca + svm)."""
    iris = load_iris()
    X = iris.data
    y = iris.target
    # Test with PCA + SVC
    clf = SVC(probability=True, random_state=0)
    pca = PCA(n_components='mle', whiten=True)
    pipe = Pipeline([('pca', pca), ('svc', clf)])
    # Smoke-test fit plus every prediction-related method end to end.
    pipe.fit(X, y)
    pipe.predict(X)
    pipe.predict_proba(X)
    pipe.predict_log_proba(X)
    pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
    """Test the various methods of the pipeline (preprocessing + svm)."""
    iris = load_iris()
    X = iris.data
    y = iris.target
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    scaler = StandardScaler()
    pca = RandomizedPCA(n_components=2, whiten=True)
    clf = SVC(probability=True, random_state=0)
    # Run the same shape checks with two different preprocessors.
    for preprocessing in [scaler, pca]:
        pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
        pipe.fit(X, y)
        # check shapes of various prediction functions
        predict = pipe.predict(X)
        assert_equal(predict.shape, (n_samples,))
        proba = pipe.predict_proba(X)
        assert_equal(proba.shape, (n_samples, n_classes))
        log_proba = pipe.predict_log_proba(X)
        assert_equal(log_proba.shape, (n_samples, n_classes))
        decision_function = pipe.decision_function(X)
        assert_equal(decision_function.shape, (n_samples, n_classes))
        pipe.score(X, y)
def test_feature_union():
    # basic sanity check for feature union
    iris = load_iris()
    X = iris.data
    X -= X.mean(axis=0)
    y = iris.target
    svd = TruncatedSVD(n_components=2, random_state=0)
    select = SelectKBest(k=1)
    fs = FeatureUnion([("svd", svd), ("select", select)])
    fs.fit(X, y)
    X_transformed = fs.transform(X)
    # 2 SVD components + 1 selected feature stacked side by side.
    assert_equal(X_transformed.shape, (X.shape[0], 3))
    # check if it does the expected thing
    assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
    assert_array_equal(X_transformed[:, -1],
                       select.fit_transform(X, y).ravel())
    # test if it also works for sparse input
    # We use a different svd object to control the random_state stream
    fs = FeatureUnion([("svd", svd), ("select", select)])
    X_sp = sparse.csr_matrix(X)
    X_sp_transformed = fs.fit_transform(X_sp, y)
    assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
    # test setting parameters
    fs.set_params(select__k=2)
    assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
    # test it works with transformers missing fit_transform
    fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
    X_transformed = fs.fit_transform(X, y)
    # 4 identity features + 2 SVD components + 2 selected features.
    assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
    """make_union should auto-name steps after the estimator classes."""
    reducer = PCA()
    passthrough = TransfT()
    union = make_union(reducer, passthrough)
    step_names, step_objs = zip(*union.transformer_list)
    assert_equal(step_names, ("pca", "transft"))
    assert_equal(step_objs, (reducer, passthrough))
def test_pipeline_transform():
    # Test whether pipeline works with a transformer at the end.
    # Also test pipeline.transform and pipeline.inverse_transform
    iris = load_iris()
    X = iris.data
    pca = PCA(n_components=2)
    pipeline = Pipeline([('pca', pca)])
    # test transform and fit_transform:
    X_trans = pipeline.fit(X).transform(X)
    X_trans2 = pipeline.fit_transform(X)
    X_trans3 = pca.fit_transform(X)
    assert_array_almost_equal(X_trans, X_trans2)
    assert_array_almost_equal(X_trans, X_trans3)
    # Round-trip through inverse_transform must match the bare PCA's.
    X_back = pipeline.inverse_transform(X_trans)
    X_back2 = pca.inverse_transform(X_trans)
    assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
    # Test whether pipeline works with a transformer missing fit_transform
    iris = load_iris()
    X = iris.data
    y = iris.target
    transft = TransfT()
    pipeline = Pipeline([('mock', transft)])
    # test fit_transform:
    # Pipeline must fall back to fit(...).transform(...) for TransfT.
    X_trans = pipeline.fit_transform(X, y)
    X_trans2 = transft.fit(X, y).transform(X)
    assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
    # Step names derive from class names, with numeric suffixes
    # disambiguating repeated estimator types.
    t1 = TransfT()
    t2 = TransfT()
    pipe = make_pipeline(t1, t2)
    assert_true(isinstance(pipe, Pipeline))
    assert_equal(pipe.steps[0][0], "transft-1")
    assert_equal(pipe.steps[1][0], "transft-2")
    pipe = make_pipeline(t1, t2, FitParamT())
    assert_true(isinstance(pipe, Pipeline))
    assert_equal(pipe.steps[0][0], "transft-1")
    assert_equal(pipe.steps[1][0], "transft-2")
    assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
    # test feature union with transformer weights
    iris = load_iris()
    X = iris.data
    y = iris.target
    pca = RandomizedPCA(n_components=2, random_state=0)
    select = SelectKBest(k=1)
    # test using fit followed by transform
    fs = FeatureUnion([("pca", pca), ("select", select)],
                      transformer_weights={"pca": 10})
    fs.fit(X, y)
    X_transformed = fs.transform(X)
    # test using fit_transform
    fs = FeatureUnion([("pca", pca), ("select", select)],
                      transformer_weights={"pca": 10})
    X_fit_transformed = fs.fit_transform(X, y)
    # test it works with transformers missing fit_transform
    fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
                      transformer_weights={"mock": 10})
    X_fit_transformed_wo_method = fs.fit_transform(X, y)
    # check against expected result
    # We use a different pca object to control the random_state stream
    # The pca block must be scaled by its weight (10); the unweighted
    # select block must be unchanged.
    assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
    assert_array_equal(X_transformed[:, -1],
                       select.fit_transform(X, y).ravel())
    assert_array_almost_equal(X_fit_transformed[:, :-1],
                              10 * pca.fit_transform(X))
    assert_array_equal(X_fit_transformed[:, -1],
                       select.fit_transform(X, y).ravel())
    assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
    # test that n_jobs work for FeatureUnion
    X = JUNK_FOOD_DOCS
    # Serial baseline and two identical parallel unions (the second one is
    # used to check fit_transform and post-fit_transform state).
    fs = FeatureUnion([
        ("words", CountVectorizer(analyzer='word')),
        ("chars", CountVectorizer(analyzer='char')),
    ])
    fs_parallel = FeatureUnion([
        ("words", CountVectorizer(analyzer='word')),
        ("chars", CountVectorizer(analyzer='char')),
    ], n_jobs=2)
    fs_parallel2 = FeatureUnion([
        ("words", CountVectorizer(analyzer='word')),
        ("chars", CountVectorizer(analyzer='char')),
    ], n_jobs=2)
    fs.fit(X)
    X_transformed = fs.transform(X)
    assert_equal(X_transformed.shape[0], len(X))
    fs_parallel.fit(X)
    X_transformed_parallel = fs_parallel.transform(X)
    assert_equal(X_transformed.shape, X_transformed_parallel.shape)
    assert_array_equal(
        X_transformed.toarray(),
        X_transformed_parallel.toarray()
    )
    # fit_transform should behave the same
    X_transformed_parallel2 = fs_parallel2.fit_transform(X)
    assert_array_equal(
        X_transformed.toarray(),
        X_transformed_parallel2.toarray()
    )
    # transformers should stay fit after fit_transform
    X_transformed_parallel2 = fs_parallel2.transform(X)
    assert_array_equal(
        X_transformed.toarray(),
        X_transformed_parallel2.toarray()
    )
def test_feature_union_feature_names():
    word_vect = CountVectorizer(analyzer="word")
    char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
    ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
    ft.fit(JUNK_FOOD_DOCS)
    feature_names = ft.get_feature_names()
    # Every union feature name must be prefixed with its step name.
    for feat in feature_names:
        assert_true("chars__" in feat or "words__" in feat)
    assert_equal(len(feature_names), 35)
def test_classes_property():
    iris = load_iris()
    X = iris.data
    y = iris.target
    # Regressor pipelines expose no classes_ attribute...
    reg = make_pipeline(SelectKBest(k=1), LinearRegression())
    reg.fit(X, y)
    assert_raises(AttributeError, getattr, reg, "classes_")
    # ...and classifier pipelines only expose it after fitting.
    clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
    assert_raises(AttributeError, getattr, clf, "classes_")
    clf.fit(X, y)
    assert_array_equal(clf.classes_, np.unique(y))
|
bsd-3-clause
|
raajitr/django_hangman
|
env/lib/python2.7/site-packages/requests/packages/urllib3/util/selectors.py
|
86
|
18836
|
# Backport of selectors.py from Python 3.5+ to support Python < 3.4
# Also has the behavior specified in PEP 475 which is to retry syscalls
# in the case of an EINTR error. This module is required because selectors34
# does not follow this behavior and instead returns that no dile descriptor
# events have occurred rather than retry the syscall. The decision to drop
# support for select.devpoll is made to maintain 100% test coverage.
import errno
import math
import select
from collections import namedtuple, Mapping
import time
try:
    monotonic = time.monotonic
except (AttributeError, ImportError):  # Python 3.3<
    # Fall back to wall-clock time on interpreters without time.monotonic;
    # timeout arithmetic can then be skewed by system clock adjustments.
    monotonic = time.time

# Event masks: bit flags so one file object can be watched for both.
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)

HAS_SELECT = True  # Variable that shows whether the platform has a selector.
_SYSCALL_SENTINEL = object()  # Sentinel in case a system call returns None.
class SelectorError(Exception):
    """Raised when a selector syscall fails with a non-EINTR error code.

    The failing errno is exposed as the ``errno`` attribute.
    """

    def __init__(self, errcode):
        super(SelectorError, self).__init__()
        self.errno = errcode

    def __repr__(self):
        return "<SelectorError errno={0}>".format(self.errno)

    def __str__(self):
        return repr(self)
def _fileobj_to_fd(fileobj):
    """Coerce *fileobj* to a non-negative integer file descriptor.

    Integers pass through unchanged; anything else must provide a usable
    fileno() method. Raises ValueError otherwise, or for negative fds.
    """
    fd = fileobj
    if not isinstance(fd, int):
        try:
            fd = int(fileobj.fileno())
        except (AttributeError, TypeError, ValueError):
            raise ValueError("Invalid file object: {0!r}".format(fileobj))
    if fd < 0:
        raise ValueError("Invalid file descriptor: {0}".format(fd))
    return fd
def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
    """ Wrapper function for syscalls that could fail due to EINTR.
    All functions should be retried if there is time left in the timeout
    in accordance with PEP 475.

    If *recalc_timeout* is true, kwargs must contain a "timeout" entry,
    which is shrunk on every EINTR retry. Raises OSError(ETIMEDOUT) when
    the deadline passes, SelectorError for other syscall errnos. """
    timeout = kwargs.get("timeout", None)
    if timeout is None:
        expires = None
        recalc_timeout = False
    else:
        timeout = float(timeout)
        if timeout < 0.0:  # Timeout less than 0 treated as no timeout.
            expires = None
        else:
            expires = monotonic() + timeout

    args = list(args)
    if recalc_timeout and "timeout" not in kwargs:
        raise ValueError(
            "Timeout must be in args or kwargs to be recalculated")

    result = _SYSCALL_SENTINEL
    while result is _SYSCALL_SENTINEL:
        try:
            result = func(*args, **kwargs)
        # OSError is thrown by select.select
        # IOError is thrown by select.epoll.poll
        # select.error is thrown by select.poll.poll
        # Aren't we thankful for Python 3.x rework for exceptions?
        except (OSError, IOError, select.error) as e:
            # select.error wasn't a subclass of OSError in the past.
            errcode = None
            if hasattr(e, "errno"):
                errcode = e.errno
            elif hasattr(e, "args"):
                errcode = e.args[0]

            # Also test for the Windows equivalent of EINTR.
            is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
                                                       errcode == errno.WSAEINTR))

            if is_interrupt:
                if expires is not None:
                    current_time = monotonic()
                    if current_time > expires:
                        # BUG FIX: OSError() accepts no keyword arguments,
                        # so the previous `OSError(errno=errno.ETIMEDOUT)`
                        # raised TypeError instead of the intended timeout
                        # error. Use the positional (errno, strerror) form
                        # so callers see e.errno == errno.ETIMEDOUT.
                        raise OSError(errno.ETIMEDOUT, "Connection timed out")
                    if recalc_timeout:
                        if "timeout" in kwargs:
                            kwargs["timeout"] = expires - current_time
                continue
            if errcode:
                raise SelectorError(errcode)
            else:
                raise
    return result
SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
class _SelectorMapping(Mapping):
    """ Mapping of file objects to selector keys """

    def __init__(self, selector):
        self._selector = selector

    def __len__(self):
        return len(self._selector._fd_to_key)

    def __getitem__(self, fileobj):
        try:
            # _fileobj_lookup also finds closed-but-still-registered
            # objects by scanning the selector's key table.
            fd = self._selector._fileobj_lookup(fileobj)
            return self._selector._fd_to_key[fd]
        except KeyError:
            raise KeyError("{0!r} is not registered.".format(fileobj))

    def __iter__(self):
        return iter(self._selector._fd_to_key)
class BaseSelector(object):
""" Abstract Selector class
A selector supports registering file objects to be monitored
for specific I/O events.
A file object is a file descriptor or any object with a
`fileno()` method. An arbitrary object can be attached to the
file object which can be used for example to store context info,
a callback, etc.
A selector can use various implementations (select(), poll(), epoll(),
and kqueue()) depending on the platform. The 'DefaultSelector' class uses
the most efficient implementation for the current platform.
"""
def __init__(self):
# Maps file descriptors to keys.
self._fd_to_key = {}
# Read-only mapping returned by get_map()
self._map = _SelectorMapping(self)
def _fileobj_lookup(self, fileobj):
""" Return a file descriptor from a file object.
This wraps _fileobj_to_fd() to do an exhaustive
search in case the object is invalid but we still
have it in our map. Used by unregister() so we can
unregister an object that was previously registered
even if it is closed. It is also used by _SelectorMapping
"""
try:
return _fileobj_to_fd(fileobj)
except ValueError:
# Search through all our mapped keys.
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
return key.fd
# Raise ValueError after all.
raise
def register(self, fileobj, events, data=None):
""" Register a file object for a set of events to monitor. """
if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
raise ValueError("Invalid events: {0!r}".format(events))
key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
if key.fd in self._fd_to_key:
raise KeyError("{0!r} (FD {1}) is already registered"
.format(fileobj, key.fd))
self._fd_to_key[key.fd] = key
return key
def unregister(self, fileobj):
""" Unregister a file object from being monitored. """
try:
key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
return key
def modify(self, fileobj, events, data=None):
""" Change a registered file object monitored events and data. """
# NOTE: Some subclasses optimize this operation even further.
try:
key = self._fd_to_key[self._fileobj_lookup(fileobj)]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
if events != key.events:
self.unregister(fileobj)
key = self.register(fileobj, events, data)
elif data != key.data:
# Use a shortcut to update the data.
key = key._replace(data=data)
self._fd_to_key[key.fd] = key
return key
def select(self, timeout=None):
""" Perform the actual selection until some monitored file objects
are ready or the timeout expires. """
raise NotImplementedError()
def close(self):
""" Close the selector. This must be called to ensure that all
underlying resources are freed. """
self._fd_to_key.clear()
self._map = None
def get_key(self, fileobj):
""" Return the key associated with a registered file object. """
mapping = self.get_map()
if mapping is None:
raise RuntimeError("Selector is closed")
try:
return mapping[fileobj]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
def get_map(self):
""" Return a mapping of file objects to selector keys """
return self._map
def _key_from_fd(self, fd):
""" Return the key associated to a given file descriptor
Return None if it is not found. """
try:
return self._fd_to_key[fd]
except KeyError:
return None
    def __enter__(self):
        # Context-manager support: `with selector:` yields the selector.
        return self
    def __exit__(self, *args):
        # Free underlying resources when leaving a `with` block.
        self.close()
# Almost all platforms have select.select()
if hasattr(select, "select"):
    class SelectSelector(BaseSelector):
        """ Select-based selector. """
        def __init__(self):
            super(SelectSelector, self).__init__()
            # fds currently registered for readability / writability, kept
            # in sync with the base class's _fd_to_key bookkeeping.
            self._readers = set()
            self._writers = set()
        def register(self, fileobj, events, data=None):
            key = super(SelectSelector, self).register(fileobj, events, data)
            if events & EVENT_READ:
                self._readers.add(key.fd)
            if events & EVENT_WRITE:
                self._writers.add(key.fd)
            return key
        def unregister(self, fileobj):
            key = super(SelectSelector, self).unregister(fileobj)
            self._readers.discard(key.fd)
            self._writers.discard(key.fd)
            return key
        def _select(self, r, w, timeout=None):
            """ Wrapper for select.select because timeout is a positional arg """
            return select.select(r, w, [], timeout)
        def select(self, timeout=None):
            # Selecting on empty lists on Windows errors out.
            if not len(self._readers) and not len(self._writers):
                return []
            # Negative timeouts are clamped to "poll and return at once".
            timeout = None if timeout is None else max(timeout, 0.0)
            ready = []
            r, w, _ = _syscall_wrapper(self._select, True, self._readers,
                                       self._writers, timeout)
            r = set(r)
            w = set(w)
            for fd in r | w:
                events = 0
                if fd in r:
                    events |= EVENT_READ
                if fd in w:
                    events |= EVENT_WRITE
                key = self._key_from_fd(fd)
                if key:
                    # Only report events the caller actually registered for.
                    ready.append((key, events & key.events))
            return ready
if hasattr(select, "poll"):
    class PollSelector(BaseSelector):
        """ Poll-based selector """
        def __init__(self):
            super(PollSelector, self).__init__()
            self._poll = select.poll()
        def register(self, fileobj, events, data=None):
            key = super(PollSelector, self).register(fileobj, events, data)
            # Translate the portable EVENT_* mask into poll() flag bits.
            event_mask = 0
            if events & EVENT_READ:
                event_mask |= select.POLLIN
            if events & EVENT_WRITE:
                event_mask |= select.POLLOUT
            self._poll.register(key.fd, event_mask)
            return key
        def unregister(self, fileobj):
            key = super(PollSelector, self).unregister(fileobj)
            self._poll.unregister(key.fd)
            return key
        def _wrap_poll(self, timeout=None):
            """ Wrapper function for select.poll.poll() so that
            _syscall_wrapper can work with only seconds. """
            if timeout is not None:
                if timeout <= 0:
                    timeout = 0
                else:
                    # select.poll.poll() has a resolution of 1 millisecond,
                    # round away from zero to wait *at least* timeout seconds.
                    timeout = math.ceil(timeout * 1e3)
            result = self._poll.poll(timeout)
            return result
        def select(self, timeout=None):
            ready = []
            fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
            for fd, event_mask in fd_events:
                events = 0
                # Any bit other than POLLIN (POLLOUT, POLLERR, POLLHUP, ...)
                # is reported as writable, and any bit other than POLLOUT as
                # readable, so error conditions wake both directions.
                if event_mask & ~select.POLLIN:
                    events |= EVENT_WRITE
                if event_mask & ~select.POLLOUT:
                    events |= EVENT_READ
                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))
            return ready
if hasattr(select, "epoll"):
    class EpollSelector(BaseSelector):
        """ Epoll-based selector """
        def __init__(self):
            super(EpollSelector, self).__init__()
            self._epoll = select.epoll()
        def fileno(self):
            # Expose the epoll fd so the selector can itself be monitored.
            return self._epoll.fileno()
        def register(self, fileobj, events, data=None):
            key = super(EpollSelector, self).register(fileobj, events, data)
            # Translate the portable EVENT_* mask into epoll flag bits.
            events_mask = 0
            if events & EVENT_READ:
                events_mask |= select.EPOLLIN
            if events & EVENT_WRITE:
                events_mask |= select.EPOLLOUT
            _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
            return key
        def unregister(self, fileobj):
            key = super(EpollSelector, self).unregister(fileobj)
            try:
                _syscall_wrapper(self._epoll.unregister, False, key.fd)
            except SelectorError:
                # This can occur when the fd was closed since registry.
                pass
            return key
        def select(self, timeout=None):
            if timeout is not None:
                if timeout <= 0:
                    timeout = 0.0
                else:
                    # select.epoll.poll() has a resolution of 1 millisecond
                    # but luckily takes seconds so we don't need a wrapper
                    # like PollSelector. Just for better rounding.
                    timeout = math.ceil(timeout * 1e3) * 1e-3
                timeout = float(timeout)
            else:
                timeout = -1.0 # epoll.poll() must have a float.
            # We always want at least 1 to ensure that select can be called
            # with no file descriptors registered. Otherwise will fail.
            max_events = max(len(self._fd_to_key), 1)
            ready = []
            fd_events = _syscall_wrapper(self._epoll.poll, True,
                                         timeout=timeout,
                                         maxevents=max_events)
            for fd, event_mask in fd_events:
                events = 0
                # Any bit besides EPOLLIN implies writability (including
                # error bits); any bit besides EPOLLOUT implies readability.
                if event_mask & ~select.EPOLLIN:
                    events |= EVENT_WRITE
                if event_mask & ~select.EPOLLOUT:
                    events |= EVENT_READ
                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))
            return ready
        def close(self):
            self._epoll.close()
            super(EpollSelector, self).close()
if hasattr(select, "kqueue"):
    class KqueueSelector(BaseSelector):
        """ Kqueue / Kevent-based selector """
        def __init__(self):
            super(KqueueSelector, self).__init__()
            self._kqueue = select.kqueue()
        def fileno(self):
            # Expose the kqueue fd so the selector can itself be monitored.
            return self._kqueue.fileno()
        def register(self, fileobj, events, data=None):
            key = super(KqueueSelector, self).register(fileobj, events, data)
            # kqueue uses one kevent per direction, so read and write
            # interest are registered independently.
            if events & EVENT_READ:
                kevent = select.kevent(key.fd,
                                       select.KQ_FILTER_READ,
                                       select.KQ_EV_ADD)
                _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
            if events & EVENT_WRITE:
                kevent = select.kevent(key.fd,
                                       select.KQ_FILTER_WRITE,
                                       select.KQ_EV_ADD)
                _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
            return key
        def unregister(self, fileobj):
            key = super(KqueueSelector, self).unregister(fileobj)
            if key.events & EVENT_READ:
                kevent = select.kevent(key.fd,
                                       select.KQ_FILTER_READ,
                                       select.KQ_EV_DELETE)
                try:
                    _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
                except SelectorError:
                    # The fd may already have been closed; ignore.
                    pass
            if key.events & EVENT_WRITE:
                kevent = select.kevent(key.fd,
                                       select.KQ_FILTER_WRITE,
                                       select.KQ_EV_DELETE)
                try:
                    _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
                except SelectorError:
                    pass
            return key
        def select(self, timeout=None):
            if timeout is not None:
                timeout = max(timeout, 0)
            # Each fd may contribute up to two kevents (read + write filter).
            max_events = len(self._fd_to_key) * 2
            ready_fds = {}
            kevent_list = _syscall_wrapper(self._kqueue.control, True,
                                           None, max_events, timeout)
            for kevent in kevent_list:
                fd = kevent.ident
                event_mask = kevent.filter
                events = 0
                if event_mask == select.KQ_FILTER_READ:
                    events |= EVENT_READ
                if event_mask == select.KQ_FILTER_WRITE:
                    events |= EVENT_WRITE
                key = self._key_from_fd(fd)
                if key:
                    # Merge read/write kevents for the same fd into a single
                    # (key, events) entry.
                    if key.fd not in ready_fds:
                        ready_fds[key.fd] = (key, events & key.events)
                    else:
                        old_events = ready_fds[key.fd][1]
                        ready_fds[key.fd] = (key, (events | old_events) & key.events)
            return list(ready_fds.values())
        def close(self):
            self._kqueue.close()
            super(KqueueSelector, self).close()
# Choose the best implementation, roughly:
# kqueue == epoll > poll > select. Devpoll not supported. (See above)
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
# Each class is only defined when the `select` module exposes the matching
# primitive, so checking globals() picks the best one for this platform.
if 'KqueueSelector' in globals(): # Platform-specific: Mac OS and BSD
    DefaultSelector = KqueueSelector
elif 'EpollSelector' in globals(): # Platform-specific: Linux
    DefaultSelector = EpollSelector
elif 'PollSelector' in globals(): # Platform-specific: Linux
    DefaultSelector = PollSelector
elif 'SelectSelector' in globals(): # Platform-specific: Windows
    DefaultSelector = SelectSelector
else: # Platform-specific: AppEngine
    def no_selector(_):
        # Placeholder "constructor" that fails loudly instead of at use time.
        raise ValueError("Platform does not have a selector")
    DefaultSelector = no_selector
    HAS_SELECT = False
|
mit
|
calfonso/ansible
|
lib/ansible/modules/windows/win_file_version.py
|
22
|
1738
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Sam Liu <sam.liu@activenetwork.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_file_version
version_added: "2.1"
short_description: Get DLL or EXE file build version
description:
- Get DLL or EXE file build version.
notes:
- This module will always return no change.
options:
path:
description:
- File to get version.
- Always provide absolute path.
required: yes
author:
- Sam Liu (@SamLiu79)
'''
EXAMPLES = r'''
- name: Get acm instance version
win_file_version:
path: C:\Windows\System32\cmd.exe
register: exe_file_version
- debug:
msg: '{{ exe_file_version }}'
'''
RETURN = r'''
win_file_version.path:
description: file path
returned: always
type: string
win_file_version.file_version:
description: file version number.
returned: no error
type: string
win_file_version.product_version:
description: the version of the product this file is distributed with.
returned: no error
type: string
win_file_version.file_major_part:
description: the major part of the version number.
returned: no error
type: string
win_file_version.file_minor_part:
description: the minor part of the version number of the file.
returned: no error
type: string
win_file_version.file_build_part:
description: build number of the file.
returned: no error
type: string
win_file_version.file_private_part:
description: file private part number.
returned: no error
type: string
'''
|
gpl-3.0
|
superstack/nova
|
nova/api/openstack/server_metadata.py
|
3
|
3648
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova import compute
from nova import quota
from nova import wsgi
from nova.api.openstack import common
from nova.api.openstack import faults
class Controller(common.OpenstackController):
    """The server metadata API controller for the Openstack API."""

    def __init__(self):
        self.compute_api = compute.API()
        super(Controller, self).__init__()

    def _get_metadata(self, context, server_id):
        """Fetch instance metadata and wrap it as {'metadata': {...}}."""
        metadata = self.compute_api.get_instance_metadata(context, server_id)
        # Copy into a plain dict so the response is serializable regardless
        # of what mapping type the compute API returned.
        meta_dict = dict(metadata.iteritems())
        return dict(metadata=meta_dict)

    def index(self, req, server_id):
        """Returns the list of metadata for a given instance."""
        context = req.environ['nova.context']
        return self._get_metadata(context, server_id)

    def create(self, req, server_id):
        """Create or update metadata items from the request body."""
        context = req.environ['nova.context']
        data = self._deserialize(req.body, req.get_content_type())
        metadata = data.get('metadata')
        try:
            self.compute_api.update_or_create_instance_metadata(context,
                                                                server_id,
                                                                metadata)
        except quota.QuotaError as error:
            self._handle_quota_error(error)
        return req.body

    def update(self, req, server_id, id):
        """Update a single metadata item.

        The body must contain exactly one item whose key matches the
        `id` from the URI; otherwise a 400 is raised.
        """
        # NOTE: `id` shadows the builtin, but it is part of the routing
        # interface and cannot be renamed here.
        context = req.environ['nova.context']
        body = self._deserialize(req.body, req.get_content_type())
        if id not in body:
            expl = _('Request body and URI mismatch')
            raise exc.HTTPBadRequest(explanation=expl)
        if len(body) > 1:
            expl = _('Request body contains too many items')
            raise exc.HTTPBadRequest(explanation=expl)
        try:
            self.compute_api.update_or_create_instance_metadata(context,
                                                                server_id,
                                                                body)
        except quota.QuotaError as error:
            self._handle_quota_error(error)
        return req.body

    def show(self, req, server_id, id):
        """Return a single metadata item, or a 404 fault when absent."""
        context = req.environ['nova.context']
        data = self._get_metadata(context, server_id)
        if id in data['metadata']:
            return {id: data['metadata'][id]}
        else:
            return faults.Fault(exc.HTTPNotFound())

    def delete(self, req, server_id, id):
        """Deletes an existing metadata item."""
        context = req.environ['nova.context']
        self.compute_api.delete_instance_metadata(context, server_id, id)

    def _handle_quota_error(self, error):
        """Reraise quota errors as api-specific http exceptions."""
        if error.code == "MetadataLimitExceeded":
            raise exc.HTTPBadRequest(explanation=error.message)
        raise error
|
apache-2.0
|
amisrs/angular-flask
|
angular_flask/lib/python2.7/site-packages/MySQLdb/__init__.py
|
76
|
3229
|
"""MySQLdb - A DB API v2.0 compatible interface to MySQL.
This package is a wrapper around _mysql, which mostly implements the
MySQL C API.
connect() -- connects to server
See the C API specification and the MySQL documentation for more info
on other items.
For information on how MySQLdb handles type conversion, see the
MySQLdb.converters module.
"""
__revision__ = """$Revision$"""[11:-2]
from MySQLdb.release import __version__, version_info, __author__
import _mysql
if version_info != _mysql.version_info:
raise ImportError("this is MySQLdb version %s, but _mysql is version %r" %
(version_info, _mysql.version_info))
threadsafety = 1
apilevel = "2.0"
paramstyle = "format"
from _mysql import *
from MySQLdb.constants import FIELD_TYPE
from MySQLdb.times import Date, Time, Timestamp, \
DateFromTicks, TimeFromTicks, TimestampFromTicks
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
class DBAPISet(frozenset):
    """A special type of set for which A == x is true if A is a
    DBAPISet and x is a member of that set."""
    def __eq__(self, other):
        # Against another DBAPISet: true when every element of self is in
        # other (subset semantics, matching the historical behavior).
        # Against anything else: a plain membership test.
        if isinstance(other, DBAPISet):
            return self.issubset(other)
        return other in self
# DB-API 2.0 type objects (PEP 249): each DBAPISet groups the MySQL field
# type codes that map onto one DB-API category, so cursor descriptions can
# be compared against these names.
STRING = DBAPISet([FIELD_TYPE.ENUM, FIELD_TYPE.STRING,
                   FIELD_TYPE.VAR_STRING])
BINARY = DBAPISet([FIELD_TYPE.BLOB, FIELD_TYPE.LONG_BLOB,
                   FIELD_TYPE.MEDIUM_BLOB, FIELD_TYPE.TINY_BLOB])
NUMBER = DBAPISet([FIELD_TYPE.DECIMAL, FIELD_TYPE.DOUBLE, FIELD_TYPE.FLOAT,
                   FIELD_TYPE.INT24, FIELD_TYPE.LONG, FIELD_TYPE.LONGLONG,
                   FIELD_TYPE.TINY, FIELD_TYPE.YEAR])
DATE = DBAPISet([FIELD_TYPE.DATE, FIELD_TYPE.NEWDATE])
TIME = DBAPISet([FIELD_TYPE.TIME])
TIMESTAMP = DBAPISet([FIELD_TYPE.TIMESTAMP, FIELD_TYPE.DATETIME])
DATETIME = TIMESTAMP
# MySQL has no ROWID concept; kept empty for DB-API completeness.
ROWID = DBAPISet()
# Inline sanity checks for DBAPISet's unusual equality semantics
# (runnable under a test collector that imports this module).
def test_DBAPISet_set_equality():
    # A set compares equal to itself via ordinary set equality.
    assert STRING == STRING
def test_DBAPISet_set_inequality():
    assert STRING != NUMBER
def test_DBAPISet_set_equality_membership():
    # Equality against a member field-type code is True by design.
    assert FIELD_TYPE.VAR_STRING == STRING
def test_DBAPISet_set_inequality_membership():
    assert FIELD_TYPE.DATE != STRING
def Binary(x):
    """DB-API Binary constructor: coerce *x* to the str (bytes) type
    used for BLOB columns in this Python 2 driver."""
    return str(x)
def Connect(*args, **kwargs):
    """Factory function for connections.Connection."""
    # Imported lazily here rather than at module top — presumably to avoid
    # an import cycle with MySQLdb.connections; verify before moving it.
    from MySQLdb.connections import Connection
    return Connection(*args, **kwargs)
connect = Connection = Connect
__all__ = [ 'BINARY', 'Binary', 'Connect', 'Connection', 'DATE',
'Date', 'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks',
'TimestampFromTicks', 'DataError', 'DatabaseError', 'Error',
'FIELD_TYPE', 'IntegrityError', 'InterfaceError', 'InternalError',
'MySQLError', 'NULL', 'NUMBER', 'NotSupportedError', 'DBAPISet',
'OperationalError', 'ProgrammingError', 'ROWID', 'STRING', 'TIME',
'TIMESTAMP', 'Warning', 'apilevel', 'connect', 'connections',
'constants', 'converters', 'cursors', 'debug', 'escape', 'escape_dict',
'escape_sequence', 'escape_string', 'get_client_info',
'paramstyle', 'string_literal', 'threadsafety', 'version_info']
|
mit
|
longmen21/edx-platform
|
lms/djangoapps/teams/models.py
|
48
|
10408
|
"""Django models related to teams functionality."""
from datetime import datetime
from uuid import uuid4
import pytz
from model_utils import FieldTracker
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from django.db import models
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy
from django_countries.fields import CountryField
from django_comment_common.signals import (
thread_created,
thread_edited,
thread_deleted,
thread_voted,
comment_created,
comment_edited,
comment_deleted,
comment_voted,
comment_endorsed
)
from xmodule_django.models import CourseKeyField
from util.model_utils import slugify
from student.models import LanguageField, CourseEnrollment
from .errors import AlreadyOnTeamInCourse, NotEnrolledInCourseForTeam, ImmutableMembershipFieldException
from lms.djangoapps.teams.utils import emit_team_event
from lms.djangoapps.teams import TEAM_DISCUSSION_CONTEXT
@receiver(thread_voted)
@receiver(thread_created)
@receiver(comment_voted)
@receiver(comment_created)
def post_create_vote_handler(sender, **kwargs): # pylint: disable=unused-argument
    """Update the user's last activity date upon creating or voting for a
    post."""
    # No original-author check here: creating or voting is always the
    # acting user's own activity.
    handle_activity(kwargs['user'], kwargs['post'])
@receiver(thread_edited)
@receiver(thread_deleted)
@receiver(comment_edited)
@receiver(comment_deleted)
def post_edit_delete_handler(sender, **kwargs): # pylint: disable=unused-argument
    """Update the user's last activity date upon editing or deleting a
    post."""
    post = kwargs['post']
    # Pass the post's author id so handle_activity only counts the action
    # when performed by the author. long() — user_id appears not to be a
    # native int here (Python 2 code); verify against the comments service.
    handle_activity(kwargs['user'], post, long(post.user_id))
@receiver(comment_endorsed)
def comment_endorsed_handler(sender, **kwargs): # pylint: disable=unused-argument
    """Update the user's last activity date upon endorsing a comment."""
    comment = kwargs['post']
    # The thread author's id is passed, so handle_activity is a no-op
    # unless the endorsing user authored the thread.
    handle_activity(kwargs['user'], comment, long(comment.thread.user_id))
def handle_activity(user, post, original_author_id=None):
    """Handle user activity from django_comment_client and discussion_api
    and update the user's last activity date. Checks if the user who
    performed the action is the original author, and that the
    discussion has the team context.
    """
    # Skip activity performed on someone else's content (e.g. a moderator
    # editing another user's post) when an author id was supplied.
    if original_author_id is not None and user.id != original_author_id:
        return
    # Only team-context discussions update team activity timestamps.
    if getattr(post, "context", "course") == TEAM_DISCUSSION_CONTEXT:
        CourseTeamMembership.update_last_activity(user, post.commentable_id)
class CourseTeam(models.Model):
    """This model represents team related info."""
    class Meta(object):
        app_label = "teams"
    # Globally unique identifier: slugified name prefix + a uuid hex (see create()).
    team_id = models.CharField(max_length=255, unique=True)
    # Id of the discussion topic backing this team (the uuid part of team_id).
    discussion_topic_id = models.CharField(max_length=255, unique=True)
    name = models.CharField(max_length=255, db_index=True)
    course_id = CourseKeyField(max_length=255, db_index=True)
    topic_id = models.CharField(max_length=255, db_index=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True)
    description = models.CharField(max_length=300)
    country = CountryField(blank=True)
    language = LanguageField(
        blank=True,
        help_text=ugettext_lazy("Optional language the team uses as ISO 639-1 code."),
    )
    last_activity_at = models.DateTimeField(db_index=True) # indexed for ordering
    users = models.ManyToManyField(User, db_index=True, related_name='teams', through='CourseTeamMembership')
    # Denormalized membership count, maintained by reset_team_size() which is
    # invoked from CourseTeamMembership.save()/delete().
    team_size = models.IntegerField(default=0, db_index=True) # indexed for ordering
    field_tracker = FieldTracker()
    # Don't emit changed events when these fields change.
    FIELD_BLACKLIST = ['last_activity_at', 'team_size']
    @classmethod
    def create(cls, name, course_id, description, topic_id=None, country=None, language=None):
        """Create a complete CourseTeam object (unsaved).
        Args:
            name (str): The name of the team to be created.
            course_id (str): The ID string of the course associated
                with this team.
            description (str): A description of the team.
            topic_id (str): An optional identifier for the topic the
                team formed around.
            country (str, optional): An optional country where the team
                is based, as ISO 3166-1 code.
            language (str, optional): An optional language which the
                team uses, as ISO 639-1 code.
        """
        unique_id = uuid4().hex
        # Slug prefix keeps ids readable; the uuid suffix guarantees uniqueness.
        team_id = slugify(name)[0:20] + '-' + unique_id
        discussion_topic_id = unique_id
        course_team = cls(
            team_id=team_id,
            discussion_topic_id=discussion_topic_id,
            name=name,
            course_id=course_id,
            topic_id=topic_id if topic_id else '',
            description=description,
            country=country if country else '',
            language=language if language else '',
            last_activity_at=datetime.utcnow().replace(tzinfo=pytz.utc)
        )
        return course_team
    def __repr__(self):
        return "<CourseTeam team_id={0.team_id}>".format(self)
    def add_user(self, user):
        """Adds the given user to the CourseTeam.
        Raises NotEnrolledInCourseForTeam when the user is not enrolled in
        this team's course, AlreadyOnTeamInCourse when the user already
        belongs to a team in this course.
        """
        if not CourseEnrollment.is_enrolled(user, self.course_id):
            raise NotEnrolledInCourseForTeam
        if CourseTeamMembership.user_in_team_for_course(user, self.course_id):
            raise AlreadyOnTeamInCourse
        return CourseTeamMembership.objects.create(
            user=user,
            team=self
        )
    def reset_team_size(self):
        """Reset team_size to reflect the current membership count."""
        self.team_size = CourseTeamMembership.objects.filter(team=self).count()
        self.save()
class CourseTeamMembership(models.Model):
    """This model represents the membership of a single user in a single team."""
    class Meta(object):
        app_label = "teams"
        unique_together = (('user', 'team'),)
    user = models.ForeignKey(User)
    team = models.ForeignKey(CourseTeam, related_name='membership')
    date_joined = models.DateTimeField(auto_now_add=True)
    last_activity_at = models.DateTimeField()
    # Fields that may be assigned once (creation / load from DB) but never
    # changed afterwards; enforced by __setattr__ below.
    immutable_fields = ('user', 'team', 'date_joined')
    def __setattr__(self, name, value):
        """Memberships are immutable, with the exception of last activity
        date.
        """
        if name in self.immutable_fields:
            # Check the current value -- if it is None, then this
            # model is being created from the database and it's fine
            # to set the value. Otherwise, we're trying to overwrite
            # an immutable field.
            current_value = getattr(self, name, None)
            if value == current_value:
                # This is an attempt to set an immutable value to the same value
                # to which it's already set. Don't complain - just ignore the attempt.
                return
            else:
                # This is an attempt to set an immutable value to a different value.
                # Allow it *only* if the current value is None.
                if current_value is not None:
                    raise ImmutableMembershipFieldException(
                        "Field %r shouldn't change from %r to %r" % (name, current_value, value)
                    )
        super(CourseTeamMembership, self).__setattr__(name, value)
    def save(self, *args, **kwargs):
        """Customize save method to set the last_activity_at if it does not
        currently exist. Also resets the team's size if this model is
        being created.
        """
        should_reset_team_size = False
        if self.pk is None:
            # No primary key yet: this save is an INSERT, so the team's
            # membership count will change.
            should_reset_team_size = True
        if not self.last_activity_at:
            self.last_activity_at = datetime.utcnow().replace(tzinfo=pytz.utc)
        super(CourseTeamMembership, self).save(*args, **kwargs)
        if should_reset_team_size:
            self.team.reset_team_size()
    def delete(self, *args, **kwargs):
        """Recompute the related team's team_size after deleting a membership"""
        super(CourseTeamMembership, self).delete(*args, **kwargs)
        self.team.reset_team_size()
    @classmethod
    def get_memberships(cls, username=None, course_ids=None, team_id=None):
        """
        Get a queryset of memberships.
        Args:
            username (unicode, optional): The username to filter on.
            course_ids (list of unicode, optional) Course IDs to filter on.
            team_id (unicode, optional): The team_id to filter on.
        """
        queryset = cls.objects.all()
        if username is not None:
            queryset = queryset.filter(user__username=username)
        if course_ids is not None:
            queryset = queryset.filter(team__course_id__in=course_ids)
        if team_id is not None:
            queryset = queryset.filter(team__team_id=team_id)
        return queryset
    @classmethod
    def user_in_team_for_course(cls, user, course_id):
        """
        Checks whether or not a user is already in a team in the given course.
        Args:
            user: the user that we want to query on
            course_id: the course_id of the course we're interested in
        Returns:
            True if the user is on a team in the course already
            False if not
        """
        return cls.objects.filter(user=user, team__course_id=course_id).exists()
    @classmethod
    def update_last_activity(cls, user, discussion_topic_id):
        """Set the `last_activity_at` for both this user and their team in the
        given discussion topic. No-op if the user is not a member of
        the team for this discussion.
        """
        try:
            membership = cls.objects.get(user=user, team__discussion_topic_id=discussion_topic_id)
        # If a privileged user is active in the discussion of a team
        # they do not belong to, do not update their last activity
        # information.
        except ObjectDoesNotExist:
            return
        now = datetime.utcnow().replace(tzinfo=pytz.utc)
        membership.last_activity_at = now
        membership.team.last_activity_at = now
        membership.team.save()
        membership.save()
        emit_team_event('edx.team.activity_updated', membership.team.course_id, {
            'team_id': membership.team_id,
        })
|
agpl-3.0
|
Kalamatee/dhewm3
|
neo/sys/linux/runner/runner_lib.py
|
19
|
6735
|
# run doom process on a series of maps
# can be used for regression testing, or to fetch media
# keeps a log of each run ( see getLogfile )
# currently uses a basic stdout activity timeout to decide when to move on
# using a periodic check of /proc/<pid>/status SleepAVG
# when the sleep average is reaching 0, issue a 'quit' to stdout
# keeps serialized run status in runner.pickle
# NOTE: can be used to initiate runs on failed maps only for instance etc.
# TODO: use the serialized and not the logs to sort the run order
# TODO: better logging. Use idLogger?
# TODO: configurable event when the process is found interactive
# instead of emitting a quit, perform some warning action?
import sys, os, commands, string, time, traceback, pickle
from twisted.application import internet, service
from twisted.internet import protocol, reactor, utils, defer
from twisted.internet.task import LoopingCall
class doomClientProtocol( protocol.ProcessProtocol ):
    # Logs child-process lifecycle events and output to a per-map logfile,
    # and fires `deferred` when the child exits so the service can advance.
    # ProcessProtocol API
    def connectionMade( self ):
        self.logfile.write( 'connectionMade\n' )
    def outReceived( self, data ):
        # Mirror the child's stdout both to our stdout and to the logfile.
        print data
        self.logfile.write( data )
    def errReceived( self, data ):
        print 'stderr: ' + data
        self.logfile.write( 'stderr: ' + data )
    def inConnectionLost( self ):
        self.logfile.write( 'inConnectionLost\n' )
    def outConnectionLost( self ):
        self.logfile.write( 'outConnectionLost\n' )
    def errConnectionLost( self ):
        self.logfile.write( 'errConnectionLost\n' )
    def processEnded( self, status_object ):
        # Record the end time, close the log, and hand control back to
        # doomService through the deferred.
        self.logfile.write( 'processEnded %s\n' % repr( status_object ) )
        self.logfile.write( time.strftime( '%H:%M:%S', time.localtime( time.time() ) ) + '\n' )
        self.logfile.close()
        self.deferred.callback( None )
    # mac management
    def __init__( self, logfilename, deferred ):
        # Open in append mode so repeated runs accumulate history.
        self.logfilename = logfilename
        self.logfile = open( logfilename, 'a' )
        self.logfile.write( time.strftime( '%H:%M:%S', time.localtime( time.time() ) ) + '\n' )
        self.deferred = deferred
class doomService( service.Service ):
    # Runs the game binary once per map, watching /proc/<pid>/status
    # SleepAVG to detect when a map has finished loading, then quits it
    # and moves on to the next map. Run status is pickled between runs.
    # current monitoring state
    # 0: nothing running
    # 1: we have a process running, we're monitoring it's CPU usage
    # 2: we issued a 'quit' to the process's stdin
    # either going to get a processEnded, or a timeout
    # 3: we forced a kill because of error, timeout etc.
    state = 0
    # load check period
    check_period = 10
    # pickled status file
    pickle_file = 'runner.pickle'
    # stores status indexed by filename
    # { 'mapname' : ( state, last_update ), .. }
    status = {}
    # start the maps as multiplayer server
    multiplayer = 0
    def __init__( self, bin, cmdline, maps, sort = 0, multiplayer = 0, blank_run = 0 ):
        self.p_transport = None
        self.multiplayer = multiplayer
        self.blank_run = blank_run
        if ( self.multiplayer ):
            print 'Operate in multiplayer mode'
        self.bin = os.path.abspath( bin )
        # Accept either a pre-split argv list or a space-separated string.
        if ( type( cmdline ) is type( '' ) ):
            self.cmdline = string.split( cmdline, ' ' )
        else:
            self.cmdline = cmdline
        self.maps = maps
        if ( os.path.exists( self.pickle_file ) ):
            print 'Loading pickled status %s' % self.pickle_file
            handle = open( self.pickle_file, 'r' )
            self.status = pickle.load( handle )
            handle.close()
        if ( sort ):
            # Oldest logfiles first, so stale maps are refreshed first.
            print 'Sorting maps oldest runs first'
            maps_sorted = [ ]
            for i in self.maps:
                i_log = self.getLogfile( i )
                if ( os.path.exists( i_log ) ):
                    maps_sorted.append( ( i, os.path.getmtime( i_log ) ) )
                else:
                    maps_sorted.append( ( i, 0 ) )
            maps_sorted.sort( lambda x,y : cmp( x[1], y[1] ) )
            self.maps = [ ]
            if ( blank_run ):
                self.maps.append( 'blankrun' )
            for i in maps_sorted:
                self.maps.append( i[ 0 ] )
            print 'Sorted as: %s\n' % repr( self.maps )
    def getLogfile( self, name ):
        # Map names may contain '/'; translate to '-' for a flat logs/ dir.
        return 'logs/' + string.translate( name, string.maketrans( '/', '-' ) ) + '.log'
    # deferred call when child process dies
    def processEnded( self, val ):
        print 'child has died - state %d' % self.state
        # Record the final state for this map, then advance or stop.
        self.status[ self.maps[ self.i_map ] ] = ( self.state, time.time() )
        self.i_map += 1
        if ( self.i_map >= len( self.maps ) ):
            reactor.stop()
        else:
            self.nextMap()
    def processTimeout( self ):
        self.p_transport.signalProcess( "KILL" )
    def sleepAVGReply( self, val ):
        # Callback with the "SleepAVG: N%" line read from /proc.
        try:
            s = val[10:][:-2]
            print 'sleepAVGReply %s%%' % s
            if ( s == '0' ):
                # need twice in a row
                if ( self.state == 2 ):
                    print 'child process is interactive'
                    self.p_transport.write( 'quit\n' )
                else:
                    self.state = 2
            else:
                self.state = 1
#            else:
#                reactor.callLater( self.check_period, self.checkCPU )
        except:
            print traceback.format_tb( sys.exc_info()[2] )
            print sys.exc_info()[0]
            print 'exception raised in sleepAVGReply - killing process'
            self.state = 3
            self.p_transport.signalProcess( 'KILL' )
    def sleepAVGTimeout( self ):
        print 'sleepAVGTimeout - killing process'
        self.state = 3
        self.p_transport.signalProcess( 'KILL' )
    # called at regular intervals to monitor the sleep average of the child process
    # when sleep reaches 0, it means the map is loaded and interactive
    def checkCPU( self ):
        if ( self.state == 0 or self.p_transport is None or self.p_transport.pid is None ):
            print 'checkCPU: no child process atm'
            return
        defer = utils.getProcessOutput( '/bin/bash', [ '-c', 'cat /proc/%d/status | grep SleepAVG' % self.p_transport.pid ] )
        defer.addCallback( self.sleepAVGReply )
        defer.setTimeout( 2, self.sleepAVGTimeout )
    def nextMap( self ):
        # Build the command line for the next map and spawn the child.
        self.state = 0
        name = self.maps[ self.i_map ]
        print 'Starting map: ' + name
        logfile = self.getLogfile( name )
        print 'Logging to: ' + logfile
        if ( self.multiplayer ):
            cmdline = [ self.bin ] + self.cmdline + [ '+set', 'si_map', name ]
            if ( name != 'blankrun' ):
                cmdline.append( '+spawnServer' )
        else:
            cmdline = [ self.bin ] + self.cmdline
            if ( name != 'blankrun' ):
                cmdline += [ '+devmap', name ]
        print 'Command line: ' + repr( cmdline )
        self.deferred = defer.Deferred()
        self.deferred.addCallback( self.processEnded )
        self.p_transport = reactor.spawnProcess( doomClientProtocol( logfile, self.deferred ), self.bin, cmdline , path = os.path.dirname( self.bin ), env = os.environ )
        self.state = 1
#        # setup the CPU usage loop
#        reactor.callLater( self.check_period, self.checkCPU )
    def startService( self ):
        print 'doomService startService'
        loop = LoopingCall( self.checkCPU )
        loop.start( self.check_period )
        self.i_map = 0
        self.nextMap()
    def stopService( self ):
        print 'doomService stopService'
        if ( not self.p_transport.pid is None ):
            self.p_transport.signalProcess( 'KILL' )
        # serialize
        print 'saving status to %s' % self.pickle_file
        handle = open( self.pickle_file, 'w+' )
        pickle.dump( self.status, handle )
        handle.close()
|
gpl-3.0
|
BoPeng/simuPOP
|
src/simuPOP_op.py
|
1
|
409827
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (3, 0, 0):
new_instancemethod = lambda func, inst, cls: _simuPOP_op.SWIG_PyInstanceMethod_New(func)
else:
from new import instancemethod as new_instancemethod
if _swig_python_version_info >= (2, 7, 0):
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_simuPOP_op')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_simuPOP_op')
_simuPOP_op = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_simuPOP_op', [dirname(__file__)])
except ImportError:
import _simuPOP_op
return _simuPOP_op
try:
_mod = imp.load_module('_simuPOP_op', fp, pathname, description)
finally:
if fp is not None:
fp.close()
return _mod
_simuPOP_op = swig_import_helper()
del swig_import_helper
else:
import _simuPOP_op
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    # SWIG-generated helper (do not hand-edit the generated file's logic):
    # routes attribute writes through the proxy's setter table; when
    # `static`, rejects attributes not declared by SWIG.
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        object.__setattr__(self, name, value)
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    # Non-static variant: unknown attribute names are added to the
    # instance instead of raising AttributeError.
    return _swig_setattr_nondynamic(self, class_type, name, value, static=0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_method(set):
def set_attr(self, name, value):
if (name == "thisown"):
return self.this.own(value)
if hasattr(self, name) or (name == "this"):
set(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
#redefine __repr__ to make it shorter.
def _swig_repr(self):
if hasattr(self, 'describe'):
return self.describe()
else:
return "<%s.%s>" % (self.__class__.__module__.split('.')[-1].split('_')[0], self.__class__.__name__)
class SwigPyIterator(object):
    """Abstract base proxy for SWIG-generated C++ iterators."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _simuPOP_op.delete_SwigPyIterator

    def __iter__(self):
        return self
# Attach the C-extension implementations as methods, then register the
# proxy class with the extension.
SwigPyIterator.value = new_instancemethod(_simuPOP_op.SwigPyIterator_value, None, SwigPyIterator)
SwigPyIterator.incr = new_instancemethod(_simuPOP_op.SwigPyIterator_incr, None, SwigPyIterator)
SwigPyIterator.decr = new_instancemethod(_simuPOP_op.SwigPyIterator_decr, None, SwigPyIterator)
SwigPyIterator.distance = new_instancemethod(_simuPOP_op.SwigPyIterator_distance, None, SwigPyIterator)
SwigPyIterator.equal = new_instancemethod(_simuPOP_op.SwigPyIterator_equal, None, SwigPyIterator)
SwigPyIterator.copy = new_instancemethod(_simuPOP_op.SwigPyIterator_copy, None, SwigPyIterator)
SwigPyIterator.next = new_instancemethod(_simuPOP_op.SwigPyIterator_next, None, SwigPyIterator)
SwigPyIterator.__next__ = new_instancemethod(_simuPOP_op.SwigPyIterator___next__, None, SwigPyIterator)
SwigPyIterator.previous = new_instancemethod(_simuPOP_op.SwigPyIterator_previous, None, SwigPyIterator)
SwigPyIterator.advance = new_instancemethod(_simuPOP_op.SwigPyIterator_advance, None, SwigPyIterator)
SwigPyIterator.__eq__ = new_instancemethod(_simuPOP_op.SwigPyIterator___eq__, None, SwigPyIterator)
SwigPyIterator.__ne__ = new_instancemethod(_simuPOP_op.SwigPyIterator___ne__, None, SwigPyIterator)
SwigPyIterator.__iadd__ = new_instancemethod(_simuPOP_op.SwigPyIterator___iadd__, None, SwigPyIterator)
SwigPyIterator.__isub__ = new_instancemethod(_simuPOP_op.SwigPyIterator___isub__, None, SwigPyIterator)
SwigPyIterator.__add__ = new_instancemethod(_simuPOP_op.SwigPyIterator___add__, None, SwigPyIterator)
SwigPyIterator.__sub__ = new_instancemethod(_simuPOP_op.SwigPyIterator___sub__, None, SwigPyIterator)
SwigPyIterator_swigregister = _simuPOP_op.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
# Integer constants re-exported from the C extension (values are defined on
# the C++ side). Grouping below follows the generated order; exact semantics
# are documented in the C++ headers.
HAVE_INLINE = _simuPOP_op.HAVE_INLINE
TR1_SUPPORT = _simuPOP_op.TR1_SUPPORT
THREAFPRIVATE_SUPPORT = _simuPOP_op.THREAFPRIVATE_SUPPORT  # NOTE: name misspelled in the C++ source; kept as generated
SIZE_T_FORMAT = _simuPOP_op.SIZE_T_FORMAT
OpSWIGType = _simuPOP_op.OpSWIGType
MALE = _simuPOP_op.MALE
FEMALE = _simuPOP_op.FEMALE
# Chromosome types.
CUSTOMIZED = _simuPOP_op.CUSTOMIZED
AUTOSOME = _simuPOP_op.AUTOSOME
CHROMOSOME_X = _simuPOP_op.CHROMOSOME_X
CHROMOSOME_Y = _simuPOP_op.CHROMOSOME_Y
MITOCHONDRIAL = _simuPOP_op.MITOCHONDRIAL
# Distribution selectors.
CONSTANT = _simuPOP_op.CONSTANT
BINOMIAL_DISTRIBUTION = _simuPOP_op.BINOMIAL_DISTRIBUTION
EXPONENTIAL_DISTRIBUTION = _simuPOP_op.EXPONENTIAL_DISTRIBUTION
GEOMETRIC_DISTRIBUTION = _simuPOP_op.GEOMETRIC_DISTRIBUTION
POISSON_DISTRIBUTION = _simuPOP_op.POISSON_DISTRIBUTION
UNIFORM_DISTRIBUTION = _simuPOP_op.UNIFORM_DISTRIBUTION
NORMAL_DISTRIBUTION = _simuPOP_op.NORMAL_DISTRIBUTION
GAMMA_DISTRIBUTION = _simuPOP_op.GAMMA_DISTRIBUTION
# Sex-determination modes.
NO_SEX = _simuPOP_op.NO_SEX
RANDOM_SEX = _simuPOP_op.RANDOM_SEX
PROB_OF_MALES = _simuPOP_op.PROB_OF_MALES
NUM_OF_MALES = _simuPOP_op.NUM_OF_MALES
NUM_OF_FEMALES = _simuPOP_op.NUM_OF_FEMALES
SEQUENCE_OF_SEX = _simuPOP_op.SEQUENCE_OF_SEX
GLOBAL_SEQUENCE_OF_SEX = _simuPOP_op.GLOBAL_SEQUENCE_OF_SEX
NO_CONVERSION = _simuPOP_op.NO_CONVERSION
NUM_MARKERS = _simuPOP_op.NUM_MARKERS
TRACT_LENGTH = _simuPOP_op.TRACT_LENGTH
# Relationship / relative selectors.
OFFSPRING = _simuPOP_op.OFFSPRING
COMMON_OFFSPRING = _simuPOP_op.COMMON_OFFSPRING
SPOUSE = _simuPOP_op.SPOUSE
OUTBRED_SPOUSE = _simuPOP_op.OUTBRED_SPOUSE
SIBLING = _simuPOP_op.SIBLING
FULLSIBLING = _simuPOP_op.FULLSIBLING
ANY_SEX = _simuPOP_op.ANY_SEX
MALE_ONLY = _simuPOP_op.MALE_ONLY
FEMALE_ONLY = _simuPOP_op.FEMALE_ONLY
SAME_SEX = _simuPOP_op.SAME_SEX
OPPOSITE_SEX = _simuPOP_op.OPPOSITE_SEX
PAIR_ONLY = _simuPOP_op.PAIR_ONLY
# Affection status filters.
UNAFFECTED = _simuPOP_op.UNAFFECTED
AFFECTED = _simuPOP_op.AFFECTED
ANY_AFFECTION_STATUS = _simuPOP_op.ANY_AFFECTION_STATUS
# Multi-locus combination models.
MULTIPLICATIVE = _simuPOP_op.MULTIPLICATIVE
ADDITIVE = _simuPOP_op.ADDITIVE
HETEROGENEITY = _simuPOP_op.HETEROGENEITY
EXPONENTIAL = _simuPOP_op.EXPONENTIAL
BY_IND_INFO = _simuPOP_op.BY_IND_INFO
BY_PROBABILITY = _simuPOP_op.BY_PROBABILITY
BY_PROPORTION = _simuPOP_op.BY_PROPORTION
BY_COUNTS = _simuPOP_op.BY_COUNTS
PATERNAL = _simuPOP_op.PATERNAL
MATERNAL = _simuPOP_op.MATERNAL
# Aggregation modes.
MEAN = _simuPOP_op.MEAN
MAXIMUM = _simuPOP_op.MAXIMUM
MINIMUM = _simuPOP_op.MINIMUM
SUMMATION = _simuPOP_op.SUMMATION
MULTIPLICATION = _simuPOP_op.MULTIPLICATION
# Per-unit application scopes.
PER_ALLELE = _simuPOP_op.PER_ALLELE
PER_LOCI = _simuPOP_op.PER_LOCI
PER_CHROMOSOME = _simuPOP_op.PER_CHROMOSOME
PER_PLOIDY = _simuPOP_op.PER_PLOIDY
PER_INDIVIDUAL = _simuPOP_op.PER_INDIVIDUAL
FROM_INFO = _simuPOP_op.FROM_INFO
FROM_INFO_SIGNED = _simuPOP_op.FROM_INFO_SIGNED
DBG_WARNING = _simuPOP_op.DBG_WARNING
class Exception(object):
    """Proxy of the C++ simuPOP Exception base class.

    NOTE: this intentionally shadows the builtin ``Exception`` inside this
    module, as generated by SWIG; the wrapped subclasses below derive from it.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, msg: 'string const &'):
        _simuPOP_op.Exception_swiginit(self, _simuPOP_op.new_Exception(msg))
    __swig_destroy__ = _simuPOP_op.delete_Exception
Exception.message = new_instancemethod(_simuPOP_op.Exception_message, None, Exception)
Exception_swigregister = _simuPOP_op.Exception_swigregister
Exception_swigregister(Exception)

# Module-level constants exported through the extension's `cvar` object.
cvar = _simuPOP_op.cvar
MISSING_VALUE = cvar.MISSING_VALUE
NOT_FOUND = cvar.NOT_FOUND
ModuleMaxAllele = cvar.ModuleMaxAllele
MaxRandomNumber = cvar.MaxRandomNumber
MaxTraitIndex = cvar.MaxTraitIndex
InvalidValue = cvar.InvalidValue
MaxIndexSize = cvar.MaxIndexSize
# Proxy exception classes wrapping the corresponding C++ exception types.
# StopIteration/IndexError/ValueError/SystemError/RuntimeError shadow the
# Python builtins of the same name within this module (as generated).
class StopIteration(Exception):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, msg: 'string const'):
        _simuPOP_op.StopIteration_swiginit(self, _simuPOP_op.new_StopIteration(msg))
    __swig_destroy__ = _simuPOP_op.delete_StopIteration
StopIteration_swigregister = _simuPOP_op.StopIteration_swigregister
StopIteration_swigregister(StopIteration)

class IndexError(Exception):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, msg: 'string const'):
        _simuPOP_op.IndexError_swiginit(self, _simuPOP_op.new_IndexError(msg))
    __swig_destroy__ = _simuPOP_op.delete_IndexError
IndexError_swigregister = _simuPOP_op.IndexError_swigregister
IndexError_swigregister(IndexError)

class ValueError(Exception):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, msg: 'string const'):
        _simuPOP_op.ValueError_swiginit(self, _simuPOP_op.new_ValueError(msg))
    __swig_destroy__ = _simuPOP_op.delete_ValueError
ValueError_swigregister = _simuPOP_op.ValueError_swigregister
ValueError_swigregister(ValueError)

class SystemError(Exception):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, msg: 'string const'):
        _simuPOP_op.SystemError_swiginit(self, _simuPOP_op.new_SystemError(msg))
    __swig_destroy__ = _simuPOP_op.delete_SystemError
SystemError_swigregister = _simuPOP_op.SystemError_swigregister
SystemError_swigregister(SystemError)

class RuntimeError(Exception):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, msg: 'string const'):
        _simuPOP_op.RuntimeError_swiginit(self, _simuPOP_op.new_RuntimeError(msg))
    __swig_destroy__ = _simuPOP_op.delete_RuntimeError
RuntimeError_swigregister = _simuPOP_op.RuntimeError_swigregister
RuntimeError_swigregister(RuntimeError)

class StopEvolution(Exception):
    # Raised on the C++ side to end an evolutionary process.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, msg: 'string const'):
        _simuPOP_op.StopEvolution_swiginit(self, _simuPOP_op.new_StopEvolution(msg))
    __swig_destroy__ = _simuPOP_op.delete_StopEvolution
StopEvolution_swigregister = _simuPOP_op.StopEvolution_swigregister
StopEvolution_swigregister(StopEvolution)

class RevertEvolution(Exception):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, msg: 'string const'):
        _simuPOP_op.RevertEvolution_swiginit(self, _simuPOP_op.new_RevertEvolution(msg))
    __swig_destroy__ = _simuPOP_op.delete_RevertEvolution
RevertEvolution_swigregister = _simuPOP_op.RevertEvolution_swigregister
RevertEvolution_swigregister(RevertEvolution)
UnnamedSubPop = _simuPOP_op.UnnamedSubPop
cmp_epsilon = _simuPOP_op.cmp_epsilon

def turnOnDebug(*args, **kwargs) -> "void":
    """
    Usage:
        turnOnDebug(code="")

    Details:
        Set debug code code. More than one code could be specified using a
        comma separated string. Name of available codes are available from
        moduleInfo()['debug'].keys().
    """
    return _simuPOP_op.turnOnDebug(*args, **kwargs)

def turnOffDebug(*args, **kwargs) -> "void":
    """
    Usage:
        turnOffDebug(code="DBG_ALL")

    Details:
        Turn off debug code code. More than one code could be specified
        using a comma separated string. Default to turn off all debug
        codes.
    """
    return _simuPOP_op.turnOffDebug(*args, **kwargs)

def setOptions(numThreads: 'int const'=-1, name: 'char const *'=None, seed: 'unsigned long'=0) -> "void":
    """
    Usage:
        setOptions(numThreads=-1, name=None, seed=0)

    Details:
        First argument is to set number of thread in openMP. The number of
        threads can be be positive, integer (number of threads) or 0,
        which implies all available cores, or a number set by
        environmental variable OMP_NUM_THREADS. Second and third argument
        is to set the type or seed of existing random number generator
        using RNGname with seed. If using openMP, it sets the type or seed
        of random number generator of each thread.
    """
    return _simuPOP_op.setOptions(numThreads, name, seed)

def simuPOP_kbhit() -> "int":
    return _simuPOP_op.simuPOP_kbhit()

# Each Python wrapper above is immediately replaced by the raw C function,
# as generated by SWIG (the def only carries the signature/annotation).
simuPOP_kbhit = _simuPOP_op.simuPOP_kbhit

def simuPOP_getch() -> "int":
    return _simuPOP_op.simuPOP_getch()

simuPOP_getch = _simuPOP_op.simuPOP_getch

def pow3(n: 'unsigned int') -> "unsigned int":
    return _simuPOP_op.pow3(n)

pow3 = _simuPOP_op.pow3
class intList(object):
    """
    Details:
        A class to specify replicate list. The reason why I cannot simple
        use vectori() is that users have got used to use a single number
        to specify a single replicate.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, obj: 'PyObject *'=None):
        """
        Usage:
            intList(obj=None)
        """
        _simuPOP_op.intList_swiginit(self, _simuPOP_op.new_intList(obj))
    __swig_destroy__ = _simuPOP_op.delete_intList
intList_swigregister = _simuPOP_op.intList_swigregister
intList_swigregister(intList)

class uintList(object):
    """Proxy of the C++ simuPOP uintList class (a list of unsigned values,
    possibly left unspecified)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            uintList(obj=Py_True)
        """
        _simuPOP_op.uintList_swiginit(self, _simuPOP_op.new_uintList(*args, **kwargs))

    def unspecified(self) -> "bool":
        """
        Usage:
            x.unspecified()
        """
        return _simuPOP_op.uintList_unspecified(self)
    __swig_destroy__ = _simuPOP_op.delete_uintList
uintList.unspecified = new_instancemethod(_simuPOP_op.uintList_unspecified, None, uintList)
uintList_swigregister = _simuPOP_op.uintList_swigregister
uintList_swigregister(uintList)
class lociList(object):
    """Proxy of the C++ simuPOP lociList class (a possibly dynamic list of
    loci indexes)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            lociList(obj=Py_True)
        """
        _simuPOP_op.lociList_swiginit(self, _simuPOP_op.new_lociList(*args, **kwargs))

    def empty(self) -> "bool":
        """
        Usage:
            x.empty()
        """
        return _simuPOP_op.lociList_empty(self)

    def dynamic(self) -> "bool":
        """
        Usage:
            x.dynamic()
        """
        return _simuPOP_op.lociList_dynamic(self)
    __swig_destroy__ = _simuPOP_op.delete_lociList
lociList.empty = new_instancemethod(_simuPOP_op.lociList_empty, None, lociList)
lociList.dynamic = new_instancemethod(_simuPOP_op.lociList_dynamic, None, lociList)
lociList.elems = new_instancemethod(_simuPOP_op.lociList_elems, None, lociList)
lociList_swigregister = _simuPOP_op.lociList_swigregister
lociList_swigregister(lociList)

class floatList(object):
    """Proxy of the C++ simuPOP floatList class (a list of floats)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args):
        """
        Usage:
            floatList(obj=None)
        """
        _simuPOP_op.floatList_swiginit(self, _simuPOP_op.new_floatList(*args))
    __swig_destroy__ = _simuPOP_op.delete_floatList
floatList_swigregister = _simuPOP_op.floatList_swigregister
floatList_swigregister(floatList)

class stringList(object):
    """Proxy of the C++ simuPOP stringList class (a list of strings)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args):
        """
        Usage:
            stringList(str=None)
        """
        _simuPOP_op.stringList_swiginit(self, _simuPOP_op.new_stringList(*args))
    __swig_destroy__ = _simuPOP_op.delete_stringList
stringList.push_back = new_instancemethod(_simuPOP_op.stringList_push_back, None, stringList)
stringList_swigregister = _simuPOP_op.stringList_swigregister
stringList_swigregister(stringList)
class intMatrix(object):
    """Proxy of the C++ simuPOP intMatrix class."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, obj: 'PyObject *'=None):
        """
        Usage:
            intMatrix(obj=None)
        """
        _simuPOP_op.intMatrix_swiginit(self, _simuPOP_op.new_intMatrix(obj))
    __swig_destroy__ = _simuPOP_op.delete_intMatrix
intMatrix_swigregister = _simuPOP_op.intMatrix_swigregister
intMatrix_swigregister(intMatrix)

class floatMatrix(object):
    """Proxy of the C++ simuPOP floatMatrix class."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, obj: 'PyObject *'=None):
        """
        Usage:
            floatMatrix(obj=None)
        """
        _simuPOP_op.floatMatrix_swiginit(self, _simuPOP_op.new_floatMatrix(obj))
    __swig_destroy__ = _simuPOP_op.delete_floatMatrix
floatMatrix_swigregister = _simuPOP_op.floatMatrix_swigregister
floatMatrix_swigregister(floatMatrix)

class stringMatrix(object):
    """Proxy of the C++ simuPOP stringMatrix class."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, str: 'PyObject *'=None):
        """
        Usage:
            stringMatrix(str=None)
        """
        _simuPOP_op.stringMatrix_swiginit(self, _simuPOP_op.new_stringMatrix(str))
    __swig_destroy__ = _simuPOP_op.delete_stringMatrix
stringMatrix_swigregister = _simuPOP_op.stringMatrix_swigregister
stringMatrix_swigregister(stringMatrix)

class uintString(object):
    """Proxy of the C++ simuPOP uintString class (an unsigned int or a
    string value)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args):
        """
        Usage:
            uintString(value)
        """
        _simuPOP_op.uintString_swiginit(self, _simuPOP_op.new_uintString(*args))
    __swig_destroy__ = _simuPOP_op.delete_uintString
uintString_swigregister = _simuPOP_op.uintString_swigregister
uintString_swigregister(uintString)
def PyObj_AsString(str: 'PyObject *') -> "string":
    return _simuPOP_op.PyObj_AsString(str)

# Rebind to the raw C function (the def above only carries the signature).
PyObj_AsString = _simuPOP_op.PyObj_AsString

class stringFunc(object):
    """Proxy of the C++ simuPOP stringFunc class (a string value or a
    Python callable producing one)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args):
        """
        Usage:
            stringFunc(value)
        """
        _simuPOP_op.stringFunc_swiginit(self, _simuPOP_op.new_stringFunc(*args))

    def mode(self) -> "string":
        """
        Description:
            COPY.

        Usage:
            x.mode()
        """
        return _simuPOP_op.stringFunc_mode(self)
    __swig_destroy__ = _simuPOP_op.delete_stringFunc
stringFunc.mode = new_instancemethod(_simuPOP_op.stringFunc_mode, None, stringFunc)
stringFunc_swigregister = _simuPOP_op.stringFunc_swigregister
stringFunc_swigregister(stringFunc)

class uintListFunc(uintList):
    """Proxy of the C++ simuPOP uintListFunc class (a uintList or a Python
    callable producing one)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args):
        """
        Usage:
            uintListFunc(values=[])
        """
        _simuPOP_op.uintListFunc_swiginit(self, _simuPOP_op.new_uintListFunc(*args))
    __swig_destroy__ = _simuPOP_op.delete_uintListFunc
uintListFunc_swigregister = _simuPOP_op.uintListFunc_swigregister
uintListFunc_swigregister(uintListFunc)

class floatListFunc(floatList):
    """Proxy of the C++ simuPOP floatListFunc class (a floatList or a
    Python callable producing one)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args):
        """
        Usage:
            floatListFunc(func)
        """
        _simuPOP_op.floatListFunc_swiginit(self, _simuPOP_op.new_floatListFunc(*args))
    __swig_destroy__ = _simuPOP_op.delete_floatListFunc
floatListFunc_swigregister = _simuPOP_op.floatListFunc_swigregister
floatListFunc_swigregister(floatListFunc)
# --- Conversion helpers between Python objects and C++ value types ---
# Each def is immediately rebound to the raw C function, as generated.
def PyObj_As_Bool(obj: 'PyObject *', val: 'bool &') -> "void":
    return _simuPOP_op.PyObj_As_Bool(obj, val)

PyObj_As_Bool = _simuPOP_op.PyObj_As_Bool

def PyObj_As_Int(obj: 'PyObject *', val: 'long &') -> "void":
    return _simuPOP_op.PyObj_As_Int(obj, val)

PyObj_As_Int = _simuPOP_op.PyObj_As_Int

def PyObj_As_SizeT(obj: 'PyObject *', val: 'size_t &') -> "void":
    return _simuPOP_op.PyObj_As_SizeT(obj, val)

PyObj_As_SizeT = _simuPOP_op.PyObj_As_SizeT

def PyObj_As_Double(obj: 'PyObject *', val: 'double &') -> "void":
    return _simuPOP_op.PyObj_As_Double(obj, val)

PyObj_As_Double = _simuPOP_op.PyObj_As_Double

def PyObj_As_String(obj: 'PyObject *', val: 'string &') -> "void":
    return _simuPOP_op.PyObj_As_String(obj, val)

PyObj_As_String = _simuPOP_op.PyObj_As_String

def PyObj_As_Array(obj: 'PyObject *', val: 'vectorf &') -> "void":
    return _simuPOP_op.PyObj_As_Array(obj, val)

PyObj_As_Array = _simuPOP_op.PyObj_As_Array

def PyObj_As_IntArray(obj: 'PyObject *', val: 'vectori &') -> "void":
    return _simuPOP_op.PyObj_As_IntArray(obj, val)

PyObj_As_IntArray = _simuPOP_op.PyObj_As_IntArray

def PyObj_As_SizeTArray(obj: 'PyObject *', val: 'vectoru &') -> "void":
    return _simuPOP_op.PyObj_As_SizeTArray(obj, val)

PyObj_As_SizeTArray = _simuPOP_op.PyObj_As_SizeTArray

def Allele_Vec_As_NumArray(begin: 'GenoIterator', end: 'GenoIterator') -> "PyObject *":
    return _simuPOP_op.Allele_Vec_As_NumArray(begin, end)

Allele_Vec_As_NumArray = _simuPOP_op.Allele_Vec_As_NumArray

def Lineage_Vec_As_NumArray(begin: 'LineageIterator', end: 'LineageIterator') -> "PyObject *":
    return _simuPOP_op.Lineage_Vec_As_NumArray(begin, end)

Lineage_Vec_As_NumArray = _simuPOP_op.Lineage_Vec_As_NumArray
def closeOutput(*args, **kwargs) -> "void":
    """
    Usage:
        closeOutput(output="")

    Details:
        Output files specified by '>' are closed immediately after they
        are written. Those specified by '>>' and '>>>' are closed by a
        simulator after Simulator.evolve(). However, these files will be
        kept open if the operators are applied directly to a population
        using the operators' function form. In this case, function
        closeOutput can be used to close a specific file output, and close
        all unclosed files if output is unspecified. An exception will be
        raised if output does not exist or it has already been closed.
    """
    return _simuPOP_op.closeOutput(*args, **kwargs)

class RNG_func(object):
    """Callable proxy wrapping a GSL random number generator pointer."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, rng: 'gsl_rng *'):
        _simuPOP_op.RNG_func_swiginit(self, _simuPOP_op.new_RNG_func(rng))
    __swig_destroy__ = _simuPOP_op.delete_RNG_func
RNG_func.__call__ = new_instancemethod(_simuPOP_op.RNG_func___call__, None, RNG_func)
RNG_func_swigregister = _simuPOP_op.RNG_func_swigregister
RNG_func_swigregister(RNG_func)
class RNG(object):
    """
    Details:
        This random number generator class wraps around a number of random
        number generators from GNU Scientific Library. You can obtain and
        change the RNG used by the current simuPOP module through the
        getRNG() function, or create a separate random number generator
        and use it in your script.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, name: 'char const *'=None, seed: 'unsigned long'=0):
        """
        Usage:
            RNG(name=None, seed=0)

        Details:
            Create a RNG object using specified name and seed. If rng is not
            given, environmental variable GSL_RNG_TYPE will be used if it is
            available. Otherwise, generator mt19937 will be used. If seed is
            not given, /dev/urandom, /dev/random, or other system random
            number source will be used to guarantee that random seeds are used
            even if more than one simuPOP sessions are started simultaneously.
            Names of supported random number generators are available from
            moduleInfo()['availableRNGs'].
        """
        _simuPOP_op.RNG_swiginit(self, _simuPOP_op.new_RNG(name, seed))
    __swig_destroy__ = _simuPOP_op.delete_RNG

    def set(self, name: 'char const *'=None, seed: 'unsigned long'=0) -> "void":
        """
        Usage:
            x.set(name=None, seed=0)

        Details:
            Replace the existing random number generator using RNGname with
            seed seed. If seed is 0, a random seed will be used. If name is
            empty, use the existing RNG but reset the seed.
        """
        return _simuPOP_op.RNG_set(self, name, seed)

    def name(self) -> "char const *":
        """
        Usage:
            x.name()

        Details:
            Return the name of the current random number generator.
        """
        return _simuPOP_op.RNG_name(self)

    def seed(self) -> "unsigned long":
        """
        Usage:
            x.seed()

        Details:
            Return the seed used to initialize the RNG. This can be used to
            repeat a previous session.
        """
        return _simuPOP_op.RNG_seed(self)
    generateRandomSeed = staticmethod(_simuPOP_op.RNG_generateRandomSeed)

    def randUniform(self) -> "double":
        """
        Usage:
            x.randUniform()

        Details:
            Generate a random number following a rng_uniform [0, 1)
            distribution.
        """
        return _simuPOP_op.RNG_randUniform(self)

    def randBit(self) -> "bool":
        """Obsolete or undocumented function."""
        return _simuPOP_op.RNG_randBit(self)

    def randInt(self, n: 'unsigned long') -> "unsigned long":
        """
        Usage:
            x.randInt(n)

        Details:
            return a random number in the range of [0, 1, 2, ... n-1]
        """
        return _simuPOP_op.RNG_randInt(self, n)

    def randNormal(self, mu: 'double', sigma: 'double') -> "double":
        """
        Usage:
            x.randNormal(mu, sigma)

        Details:
            Generate a random number following a normal distribution with mean
            mu and standard deviation sigma.
        """
        return _simuPOP_op.RNG_randNormal(self, mu, sigma)

    def randExponential(self, mu: 'double') -> "double":
        """
        Usage:
            x.randExponential(mu)

        Details:
            Generate a random number following a exponential distribution with
            parameter mu.
        """
        return _simuPOP_op.RNG_randExponential(self, mu)

    def randGamma(self, a: 'double', b: 'double') -> "double":
        """
        Usage:
            x.randGamma(a, b)

        Details:
            Generate a random number following a gamma distribution with a
            shape parameters a and scale parameter b.
        """
        return _simuPOP_op.RNG_randGamma(self, a, b)

    def randChisq(self, nu: 'double') -> "double":
        """
        Usage:
            x.randChisq(nu)

        Details:
            Generate a random number following a Chi-squared distribution with
            nu degrees of freedom.
        """
        return _simuPOP_op.RNG_randChisq(self, nu)

    def randGeometric(self, p: 'double') -> "long":
        """
        Usage:
            x.randGeometric(p)

        Details:
            Generate a random number following a geometric distribution with
            parameter p.
        """
        return _simuPOP_op.RNG_randGeometric(self, p)

    def randBinomial(self, n: 'UINT', p: 'double') -> "ULONG":
        """
        Usage:
            x.randBinomial(n, p)

        Details:
            Generate a random number following a binomial distribution with
            parameters n and p.
        """
        return _simuPOP_op.RNG_randBinomial(self, n, p)

    def randPoisson(self, mu: 'double') -> "ULONG":
        """
        Usage:
            x.randPoisson(mu)

        Details:
            Generate a random number following a Poisson distribution with
            parameter mu.
        """
        return _simuPOP_op.RNG_randPoisson(self, mu)

    def randTruncatedPoisson(self, mu: 'double') -> "ULONG":
        """
        Usage:
            x.randTruncatedPoisson(mu)

        Details:
            Generate a positive random number following a zero-truncated
            Poisson distribution with parameter mu.
        """
        return _simuPOP_op.RNG_randTruncatedPoisson(self, mu)

    def randTruncatedBinomial(self, n: 'UINT', p: 'double') -> "ULONG":
        """
        Usage:
            x.randTruncatedBinomial(n, p)

        Details:
            Generate a positive random number following a zero-truncated
            binomial distribution with parameters n and p.
        """
        return _simuPOP_op.RNG_randTruncatedBinomial(self, n, p)

    def randMultinomial(self, N: 'unsigned int', p: 'vectorf const &') -> "vectoru":
        """
        Usage:
            x.randMultinomial(N, p)

        Details:
            Generate a random number following a multinomial distribution with
            parameters N and p (a list of probabilities).
        """
        return _simuPOP_op.RNG_randMultinomial(self, N, p)
# Attach the C-extension implementations as methods and register the proxy.
RNG.set = new_instancemethod(_simuPOP_op.RNG_set, None, RNG)
RNG.name = new_instancemethod(_simuPOP_op.RNG_name, None, RNG)
RNG.seed = new_instancemethod(_simuPOP_op.RNG_seed, None, RNG)
RNG.randUniform = new_instancemethod(_simuPOP_op.RNG_randUniform, None, RNG)
RNG.randBit = new_instancemethod(_simuPOP_op.RNG_randBit, None, RNG)
RNG.randInt = new_instancemethod(_simuPOP_op.RNG_randInt, None, RNG)
RNG.randNormal = new_instancemethod(_simuPOP_op.RNG_randNormal, None, RNG)
RNG.randExponential = new_instancemethod(_simuPOP_op.RNG_randExponential, None, RNG)
RNG.randGamma = new_instancemethod(_simuPOP_op.RNG_randGamma, None, RNG)
RNG.randChisq = new_instancemethod(_simuPOP_op.RNG_randChisq, None, RNG)
RNG.randGeometric = new_instancemethod(_simuPOP_op.RNG_randGeometric, None, RNG)
RNG.randBinomial = new_instancemethod(_simuPOP_op.RNG_randBinomial, None, RNG)
RNG.randPoisson = new_instancemethod(_simuPOP_op.RNG_randPoisson, None, RNG)
RNG.randTruncatedPoisson = new_instancemethod(_simuPOP_op.RNG_randTruncatedPoisson, None, RNG)
RNG.randTruncatedBinomial = new_instancemethod(_simuPOP_op.RNG_randTruncatedBinomial, None, RNG)
RNG.randMultinomial = new_instancemethod(_simuPOP_op.RNG_randMultinomial, None, RNG)
RNG_swigregister = _simuPOP_op.RNG_swigregister
RNG_swigregister(RNG)
def RNG_generateRandomSeed() -> "unsigned long":
    return _simuPOP_op.RNG_generateRandomSeed()

# Module-level alias for the static method, rebound to the raw C function.
RNG_generateRandomSeed = _simuPOP_op.RNG_generateRandomSeed

def getRNG() -> "simuPOP::RNG &":
    """
    Description:
        return the currently used random number generator

    Usage:
        getRNG()
    """
    return _simuPOP_op.getRNG()
class WeightedSampler(object):
    """
    Details:
        A random number generator that returns 0, 1, ..., k-1 with
        probabilites that are proportional to their weights. For example,
        a weighted sampler with weights 4, 3, 2 and 1 will return numbers
        0, 1, 2 and 3 with probabilities 0.4, 0.3, 0.2 and 0.1,
        respectively. If an additional parameter N is specified, the
        weighted sampler will return exact proportions of numbers if N
        numbers are returned. The version without additional parameter is
        similar to the sample(prob, replace=FALSE) function of the R
        statistical package.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            WeightedSampler(weights=[], N=0)

        Details:
            Creates a weighted sampler that returns 0, 1, ... k-1 when a list
            of k weights are specified (weights). weights do not have to add
            up to 1. If a non-zero N is specified, exact proportions of
            numbers will be returned in N returned numbers.
        """
        _simuPOP_op.WeightedSampler_swiginit(self, _simuPOP_op.new_WeightedSampler(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_WeightedSampler

    def draw(self) -> "size_t":
        """
        Usage:
            x.draw()

        Details:
            Returns a random number between 0 and k-1 with probabilities that
            are proportional to specified weights.
        """
        return _simuPOP_op.WeightedSampler_draw(self)

    def drawSamples(self, n: 'ULONG'=1) -> "vectoru":
        """
        Usage:
            x.drawSamples(n=1)

        Details:
            Returns a list of n random numbers
        """
        return _simuPOP_op.WeightedSampler_drawSamples(self, n)
WeightedSampler.draw = new_instancemethod(_simuPOP_op.WeightedSampler_draw, None, WeightedSampler)
WeightedSampler.drawSamples = new_instancemethod(_simuPOP_op.WeightedSampler_drawSamples, None, WeightedSampler)
WeightedSampler_swigregister = _simuPOP_op.WeightedSampler_swigregister
WeightedSampler_swigregister(WeightedSampler)
class Bernullitrials(object):
"""
Details:
this class encapsulate behavior of a sequence of Bernulli trial.
the main idea is that when doing a sequence of Bernulli trials of
the same probability, we can use much quicker algorithms instead
of doing n Bernulli trials For example, when N=10000, p=0.001.
The usual way to do N Bin(p) trials is to do N randUnif(0,1)<p
comparison. using the new method, we can use geometric
distrubution to find the next true event. Also, for the cases of
p=0.5, random bits are generated. This class maintain a two
dimensional table: a vector of probabilities cross expected number
of trials p1 p2 p3 p4 p5 trial 1 trial 2 ... trial N We expect
that N is big (usually populaiton size) and p_i are small using
fast bernulliTrial method for fix p, we can fill up this table
very quickly column by column This class will provide easy access
to row (each trial) or column (called each prob) of this table.
if this table is accessed row by row (each trial), a internal
index is used. if index exceeds N, trials will be generated all
again. if trial will be called, e.g., N+2 times all the time, this
treatment might not be very efficient.
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, arg2: 'RNG', prob: 'vectorf const &', trials: 'ULONG'=0):
"""
Usage:
Bernullitrials(, prob, trials=0)
"""
_simuPOP_op.Bernullitrials_swiginit(self, _simuPOP_op.new_Bernullitrials(arg2, prob, trials))
__swig_destroy__ = _simuPOP_op.delete_Bernullitrials
def probSize(self) -> "size_t":
"""
Usage:
x.probSize()
"""
return _simuPOP_op.Bernullitrials_probSize(self)
def doTrial(self) -> "void":
"""
Description:
generate the trial table, reset m_cur
Usage:
x.doTrial()
"""
return _simuPOP_op.Bernullitrials_doTrial(self)
def trial(self) -> "void":
"""
Description:
if necessary, do trail again.
Usage:
x.trial()
"""
return _simuPOP_op.Bernullitrials_trial(self)
def trialSucc(self, *args) -> "bool":
"""
Usage:
x.trialSucc(idx)
"""
return _simuPOP_op.Bernullitrials_trialSucc(self, *args)
def trialFirstSucc(self, idx: 'size_t') -> "size_t":
"""
Usage:
x.trialFirstSucc(idx)
"""
return _simuPOP_op.Bernullitrials_trialFirstSucc(self, idx)
def trialNextSucc(self, idx: 'size_t', pos: 'size_t') -> "size_t":
"""
Usage:
x.trialNextSucc(idx, pos)
"""
return _simuPOP_op.Bernullitrials_trialNextSucc(self, idx, pos)
def setTrialSucc(self, idx: 'size_t', succ: 'bool') -> "void":
"""
Usage:
x.setTrialSucc(idx, succ)
"""
return _simuPOP_op.Bernullitrials_setTrialSucc(self, idx, succ)
def trialSuccRate(self, index: 'UINT') -> "double":
"""
Description:
return the succ rate for one index, used for verification pruposes
Usage:
x.trialSuccRate(index)
"""
return _simuPOP_op.Bernullitrials_trialSuccRate(self, index)
def probSuccRate(self) -> "double":
"""
Description:
return the succ rate for current trial, used for verification
pruposes
Usage:
x.probSuccRate()
"""
return _simuPOP_op.Bernullitrials_probSuccRate(self)
npos = _simuPOP_op.Bernullitrials_npos
# Standard SWIG boilerplate: rebind the proxy methods directly to their C
# extension implementations and register Bernullitrials with the SWIG runtime.
Bernullitrials.probSize = new_instancemethod(_simuPOP_op.Bernullitrials_probSize, None, Bernullitrials)
Bernullitrials.doTrial = new_instancemethod(_simuPOP_op.Bernullitrials_doTrial, None, Bernullitrials)
Bernullitrials.trial = new_instancemethod(_simuPOP_op.Bernullitrials_trial, None, Bernullitrials)
Bernullitrials.trialSucc = new_instancemethod(_simuPOP_op.Bernullitrials_trialSucc, None, Bernullitrials)
Bernullitrials.trialFirstSucc = new_instancemethod(_simuPOP_op.Bernullitrials_trialFirstSucc, None, Bernullitrials)
Bernullitrials.trialNextSucc = new_instancemethod(_simuPOP_op.Bernullitrials_trialNextSucc, None, Bernullitrials)
Bernullitrials.setTrialSucc = new_instancemethod(_simuPOP_op.Bernullitrials_setTrialSucc, None, Bernullitrials)
Bernullitrials.trialSuccRate = new_instancemethod(_simuPOP_op.Bernullitrials_trialSuccRate, None, Bernullitrials)
Bernullitrials.probSuccRate = new_instancemethod(_simuPOP_op.Bernullitrials_probSuccRate, None, Bernullitrials)
Bernullitrials_swigregister = _simuPOP_op.Bernullitrials_swigregister
Bernullitrials_swigregister(Bernullitrials)
class Bernullitrials_T(object):
    """SWIG proxy for the templated Bernoulli-trials generator.

    Only the constructor is exposed in the class body; the remaining
    methods (setParameter, doTrial, trial, ...) are attached below via
    new_instancemethod.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        _simuPOP_op.Bernullitrials_T_swiginit(self, _simuPOP_op.new_Bernullitrials_T(*args))
    __swig_destroy__ = _simuPOP_op.delete_Bernullitrials_T
    npos = _simuPOP_op.Bernullitrials_T_npos
# Standard SWIG boilerplate: attach the C-implemented methods and register
# Bernullitrials_T with the SWIG runtime.
Bernullitrials_T.setParameter = new_instancemethod(_simuPOP_op.Bernullitrials_T_setParameter, None, Bernullitrials_T)
Bernullitrials_T.doTrial = new_instancemethod(_simuPOP_op.Bernullitrials_T_doTrial, None, Bernullitrials_T)
Bernullitrials_T.trial = new_instancemethod(_simuPOP_op.Bernullitrials_T_trial, None, Bernullitrials_T)
Bernullitrials_T.trialSucc = new_instancemethod(_simuPOP_op.Bernullitrials_T_trialSucc, None, Bernullitrials_T)
Bernullitrials_T.probFirstSucc = new_instancemethod(_simuPOP_op.Bernullitrials_T_probFirstSucc, None, Bernullitrials_T)
Bernullitrials_T.probNextSucc = new_instancemethod(_simuPOP_op.Bernullitrials_T_probNextSucc, None, Bernullitrials_T)
Bernullitrials_T.setTrialSucc = new_instancemethod(_simuPOP_op.Bernullitrials_T_setTrialSucc, None, Bernullitrials_T)
Bernullitrials_T.trialSuccRate = new_instancemethod(_simuPOP_op.Bernullitrials_T_trialSuccRate, None, Bernullitrials_T)
Bernullitrials_T.probSuccRate = new_instancemethod(_simuPOP_op.Bernullitrials_T_probSuccRate, None, Bernullitrials_T)
Bernullitrials_T_swigregister = _simuPOP_op.Bernullitrials_T_swigregister
Bernullitrials_T_swigregister(Bernullitrials_T)
def moduleInfo() -> "PyObject *":
    """Return a dictionary describing the currently loaded simuPOP module.

    The dictionary has the following keys:
      revision     -- revision number
      version      -- simuPOP version string
      optimized    -- whether this module is optimized (True/False)
      alleleType   -- allele type (short, long or binary)
      maxAllele    -- maximum allowed allele state (1 binary, 255 short,
                      65535 long)
      compiler     -- compiler used to build the module
      date         -- build date
      python       -- Python version
      platform     -- platform of the module
      wordsize     -- word size, 32 or 64
      alleleBits   -- number of bits used to store an allele
      maxNumSubPop -- maximum number of subpopulations
      maxIndex     -- maximum index size (limits population size * total
                      number of markers)
      debug        -- dict mapping debugging codes to their on/off status
    """
    info = _simuPOP_op.moduleInfo()
    return info
class GenoStruTrait(object):
    """
    Genotypic structure shared by all individuals of a population.

    Properties such as ploidy, the number of chromosomes, the number,
    position and names of loci, allele names, chromosome names/types and
    information-field names are stored here and are accessible from both
    Individual and Population objects.  Positions of loci carry no fixed
    unit (ordinal, physical or map distance, depending on the
    application).  Utility lookups such as locusByName are provided in
    addition to the basic accessors.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self):
        """A GenoStruTrait object is created with the construction of a
        Population object and cannot be initialized directly."""
        _simuPOP_op.GenoStruTrait_swiginit(self, _simuPOP_op.new_GenoStruTrait())

    def lociDist(self, locus1: 'size_t', locus2: 'size_t') -> "double":
        """Return the distance between *locus1* and *locus2* on the same
        chromosome; negative if *locus1* lies after *locus2*."""
        return _simuPOP_op.GenoStruTrait_lociDist(self, locus1, locus2)

    def lociLeft(self, locus: 'size_t') -> "size_t":
        """Obsolete or undocumented function."""
        return _simuPOP_op.GenoStruTrait_lociLeft(self, locus)

    def distLeft(self, locus: 'size_t') -> "double":
        """Obsolete or undocumented function."""
        return _simuPOP_op.GenoStruTrait_distLeft(self, locus)

    def lociCovered(self, locus: 'size_t', dist: 'double') -> "size_t":
        """Obsolete or undocumented function."""
        return _simuPOP_op.GenoStruTrait_lociCovered(self, locus, dist)

    def ploidy(self) -> "UINT":
        """Return the number of homologous sets of chromosomes, as given
        by the ploidy parameter of the Population function.  Returns 2
        for a haplodiploid population, since two sets are stored for both
        males and females."""
        return _simuPOP_op.GenoStruTrait_ploidy(self)

    def ploidyName(self) -> "string":
        """Return the ploidy name: haploid, diploid, haplodiploid,
        triploid, tetraploid or #-ploid where # is the ploidy number."""
        return _simuPOP_op.GenoStruTrait_ploidyName(self)

    def numLoci(self, *args) -> "vectoru":
        """Return a list of the number of loci on all chromosomes."""
        return _simuPOP_op.GenoStruTrait_numLoci(self, *args)

    def sexChrom(self) -> "bool":
        """Obsolete or undocumented function."""
        return _simuPOP_op.GenoStruTrait_sexChrom(self)

    def isHaplodiploid(self) -> "bool":
        """Obsolete or undocumented function."""
        return _simuPOP_op.GenoStruTrait_isHaplodiploid(self)

    def totNumLoci(self) -> "size_t":
        """Return the total number of loci on all chromosomes."""
        return _simuPOP_op.GenoStruTrait_totNumLoci(self)

    def genoSize(self) -> "size_t":
        """Obsolete or undocumented function."""
        return _simuPOP_op.GenoStruTrait_genoSize(self)

    def locusPos(self, locus: 'size_t') -> "double":
        """Return the position of *locus*, specified by the lociPos
        parameter of the Population function."""
        return _simuPOP_op.GenoStruTrait_locusPos(self, locus)

    def lociPos(self) -> "vectorf":
        """Return the positions of all loci; the default positions are
        1, 2, 3, 4, ... on each chromosome."""
        return _simuPOP_op.GenoStruTrait_lociPos(self)

    def numChrom(self) -> "size_t":
        """Return the number of chromosomes."""
        return _simuPOP_op.GenoStruTrait_numChrom(self)

    def chromBegin(self, chrom: 'size_t') -> "size_t":
        """Return the index of the first locus on chromosome *chrom*."""
        return _simuPOP_op.GenoStruTrait_chromBegin(self, chrom)

    def chromEnd(self, chrom: 'size_t') -> "size_t":
        """Return the index of the last locus on chromosome *chrom*,
        plus 1."""
        return _simuPOP_op.GenoStruTrait_chromEnd(self, chrom)

    def absLocusIndex(self, chrom: 'UINT', locus: 'UINT') -> "size_t":
        """Return the absolute index of *locus* on chromosome *chrom*;
        c.f. chromLocusPair."""
        return _simuPOP_op.GenoStruTrait_absLocusIndex(self, chrom, locus)

    def chromLocusPair(self, locus: 'size_t') -> "pairu":
        """Return the (chromosome, relative index) of a locus from its
        absolute index *locus*; c.f. absLocusIndex."""
        return _simuPOP_op.GenoStruTrait_chromLocusPair(self, locus)

    def chromName(self, chrom: 'size_t const') -> "string":
        """Return the name of chromosome *chrom*."""
        return _simuPOP_op.GenoStruTrait_chromName(self, chrom)

    def chromNames(self) -> "vectorstr":
        """Return a list of the names of all chromosomes."""
        return _simuPOP_op.GenoStruTrait_chromNames(self)

    def chromType(self, chrom: 'size_t const') -> "size_t":
        """Return the type of chromosome *chrom* (CUSTOMIZED, AUTOSOME,
        CHROMOSOME_X, CHROMOSOME_Y or MITOCHONDRIAL)."""
        return _simuPOP_op.GenoStruTrait_chromType(self, chrom)

    def chromTypes(self) -> "vectoru":
        """Return the types of all chromosomes (CUSTOMIZED, AUTOSOME,
        CHROMOSOME_X, CHROMOSOME_Y, or MITOCHONDRIAL)."""
        return _simuPOP_op.GenoStruTrait_chromTypes(self)

    def chromByName(self, name: 'string const') -> "size_t":
        """Return the index of a chromosome by its *name*."""
        return _simuPOP_op.GenoStruTrait_chromByName(self, name)

    def alleleName(self, allele: 'ULONG const', locus: 'size_t const'=0) -> "string":
        """Return the name of *allele* at *locus* as specified by the
        alleleNames parameter of the Population function; *locus* can be
        omitted when all loci share the same names.  Unnamed alleles are
        rendered by value ('0', '1', '2', ...)."""
        return _simuPOP_op.GenoStruTrait_alleleName(self, allele, locus)

    def alleleNames(self, locus: 'size_t const'=0) -> "vectorstr":
        """Return the list of allele names at *locus*; the list need not
        cover all possible allele states, so alleleNames()[allele] may
        fail where alleleName(allele) would not."""
        return _simuPOP_op.GenoStruTrait_alleleNames(self, locus)

    def locusName(self, locus: 'size_t const') -> "string":
        """Return the name of *locus* given by the lociNames parameter of
        the Population function, or an empty string if it has none."""
        return _simuPOP_op.GenoStruTrait_locusName(self, locus)

    def lociNames(self) -> "vectorstr":
        """Return the names of all loci, or an empty list if lociNames
        was not specified."""
        return _simuPOP_op.GenoStruTrait_lociNames(self)

    def locusByName(self, name: 'string const') -> "size_t":
        """Return the index of the locus named *name*; raises ValueError
        if none is found.  Unnamed loci (empty-string names) cannot be
        looked up this way."""
        return _simuPOP_op.GenoStruTrait_locusByName(self, name)

    def lociByNames(self, names: 'vectorstr const &') -> "vectoru":
        """Return the indexes of the loci named *names*; raises
        ValueError if any of them cannot be found."""
        return _simuPOP_op.GenoStruTrait_lociByNames(self, names)

    def indexesOfLoci(self, *args, **kwargs) -> "vectoru":
        """Return the indexes of loci at given positions, passed as a
        list of (chrom, pos) pairs; raises ValueError if any cannot be
        found.

        Usage:
            x.indexesOfLoci(loci=ALL_AVAIL)
        """
        return _simuPOP_op.GenoStruTrait_indexesOfLoci(self, *args, **kwargs)

    def hasInfoField(self, name: 'string const &') -> "bool":
        """Obsolete or undocumented function."""
        return _simuPOP_op.GenoStruTrait_hasInfoField(self, name)

    def infoSize(self) -> "size_t":
        """Obsolete or undocumented function."""
        return _simuPOP_op.GenoStruTrait_infoSize(self)

    def infoFields(self) -> "vectorstr":
        """Return the names of all information fields of the population."""
        return _simuPOP_op.GenoStruTrait_infoFields(self)

    def infoField(self, idx: 'size_t') -> "string":
        """Return the name of information field *idx*."""
        return _simuPOP_op.GenoStruTrait_infoField(self, idx)

    def infoIdx(self, name: 'string const &') -> "size_t":
        """Return the index of information field *name*; raises
        IndexError if *name* is not one of the information fields."""
        return _simuPOP_op.GenoStruTrait_infoIdx(self, name)
    __swig_destroy__ = _simuPOP_op.delete_GenoStruTrait
# Standard SWIG boilerplate: rebind the proxy methods directly to their C
# extension implementations and register GenoStruTrait with the SWIG runtime.
GenoStruTrait.lociDist = new_instancemethod(_simuPOP_op.GenoStruTrait_lociDist, None, GenoStruTrait)
GenoStruTrait.lociLeft = new_instancemethod(_simuPOP_op.GenoStruTrait_lociLeft, None, GenoStruTrait)
GenoStruTrait.distLeft = new_instancemethod(_simuPOP_op.GenoStruTrait_distLeft, None, GenoStruTrait)
GenoStruTrait.lociCovered = new_instancemethod(_simuPOP_op.GenoStruTrait_lociCovered, None, GenoStruTrait)
GenoStruTrait.ploidy = new_instancemethod(_simuPOP_op.GenoStruTrait_ploidy, None, GenoStruTrait)
GenoStruTrait.ploidyName = new_instancemethod(_simuPOP_op.GenoStruTrait_ploidyName, None, GenoStruTrait)
GenoStruTrait.numLoci = new_instancemethod(_simuPOP_op.GenoStruTrait_numLoci, None, GenoStruTrait)
GenoStruTrait.sexChrom = new_instancemethod(_simuPOP_op.GenoStruTrait_sexChrom, None, GenoStruTrait)
GenoStruTrait.isHaplodiploid = new_instancemethod(_simuPOP_op.GenoStruTrait_isHaplodiploid, None, GenoStruTrait)
GenoStruTrait.totNumLoci = new_instancemethod(_simuPOP_op.GenoStruTrait_totNumLoci, None, GenoStruTrait)
GenoStruTrait.genoSize = new_instancemethod(_simuPOP_op.GenoStruTrait_genoSize, None, GenoStruTrait)
GenoStruTrait.locusPos = new_instancemethod(_simuPOP_op.GenoStruTrait_locusPos, None, GenoStruTrait)
GenoStruTrait.lociPos = new_instancemethod(_simuPOP_op.GenoStruTrait_lociPos, None, GenoStruTrait)
GenoStruTrait.numChrom = new_instancemethod(_simuPOP_op.GenoStruTrait_numChrom, None, GenoStruTrait)
GenoStruTrait.chromBegin = new_instancemethod(_simuPOP_op.GenoStruTrait_chromBegin, None, GenoStruTrait)
GenoStruTrait.chromEnd = new_instancemethod(_simuPOP_op.GenoStruTrait_chromEnd, None, GenoStruTrait)
GenoStruTrait.absLocusIndex = new_instancemethod(_simuPOP_op.GenoStruTrait_absLocusIndex, None, GenoStruTrait)
GenoStruTrait.chromLocusPair = new_instancemethod(_simuPOP_op.GenoStruTrait_chromLocusPair, None, GenoStruTrait)
GenoStruTrait.chromName = new_instancemethod(_simuPOP_op.GenoStruTrait_chromName, None, GenoStruTrait)
GenoStruTrait.chromNames = new_instancemethod(_simuPOP_op.GenoStruTrait_chromNames, None, GenoStruTrait)
GenoStruTrait.chromType = new_instancemethod(_simuPOP_op.GenoStruTrait_chromType, None, GenoStruTrait)
GenoStruTrait.chromTypes = new_instancemethod(_simuPOP_op.GenoStruTrait_chromTypes, None, GenoStruTrait)
GenoStruTrait.chromByName = new_instancemethod(_simuPOP_op.GenoStruTrait_chromByName, None, GenoStruTrait)
GenoStruTrait.alleleName = new_instancemethod(_simuPOP_op.GenoStruTrait_alleleName, None, GenoStruTrait)
GenoStruTrait.alleleNames = new_instancemethod(_simuPOP_op.GenoStruTrait_alleleNames, None, GenoStruTrait)
GenoStruTrait.locusName = new_instancemethod(_simuPOP_op.GenoStruTrait_locusName, None, GenoStruTrait)
GenoStruTrait.lociNames = new_instancemethod(_simuPOP_op.GenoStruTrait_lociNames, None, GenoStruTrait)
GenoStruTrait.locusByName = new_instancemethod(_simuPOP_op.GenoStruTrait_locusByName, None, GenoStruTrait)
GenoStruTrait.lociByNames = new_instancemethod(_simuPOP_op.GenoStruTrait_lociByNames, None, GenoStruTrait)
GenoStruTrait.indexesOfLoci = new_instancemethod(_simuPOP_op.GenoStruTrait_indexesOfLoci, None, GenoStruTrait)
GenoStruTrait.hasInfoField = new_instancemethod(_simuPOP_op.GenoStruTrait_hasInfoField, None, GenoStruTrait)
GenoStruTrait.infoSize = new_instancemethod(_simuPOP_op.GenoStruTrait_infoSize, None, GenoStruTrait)
GenoStruTrait.infoFields = new_instancemethod(_simuPOP_op.GenoStruTrait_infoFields, None, GenoStruTrait)
GenoStruTrait.infoField = new_instancemethod(_simuPOP_op.GenoStruTrait_infoField, None, GenoStruTrait)
GenoStruTrait.infoIdx = new_instancemethod(_simuPOP_op.GenoStruTrait_infoIdx, None, GenoStruTrait)
GenoStruTrait_swigregister = _simuPOP_op.GenoStruTrait_swigregister
GenoStruTrait_swigregister(GenoStruTrait)
class pyMutantIterator(object):
    """
    Iterator over the mutants (non-zero alleles) of an individual.

    NOTE(review): the upstream docstring described a (sub)population
    *individual* iterator, which does not match this class.  Judging from
    Individual.mutants(), which returns this type, each call to next()
    yields one mutant as an (index, value) pair — confirm against the
    C++ sources.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, base: 'GenoIterator', begin: 'size_t', end: 'size_t', step: 'size_t'):
        """
        Usage:
            pyMutantIterator(base, begin, end, step)
        """
        _simuPOP_op.pyMutantIterator_swiginit(self, _simuPOP_op.new_pyMutantIterator(base, begin, end, step))
    __swig_destroy__ = _simuPOP_op.delete_pyMutantIterator
    def next(self) -> "pairu":
        """
        Return the next element; also exposed as __next__ (attached
        below), so the object works with both Python 2 and 3 iteration.
        Usage:
            x.__next__()
        """
        return _simuPOP_op.pyMutantIterator_next(self)
# Standard SWIG boilerplate: attach the iterator protocol methods and
# register pyMutantIterator with the SWIG runtime.
pyMutantIterator.__iter__ = new_instancemethod(_simuPOP_op.pyMutantIterator___iter__, None, pyMutantIterator)
pyMutantIterator.next = new_instancemethod(_simuPOP_op.pyMutantIterator_next, None, pyMutantIterator)
pyMutantIterator.__next__ = new_instancemethod(_simuPOP_op.pyMutantIterator___next__, None, pyMutantIterator)
pyMutantIterator_swigregister = _simuPOP_op.pyMutantIterator_swigregister
pyMutantIterator_swigregister(pyMutantIterator)
class Individual(GenoStruTrait):
    """
    One member of a Population.

    Individual objects cannot be created independently; references to
    them are obtained through member functions of a Population object.
    Beyond the structural information shared by all individuals (from
    GenoStruTrait), this class gets and sets the genotype, sex, affection
    status and information fields of a single individual.  Alleles are
    stored sequentially, arranged by position, chromosome and ploidy, so
    alleles at the same locus of a non-haploid individual are separated
    by totNumLoci() loci.  Access to invalid chromosomes, such as the Y
    chromosome of a female, is not restricted.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self):
        """Individuals cannot be created directly; access them from a
        Population object, e.g. Population.Individual(idx)."""
        _simuPOP_op.Individual_swiginit(self, _simuPOP_op.new_Individual())
    __swig_destroy__ = _simuPOP_op.delete_Individual

    def allele(self, idx: 'size_t', ploidy: 'ssize_t'=-1, chrom: 'ssize_t'=-1) -> "ULONG":
        """Return the allele at absolute index *idx*.  When *ploidy*
        and/or *chrom* are given, *idx* is taken relative to the start of
        that homologous copy (and of that chromosome, if chrom >= 0)."""
        return _simuPOP_op.Individual_allele(self, idx, ploidy, chrom)

    def alleleChar(self, idx: 'size_t', ploidy: 'ssize_t'=-1, chrom: 'ssize_t'=-1) -> "string":
        """Return the name of allele(idx, ploidy, chrom); '_' when *idx*
        is invalid (e.g. second homologous copy of chromosome Y)."""
        return _simuPOP_op.Individual_alleleChar(self, idx, ploidy, chrom)

    def setAllele(self, allele: 'ULONG', idx: 'size_t', ploidy: 'int'=-1, chrom: 'int'=-1) -> "void":
        """Set *allele* at absolute index *idx*; *ploidy*/*chrom* make
        *idx* relative as in allele()."""
        return _simuPOP_op.Individual_setAllele(self, allele, idx, ploidy, chrom)

    def alleleLineage(self, idx: 'size_t', ploidy: 'ssize_t'=-1, chrom: 'ssize_t'=-1) -> "long":
        """Return the lineage of the allele at absolute index *idx*;
        *ploidy*/*chrom* make *idx* relative as in allele().  Returns 0
        for modules without lineage information."""
        return _simuPOP_op.Individual_alleleLineage(self, idx, ploidy, chrom)

    def setAlleleLineage(self, lineage: 'long', idx: 'size_t', ploidy: 'int'=-1, chrom: 'int'=-1) -> "void":
        """Set *lineage* of the allele at absolute index *idx*;
        *ploidy*/*chrom* make *idx* relative as in allele().  Does
        nothing for modules without lineage information."""
        return _simuPOP_op.Individual_setAlleleLineage(self, lineage, idx, ploidy, chrom)

    def genotype(self, *args, **kwargs) -> "PyObject *":
        """Return an editable carray of this individual's alleles,
        optionally limited to the given homologous copies/chromosomes
        (multiple chromosomes must be contiguous).  Chromosome types are
        ignored, so unused alleles of sex and mitochondrial chromosomes
        are included.

        Usage:
            x.genotype(ploidy=ALL_AVAIL, chroms=ALL_AVAIL)
        """
        return _simuPOP_op.Individual_genotype(self, *args, **kwargs)

    def mutants(self, *args, **kwargs) -> "simuPOP::pyMutantIterator":
        """Return an iterator of (index, value) pairs over all mutants
        (non-zero alleles), with index in [0, totNumLoci() * ploidy());
        adjust indexes yourself to inspect multiple alleles at a locus.
        Optionally limited to the given homologous copies/chromosomes
        (multiple chromosomes must be contiguous); chromosome types are
        ignored.

        Usage:
            x.mutants(ploidy=ALL_AVAIL, chroms=ALL_AVAIL)
        """
        return _simuPOP_op.Individual_mutants(self, *args, **kwargs)

    def lineage(self, *args, **kwargs) -> "PyObject *":
        """Return an editable carray_lineage of the lineages of all
        alleles, optionally limited to the given homologous
        copies/chromosomes (multiple chromosomes must be contiguous);
        chromosome types are ignored.  Returns None for modules without
        lineage information.

        Usage:
            x.lineage(ploidy=ALL_AVAIL, chroms=ALL_AVAIL)
        """
        return _simuPOP_op.Individual_lineage(self, *args, **kwargs)

    def setGenotype(self, *args, **kwargs) -> "void":
        """Fill the genotype from a list of alleles *geno*, reused if
        shorter than the number of alleles to fill; optionally limited to
        the given homologous copies/chromosomes.  Chromosome types are
        ignored, so unused alleles of sex and mitochondrial chromosomes
        are also set.

        Usage:
            x.setGenotype(geno, ploidy=ALL_AVAIL, chroms=ALL_AVAIL)
        """
        return _simuPOP_op.Individual_setGenotype(self, *args, **kwargs)

    def setLineage(self, *args, **kwargs) -> "void":
        """Fill allelic lineages from a list of IDs *lineage*, reused if
        shorter than the number of lineages to fill; optionally limited
        to the given homologous copies/chromosomes.  Chromosome types are
        ignored.  Does nothing for modules without lineage information.

        Usage:
            x.setLineage(lineage, ploidy=ALL_AVAIL, chroms=ALL_AVAIL)
        """
        return _simuPOP_op.Individual_setLineage(self, *args, **kwargs)

    def sex(self) -> "Sex":
        """Return the sex of this individual: 1 for male, 2 for female."""
        return _simuPOP_op.Individual_sex(self)

    def setSex(self, sex: 'Sex') -> "void":
        """Set this individual's sex to MALE or FEMALE."""
        return _simuPOP_op.Individual_setSex(self, sex)

    def affected(self) -> "bool":
        """Return True if this individual is affected."""
        return _simuPOP_op.Individual_affected(self)

    def setAffected(self, affected: 'bool') -> "void":
        """Set the affection status to *affected* (True or False)."""
        return _simuPOP_op.Individual_setAffected(self, affected)

    def info(self, field: 'uintString') -> "double":
        """Return the value of information field *field* (by index or
        name); ind.info(name) is equivalent to ind.name, but this form
        also accepts field indexes."""
        return _simuPOP_op.Individual_info(self, field)

    def setInfo(self, value: 'double', field: 'uintString') -> "void":
        """Set information field *field* (by index or name) to *value*;
        equivalent to ind.field = value, but this form also accepts
        field indexes."""
        return _simuPOP_op.Individual_setInfo(self, value, field)
# Standard SWIG boilerplate: rebind the proxy methods to their C extension
# implementations and register Individual with the SWIG runtime.  Note that
# genoEnd, __eq__, __ne__ and __cmp__ are attached here without a
# Python-level definition in the class body.
Individual.allele = new_instancemethod(_simuPOP_op.Individual_allele, None, Individual)
Individual.alleleChar = new_instancemethod(_simuPOP_op.Individual_alleleChar, None, Individual)
Individual.setAllele = new_instancemethod(_simuPOP_op.Individual_setAllele, None, Individual)
Individual.alleleLineage = new_instancemethod(_simuPOP_op.Individual_alleleLineage, None, Individual)
Individual.setAlleleLineage = new_instancemethod(_simuPOP_op.Individual_setAlleleLineage, None, Individual)
Individual.genotype = new_instancemethod(_simuPOP_op.Individual_genotype, None, Individual)
Individual.mutants = new_instancemethod(_simuPOP_op.Individual_mutants, None, Individual)
Individual.lineage = new_instancemethod(_simuPOP_op.Individual_lineage, None, Individual)
Individual.setGenotype = new_instancemethod(_simuPOP_op.Individual_setGenotype, None, Individual)
Individual.setLineage = new_instancemethod(_simuPOP_op.Individual_setLineage, None, Individual)
Individual.sex = new_instancemethod(_simuPOP_op.Individual_sex, None, Individual)
Individual.setSex = new_instancemethod(_simuPOP_op.Individual_setSex, None, Individual)
Individual.affected = new_instancemethod(_simuPOP_op.Individual_affected, None, Individual)
Individual.setAffected = new_instancemethod(_simuPOP_op.Individual_setAffected, None, Individual)
Individual.info = new_instancemethod(_simuPOP_op.Individual_info, None, Individual)
Individual.setInfo = new_instancemethod(_simuPOP_op.Individual_setInfo, None, Individual)
Individual.genoEnd = new_instancemethod(_simuPOP_op.Individual_genoEnd, None, Individual)
Individual.__eq__ = new_instancemethod(_simuPOP_op.Individual___eq__, None, Individual)
Individual.__ne__ = new_instancemethod(_simuPOP_op.Individual___ne__, None, Individual)
Individual.__cmp__ = new_instancemethod(_simuPOP_op.Individual___cmp__, None, Individual)
Individual_swigregister = _simuPOP_op.Individual_swigregister
Individual_swigregister(Individual)
class vspID(object):
    """
    A virtual subpopulation specifier, composed of a subpopulation ID
    and a virtual subpopulation ID.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args):
        """Create a subpopulation id; accepts ids as well as names.

        Usage:
            vspID(id)
        """
        _simuPOP_op.vspID_swiginit(self, _simuPOP_op.new_vspID(*args))
    __swig_destroy__ = _simuPOP_op.delete_vspID

    def resolve(self, pop: 'Population') -> "simuPOP::vspID":
        """
        Usage:
            x.resolve(pop)
        """
        return _simuPOP_op.vspID_resolve(self, pop)
# Standard SWIG boilerplate: attach vspID methods and register the proxy
# type with the SWIG runtime.
vspID.__eq__ = new_instancemethod(_simuPOP_op.vspID___eq__, None, vspID)
vspID.resolve = new_instancemethod(_simuPOP_op.vspID_resolve, None, vspID)
vspID_swigregister = _simuPOP_op.vspID_swigregister
vspID_swigregister(vspID)
def __lshift__(out: 'ostream &', vsp: 'vspID') -> "ostream &":
    """Wrapper for the C++ stream-insertion operator for vspID."""
    return _simuPOP_op.__lshift__(out, vsp)
# NOTE: the def above is immediately shadowed by this rebinding to the raw
# extension function; the Python definition only documents the signature.
__lshift__ = _simuPOP_op.__lshift__
class subPopList(object):
    """
    A list of (virtual) subpopulations.  A dedicated class lets users
    specify a single subpopulation, or a list of (virtual)
    subpopulations, with equal ease.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, obj: 'PyObject *'=None):
        """
        Usage:
            subPopList(obj=None)
        """
        _simuPOP_op.subPopList_swiginit(self, _simuPOP_op.new_subPopList(obj))

    def expandFrom(self, pop: 'Population') -> "simuPOP::subPopList":
        """Expand ALL_AVAIL and [(ALL_AVAIL, vsp), ...] entries according
        to *pop*.

        Usage:
            x.expandFrom(pop)
        """
        return _simuPOP_op.subPopList_expandFrom(self, pop)
    __swig_destroy__ = _simuPOP_op.delete_subPopList
# Standard SWIG boilerplate: attach subPopList methods and register the
# proxy type with the SWIG runtime.
subPopList.__len__ = new_instancemethod(_simuPOP_op.subPopList___len__, None, subPopList)
subPopList.push_back = new_instancemethod(_simuPOP_op.subPopList_push_back, None, subPopList)
subPopList.begin = new_instancemethod(_simuPOP_op.subPopList_begin, None, subPopList)
subPopList.end = new_instancemethod(_simuPOP_op.subPopList_end, None, subPopList)
subPopList.expandFrom = new_instancemethod(_simuPOP_op.subPopList_expandFrom, None, subPopList)
subPopList_swigregister = _simuPOP_op.subPopList_swigregister
subPopList_swigregister(subPopList)
class BaseVspSplitter(object):
    """
    Details:
        This class is the base class of all virtual subpopulation (VSP)
        splitters, which provide ways to define groups of individuals in a
        subpopulation who share certain properties. A splitter defines a
        fixed number of named VSPs. They do not have to add up to the
        whole subpopulation, nor do they have to be distinct. After a
        splitter is assigned to a population, many functions and operators
        can be applied to individuals within specified VSPs. Each VSP has
        a name. A default name is determined by each splitter but you can
        also assign a name to each VSP. The name of a VSP can be retrieved
        by function BaseVspSplitter.name() or Population.subPopName().
        Only one VSP splitter can be assigned to a population, which
        defined VSPs for all its subpopulations. If different splitters
        are needed for different subpopulations, a CombinedSplitter can be
        used.
    """

    # Ownership flag of the underlying C++ object (standard SWIG convention).
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Abstract base: instances must be created through a concrete
        # splitter subclass (SexSplitter, InfoSplitter, ...).
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr

    def clone(self) -> "simuPOP::BaseVspSplitter *":
        """
        Usage:
            x.clone()

        Details:
            All VSP splitter defines a clone() function to create an identical
            copy of itself.
        """
        return _simuPOP_op.BaseVspSplitter_clone(self)
    __swig_destroy__ = _simuPOP_op.delete_BaseVspSplitter

    def numVirtualSubPop(self) -> "size_t":
        """
        Usage:
            x.numVirtualSubPop()

        Details:
            Return the number of VSPs defined by this splitter.
        """
        return _simuPOP_op.BaseVspSplitter_numVirtualSubPop(self)

    def name(self, vsp: 'size_t') -> "string":
        """
        Usage:
            x.name(vsp)

        Details:
            Return the name of VSP vsp (an index between 0 and
            numVirtualSubPop()).
        """
        return _simuPOP_op.BaseVspSplitter_name(self, vsp)

    def vspByName(self, name: 'string const &') -> "size_t":
        """
        Usage:
            x.vspByName(name)

        Details:
            Return the index of a virtual subpopulation from its name. If
            multiple virtual subpopulations share the same name, the first vsp
            is returned.
        """
        return _simuPOP_op.BaseVspSplitter_vspByName(self, name)

# SWIG runtime wiring: attach C-implemented methods and register the proxy.
BaseVspSplitter.clone = new_instancemethod(_simuPOP_op.BaseVspSplitter_clone, None, BaseVspSplitter)
BaseVspSplitter.numVirtualSubPop = new_instancemethod(_simuPOP_op.BaseVspSplitter_numVirtualSubPop, None, BaseVspSplitter)
BaseVspSplitter.name = new_instancemethod(_simuPOP_op.BaseVspSplitter_name, None, BaseVspSplitter)
BaseVspSplitter.vspByName = new_instancemethod(_simuPOP_op.BaseVspSplitter_vspByName, None, BaseVspSplitter)
BaseVspSplitter_swigregister = _simuPOP_op.BaseVspSplitter_swigregister
BaseVspSplitter_swigregister(BaseVspSplitter)
class CombinedSplitter(BaseVspSplitter):
    """
    Details:
        This splitter takes several splitters and stacks their VSPs
        together. For example, if the first splitter defines 3 VSPs and
        the second splitter defines 2, the two VSPs from the second
        splitter become the fourth (index 3) and the fifth (index 4) VSPs
        of the combined splitter. In addition, a new set of VSPs could be
        defined as the union of one or more of the original VSPs. This
        splitter is usually used to define different types of VSPs to a
        population.
    """

    # Ownership flag of the underlying C++ object (standard SWIG convention).
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            CombinedSplitter(splitters=[], vspMap=[], names=[])

        Details:
            Create a combined splitter using a list of splitters. For example,
            CombinedSplitter([SexSplitter(), AffectionSplitter()]) defines a
            combined splitter with four VSPs, defined by male (vsp 0), female
            (vsp 1), unaffected (vsp 2) and affected individuals (vsp 3).
            Optionally, a new set of VSPs could be defined by parameter
            vspMap. Each item in this parameter is a list of VSPs that will be
            combined to a single VSP. For example, vspMap=[(0, 2), (1, 3)] in
            the previous example will define two VSPs defined by male or
            unaffected, and female or affected individuals. VSP names are
            usually determined by splitters, but can also be specified using
            parameter names.
        """
        _simuPOP_op.CombinedSplitter_swiginit(self, _simuPOP_op.new_CombinedSplitter(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_CombinedSplitter

# SWIG runtime wiring: register the proxy class.
CombinedSplitter_swigregister = _simuPOP_op.CombinedSplitter_swigregister
CombinedSplitter_swigregister(CombinedSplitter)
class ProductSplitter(BaseVspSplitter):
    """
    Details:
        This splitter takes several splitters and take their intersections
        as new VSPs. For example, if the first splitter defines 3 VSPs and
        the second splitter defines 2, 6 VSPs will be defined by splitting
        3 VSPs defined by the first splitter each to two VSPs. This
        splitter is usually used to define finer VSPs from existing VSPs.
    """

    # Ownership flag of the underlying C++ object (standard SWIG convention).
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            ProductSplitter(splitters=[], names=[])

        Details:
            Create a product splitter using a list of splitters. For example,
            ProductSplitter([SexSplitter(), AffectionSplitter()]) defines four
            VSPs by male unaffected, male affected, female unaffected, and
            female affected individuals. VSP names are usually determined by
            splitters, but can also be specified using parameter names.
        """
        _simuPOP_op.ProductSplitter_swiginit(self, _simuPOP_op.new_ProductSplitter(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_ProductSplitter

# SWIG runtime wiring: register the proxy class.
ProductSplitter_swigregister = _simuPOP_op.ProductSplitter_swigregister
ProductSplitter_swigregister(ProductSplitter)
class SexSplitter(BaseVspSplitter):
    """
    Details:
        This splitter defines two VSPs by individual sex. The first VSP
        consists of all male individuals and the second VSP consists of
        all females in a subpopulation.
    """

    # Ownership flag of the underlying C++ object (standard SWIG convention).
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            SexSplitter(names=[])

        Details:
            Create a sex splitter that defines male and female VSPs. These
            VSPs are named Male and Female unless a new set of names are
            specified by parameter names.
        """
        _simuPOP_op.SexSplitter_swiginit(self, _simuPOP_op.new_SexSplitter(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_SexSplitter

# SWIG runtime wiring: register the proxy class.
SexSplitter_swigregister = _simuPOP_op.SexSplitter_swigregister
SexSplitter_swigregister(SexSplitter)
class AffectionSplitter(BaseVspSplitter):
    """
    Details:
        This class defines two VSPs according to individual affection
        status. The first VSP consists of unaffected individuals and the
        second VSP consists of affected ones.
    """

    # Ownership flag of the underlying C++ object (standard SWIG convention).
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            AffectionSplitter(names=[])

        Details:
            Create a splitter that defines two VSPs by affection status. These
            VSPs are named Unaffected and Affected unless a new set of names
            are specified by parameter names.
        """
        _simuPOP_op.AffectionSplitter_swiginit(self, _simuPOP_op.new_AffectionSplitter(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_AffectionSplitter

# SWIG runtime wiring: register the proxy class.
AffectionSplitter_swigregister = _simuPOP_op.AffectionSplitter_swigregister
AffectionSplitter_swigregister(AffectionSplitter)
class InfoSplitter(BaseVspSplitter):
    """
    Details:
        This splitter defines VSPs according to the value of an
        information field of each individual. A VSP is defined either by a
        value or a range of values.
    """

    # Ownership flag of the underlying C++ object (standard SWIG convention).
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            InfoSplitter(field, values=[], cutoff=[], ranges=[], names=[])

        Details:
            Create an information splitter using information field field. If
            parameter values is specified, each item in this list defines a
            VSP in which all individuals have this value at information field
            field. If a set of cutoff values are defined in parameter cutoff,
            individuals are grouped by intervals defined by these cutoff
            values. For example, cutoff=[1,2] defines three VSPs with v < 1, 1
            <= v < 2 and v >=2 where v is the value of an individual at
            information field field. If parameter ranges is specified, each
            range defines a VSP. For example, ranges=[[1, 3], [2, 5]] defines
            two VSPs with 1 <= v < 3 and 2 <= v < 5. Of course, only one of
            the parameters values, cutoff and ranges should be defined, and
            values in cutoff should be distinct, and in an increasing order. A
            default set of names are given to each VSP unless a new set of
            names is given by parameter names.
        """
        _simuPOP_op.InfoSplitter_swiginit(self, _simuPOP_op.new_InfoSplitter(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_InfoSplitter

# SWIG runtime wiring: register the proxy class.
InfoSplitter_swigregister = _simuPOP_op.InfoSplitter_swigregister
InfoSplitter_swigregister(InfoSplitter)
class ProportionSplitter(BaseVspSplitter):
    """
    Details:
        This splitter divides subpopulations into several VSPs by
        proportion.
    """

    # Ownership flag of the underlying C++ object (standard SWIG convention).
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            ProportionSplitter(proportions=[], names=[])

        Details:
            Create a splitter that divides subpopulations by proportions,
            which should be a list of float numbers (between 0 and 1) that add
            up to 1. A default set of names are given to each VSP unless a new
            set of names is given by parameter names.
        """
        _simuPOP_op.ProportionSplitter_swiginit(self, _simuPOP_op.new_ProportionSplitter(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_ProportionSplitter

# SWIG runtime wiring: register the proxy class.
ProportionSplitter_swigregister = _simuPOP_op.ProportionSplitter_swigregister
ProportionSplitter_swigregister(ProportionSplitter)
class RangeSplitter(BaseVspSplitter):
    """
    Details:
        This class defines a splitter that groups individuals in certain
        ranges into VSPs.
    """

    # Ownership flag of the underlying C++ object (standard SWIG convention).
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            RangeSplitter(ranges, names=[])

        Details:
            Create a splitter according to a number of individual ranges
            defined in ranges. For example, RangeSplitter(ranges=[[0, 20],
            [40, 50]]) defines two VSPs. The first VSP consists of individuals
            0, 1, ..., 19, and the second VSP consists of individuals 40, 41,
            ..., 49. Note that a nested list has to be used even if only one
            range is defined. A default set of names are given to each VSP
            unless a new set of names is given by parameter names.
        """
        _simuPOP_op.RangeSplitter_swiginit(self, _simuPOP_op.new_RangeSplitter(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_RangeSplitter

# SWIG runtime wiring: register the proxy class.
RangeSplitter_swigregister = _simuPOP_op.RangeSplitter_swigregister
RangeSplitter_swigregister(RangeSplitter)
class GenotypeSplitter(BaseVspSplitter):
    """
    Details:
        This class defines a VSP splitter that defines VSPs according to
        individual genotype at specified loci.
    """

    # Ownership flag of the underlying C++ object (standard SWIG convention).
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            GenotypeSplitter(loci, alleles, phase=False, names=[])

        Details:
            Create a splitter that defines VSPs by individual genotype at loci
            (can be indexes or names of one or more loci). Each list in a list
            allele defines a VSP, which is a list of allowed alleles at these
            loci. If only one VSP is defined, the outer list of the nested
            list can be ignored. If phase if true, the order of alleles in
            each list is significant. If more than one set of alleles are
            given, individuals having either of them are qualified. For
            example, in a haploid population, loci=1, alleles=[0, 1] defines a
            VSP with individuals having allele 0 or 1 at locus 1, alleles=[[0,
            1], [2]] defines two VSPs with individuals in the second VSP
            having allele 2 at locus 1. If multiple loci are involved, alleles
            at each locus need to be defined. For example, VSP defined by
            loci=[0, 1], alleles=[0, 1, 1, 1] consists of individuals having
            alleles [0, 1] or [1, 1] at loci [0, 1]. In a haploid population,
            loci=1, alleles=[0, 1] defines a VSP with individuals having
            genotype [0, 1] or [1, 0] at locus 1. alleles[[0, 1], [2, 2]]
            defines two VSPs with individuals in the second VSP having
            genotype [2, 2] at locus 1. If phase is set to True, the first VSP
            will only has individuals with genotype [0, 1]. In the multiple
            loci case, alleles should be arranged by haplotypes, for example,
            loci=[0, 1], alleles=[0, 0, 1, 1], phase=True defines a VSP with
            individuals having genotype -0-0-, -1-1- at loci 0 and 1. If
            phase=False (default), genotypes -1-1-, -0-0-, -0-1- and -1-0- are
            all allowed. A default set of names are given to each VSP unless
            a new set of names is given by parameter names.
        """
        _simuPOP_op.GenotypeSplitter_swiginit(self, _simuPOP_op.new_GenotypeSplitter(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_GenotypeSplitter

# SWIG runtime wiring: register the proxy class.
GenotypeSplitter_swigregister = _simuPOP_op.GenotypeSplitter_swigregister
GenotypeSplitter_swigregister(GenotypeSplitter)
class pyIndIterator(object):
    """
    Details:
        this class implements a Python iterator class that can be used to
        iterate through individuals in a (sub)population. If allInds are
        true, visibility of individuals will not be checked. Otherwise, a
        functor will be used to check if individuals belong to a specified
        virtual subpopulation. An instance of this class is returned by
        population::Individuals() and Population::Individuals(subPop)
    """

    # Ownership flag of the underlying C++ object (standard SWIG convention).
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, begin: 'vector< simuPOP::Individual,std::allocator< simuPOP::Individual > >::iterator const', end: 'vector< simuPOP::Individual,std::allocator< simuPOP::Individual > >::iterator const', allInds: 'bool', func: 'simuPOP::vspFunctor'):
        """
        Usage:
            pyIndIterator(begin, end, allInds, func)
        """
        _simuPOP_op.pyIndIterator_swiginit(self, _simuPOP_op.new_pyIndIterator(begin, end, allInds, func))
    __swig_destroy__ = _simuPOP_op.delete_pyIndIterator

    def next(self) -> "simuPOP::Individual &":
        """
        Usage:
            x.__next__()
        """
        # Python-2-style alias; the Python-3 __next__ is attached below.
        return _simuPOP_op.pyIndIterator_next(self)

# SWIG runtime wiring: attach the iterator protocol methods and register.
pyIndIterator.__iter__ = new_instancemethod(_simuPOP_op.pyIndIterator___iter__, None, pyIndIterator)
pyIndIterator.next = new_instancemethod(_simuPOP_op.pyIndIterator_next, None, pyIndIterator)
pyIndIterator.__next__ = new_instancemethod(_simuPOP_op.pyIndIterator___next__, None, pyIndIterator)
pyIndIterator_swigregister = _simuPOP_op.pyIndIterator_swigregister
pyIndIterator_swigregister(pyIndIterator)
class Population(GenoStruTrait):
"""
Details:
A simuPOP population consists of individuals of the same genotypic
structure, organized by generations, subpopulations and virtual
subpopulations. It also contains a Python dictionary that is used
to store arbitrary population variables. In addition to genotypic
structured related functions provided by the GenoStruTrait class,
the population class provides a large number of member functions
that can be used to
* Create, copy and compare populations.
* Manipulate subpopulations. A population can be divided into
several subpopulations. Because individuals only mate with
individuals within the same subpopulation, exchange of genetic
information across subpopulations can only be done through
migration. A number of functions are provided to access
subpopulation structure information, and to merge and split
subpopulations.
* Define and access virtual subpopulations. A virtual
subpopulation splitter can be assigned to a population, which
defines groups of individuals called virtual subpopulations (VSP)
within each subpopulation.
* Access individuals individually, or through iterators that
iterate through individuals in (virtual) subpopulations.
* Access genotype and information fields of individuals at the
population level. From a population point of view, all genotypes
are arranged sequentially individual by individual. Please refer
to class Individual for an introduction to genotype arragement of
each individual.
* Store and access ancestral generations. A population can save
arbitrary number of ancestral generations. It is possible to
directly access an ancestor, or make an ancestral generation the
current generation for more efficient access.
* Insert or remove loci, resize (shrink or expand) a population,
sample from a population, or merge with other populations.
* Manipulate population variables and evaluate expressions in
this local namespace.
* Save and load a population.
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
            Population(size=[], ploidy=2, loci=[], chromTypes=[],
              lociPos=[], ancGen=0, chromNames=[], alleleNames=[],
              lociNames=[], subPopNames=[], infoFields=[])

        Details:
            The following parameters are used to create a population object:

        Arguments:
            size:        A list of subpopulation sizes. The length of this
                         list determines the number of subpopulations of
                         this population. If there is no subpopulation,
                         size=[popSize] can be written as size=popSize.
            ploidy:      Number of homologous sets of chromosomes. Default
                         to 2 (diploid). For efficiency considerations, all
                         chromosomes have the same number of homologous
                         sets, even if some customized chromosomes or some
                         individuals (e.g. males in a haplodiploid
                         population) have different numbers of homologous
                         sets. The first case is handled by setting
                         chromTypes of each chromosome. Only the
                         haplodiploid populations are handled for the
                         second case, for which ploidy=HAPLODIPLOID should
                         be used.
            loci:        A list of numbers of loci on each chromosome. The
                         length of this parameter determines the number of
                         chromosomes. If there is only one chromosome,
                         numLoci instead of [numLoci] can be used.
            chromTypes:  A list that specifies the type of each chromosome,
                         which can be AUTOSOME, CHROMOSOME_X, CHROMOSOME_Y,
                         or CUSTOMIZED. All chromosomes are assumed to be
                         autosomes if this parameter is ignored. Sex
                         chromosome can only be specified in a diploid
                         population where the sex of an individual is
                         determined by the existence of these chromosomes
                         using the XX (FEMALE) and XY (MALE) convention.
                         Both sex chromosomes have to be available and be
                         specified only once. Because chromosomes X and Y
                         are treated as two chromosomes, recombination on
                         the pseudo-autosomal regions of the sex chromsomes
                         is not supported. CUSTOMIZED chromosomes are
                         special chromosomes whose inheritance patterns are
                         undefined. They rely on user-defined functions and
                         operators to be passed from parents to offspring.
                         Multiple customized chromosomes have to be
                         arranged consecutively.
            lociPos:     Positions of all loci on all chromosome, as a list
                         of float numbers. Default to 1, 2, ... etc on each
                         chromosome. lociPos should be arranged chromosome
                         by chromosome. If lociPos are not in order within
                         a chromosome, they will be re-arranged along with
                         corresponding lociNames (if specified).
            ancGen:      Number of the most recent ancestral generations to
                         keep during evolution. Default to 0, which means
                         only the current generation will be kept. If it is
                         set to -1, all ancestral generations will be kept
                         in this population (and exhaust your computer RAM
                         quickly).
            chromNames:  A list of chromosome names. Default to '' for all
                         chromosomes.
            alleleNames: A list or a nested list of allele names. If a list
                         of alleles is given, it will be used for all loci
                         in this population. For example,
                         alleleNames=('A','C','T','G') gives names A, C, T,
                         and G to alleles 0, 1, 2, and 3 respectively. If a
                         nested list of names is given, it should specify
                         alleles names for all loci.
            lociNames:   A list of names for each locus. It can be empty or
                         a list of unique names for each locus. If loci are
                         not specified in order, loci names will be
                         rearranged according to their position on the
                         chromosome.
            subPopNames: A list of subpopulation names. All subpopulations
                         will have name '' if this parameter is not
                         specified.
            infoFields:  Names of information fields (named float number)
                         that will be attached to each individual.
        """
        _simuPOP_op.Population_swiginit(self, _simuPOP_op.new_Population(*args, **kwargs))
    def clone(self) -> "simuPOP::Population *":
        """
        Usage:
            x.clone()

        Details:
            Create a cloned copy of a population. Note that Python statement
            pop1 = pop only creates a reference to an existing population pop.
        """
        return _simuPOP_op.Population_clone(self)

    def swap(self, rhs: 'Population') -> "void":
        """
        Usage:
            x.swap(rhs)

        Details:
            Swap the content of two population objects, which can be handy in
            some particular circumstances. For example, you could swap out a
            population in a simulator.
        """
        return _simuPOP_op.Population_swap(self, rhs)
    __swig_destroy__ = _simuPOP_op.delete_Population

    def virtualSplitter(self) -> "simuPOP::BaseVspSplitter *":
        """
        Usage:
            x.virtualSplitter()

        Details:
            Return the virtual splitter associated with the population, None
            will be returned if there is no splitter.
        """
        return _simuPOP_op.Population_virtualSplitter(self)

    def setVirtualSplitter(self, splitter: 'BaseVspSplitter') -> "void":
        """
        Usage:
            x.setVirtualSplitter(splitter)

        Details:
            Set a VSP splitter to the population, which defines the same VSPs
            for all subpopulations. If different VSPs are needed for different
            subpopulations, a CombinedSplitter can be used to make these VSPs
            available to all subpopulations.
        """
        return _simuPOP_op.Population_setVirtualSplitter(self, splitter)

    def numVirtualSubPop(self) -> "size_t":
        """
        Usage:
            x.numVirtualSubPop()

        Details:
            Return the number of virtual subpopulations (VSP) defined by a VSP
            splitter. Return 0 if no VSP is defined.
        """
        return _simuPOP_op.Population_numVirtualSubPop(self)

    def activateVirtualSubPop(self, subPop: 'vspID') -> "void":
        """Obsolete or undocumented function."""
        return _simuPOP_op.Population_activateVirtualSubPop(self, subPop)

    def deactivateVirtualSubPop(self, subPop: 'size_t') -> "void":
        """Obsolete or undocumented function."""
        return _simuPOP_op.Population_deactivateVirtualSubPop(self, subPop)

    def fitGenoStru(self, stru: 'size_t') -> "void":
        """Obsolete or undocumented function."""
        return _simuPOP_op.Population_fitGenoStru(self, stru)

    def setSubPopStru(self, newSubPopSizes: 'vectoru const &', newSubPopNames: 'vectorstr const &') -> "void":
        """Obsolete or undocumented function."""
        return _simuPOP_op.Population_setSubPopStru(self, newSubPopSizes, newSubPopNames)

    def numSubPop(self) -> "size_t":
        """
        Usage:
            x.numSubPop()

        Details:
            Return the number of subpopulations in a population. Return 1 if
            there is no subpopulation structure.
        """
        return _simuPOP_op.Population_numSubPop(self)
    def subPopSize(self, *args, **kwargs) -> "size_t":
        """
        Usage:
            x.subPopSize(subPop=[], ancGen=-1, sex=ANY_SEX)

        Details:
            Return the size of a subpopulation (subPopSize(sp)) or a virtual
            subpopulation (subPopSize([sp, vsp])) in the current generation
            (default) or a specified ancestral generation ancGen. If no subpop
            is given, it is the same as popSize(ancGen, sex). Population and
            virtual subpopulation names can be used. This function by default
            returns number of all individuals (sex=ANY_SEX), but it will
            return number of males (if sex=MALE_ONLY), number of females (if
            sex=FEMALE_ONLY), and number of male/female pairs (if
            sex=PAIR_ONLY) which is essentially the lesser of the numbers of
            males and females.
            <group>2-subpopsize</group>
        """
        return _simuPOP_op.Population_subPopSize(self, *args, **kwargs)

    def subPopByName(self, name: 'string const &') -> "size_t":
        """
        Usage:
            x.subPopByName(name)

        Details:
            Return the index of the first subpopulation with name name. An
            IndexError will be raised if subpopulations are not named, or if
            no subpopulation with name name is found. Virtual subpopulation
            name is not supported.
        """
        return _simuPOP_op.Population_subPopByName(self, name)

    def subPopName(self, subPop: 'vspID') -> "string":
        """
        Usage:
            x.subPopName(subPop)

        Details:
            Return the "spName - vspName" (virtual named subpopulation), ""
            (unnamed non-virtual subpopulation), "spName" (named
            subpopulation) or "vspName" (unnamed virtual subpopulation),
            depending on whether subpopulation is named or if subPop is
            virtual.
        """
        return _simuPOP_op.Population_subPopName(self, subPop)

    def subPopNames(self) -> "vectorstr":
        """
        Usage:
            x.subPopNames()

        Details:
            Return the names of all subpopulations (excluding virtual
            subpopulations). An empty string will be returned for unnamed
            subpopulations.
        """
        return _simuPOP_op.Population_subPopNames(self)

    def setSubPopName(self, name: 'string const &', subPop: 'size_t') -> "void":
        """
        Usage:
            x.setSubPopName(name, subPop)

        Details:
            Assign a name name to subpopulation subPop. Note that
            subpopulation names do not have to be unique.
        """
        return _simuPOP_op.Population_setSubPopName(self, name, subPop)

    def subPopSizes(self, ancGen: 'int'=-1) -> "vectoru":
        """
        Usage:
            x.subPopSizes(ancGen=-1)

        Details:
            Return the sizes of all subpopulations at the current generation
            (default) or specified ancestral generation ancGen. Virtual
            subpopulations are not considered.
        """
        return _simuPOP_op.Population_subPopSizes(self, ancGen)

    def popSize(self, ancGen: 'int'=-1, sex: 'SexChoice'=ANY_SEX) -> "size_t":
        """
        Usage:
            x.popSize(ancGen=-1, sex=ANY_SEX)

        Details:
            Return the total number of individuals in all subpopulations of
            the current generation (default) or the an ancestral generation
            ancGen. This function by default returns number of all individuals
            (sex=ANY_SEX), but it will return number of males (if
            sex=MALE_ONLY), number of females (if sex=FEMALE_ONLY), and number
            of male/female pairs (if sex=PAIR_ONLY) which is essentially the
            lesser of the numbers of males and females.
        """
        return _simuPOP_op.Population_popSize(self, ancGen, sex)
    def absIndIndex(self, idx: 'size_t', subPop: 'size_t') -> "size_t":
        """
        Usage:
            x.absIndIndex(idx, subPop)

        Details:
            return the absolute index of an individual idx in subpopulation
            subPop.
        """
        return _simuPOP_op.Population_absIndIndex(self, idx, subPop)

    def subPopIndPair(self, idx: 'size_t') -> "pairu":
        """
        Usage:
            x.subPopIndPair(idx)

        Details:
            Return the subpopulation ID and relative index of an individual,
            given its absolute index idx.
        """
        return _simuPOP_op.Population_subPopIndPair(self, idx)

    def subPopBegin(self, subPop: 'size_t') -> "size_t":
        """
        Usage:
            x.subPopBegin(subPop)

        Details:
            Return the index of the first individual in subpopulation subPop.
        """
        return _simuPOP_op.Population_subPopBegin(self, subPop)

    def subPopEnd(self, subPop: 'size_t') -> "size_t":
        """
        Usage:
            x.subPopEnd(subPop)

        Details:
            Return the index of the last individual in subpopulation subPop
            plus 1, so that range(subPopBegin(subPop), subPopEnd(subPop) can
            iterate through the index of all individuals in subpopulation
            subPop.
        """
        return _simuPOP_op.Population_subPopEnd(self, subPop)

    def individual(self, *args) -> "simuPOP::Individual &":
        """
        Usage:
            x.individual(idx, subPop=[])

        Details:
            Return a reference to individual idx in the population (if
            subPop=[], default) or a subpopulation (if subPop=sp). Virtual
            subpopulation is not supported. Note that a float idx is
            acceptable as long as it rounds closely to an integer.
        """
        return _simuPOP_op.Population_individual(self, *args)

    def indByID(self, *args, **kwargs) -> "simuPOP::Individual &":
        """
        Usage:
            x.indByID(id, ancGens=ALL_AVAIL, idField="ind_id")

        Details:
            Return a reference to individual with id stored in information
            field idField (default to ind_id). This function by default search
            the present and all ancestral generations (ancGens=ALL_AVAIL), but
            you can limit the search in specific generations if you know which
            generations to search (ancGens=[0,1] for present and parental
            generations) or UNSPECIFIED to search only the current generation.
            If no individual with id is found, an IndexError will be raised. A
            float id is acceptable as long as it rounds closely to an integer.
            Note that this function uses a dynamic searching algorithm which
            tends to be slow. If you need to look for multiple individuals
            from a static population, you might want to convert a population
            object to a pedigree object and use function Pedigree.indByID.
        """
        return _simuPOP_op.Population_indByID(self, *args, **kwargs)
    def ancestor(self, *args, **kwargs) -> "simuPOP::Individual &":
        """
        Usage:
            x.ancestor(idx, gen, subPop=[])

        Details:
            Return a reference to individual idx in ancestral generation gen.
            The correct individual will be returned even if the current
            generation is not the present one (see also useAncestralGen). If a
            valid subPop is specified, index is relative to that subPop.
            Virtual subpopulation is not supported. Note that a float idx is
            acceptable as long as it rounds closely to an integer.
        """
        return _simuPOP_op.Population_ancestor(self, *args, **kwargs)

    def individuals(self, *args, **kwargs) -> "simuPOP::pyIndIterator":
        """
        Usage:
            x.individuals(subPop=[])

        Details:
            Return an iterator that can be used to iterate through all
            individuals in a population (if subPop=[], default), or a
            (virtual) subpopulation (subPop=spID or (spID, vspID)). If you
            would like to iterate through multiple subpopulations in multiple
            ancestral generations, please use function
            Population.allIndividuals().
        """
        return _simuPOP_op.Population_individuals(self, *args, **kwargs)

    def genotype(self, *args, **kwargs) -> "PyObject *":
        """
        Usage:
            x.genotype(subPop=[])

        Details:
            Return an editable array of the genotype of all individuals in a
            population (if subPop=[], default), or individuals in a
            subpopulation subPop. Virtual subpopulation is unsupported.
        """
        return _simuPOP_op.Population_genotype(self, *args, **kwargs)

    def mutants(self, *args, **kwargs) -> "simuPOP::pyMutantIterator":
        """
        Usage:
            x.mutants(subPop=[])

        Details:
            Return an iterator that iterate through mutants of all individuals
            in a population (if subPop=[], default), or individuals in a
            subpopulation subPop. Virtual subpopulation is unsupported. Each
            mutant is presented as a tuple of (index, value) where index is
            the index of mutant (from 0 to totNumLoci()*ploidy()) so you will
            have to adjust its value to check multiple alleles at a locus.
            This function ignores type of chromosomes so non-zero alleles in
            unused alleles of sex and mitochondrial chromosomes are also
            iterated.
        """
        return _simuPOP_op.Population_mutants(self, *args, **kwargs)

    def lineage(self, *args, **kwargs) -> "PyObject *":
        """
        Usage:
            x.lineage(subPop=[])

        Details:
            Return an editable array of the lineage of alleles for all
            individuals in a population (if subPop=[], default), or
            individuals in a subpopulation subPop. Virtual subpopulation is
            unsupported. This function returns None for modules without
            lineage information.
        """
        return _simuPOP_op.Population_lineage(self, *args, **kwargs)

    def setGenotype(self, *args, **kwargs) -> "void":
        """
        Usage:
            x.setGenotype(geno, subPop=[])

        Details:
            Fill the genotype of all individuals in a population (if
            subPop=[]) or in a (virtual) subpopulation subPop (if subPop=sp or
            (sp, vsp)) using a list of alleles geno. geno will be reused if
            its length is less than subPopSize(subPop)*totNumLoci()*ploidy().
        """
        return _simuPOP_op.Population_setGenotype(self, *args, **kwargs)

    def setLineage(self, *args, **kwargs) -> "void":
        """
        Usage:
            x.setLineage(geno, subPop=[])

        Details:
            Fill the lineage of all individuals in a population (if subPop=[])
            or in a (virtual) subpopulation subPop (if subPop=sp or (sp, vsp))
            using a list of IDs geno. geno will be reused if its length
            is less than subPopSize(subPop)*totNumLoci()*ploidy(). This
            function returns directly for modules without lineage information.
        """
        return _simuPOP_op.Population_setLineage(self, *args, **kwargs)
    def sortIndividuals(self, infoFields: 'stringList', reverse: 'bool'=False) -> "void":
        """
        Usage:
            x.sortIndividuals(infoFields, reverse=False)

        Details:
            Sort individuals according to values at specified information
            fields (infoFields). Individuals will be sorted at an increasing
            order unless reverse is set to true.
        """
        return _simuPOP_op.Population_sortIndividuals(self, infoFields, reverse)

    def setSubPopByIndInfo(self, field: 'string const &') -> "void":
        """
        Usage:
            x.setSubPopByIndInfo(field)

        Details:
            Rearrange individuals to their new subpopulations according to
            their integer values at information field field (value returned by
            Individual::info(field)). individuals with negative values at this
            field will be removed. Existing subpopulation names are kept. New
            subpopulations will have empty names.
        """
        return _simuPOP_op.Population_setSubPopByIndInfo(self, field)

    def splitSubPop(self, *args, **kwargs) -> "vectoru":
        """
        Usage:
            x.splitSubPop(subPop, sizes, names=[])

        Details:
            Split subpopulation subPop into subpopulations of given sizes,
            which should add up to the size of subpopulation subPop or 1, in
            which case sizes are treated as proportions. If subPop is not the
            last subpopulation, indexes of subpopulations after subPop are
            shifted. If subPop is named, the same name will be given to all
            new subpopulations unless a new set of names are specified for
            these subpopulations. This function returns the IDs of split
            subpopulations.
        """
        return _simuPOP_op.Population_splitSubPop(self, *args, **kwargs)

    def removeSubPops(self, subPops: 'subPopList') -> "void":
        """
        Usage:
            x.removeSubPops(subPops)

        Details:
            Remove (virtual) subpopulation(s) subPops and all their
            individuals. This function can be used to remove complete
            subpopulations (with shifted subpopulation indexes) or individuals
            belonging to virtual subpopulations of a subpopulation. In the
            latter case, the subpopulations are kept even if all individuals
            have been removed. This function only handles the present
            generation.
        """
        return _simuPOP_op.Population_removeSubPops(self, subPops)
def removeIndividuals(self, *args, **kwargs) -> "void":
"""
Usage:
x.removeIndividuals(indexes=[], IDs=[], idField="ind_id",
filter=None)
Details:
remove individual(s) by absolute indexes (parameter index) or
their IDs (parameter IDs), or using a filter function (paramter
filter). If indexes are used, only individuals at the current
generation will be removed. If IDs are used, all individuals with
one of the IDs at information field idField (default to "ind_id")
will be removed. Although "ind_id" usually stores unique IDs of
individuals, this function is frequently used to remove groups of
individuals with the same value at an information field. An
IndexError will be raised if an index is out of bound, but no
error will be given if an invalid ID is specified. In the last
case, a user-defined function should be provided. This function
should accept parameter "ind" or one or more of the information
fields. All individuals, including ancestors if there are multiple
ancestral generations, will be passed to this function.
Individuals that returns True will be removed. This function does
not affect subpopulation structure in the sense that a
subpopulation will be kept even if all individuals from it are
removed.
"""
return _simuPOP_op.Population_removeIndividuals(self, *args, **kwargs)
def mergeSubPops(self, *args, **kwargs) -> "size_t":
"""
Usage:
x.mergeSubPops(subPops=ALL_AVAIL, name="", toSubPop=-1)
Details:
Merge subpopulations subPops. If subPops is ALL_AVAIL (default),
all subpopulations will be merged. subPops do not have to be
adjacent to each other. They will all be merged to the
subpopulation with the smallest subpopulation ID, unless a
subpopulation ID is specified using parameter toSubPop. Indexes of
the rest of the subpopulation may be changed. A new name can be
assigned to the merged subpopulation through parameter name (an
empty name will be ignored). This function returns the ID of the
merged subpopulation.
"""
return _simuPOP_op.Population_mergeSubPops(self, *args, **kwargs)
def addIndFrom(self, pop: 'Population') -> "void":
"""
Usage:
x.addIndFrom(pop)
Details:
Add all individuals, including ancestors, in pop to the current
population. Two populations should have the same genotypic
structures and number of ancestral generations. Subpopulations in
population pop are kept.
"""
return _simuPOP_op.Population_addIndFrom(self, pop)
def addChromFrom(self, pop: 'Population') -> "void":
"""
Usage:
x.addChromFrom(pop)
Details:
Add chromosomes in population pop to the current population.
population pop should have the same number of individuals as the
current population in the current and all ancestral generations.
Chromosomes of pop, if named, should not conflict with names of
existing chromosome. This function merges genotypes on the new
chromosomes from population pop individual by individual.
"""
return _simuPOP_op.Population_addChromFrom(self, pop)
def addLociFrom(self, pop: 'Population', byName: 'bool'=False) -> "void":
"""
Usage:
x.addLociFrom(pop, byName=False)
Details:
Add loci from population pop. By default, chromosomes are merged
by index and names of merged chromosomes of population pop will be
ignored (merge of two chromosomes with different names will yield
a warning). If byName is set to True, chromosomes in pop will be
merged to chromosomes with identical names. Added loci will be
inserted according to their position. Their position and names
should not overlap with any locus in the current population.
population pop should have the same number of individuals as the
current population in the current and all ancestral generations.
Allele lineages are also copied from pop in modules with lineage
information.
"""
return _simuPOP_op.Population_addLociFrom(self, pop, byName)
def addChrom(self, *args, **kwargs) -> "void":
"""
Usage:
x.addChrom(lociPos, lociNames=[], chromName="", alleleNames=[],
chromType=AUTOSOME)
Details:
Add chromosome chromName with given type chromType to a
population, with loci lociNames inserted at position lociPos.
lociPos should be ordered. lociNames and chromName should not
exist in the current population. Allele names could be specified
for all loci (a list of names) or differently for each locus (a
nested list of names), using parameter alleleNames. Empty loci
names will be used if lociNames is not specified. The newly added
alleles will have zero lineage in modules wiht lineage
information.
"""
return _simuPOP_op.Population_addChrom(self, *args, **kwargs)
def addLoci(self, *args, **kwargs) -> "vectoru":
"""
Usage:
x.addLoci(chrom, pos, lociNames=[], alleleNames=[])
Details:
Insert loci lociNames at positions pos on chromosome chrom. These
parameters should be lists of the same length, although names may
be ignored, in which case empty strings will be assumed. Single-
value input is allowed for parameter chrom and pos if only one
locus is added. Alleles at inserted loci are initialized with zero
alleles. Note that loci have to be added to existing chromosomes.
If loci on a new chromosome need to be added, function addChrom
should be used. Optionally, allele names could be specified either
for all loci (a single list) or each loci (a nested list). This
function returns indexes of the inserted loci. Newly inserted
alleles will have zero lineage in modules with lineage
information.
"""
return _simuPOP_op.Population_addLoci(self, *args, **kwargs)
def resize(self, sizes: 'uintList', propagate: 'bool'=False) -> "void":
"""
Usage:
x.resize(sizes, propagate=False)
Details:
Resize population by giving new subpopulation sizes sizes.
individuals at the end of some subpopulations will be removed if
the new subpopulation size is smaller than the old one. New
individuals will be appended to a subpopulation if the new size is
larger. Their genotypes will be set to zero (default), or be
copied from existing individuals if propagate is set to True. More
specifically, if a subpopulation with 3 individuals is expanded to
7, the added individuals will copy genotypes from individual 1, 2,
3, and 1 respectively. Note that this function only resizes the
current generation.
"""
return _simuPOP_op.Population_resize(self, sizes, propagate)
def extractSubPops(self, *args, **kwargs) -> "simuPOP::Population &":
"""
Usage:
x.extractSubPops(subPops=ALL_AVAIL, rearrange=False)
Details:
Extract a list of (virtual) subpopulations from a population and
create a new population. If rearrange is False (default),
structure and names of extracted subpopulations are kept although
extracted subpopulations can have fewer individuals if they are
created from extracted virtual subpopulations. (e.g. it is
possible to extract all male individuals from a subpopulation
using a SexSplitter()). If rearrange is True, each (virtual)
subpopulation in subPops becomes a new subpopulation in the
extracted population in the order at which they are specified.
Because each virtual subpopulation becomes a subpopulation, this
function could be used, for example, to separate male and female
individuals to two subpopulations ( subPops=[(0,0), (0,1)]). If
overlapping (virtual) subpopulations are specified, individuals
will be copied multiple times. This function only extract
individuals from the present generation.
"""
return _simuPOP_op.Population_extractSubPops(self, *args, **kwargs)
def extractIndividuals(self, *args, **kwargs) -> "simuPOP::Population &":
"""
Usage:
x.extractIndividuals(indexes=[], IDs=[], idField="ind_id",
filter=None)
Details:
Extract individuals with given absolute indexes (parameter
indexes), IDs (parameter IDs, stored in information field idField,
default to ind_id), or a filter function (parameter filter). If a
list of absolute indexes are specified, the present generation
will be extracted and form a one-generational population. If a
list of IDs are specified, this function will look through all
ancestral generations and extract individuals with given ID.
Individuals with shared IDs are allowed. In the last case, a user-
defined Python function should be provided. This function should
accept parameter "ind" or one or more of the information fields.
All individuals, including ancestors if there are multiple
ancestral generations, will be passed to this function.
Individuals that returns True will be extracted. Extracted
individuals will be in their original ancestral generations and
subpopulations, even if some subpopulations or generations are
empty. An IndexError will be raised if an index is out of bound
but no error will be given if an invalid ID is encountered.
"""
return _simuPOP_op.Population_extractIndividuals(self, *args, **kwargs)
def removeLoci(self, *args, **kwargs) -> "void":
"""
Usage:
x.removeLoci(loci=UNSPECIFIED, keep=UNSPECIFIED)
Details:
Remove loci (absolute indexes or names) and genotypes at these
loci from the current population. Alternatively, a parameter keep
can be used to specify loci that will not be removed.
"""
return _simuPOP_op.Population_removeLoci(self, *args, **kwargs)
def recodeAlleles(self, *args, **kwargs) -> "void":
"""
Usage:
x.recodeAlleles(alleles, loci=ALL_AVAIL, alleleNames=[])
Details:
Recode alleles at loci (can be a list of loci indexes or names, or
all loci in a population (ALL_AVAIL)) to other values according to
parameter alleles. This parameter can a list of new allele numbers
for alleles 0, 1, 2, ... (allele x will be recoded to
newAlleles[x], x outside of the range of newAlleles will not be
recoded, although a warning will be given if DBG_WARNING is
defined) or a Python function, which should accept one or both
parameters allele (existing allele) and locus (index of locus).
The return value will become the new allele. This function is
intended to recode some alleles without listing all alleles in a
list. It will be called once for each existing allele so it is not
possible to recode an allele to different alleles. A new list of
allele names could be specified for these loci. Different sets of
names could be specified for each locus if a nested list of names
are given. This function recode alleles for all subpopulations in
all ancestral generations.
"""
return _simuPOP_op.Population_recodeAlleles(self, *args, **kwargs)
def push(self, pop: 'Population') -> "void":
"""
Usage:
x.push(pop)
Details:
Push population pop into the current population. Both populations
should have the same genotypic structure. The current population
is discarded if ancestralDepth (maximum number of ancestral
generations to hold) is zero so no ancestral generation can be
kept. Otherise, the current population will become the parental
generation of pop. If ancGen of a population is positive and there
are already ancGen ancestral generations (c.f. ancestralGens()),
the greatest ancestral generation will be discarded. In any case,
Populationpop becomes invalid as all its individuals are absorbed
by the current population.
"""
return _simuPOP_op.Population_push(self, pop)
def curAncestralGen(self) -> "size_t":
"""Obsolete or undocumented function."""
return _simuPOP_op.Population_curAncestralGen(self)
def ancestralGens(self) -> "int":
"""
Usage:
x.ancestralGens()
Details:
Return the actual number of ancestral generations stored in a
population, which does not necessarily equal to the number set by
setAncestralDepth().
"""
return _simuPOP_op.Population_ancestralGens(self)
def setIndInfo(self, *args, **kwargs) -> "void":
"""
Usage:
x.setIndInfo(values, field, subPop=[])
Details:
Set information field field (specified by index or name) of all
individuals (if subPop=[], default), or individuals in a (virtual)
subpopulation (subPop=sp or (sp, vsp)) to values. values will be
reused if its length is smaller than the size of the population or
(virtual) subpopulation.
"""
return _simuPOP_op.Population_setIndInfo(self, *args, **kwargs)
def indInfo(self, *args, **kwargs) -> "vectorf":
"""
Usage:
x.indInfo(field, subPop=[])
Details:
Return the values (as a list) of information field field (by index
or name) of all individuals (if subPop=[], default), or
individuals in a (virtual) subpopulation (if subPop=sp or (sp,
vsp)).
"""
return _simuPOP_op.Population_indInfo(self, *args, **kwargs)
def addInfoFields(self, fields: 'stringList', init: 'double'=0) -> "void":
"""
Usage:
x.addInfoFields(fields, init=0)
Details:
Add a list of information fields fields to a population and
initialize their values to init. If an information field alreay
exists, it will be re-initialized.
"""
return _simuPOP_op.Population_addInfoFields(self, fields, init)
def setInfoFields(self, fields: 'stringList', init: 'double'=0) -> "void":
"""
Usage:
x.setInfoFields(fields, init=0)
Details:
Set information fields fields to a population and initialize them
with value init. All existing information fields will be removed.
"""
return _simuPOP_op.Population_setInfoFields(self, fields, init)
def removeInfoFields(self, fields: 'stringList') -> "void":
"""
Usage:
x.removeInfoFields(fields)
Details:
Remove information fields fields from a population.
"""
return _simuPOP_op.Population_removeInfoFields(self, fields)
def updateInfoFieldsFrom(self, *args, **kwargs) -> "void":
"""
Usage:
x.updateInfoFieldsFrom(fields, pop, fromFields=[],
ancGens=ALL_AVAIL)
Details:
Update information fields fields from fromFields of another
population (or Pedigree) pop. Two populations should have the same
number of individuals. If fromFields is not specified, it is
assumed to be the same as fields. If ancGens is not ALL_AVAIL,
only the specified ancestral generations are updated.
"""
return _simuPOP_op.Population_updateInfoFieldsFrom(self, *args, **kwargs)
def setAncestralDepth(self, depth: 'int') -> "void":
"""
Usage:
x.setAncestralDepth(depth)
Details:
set the intended ancestral depth of a population to depth, which
can be 0 (does not store any ancestral generation), -1 (store all
ancestral generations), and a positive number (store depth
ancestral generations. If there exists more than depth ancestral
generations (if depth > 0), extra ancestral generations are
removed.
"""
return _simuPOP_op.Population_setAncestralDepth(self, depth)
def useAncestralGen(self, idx: 'ssize_t') -> "void":
"""
Usage:
x.useAncestralGen(idx)
Details:
Making ancestral generation idx (0 for current generation, 1 for
parental generation, 2 for grand-parental generation, etc) the
current generation. This is an efficient way to access Population
properties of an ancestral generation. useAncestralGen(0) should
always be called afterward to restore the correct order of
ancestral generations.
"""
return _simuPOP_op.Population_useAncestralGen(self, idx)
def save(self, filename: 'string const &') -> "void":
"""
Usage:
x.save(filename)
Details:
Save population to a file filename, which can be loaded by a
global function loadPopulation(filename).
"""
return _simuPOP_op.Population_save(self, filename)
def vars(self, *args, **kwargs) -> "PyObject *":
"""
Usage:
x.vars(subPop=[])
Details:
return variables of a population as a Python dictionary. If a
valid subpopulation subPop is specified, a dictionary
vars()["subPop"][subPop] is returned. A ValueError will be raised
if key subPop does not exist in vars(), or if key subPop does not
exist in vars()["subPop"].
"""
return _simuPOP_op.Population_vars(self, *args, **kwargs)
def evaluate(self, *args, **kwargs) -> "PyObject *":
"""Obsolete or undocumented function."""
return _simuPOP_op.Population_evaluate(self, *args, **kwargs)
def execute(self, *args, **kwargs) -> "void":
"""Obsolete or undocumented function."""
return _simuPOP_op.Population_execute(self, *args, **kwargs)
# Attach the flat C-level wrappers to the Population proxy class as bound
# instance methods.  Every wrapper in the extension module follows the
# naming scheme "Population_<method>" ("__cmp__" concatenates to
# "Population___cmp__"), so the attachment can be table driven.
_population_methods = (
    "clone", "swap", "virtualSplitter", "setVirtualSplitter",
    "numVirtualSubPop", "activateVirtualSubPop", "deactivateVirtualSubPop",
    "__cmp__", "fitGenoStru", "setSubPopStru", "numSubPop", "subPopSize",
    "subPopByName", "subPopName", "subPopNames", "setSubPopName",
    "subPopSizes", "popSize", "absIndIndex", "subPopIndPair",
    "subPopBegin", "subPopEnd", "individual", "indByID", "ancestor",
    "individuals", "indIterator", "rawIndBegin", "rawIndEnd",
    "alleleIterator", "indGenoBegin", "indGenoEnd", "genotype", "mutants",
    "lineage", "setGenotype", "setLineage", "sortIndividuals",
    "setSubPopByIndInfo", "splitSubPop", "removeSubPops",
    "removeIndividuals", "mergeSubPops", "addIndFrom", "addChromFrom",
    "addLociFrom", "addChrom", "addLoci", "resize", "extractSubPops",
    "extractIndividuals", "removeLoci", "recodeAlleles", "push",
    "curAncestralGen", "ancestralGens", "setIndInfo", "infoBegin",
    "infoEnd", "indInfo", "addInfoFields", "setInfoFields",
    "removeInfoFields", "updateInfoFieldsFrom", "setAncestralDepth",
    "useAncestralGen", "save", "vars", "evaluate", "execute",
)
for _meth in _population_methods:
    setattr(Population, _meth,
            new_instancemethod(getattr(_simuPOP_op, "Population_" + _meth),
                               None, Population))
del _meth, _population_methods
# SWIG runtime registration: from here on, C++ results of type
# simuPOP::Population are wrapped as instances of the Population proxy class.
Population_swigregister = _simuPOP_op.Population_swigregister
Population_swigregister(Population)
# Constant re-exported from the underlying C++ extension module.
HAPLODIPLOID = _simuPOP_op.HAPLODIPLOID
def loadPopulation(file: 'string const &') -> "simuPOP::Population &":
    """Usage: loadPopulation(file)

    Load and return a population previously written by
    Population.save().
    """
    impl = _simuPOP_op.loadPopulation
    return impl(file)
class BaseOperator(object):
    """Base class of all simuPOP operators.

    Operators act on populations.  They can be applied directly through
    their function forms, but are usually passed to a simulator's evolve
    function and applied repeatedly during evolution.  A common set of
    parameters (begin, end, step, at, reps, subPops, infoFields)
    controls at which generations, to which replicates and to which
    (virtual) subpopulations an operator applies; a specific operator
    may ignore some of them (e.g. a Recombinator only works during
    mating and ignores the stage parameter).  Evolving 10 generations
    from generation 0 covers generations 0 through 9 and stops at the
    beginning of generation 10; a negative generation number a means
    10 + a, with -1 the last evolved generation.  The starting
    generation can be changed with a simulator's setGen().

    Output from an operator goes to standard output (sys.stdout) by
    default and is controlled by an output specification: '' for no
    output, '>' for the terminal (default), 'filename' or '>filename'
    to write the file anew on each write (only the last write survives
    if shared or repeated), '>>filename' to open at the start of
    evolution and append until the end, '>>>filename' to append without
    clearing a pre-existing file, or '!expr' for a Python expression
    evaluated in the population's local namespace to obtain a
    population-specific specification.  A Python function or a
    file-like object with a write method may also be given to receive
    the output strings.  For streams that only accept binary output
    (e.g. a gzip stream), WithMode(output, 'b') makes simuPOP convert
    strings to bytes before writing.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, output: 'stringFunc', begin: 'int', end: 'int', step: 'int', at: 'intList', reps: 'intList', subPops: 'subPopList', infoFields: 'stringList'):
        """Usage: BaseOperator(output, begin, end, step, at, reps, subPops, infoFields)

        Parameters common to all operators (an operator may ignore some,
        and exact meanings can vary):
          output:     output specification: '' (none), '>' (standard
                      output), a filename with optional '>' prefixes, a
                      '!expr' Python expression, a Python function, or a
                      file-like object with a write method; WithMode can
                      request bytes output (see the class docstring).
          begin:      first applicable generation (default 0; a negative
                      number counts from the end of evolution, -1 being
                      the last evolved generation).
          end:        last applicable generation (default -1, the last
                      generation).
          step:       number of generations between applicable
                      generations (default 1).
          at:         explicit list of applicable generations (a single
                      number is accepted), overriding begin/end/step.
          reps:       applicable replicates; ALL_AVAIL means all
                      replicates in a simulator, negative indexes such
                      as -1 (last replicate) are accepted, and rep=idx
                      is a shortcut for rep=[idx].
          subPops:    applicable (virtual) subpopulations, e.g.
                      subPops=[sp1, sp2, (sp2, vsp1)]; subPops=sp1 is a
                      shortcut and ALL_AVAIL means all subpopulations.
                      Negative indexes are not supported and support
                      varies by operator — some do not handle virtual
                      subpopulations at all.
          infoFields: information fields used by the operator; usually
                      left at the operator's default.
        """
        _simuPOP_op.BaseOperator_swiginit(self, _simuPOP_op.new_BaseOperator(output, begin, end, step, at, reps, subPops, infoFields))
    __swig_destroy__ = _simuPOP_op.delete_BaseOperator
    def clone(self) -> "simuPOP::BaseOperator *":
        """Usage: x.clone()

        Return a cloned copy of this operator (available to all
        operators).
        """
        return _simuPOP_op.BaseOperator_clone(self)
    def apply(self, pop: 'Population') -> "bool":
        """Usage: x.apply(pop)

        Apply the operator to population pop directly, without checking
        its applicability.
        """
        return _simuPOP_op.BaseOperator_apply(self, pop)
    def describe(self, format: 'bool'=True) -> "string":
        """Obsolete or undocumented function."""
        return _simuPOP_op.BaseOperator_describe(self, format)
# Attach the C-level BaseOperator_* wrappers as bound methods, then register
# the proxy class with the SWIG runtime.
for _meth in ("clone", "apply", "describe"):
    setattr(BaseOperator, _meth,
            new_instancemethod(getattr(_simuPOP_op, "BaseOperator_" + _meth),
                               None, BaseOperator))
del _meth
BaseOperator_swigregister = _simuPOP_op.BaseOperator_swigregister
BaseOperator_swigregister(BaseOperator)
class opList(object):
    """SWIG proxy for the C++ opList type.

    Presumably a container of operators (name suggests it; not
    documented by the generator — confirm against the C++ headers).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """Usage: opList(obj=None)"""
        _simuPOP_op.opList_swiginit(self, _simuPOP_op.new_opList(*args))
    __swig_destroy__ = _simuPOP_op.delete_opList
# SWIG runtime registration for the opList proxy class.
opList_swigregister = _simuPOP_op.opList_swigregister
opList_swigregister(opList)
class Pause(BaseOperator):
    """Operator that pauses the evolution of a simulator at given
    generations or at a key stroke, so that a stopped evolutionary
    process can be examined, resumed or stopped from a Python shell.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """Usage: Pause(stopOnKeyStroke=False, prompt=True, output=">", begin=0,
        end=-1, step=1, at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL,
        infoFields=[])

        With stopOnKeyStroke=False (default) the operator always pauses
        the population it is applied to; True pauses once any key has
        been pressed, and a specific character pauses only on that key,
        allowing several Pause operators to target different
        populations.  After a pause a message explains how to proceed
        (suppressed with prompt=False): press 's' to stop the evolution
        of this population, 'S' to stop all populations, or 'p' for an
        interactive Python shell in which the current population is
        available as "pop_X_Y" (generation X, replicate Y); evolution
        continues when the shell exits.

        Note: Ctrl-C is intercepted even when a specific character is
        given for stopOnKeyStroke.
        """
        _simuPOP_op.Pause_swiginit(self, _simuPOP_op.new_Pause(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_Pause
Pause_swigregister = _simuPOP_op.Pause_swigregister
Pause_swigregister(Pause)
# SWIG proxy for the C++ NoneOp operator (a no-op placeholder).
class NoneOp(BaseOperator):
    """
    Details:
        This operator does nothing when it is applied to a population. It
        is usually used as a placeholder when an operator is needed
        syntactically.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
          NoneOp(output=">", begin=0, end=0, step=1, at=[],
            reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=[])
        Details:
        Create a NoneOp.
        """
        _simuPOP_op.NoneOp_swiginit(self, _simuPOP_op.new_NoneOp(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_NoneOp
NoneOp_swigregister = _simuPOP_op.NoneOp_swigregister
NoneOp_swigregister(NoneOp)
# SWIG proxy for the C++ IfElse conditional operator.
class IfElse(BaseOperator):
    """
    Details:
        This operator uses a condition, which can be a fixed condition, an
        expression or a user-defined function, to determine which
        operators to be applied when this operator is applied. A list of
        if-operators will be applied when the condition is True. Otherwise
        a list of else-operators will be applied.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
          IfElse(cond, ifOps=[], elseOps=[], output=">", begin=0, end=-1,
            step=1, at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=[])
        Details:
        Create a conditional operator that will apply operators ifOps if
        condition cond is met and elseOps otherwise. If a Python
        expression (a string) is given to parameter cond, the expression
        will be evaluated in each population's local namespace when this
        operator is applied. When a Python function is specified, it
        accepts parameter pop when it is applied to a population, and one
        or more parameters pop, off, dad or mom when it is applied during
        mating. The return value of this function should be True or False.
        Otherwise, parameter cond will be treated as a fixed condition
        (converted to True or False) upon which one set of operators is
        always applied. The applicability of ifOps and elseOps are
        controlled by parameters begin, end, step, at and rep of both the
        IfElse operator and individual operators but ifOps and elseOps
        operators does not support negative indexes for replicate and
        generation numbers.
        """
        _simuPOP_op.IfElse_swiginit(self, _simuPOP_op.new_IfElse(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_IfElse
IfElse_swigregister = _simuPOP_op.IfElse_swigregister
IfElse_swigregister(IfElse)
# SWIG proxy for the C++ TerminateIf operator.
class TerminateIf(BaseOperator):
    """
    Details:
        This operator evaluates an expression in a population's local
        namespace and terminate the evolution of this population, or the
        whole simulator, if the return value of this expression is True.
        Termination caused by an operator will stop the execution of all
        operators after it. The generation at which the population is
        terminated will be counted in the evolved generations (return
        value from Simulator::evolve) if termination happens after mating.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
          TerminateIf(condition="", stopAll=False, message="", output="",
            begin=0, end=-1, step=1, at=[], reps=ALL_AVAIL,
            subPops=ALL_AVAIL, infoFields=[])
        Details:
        Create a terminator with an expression condition, which will be
        evaluated in a population's local namespace when the operator is
        applied to this population. If the return value of condition is
        True, the evolution of the population will be terminated. If
        stopAll is set to True, the evolution of all replicates of the
        simulator will be terminated. If this operator is allowed to write
        to an output (default to ""), the generation number, preceded
        with an optional message, will be written to the output.
        """
        _simuPOP_op.TerminateIf_swiginit(self, _simuPOP_op.new_TerminateIf(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_TerminateIf
TerminateIf_swigregister = _simuPOP_op.TerminateIf_swigregister
TerminateIf_swigregister(TerminateIf)
# SWIG proxy for the C++ RevertIf operator.
class RevertIf(BaseOperator):
    """
    Details:
        This operator replaces the current evolving population by a
        population loaded from a specified filename if certain condition
        is met. It is mostly used to return to a previously saved state if
        the simulation process fails to met a condition (e.g. a disease
        allele is lost).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
          RevertIf(cond, fromPop="", ops=[], output="", begin=0, end=-1,
            step=1, at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=[])
        Details:
        Replaces the current evolving population by a population loaded
        from fromPop, which should be a file saved by function
        Population.save() or operator SavePopulation. If a Python
        expression (a string) is given to parameter cond, the expression
        will be evaluated in each population's local namespace when this
        operator is applied. When a Python function with optional
        parameter pop is specified, it should accept the current
        population (to parameter pop) and converts and return True or
        False. Otherwise, parameter cond will be treated as a fixed
        condition (converted to True or False) upon which the population
        is reverted. After the population is reverted, an optional list of
        operators ops could be applied to the population.
        """
        _simuPOP_op.RevertIf_swiginit(self, _simuPOP_op.new_RevertIf(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_RevertIf
RevertIf_swigregister = _simuPOP_op.RevertIf_swigregister
RevertIf_swigregister(RevertIf)
# SWIG proxy for the C++ DiscardIf operator.
class DiscardIf(BaseOperator):
    """
    Details:
        This operator discards individuals according to either an
        expression that evaluates according to individual information
        field, or a Python function that accepts individual and its
        information fields.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
          DiscardIf(cond, exposeInd="", output="", begin=0, end=-1,
            step=1, at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=[])
        Details:
        Create an operator that discard individuals according to an
        expression or the return value of a Python function (parameter
        cond). This operator can be applied to a population before or
        after mating, or to offspring during mating. If an expression is
        passed to cond, it will be evaluated with each individual's
        information fields (see operator InfoEval for details). If
        exposeInd is non-empty, individuals will be available for
        evaluation in the expression as a variable with name specified by
        exposeInd. If the expression is evaluated to be True, individuals
        (if applied before or after mating) or offspring (if applied
        during mating) will be removed or discarded. Otherwise the return
        value should be either False (not discard), or a float number
        between 0 and 1 as the probability that the individual is removed.
        If a function is passed to cond, it should accept parameters ind
        and pop or names of information fields when it is applied to a
        population (pre or post mating), or parameters off, dad, mom, pop
        (parental population), or names of information fields if the
        operator is applied during mating. Individuals will be discarded
        if this function returns True or at a probability if a float
        number between 0 and 1 is returned. A constant expression (e.g.
        True, False, 0.4) is also acceptable, with the last example
        (cond=0.1) that removes 10% of individuals at random. This
        operator supports parameter subPops and will remove only
        individuals belonging to specified (virtual) subpopulations.
        """
        _simuPOP_op.DiscardIf_swiginit(self, _simuPOP_op.new_DiscardIf(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_DiscardIf
DiscardIf_swigregister = _simuPOP_op.DiscardIf_swigregister
DiscardIf_swigregister(DiscardIf)
# SWIG proxy for the C++ TicToc timing operator.
class TicToc(BaseOperator):
    """
    Details:
        This operator, when called, output the difference between current
        and the last called clock time. This can be used to estimate
        execution time of each generation. Similar information can also be
        obtained from turnOnDebug("DBG_PROFILE"), but this operator has
        the advantage of measuring the duration between several
        generations by setting step parameter. As an advanced feature that
        mainly used for performance testing, this operator accepts a
        parameter stopAfter (seconds), and will stop the evolution of a
        population if the overall time exceeds stopAfter. Note that
        elapsed time is only checked when this operator is applied to a
        population so it might not be able to stop the evolution process
        right after stopAfter seconds. This operator can also be applied
        during mating. Note that to avoid excessive time checking, this
        operator does not always check system time accurately.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
          TicToc(output=">", stopAfter=0, begin=0, end=-1, step=1, at=[],
            reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=[])
        Details:
        Create a TicToc operator that outputs the elapsed time since the last
        time it was applied, and the overall time since the first time
        this operator is applied.
        """
        _simuPOP_op.TicToc_swiginit(self, _simuPOP_op.new_TicToc(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_TicToc
TicToc_swigregister = _simuPOP_op.TicToc_swigregister
TicToc_swigregister(TicToc)
# SWIG proxy for the C++ PyOperator, which calls back into a user Python function.
class PyOperator(BaseOperator):
    """
    Details:
        An operator that calls a user-defined function when it is applied
        to a population (pre- or post-mating) or offsprings (during-
        mating). The function can have have parameters pop when the
        operator is applied pre- or post-mating, pop, off, dad, mom when
        the operator is applied during-mating. An optional parameter can
        be passed if parameter param is given. In the during-mating case,
        parameters pop, dad and mom can be ignored if offspringOnly is set
        to True.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
          PyOperator(func, param=None, begin=0, end=-1, step=1, at=[],
            reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=[])
        Details:
        Create a pure-Python operator that calls a user-defined function
        when it is applied. If this operator is applied before or after
        mating, your function should have form func(pop) or func(pop,
        param) where pop is the population to which the operator is
        applied, param is the value specified in parameter param. param
        will be ignored if your function only accepts one parameter.
        Alternatively, the function should have form func(ind) with
        optional parameters pop and param. In this case, the function will
        be called for all individuals, or individuals in subpopulations
        subPops. Individuals for which the function returns False will be
        removed from the population. This operator can therefore perform
        similar functions as operator DiscardIf. If this operator is
        applied during mating, your function should accept parameters pop,
        off (or ind), dad, mom and param where pop is the parental
        population, and off or ind, dad, and mom are offspring and their
        parents for each mating event, and param is an optional parameter.
        If subPops are provided, only offspring in specified (virtual)
        subpopulations are acceptable. This operator does not support
        parameters output, and infoFields. If certain output is needed, it
        should be handled in the user defined function func. Because the
        status of files used by other operators through parameter output
        is undetermined during evolution, they should not be open or
        closed in this Python operator.
        """
        _simuPOP_op.PyOperator_swiginit(self, _simuPOP_op.new_PyOperator(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_PyOperator
PyOperator_swigregister = _simuPOP_op.PyOperator_swigregister
PyOperator_swigregister(PyOperator)
def applyDuringMatingOperator(op: 'BaseOperator', pop: 'Population', offPop: 'Population', dad: 'ssize_t', mom: 'ssize_t', off: 'pairu const &') -> "void":
    """Obsolete or undocumented function."""
    # Thin wrapper around the C-extension entry point of the same name.
    return _simuPOP_op.applyDuringMatingOperator(op, pop, offPop, dad, mom, off)
# SWIG proxy for the C++ OffspringGenerator; base class of offspring generators.
class OffspringGenerator(object):
    """
    Details:
        An offspring generator generates offspring from parents chosen by
        a parent chooser. It is responsible for creating a certain number
        of offspring, determining their sex, and transmitting genotypes
        from parents to offspring.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, ops: 'opList', numOffspring: 'floatListFunc'=1, sexMode: 'floatListFunc'=RANDOM_SEX):
        """
        Usage:
          OffspringGenerator(ops, numOffspring=1, sexMode=RANDOM_SEX)
        Details:
        Create a basic offspring generator. This offspring generator uses
        ops genotype transmitters to transmit genotypes from parents to
        offspring.  A number of during-mating operators (parameter ops)
        can be used to, among other possible duties such as setting
        information fields of offspring, transmit genotype from parents to
        offspring. This general offspring generator does not have any
        default during-mating operator but all stock mating schemes use an
        offspring generator with a default operator. For example, a
        mendelianOffspringGenerator is used by RandomMating to transmit
        genotypes. Note that applicability parameters begin, step, end, at
        and reps could be used in these operators but negative population
        and generation indexes are unsupported.  Parameter numOffspring is
        used to control the number of offspring per mating event, or in
        other words the number of offspring in each family. It can be a
        number, a Python function or generator, or a mode parameter
        followed by some optional arguments. If a number is given, given
        number of offspring will be generated at each mating event. If a
        Python function is given, it will be called each time when a
        mating event happens. When a generator function is specified, it
        will be called for each subpopulation to provide number of
        offspring for all mating events during the populating of this
        subpopulation. Current generation number will be passed to this
        function or generator function if parameter "gen" is used in this
        function. In the last case, a tuple (or a list) in one of the
        following forms can be given:
        *  (GEOMETRIC_DISTRIBUTION, p)
        *  (POISSON_DISTRIBUTION, p), p > 0
        *  (BINOMIAL_DISTRIBUTION, p, N), 0 < p <=1, N > 0
        *  (UNIFORM_DISTRIBUTION, a, b), 0 <= a <= b.  In this case, the
        number of offspring will be determined randomly following the
        specified statistical distributions. Because families with zero
        offspring are silently ignored, the distribution of the observed
        number of offspring per mating event (excluding zero) follows
        zero-truncated versions of these distributions.  Parameter
        numOffspring specifies the number of offspring per mating event
        but the actual surviving offspring can be less than specified.
        More specifically, if any during-mating operator returns False, an
        offspring will be discarded so the actually number of offspring of
        a mating event will be reduced. This is essentially how during-
        mating selector works.  Parameter sexMode is used to control the
        sex of each offspring. Its default value is usually RANDOM_SEX
        which assign MALE or FEMALE to each individual randomly, with
        equal probabilities. If NO_SEX is given, offspring sex will not be
        changed. sexMode can also be one of
        *  (PROB_OF_MALES, p) where p is the probability of male for each
        offspring,
        *  (NUM_OF_MALES, n) where n is the number of males in a mating
        event. If n is greater than or equal to the number of offspring in
        this family, all offspring in this family will be MALE.
        *  (NUM_OF_FEMALES, n) where n is the number of females in a
        mating event,
        *  (SEQUENCE_OF_SEX, s1, s2 ...) where s1, s2 etc are MALE or
        FEMALE. The sequence will be used for each mating event. It will
        be reused if the number of offspring in a mating event is greater
        than the length of sequence.
        *  (GLOBAL_SEQUENCE_OF_SEX, s1, s2, ...) where s1, s2 etc are
        MALE or FEMALE. The sequence will be used across mating events. It
        will be reused if the number of offspring in a subpopulation is
        greater than the length of sequence.  Finally, parameter sexMode
        accepts a function or a generator function. A function will be
        called whenever an offspring is produced. A generator will be
        created at each subpopulation and will be used to produce sex for
        all offspring in this subpopulation. No parameter is accepted.
        """
        _simuPOP_op.OffspringGenerator_swiginit(self, _simuPOP_op.new_OffspringGenerator(ops, numOffspring, sexMode))
    __swig_destroy__ = _simuPOP_op.delete_OffspringGenerator
    def clone(self) -> "simuPOP::OffspringGenerator *":
        """Obsolete or undocumented function."""
        return _simuPOP_op.OffspringGenerator_clone(self)
    def describe(self, format: 'bool'=True) -> "string":
        """Obsolete or undocumented function."""
        return _simuPOP_op.OffspringGenerator_describe(self, format)
# Rebind proxy methods to the C-extension implementations and register the class.
OffspringGenerator.clone = new_instancemethod(_simuPOP_op.OffspringGenerator_clone, None, OffspringGenerator)
OffspringGenerator.describe = new_instancemethod(_simuPOP_op.OffspringGenerator_describe, None, OffspringGenerator)
OffspringGenerator_swigregister = _simuPOP_op.OffspringGenerator_swigregister
OffspringGenerator_swigregister(OffspringGenerator)
# SWIG proxy for the C++ ControlledOffspringGenerator (allele-frequency control).
class ControlledOffspringGenerator(OffspringGenerator):
    """
    Details:
        This offspring generator populates an offspring population and
        controls allele frequencies at specified loci. At each generation,
        expected allele frequencies at these loci are passed from a user
        defined allele frequency trajectory function. The offspring
        population is populated in two steps. At the first step, only
        families with disease alleles are accepted until the
        expected number of disease alleles are met. At the second step,
        only families with wild type alleles are accepted to populate the
        rest of the offspring generation. This method is described in
        detail in "Peng et al, (2007) PLoS Genetics".
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
          ControlledOffspringGenerator(loci, alleles, freqFunc, ops=[],
            numOffspring=1, sexMode=RANDOM_SEX)
        Details:
        Create an offspring generator that selects offspring so that
        allele frequency at specified loci in the offspring generation
        reaches specified allele frequency. At the beginning of each
        generation, expected allele frequency of alleles at loci is
        returned from a user-defined trajectory function freqFunc.
        Parameter loci can be a list of loci indexes, names, or ALL_AVAIL.
        If there is no subpopulation, this function should return a list
        of frequencies for each locus. If there are multiple
        subpopulations, freqFunc can return a list of allele frequencies
        for all subpopulations or combined frequencies that ignore
        population structure. In the former case, allele frequencies
        should be arranged by loc0_sp0, loc1_sp0, ... loc0_sp1, loc1_sp1,
        ..., and so on. In the latter case, overall expected number of
        alleles are scattered to each subpopulation in proportion to
        existing number of alleles in each subpopulation, using a
        multinomial distribution.  After the expected alleles are
        calculated, this offspring generator accept and reject families
        according to their genotype at loci until allele frequencies reach
        their expected values. The rest of the offspring generation is
        then filled with families with only wild type alleles at these
        loci.  This offspring generator is derived from class
        OffspringGenerator. Please refer to class OffspringGenerator for a
        detailed description of parameters ops, numOffspring and sexMode.
        """
        _simuPOP_op.ControlledOffspringGenerator_swiginit(self, _simuPOP_op.new_ControlledOffspringGenerator(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_ControlledOffspringGenerator
ControlledOffspringGenerator_swigregister = _simuPOP_op.ControlledOffspringGenerator_swigregister
ControlledOffspringGenerator_swigregister(ControlledOffspringGenerator)
# SWIG proxy for the C++ ParentChooser; abstract base of all parent choosers.
class ParentChooser(object):
    """
    Details:
        A parent chooser repeatedly chooses parent(s) from a parental
        population and pass them to an offspring generator. A parent
        chooser can select one or two parents, which should be matched by
        the offspring generator. This class is the base class of all
        parent choosers, and should not be used directly.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
          ParentChooser(selectionField="")
        """
        _simuPOP_op.ParentChooser_swiginit(self, _simuPOP_op.new_ParentChooser(*args, **kwargs))
    def clone(self) -> "simuPOP::ParentChooser *":
        """Obsolete or undocumented function."""
        return _simuPOP_op.ParentChooser_clone(self)
    def initialize(self, pop: 'Population', subPop: 'size_t') -> "void":
        """
        Description:
            Initialize a parent chooser for subpopulation subPop of population
            pop.
        Usage:
          x.initialize(pop, subPop)
        """
        return _simuPOP_op.ParentChooser_initialize(self, pop, subPop)
    def describe(self, format: 'bool'=True) -> "string":
        """Obsolete or undocumented function."""
        return _simuPOP_op.ParentChooser_describe(self, format)
    def chooseParents(self) -> "simuPOP::ParentChooser::IndividualPair":
        """
        Description:
            Return chosen parents from a population if the parent chooser
            object is created with a population.
        Usage:
          x.chooseParents()
        """
        return _simuPOP_op.ParentChooser_chooseParents(self)
    __swig_destroy__ = _simuPOP_op.delete_ParentChooser
# Rebind proxy methods to the C-extension implementations and register the class.
ParentChooser.clone = new_instancemethod(_simuPOP_op.ParentChooser_clone, None, ParentChooser)
ParentChooser.initialize = new_instancemethod(_simuPOP_op.ParentChooser_initialize, None, ParentChooser)
ParentChooser.describe = new_instancemethod(_simuPOP_op.ParentChooser_describe, None, ParentChooser)
ParentChooser.chooseParents = new_instancemethod(_simuPOP_op.ParentChooser_chooseParents, None, ParentChooser)
ParentChooser_swigregister = _simuPOP_op.ParentChooser_swigregister
ParentChooser_swigregister(ParentChooser)
# SWIG proxy for the C++ SequentialParentChooser.
class SequentialParentChooser(ParentChooser):
    """
    Details:
        This parent chooser chooses a parent from a parental (virtual)
        subpopulation sequentially. Natural selection is not considered.
        If the last parent is reached, this parent chooser will restart
        from the beginning of the (virtual) subpopulation.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, sexChoice: 'SexChoice'=ANY_SEX):
        """
        Usage:
          SequentialParentChooser(sexChoice=ANY_SEX)
        Details:
        Create a parent chooser that chooses a parent from a parental
        (virtual) subpopulation sequentially. Parameter sexChoice can be
        ANY_SEX (default), MALE_ONLY and FEMALE_ONLY. In the latter two
        cases, only male or female individuals are selected. A
        RuntimeError will be raised if there is no male or female
        individual from the population.
        """
        _simuPOP_op.SequentialParentChooser_swiginit(self, _simuPOP_op.new_SequentialParentChooser(sexChoice))
    __swig_destroy__ = _simuPOP_op.delete_SequentialParentChooser
SequentialParentChooser_swigregister = _simuPOP_op.SequentialParentChooser_swigregister
SequentialParentChooser_swigregister(SequentialParentChooser)
# SWIG proxy for the C++ RandomParentChooser (single random parent).
class RandomParentChooser(ParentChooser):
    """
    Details:
        This parent chooser chooses a parent randomly from a (virtual)
        parental subpopulation. Parents are chosen with or without
        replacement. If parents are chosen with replacement, a parent can
        be selected multiple times. If individual fitness values are
        assigned to individuals (stored in an information field
        selectionField, default to "fitness"), individuals will be chosen
        at a probability proportional to his or her fitness value. If
        parents are chosen without replacement, a parent can be chosen
        only once. A RuntimeError will be raised if all parents are
        exhausted. Natural selection is disabled in the without-
        replacement case.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
          RandomParentChooser(replacement=True, selectionField="fitness",
            sexChoice=ANY_SEX)
        Details:
        Create a random parent chooser that choose parents with or without
        replacement (parameter replacement, default to True). If selection
        is enabled and information field selectionField exists in the
        passed population, the probability that a parent is chosen is
        proportional to his/her fitness value stored in selectionField.
        This parent chooser by default chooses parent from all individuals
        (ANY_SEX), but it can be made to select only male (MALE_ONLY) or
        female (FEMALE_ONLY) individuals by setting parameter sexChoice.
        """
        _simuPOP_op.RandomParentChooser_swiginit(self, _simuPOP_op.new_RandomParentChooser(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_RandomParentChooser
RandomParentChooser_swigregister = _simuPOP_op.RandomParentChooser_swigregister
RandomParentChooser_swigregister(RandomParentChooser)
# SWIG proxy for the C++ RandomParentsChooser (random male/female pair).
class RandomParentsChooser(ParentChooser):
    """
    Details:
        This parent chooser chooses two parents, a male and a female,
        randomly from a (virtual) parental subpopulation. Parents are
        chosen with or without replacement from their respective sex
        group. If parents are chosen with replacement, a parent can be
        selected multiple times. If individual fitness values are assigned
        (stored in information field selectionField, default to "fitness"),
        the probability that an individual is chosen is proportional to
        his/her fitness value among all individuals with the same sex. If
        parents are chosen without replacement, a parent can be chosen
        only once. A RuntimeError will be raised if all males or females
        are exhausted. Natural selection is disabled in the without-
        replacement case.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
          RandomParentsChooser(replacement=True, selectionField="fitness")
        Details:
        Create a random parents chooser that choose two parents with or
        without replacement (parameter replacement, default to True). If
        selection is enabled and information field selectionField exists
        in the passed population, the probability that a parent is chosen
        is proportional to his/her fitness value stored in selectionField.
        """
        _simuPOP_op.RandomParentsChooser_swiginit(self, _simuPOP_op.new_RandomParentsChooser(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_RandomParentsChooser
RandomParentsChooser_swigregister = _simuPOP_op.RandomParentsChooser_swigregister
RandomParentsChooser_swigregister(RandomParentsChooser)
# SWIG proxy for the C++ PolyParentsChooser (multi-spouse mating).
class PolyParentsChooser(ParentChooser):
    """
    Details:
        This parent chooser is similar to random parents chooser but
        instead of selecting a new pair of parents each time, one of the
        parents in this parent chooser will mate with several spouses
        before he/she is replaced. This mimics multi-spouse mating
        schemes such as polygyny or polyandry in some populations. Natural
        selection is supported for both sexes.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
          PolyParentsChooser(polySex=MALE, polyNum=1,
            selectionField="fitness")
        Details:
        Create a multi-spouse parents chooser where each father (if
        polySex is MALE) or mother (if polySex is FEMALE) has polyNum
        spouses. The parents are chosen with replacement. If individual
        fitness values are assigned (stored to information field
        selectionField, default to "fitness"), the probability that an
        individual is chosen is proportional to his/her fitness value
        among all individuals with the same sex.
        """
        _simuPOP_op.PolyParentsChooser_swiginit(self, _simuPOP_op.new_PolyParentsChooser(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_PolyParentsChooser
PolyParentsChooser_swigregister = _simuPOP_op.PolyParentsChooser_swigregister
PolyParentsChooser_swigregister(PolyParentsChooser)
# SWIG proxy for the C++ CombinedParentsChooser (one chooser per parent).
class CombinedParentsChooser(ParentChooser):
    """
    Details:
        This parent chooser accepts two parent choosers. It takes one
        parent from each parent chooser and return them as father and
        mother. Because two parent choosers do not have to choose parents
        from the same virtual subpopulation, this parent chooser allows
        you to choose parents from different subpopulations.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, fatherChooser: 'ParentChooser', motherChooser: 'ParentChooser', allowSelfing: 'bool'=True):
        """
        Usage:
          CombinedParentsChooser(fatherChooser, motherChooser,
            allowSelfing=True)
        Details:
        Create a Python parent chooser using two parent choosers
        fatherChooser and motherChooser. It takes one parent from each
        parent chooser and return them as father and mother. If two valid
        parents are returned, the first valid parent (father) will be used
        for fatherChooser, the second valid parent (mother) will be used
        for motherChooser. Although these two parent choosers are supposed
        to return a father and a mother respectively, the sex of returned
        parents are not checked so it is possible to return parents with
        the same sex using this parents chooser. This chooser by default
        allows the selection of the same parents as father and mother
        (self-fertilization), unless a parameter allowSelfing is used to
        disable it.
        """
        _simuPOP_op.CombinedParentsChooser_swiginit(self, _simuPOP_op.new_CombinedParentsChooser(fatherChooser, motherChooser, allowSelfing))
    __swig_destroy__ = _simuPOP_op.delete_CombinedParentsChooser
CombinedParentsChooser_swigregister = _simuPOP_op.CombinedParentsChooser_swigregister
CombinedParentsChooser_swigregister(CombinedParentsChooser)
# SWIG proxy for the C++ PyParentsChooser, driven by a user Python generator.
class PyParentsChooser(ParentChooser):
    """
    Details:
        This parent chooser accepts a Python generator function that
        repeatedly yields one or two parents, which can be references to
        individual objects or indexes relative to each subpopulation. The
        parent chooser calls the generator function with parental
        population and a subpopulation index for each subpopulation and
        retrieves parents repeatedly using the iterator interface of the
        generator function. This parent chooser does not support virtual
        subpopulation directly. However, because virtual subpopulations
        are defined in the passed parental population, it is easy to
        return parents from a particular virtual subpopulation using
        virtual subpopulation related functions.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, generator: 'PyObject *'):
        """
        Usage:
          PyParentsChooser(generator)
        Details:
        Create a Python parent chooser using a Python generator function
        generator. This function should accept one or both of
        parameters pop (the parental population) and subPop (index of
        subpopulation) and return the reference or index (relative to
        subpopulation) of a parent or a pair of parents repeatedly using
        the iterator interface of the generator function.
        """
        _simuPOP_op.PyParentsChooser_swiginit(self, _simuPOP_op.new_PyParentsChooser(generator))
    __swig_destroy__ = _simuPOP_op.delete_PyParentsChooser
PyParentsChooser_swigregister = _simuPOP_op.PyParentsChooser_swigregister
PyParentsChooser_swigregister(PyParentsChooser)
class MatingScheme(object):
    """
    Details:
        This mating scheme is the base class of all mating schemes. It
        evolves a population generation by generation but does not
        actually transmit genotype.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
            MatingScheme(subPopSize=[])

        Details:
            Create a base mating scheme that evolves a population without
            transmitting genotypes. At each generation, this mating scheme
            creates an offspring generation according to parameter
            subPopSize, which can be a list of subpopulation sizes (or a
            number if there is only one subpopulation) or a Python function
            which will be called at each generation, just before mating, to
            determine the subpopulation sizes of the offspring generation.
            The function should be defined with one or both parameters of
            gen and pop where gen is the current generation number and pop
            is the parental population just before mating. The return value
            of this function should be a list of subpopulation sizes for
            the offspring generation. A single number can be returned if
            there is only one subpopulation. The passed parental population
            is usually used to determine offspring population size from
            parental population size but you can also modify this
            population to prepare for mating. A common practice is to split
            and merge parental populations in this function so that your
            demographic-related information and actions could be
            implemented in the same function.
        """
        # Construct the underlying C++ object and attach it to this proxy.
        _simuPOP_op.MatingScheme_swiginit(self, _simuPOP_op.new_MatingScheme(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_MatingScheme
    def clone(self) -> "simuPOP::MatingScheme *":
        """Obsolete or undocumented function. Presumably returns a copy of
        this mating scheme -- confirm against the simuPOP C++ reference."""
        return _simuPOP_op.MatingScheme_clone(self)
    def describe(self, format: 'bool'=True) -> "string":
        """Obsolete or undocumented function. Returns a string description
        of this mating scheme (format presumably toggles formatting --
        confirm against the simuPOP C++ reference)."""
        return _simuPOP_op.MatingScheme_describe(self, format)
# Bind the C-level implementations as instance methods and register the
# proxy class with SWIG's runtime type system.
MatingScheme.clone = new_instancemethod(_simuPOP_op.MatingScheme_clone, None, MatingScheme)
MatingScheme.describe = new_instancemethod(_simuPOP_op.MatingScheme_describe, None, MatingScheme)
MatingScheme_swigregister = _simuPOP_op.MatingScheme_swigregister
MatingScheme_swigregister(MatingScheme)
class HomoMating(MatingScheme):
    """
    Details:
        A homogeneous mating scheme that uses a parent chooser to choose
        parents from a parental generation, and an offspring generator to
        generate offspring from chosen parents. It can be either used
        directly, or within a heterogeneous mating scheme. In the latter
        case, it can be applied to a (virtual) subpopulation.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
            HomoMating(chooser, generator, subPopSize=[], subPops=ALL_AVAIL,
              weight=0)

        Details:
            Create a homogeneous mating scheme using a parent chooser
            chooser and an offspring generator generator. If this mating
            scheme is used directly in a simulator, it will be responsible
            for creating an offspring population according to parameter
            subPopSize. This parameter can be a list of subpopulation sizes
            (or a number if there is only one subpopulation) or a Python
            function which will be called at each generation to determine
            the subpopulation sizes of the offspring generation. Please
            refer to class MatingScheme for details about this parameter.
            If this mating scheme is used within a heterogeneous mating
            scheme, parameters subPops and weight are used to determine
            which (virtual) subpopulations this mating scheme will be
            applied to, and how many offspring this mating scheme will
            produce. Please refer to mating scheme HeteroMating for the use
            of these two parameters.
        """
        # Construct the underlying C++ object and attach it to this proxy.
        _simuPOP_op.HomoMating_swiginit(self, _simuPOP_op.new_HomoMating(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_HomoMating
# Register the proxy class with SWIG's runtime type system.
HomoMating_swigregister = _simuPOP_op.HomoMating_swigregister
HomoMating_swigregister(HomoMating)
class PedigreeMating(MatingScheme):
    """
    Details:
        This mating scheme evolves a population following an existing
        pedigree structure. If the Pedigree object has N ancestral
        generations and a present generation, it can be used to evolve a
        population for N generations, starting from the topmost ancestral
        generation. At the k-th generation, this mating scheme produces an
        offspring generation according to subpopulation structure of the
        N-k-1 ancestral generation in the pedigree object (e.g. producing
        the offspring population of generation 0 according to the N-1
        ancestral generation of the pedigree object). For each offspring,
        this mating scheme copies individual ID and sex from the
        corresponding individual in the pedigree object. It then locates
        the parents of each offspring using their IDs in the pedigree
        object. A list of during mating operators are then used to
        transmit parental genotype to the offspring. The population being
        evolved must have an information field 'ind_id'.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
            PedigreeMating(ped, ops, idField="ind_id")

        Details:
            Creates a pedigree mating scheme that evolves a population
            according to Pedigree object ped. The evolved population should
            contain individuals with ID (at information field idField,
            default to 'ind_id') that match those individuals in the
            topmost ancestral generation who have offspring. After parents
            of each individual are determined from their IDs, a list of
            during-mating operators ops are applied to transmit genotypes.
            The return value of these operators are not checked.
        """
        # Construct the underlying C++ object and attach it to this proxy.
        _simuPOP_op.PedigreeMating_swiginit(self, _simuPOP_op.new_PedigreeMating(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_PedigreeMating
    def parallelizable(self) -> "bool":
        """
        Usage:
            x.parallelizable()

        Presumably reports whether this mating scheme can run in parallel
        -- the C++ side implements it; confirm against the simuPOP
        reference before relying on its semantics.
        """
        return _simuPOP_op.PedigreeMating_parallelizable(self)
# Bind the C-level implementation as an instance method and register the
# proxy class with SWIG's runtime type system.
PedigreeMating.parallelizable = new_instancemethod(_simuPOP_op.PedigreeMating_parallelizable, None, PedigreeMating)
PedigreeMating_swigregister = _simuPOP_op.PedigreeMating_swigregister
PedigreeMating_swigregister(PedigreeMating)
class HeteroMating(MatingScheme):
    """
    Details:
        A heterogeneous mating scheme that applies a list of homogeneous
        mating schemes to different (virtual) subpopulations.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
            HeteroMating(matingSchemes, subPopSize=[],
              shuffleOffspring=True, weightBy=ANY_SEX)

        Details:
            Create a heterogeneous mating scheme that will apply a list of
            homogeneous mating schemes matingSchemes to different (virtual)
            subpopulations. The size of the offspring generation is
            determined by parameter subPopSize, which can be a list of
            subpopulation sizes or a Python function that returns a list of
            subpopulation sizes at each generation. Please refer to class
            MatingScheme for a detailed explanation of this parameter. Each
            mating scheme defined in matingSchemes can be applied to one or
            more (virtual) subpopulations. If parameter subPops is not
            specified, a mating scheme will be applied to all
            subpopulations. If a list of (virtual) subpopulations is
            specified, the mating scheme will be applied to specific
            (virtual) subpopulations. If multiple mating schemes are
            applied to the same subpopulation, a weight (parameter weight)
            can be given to each mating scheme to determine how many
            offspring it will produce. The default for all mating schemes
            is 0. In this case, the number of offspring each mating scheme
            produces is proportional to the number of individuals in its
            parental (virtual) subpopulation (default to all parents, but
            can be father for weightBy=MALE_ONLY, mother for
            weightBy=FEMALE_ONLY, or father mother pairs (lesser of number
            of fathers and mothers) for weightBy=PAIR_ONLY). If all weights
            are negative, the numbers of offspring are determined by the
            multiplication of the absolute values of the weights and their
            respective parental (virtual) subpopulation sizes. If all
            weights are positive, the number of offspring produced by each
            mating scheme is proportional to these weights. Mating schemes
            with zero weight in this case will produce no offspring. If
            both negative and positive weights are present, negative
            weights are processed before positive ones. A sexual mating
            scheme might fail if a parental (virtual) subpopulation has no
            father or mother. In this case, you can set weightBy to
            PAIR_ONLY so a (virtual) subpopulation will appear to have zero
            size, and will thus contribute no offspring to the offspring
            population. Note that the size of a parental (virtual)
            subpopulation in this mode (and in the modes of MALE_ONLY,
            FEMALE_ONLY) during the calculation of the size of the
            offspring subpopulation will be roughly half of the actual
            population size so you might have to use weight=-2 if you would
            like to have an offspring subpopulation that is roughly the
            same size as the parental (virtual) subpopulation. If multiple
            mating schemes are applied to the same subpopulation, offspring
            produced by these mating schemes are shuffled randomly. If this
            is not desired, you can turn off offspring shuffling by setting
            parameter shuffleOffspring to False.
        """
        # Construct the underlying C++ object and attach it to this proxy.
        _simuPOP_op.HeteroMating_swiginit(self, _simuPOP_op.new_HeteroMating(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_HeteroMating
# Register the proxy class with SWIG's runtime type system.
HeteroMating_swigregister = _simuPOP_op.HeteroMating_swigregister
HeteroMating_swigregister(HeteroMating)
class ConditionalMating(MatingScheme):
    """
    A mating scheme that selects between two mating schemes according to a
    condition, similar in spirit to operator IfElse. The condition may be a
    fixed truth value, a Python expression (string), or a user-defined
    function, and determines which of the two mating schemes is applied.
    """
    thisown = _swig_property(lambda obj: obj.this.own(), lambda obj, val: obj.this.own(val), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, cond: 'PyObject *', ifMatingScheme: 'MatingScheme', elseMatingScheme: 'MatingScheme'):
        """
        Usage:
            ConditionalMating(cond, ifMatingScheme, elseMatingScheme)

        Apply ifMatingScheme when cond is True and elseMatingScheme when it
        is False. A string cond is evaluated as a Python expression in the
        parental population's local namespace. A callable cond is invoked
        with parameter pop (the parental population) and must return True
        or False. Any other value for cond is treated as a fixed condition,
        converted once to True or False, so the corresponding mating scheme
        is always applied.
        """
        # Build the underlying C++ object, then bind it to this proxy.
        impl = _simuPOP_op.new_ConditionalMating(cond, ifMatingScheme, elseMatingScheme)
        _simuPOP_op.ConditionalMating_swiginit(self, impl)

    __swig_destroy__ = _simuPOP_op.delete_ConditionalMating

# Register the proxy class with SWIG's runtime type system.
ConditionalMating_swigregister = _simuPOP_op.ConditionalMating_swigregister
ConditionalMating_swigregister(ConditionalMating)
class pyPopIterator(object):
    """
    A Python iterator over the populations held by a simulator.
    """
    thisown = _swig_property(lambda obj: obj.this.own(), lambda obj, val: obj.this.own(val), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, begin: 'vector< simuPOP::Population *,std::allocator< simuPOP::Population * > >::iterator const', end: 'vector< simuPOP::Population *,std::allocator< simuPOP::Population * > >::iterator const'):
        """
        Usage:
            pyPopIterator(begin, end)

        Wrap the half-open range [begin, end) of population pointers.
        """
        # Build the underlying C++ iterator object, then bind it to this proxy.
        wrapped = _simuPOP_op.new_pyPopIterator(begin, end)
        _simuPOP_op.pyPopIterator_swiginit(self, wrapped)

    __swig_destroy__ = _simuPOP_op.delete_pyPopIterator

    def next(self) -> "simuPOP::Population &":
        """
        Usage:
            x.__next__()

        Return the next population in the range.
        """
        return _simuPOP_op.pyPopIterator_next(self)

# Hook the C-level iterator protocol onto the proxy class (both the Python 2
# style next() and the Python 3 __iter__/__next__ protocol), then register it
# with SWIG's runtime type system.
pyPopIterator.__iter__ = new_instancemethod(_simuPOP_op.pyPopIterator___iter__, None, pyPopIterator)
pyPopIterator.next = new_instancemethod(_simuPOP_op.pyPopIterator_next, None, pyPopIterator)
pyPopIterator.__next__ = new_instancemethod(_simuPOP_op.pyPopIterator___next__, None, pyPopIterator)
pyPopIterator_swigregister = _simuPOP_op.pyPopIterator_swigregister
pyPopIterator_swigregister(pyPopIterator)
class Simulator(object):
    """
    Details:
        A simuPOP simulator is responsible for evolving one or more
        populations forward in time, subject to various operators.
        Populations in a simulator are created from one or more replicates
        of specified populations. A number of functions are provided to
        access and manipulate populations, and most importantly, to evolve
        them.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, pops: 'PyObject *', rep: 'UINT'=1, stealPops: 'bool'=True):
        """
        Usage:
            Simulator(pops, rep=1, stealPops=True)

        Details:
            Create a simulator with rep (default to 1) replicates of
            populations pops, which is a list of populations although a
            single population object is also acceptable. Contents of passed
            populations are by default moved to the simulator to avoid
            duplication of potentially large population objects, leaving
            empty populations behind. This behavior can be changed by
            setting stealPops to False, in which case populations are
            copied to the simulator.
        """
        # Construct the underlying C++ simulator and attach it to this proxy.
        _simuPOP_op.Simulator_swiginit(self, _simuPOP_op.new_Simulator(pops, rep, stealPops))
    __swig_destroy__ = _simuPOP_op.delete_Simulator
    def clone(self) -> "simuPOP::Simulator *":
        """
        Usage:
            x.clone()

        Details:
            Clone a simulator, along with all its populations. Note that
            the Python assignment statement simu1 = simu only creates
            another reference to an existing simulator; use clone() for an
            independent copy.
        """
        return _simuPOP_op.Simulator_clone(self)
    def numRep(self) -> "size_t":
        """
        Usage:
            x.numRep()

        Details:
            Return the number of replicates.
        """
        return _simuPOP_op.Simulator_numRep(self)
    def population(self, rep: 'size_t') -> "simuPOP::Population &":
        """
        Usage:
            x.population(rep)

        Details:
            Return a reference to the rep-th population of a simulator. The
            reference will become invalid once the simulator starts
            evolving or becomes invalid (removed). If an independent copy
            of the population is needed, you can use population.clone() to
            create a cloned copy or simulator.extract() to remove the
            population from the simulator.
        """
        return _simuPOP_op.Simulator_population(self, rep)
    def add(self, pop: 'Population', stealPop: 'bool'=True) -> "void":
        """
        Usage:
            x.add(pop, stealPop=True)

        Details:
            Add a population pop to the end of an existing simulator. This
            function by default moves pop to the simulator, leaving an
            empty population for the passed population object. If stealPop
            is set to False, the population will be copied to the
            simulator, and thus unchanged.
        """
        return _simuPOP_op.Simulator_add(self, pop, stealPop)
    def extract(self, rep: 'UINT') -> "simuPOP::Population &":
        """
        Usage:
            x.extract(rep)

        Details:
            Extract the rep-th population from a simulator. This will
            reduce the number of populations in this simulator by one.
        """
        return _simuPOP_op.Simulator_extract(self, rep)
    def populations(self) -> "simuPOP::pyPopIterator":
        """
        Usage:
            x.populations()

        Details:
            Return a Python iterator that can be used to iterate through
            all populations in a simulator.
        """
        return _simuPOP_op.Simulator_populations(self)
    def evolve(self, *args, **kwargs) -> "vectoru":
        """
        Usage:
            x.evolve(initOps=[], preOps=[], matingScheme=MatingScheme,
              postOps=[], finalOps=[], gen=-1, dryrun=False)

        Details:
            Evolve all populations gen generations, subject to several
            lists of operators which are applied at different stages of an
            evolutionary process. Operators initOps are applied to all
            populations (subject to applicability restrictions of the
            operators, imposed by the rep parameter of these operators)
            before evolution. They are used to initialize populations
            before evolution. Operators finalOps are applied to all
            populations after the evolution. Operators preOps, and postOps
            are applied during the life cycle of each generation. These
            operators can be applied at all or some of the generations, to
            all or some of the evolving populations, depending on the
            begin, end, step, at and reps parameters of these operators.
            These operators are applied in the order at which they are
            specified. Populations in a simulator are evolved one by one.
            At each generation, operators preOps are applied to the
            parental generations. A mating scheme is then used to populate
            an offspring generation. For each offspring, his or her sex is
            determined before during-mating operators of the mating scheme
            are used to transmit parental genotypes. After an offspring
            generation is successfully generated and becomes the current
            generation, operators postOps are applied to the offspring
            generation. If any of the preOps and postOps fails (returns
            False), the evolution of a population will be stopped. The
            generation number of a population, which is the variable "gen"
            in each population's local namespace, is increased by one if an
            offspring generation has been successfully populated even if a
            post-mating operator fails. Another variable "rep" will also be
            set to indicate the index of each population in the simulator.
            Note that populations in a simulator do not have to have the
            same generation number. You could reset a population's
            generation number by changing this variable. Parameter gen can
            be set to a non-negative number, which is the number of
            generations to evolve. If a simulator starts at the beginning
            of a generation g (for example 0), a simulator will stop at the
            beginning (instead of the end) of generation g + gen (for
            example gen). If gen is negative (default), the evolution will
            continue indefinitely, until all replicates are stopped by
            operators that return False at some point (these operators are
            called terminators). At the end of the evolution, the
            generations that each replicate has evolved are returned. Note
            that finalOps are applied to all applicable populations,
            including those that have stopped before others. If parameter
            dryrun is set to True, this function will print a description
            of the evolutionary process generated by function
            describeEvolProcess() and exit.
        """
        return _simuPOP_op.Simulator_evolve(self, *args, **kwargs)
    def vars(self, *args, **kwargs) -> "PyObject *":
        """
        Usage:
            x.vars(rep, subPop=[])

        Details:
            Return the local namespace of the rep-th population, equivalent
            to x.population(rep).vars(subPop).
        """
        return _simuPOP_op.Simulator_vars(self, *args, **kwargs)
# Bind the C-level implementations as instance methods and register the
# proxy class with SWIG's runtime type system.
Simulator.clone = new_instancemethod(_simuPOP_op.Simulator_clone, None, Simulator)
Simulator.numRep = new_instancemethod(_simuPOP_op.Simulator_numRep, None, Simulator)
Simulator.population = new_instancemethod(_simuPOP_op.Simulator_population, None, Simulator)
Simulator.add = new_instancemethod(_simuPOP_op.Simulator_add, None, Simulator)
Simulator.extract = new_instancemethod(_simuPOP_op.Simulator_extract, None, Simulator)
Simulator.populations = new_instancemethod(_simuPOP_op.Simulator_populations, None, Simulator)
Simulator.evolve = new_instancemethod(_simuPOP_op.Simulator_evolve, None, Simulator)
Simulator.vars = new_instancemethod(_simuPOP_op.Simulator_vars, None, Simulator)
Simulator.__cmp__ = new_instancemethod(_simuPOP_op.Simulator___cmp__, None, Simulator)
Simulator_swigregister = _simuPOP_op.Simulator_swigregister
Simulator_swigregister(Simulator)
def describeEvolProcess(*args, **kwargs) -> "string":
    """
    Usage:
        describeEvolProcess(initOps=[], preOps=[],
          matingScheme=MatingScheme, postOps=[], finalOps=[], gen=-1,
          numRep=1)

    Accept the same parameters as Simulator.evolve and return a textual
    description of how the corresponding evolutionary process would be
    executed. Calling this function is recommended whenever you have any
    doubt about how your simulation will proceed.
    """
    # Pure delegation to the C++ implementation.
    description = _simuPOP_op.describeEvolProcess(*args, **kwargs)
    return description
class PyEval(BaseOperator):
    """
    Details:
        A PyEval operator evaluates a Python expression in a population's
        local namespace when it is applied to this population. The result
        is written to an output specified by parameter output.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
            PyEval(expr="", stmts="", exposePop="", output=">", begin=0,
              end=-1, step=1, at=[], reps=ALL_AVAIL, subPops=Py_False,
              infoFields=[])

        Details:
            Create a PyEval operator that evaluates a Python expression
            expr in a population's local namespaces when it is applied to
            this population. This namespace can either be the population's
            local namespace (pop.vars()), or namespaces subPop[sp] for
            (virtual) subpop (pop.vars(subpop)) in specified subPops. If
            Python statements stmts is given (a single or multi-line
            string), the statement will be executed before expr. If
            exposePop is set to a non-empty string, the current population
            will be exposed in its own local namespace as a variable with
            this name. This allows the execution of expressions such as
            'pop.individual(0).allele(0)'. The result of expr will be sent
            to an output stream specified by parameter output. The exposed
            population variable will be removed after expr is evaluated.
            Please refer to class BaseOperator for other parameters.

        Note:
            Although the statements and expressions are evaluated in a
            population's local namespace, they have access to a global
            namespace which is the module global namespace. It is therefore
            possible to refer to any module variable in these expressions.
            Such mixed use of local and global variables is, however,
            strongly discouraged.
        """
        # Construct the underlying C++ object and attach it to this proxy.
        _simuPOP_op.PyEval_swiginit(self, _simuPOP_op.new_PyEval(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_PyEval
    def evaluate(self, pop: 'Population') -> "string":
        """Obsolete or undocumented function. Presumably evaluates the
        operator's expression against pop and returns the result as a
        string -- confirm against the simuPOP C++ reference."""
        return _simuPOP_op.PyEval_evaluate(self, pop)
# Bind the C-level implementation as an instance method and register the
# proxy class with SWIG's runtime type system.
PyEval.evaluate = new_instancemethod(_simuPOP_op.PyEval_evaluate, None, PyEval)
PyEval_swigregister = _simuPOP_op.PyEval_swigregister
PyEval_swigregister(PyEval)
class PyExec(PyEval):
    """
    Details:
        This operator executes given Python statements in a population's
        local namespace when it is applied to this population.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
            PyExec(stmts="", exposePop="", output=">", begin=0, end=-1,
              step=1, at=[], reps=ALL_AVAIL, subPops=Py_False,
              infoFields=[])

        Details:
            Create a PyExec operator that executes statements stmts in a
            population's local namespace when it is applied to this
            population. This namespace can either be the population's local
            namespace (pop.vars()), or namespaces subPop[sp] for each
            (virtual) subpop (pop.vars(subpop)) in specified subPops. If
            exposePop is given, the current population will be exposed in
            its local namespace as a variable named by exposePop. Although
            multiple statements can be executed, it is recommended that you
            use this operator to execute short statements and use
            PyOperator for more complex ones. Note that exposed population
            variables will be removed after the statements are executed.
        """
        # Construct the underlying C++ object and attach it to this proxy.
        _simuPOP_op.PyExec_swiginit(self, _simuPOP_op.new_PyExec(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_PyExec
# Register the proxy class with SWIG's runtime type system.
PyExec_swigregister = _simuPOP_op.PyExec_swigregister
PyExec_swigregister(PyExec)
class InfoEval(BaseOperator):
    """
    Details:
        Unlike operator PyEval and PyExec that work at the population
        level, in a population's local namespace, operator InfoEval works
        at the individual level, working with individual information
        fields. When this operator is applied to a population, information
        fields of eligible individuals are put into the local namespace of
        the population. A Python expression is then evaluated for each
        individual. The result is written to an output.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
            InfoEval(expr="", stmts="", usePopVars=False, exposeInd="",
              output=">", begin=0, end=-1, step=1, at=[], reps=ALL_AVAIL,
              subPops=ALL_AVAIL, infoFields=[])

        Details:
            Create an operator that evaluates a Python expression expr
            using individual information fields and population variables as
            variables. If exposeInd is not empty, the individual itself
            will be exposed in the population's local namespace as a
            variable with name specified by exposeInd. A Python expression
            (expr) is evaluated for each individual. The results are
            converted to strings and are written to an output specified by
            parameter output. Optionally, a statement (or several
            statements separated by newline) can be executed before expr is
            evaluated. The evaluation of this statement may change the
            value of information fields. Parameter usePopVars is obsolete
            because population variables are always usable in such
            expressions.
        """
        # Construct the underlying C++ object and attach it to this proxy.
        _simuPOP_op.InfoEval_swiginit(self, _simuPOP_op.new_InfoEval(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_InfoEval
# Register the proxy class with SWIG's runtime type system.
InfoEval_swigregister = _simuPOP_op.InfoEval_swigregister
InfoEval_swigregister(InfoEval)
class InfoExec(InfoEval):
    """
    Details:
        Operator InfoExec is similar to InfoEval in that it works at the
        individual level, using individual information fields as
        variables. This is usually used to change the value of information
        fields. For example, "b=a*2" will set the value of information
        field b to a*2 for all individuals.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
            InfoExec(stmts="", usePopVars=False, exposeInd="", output="",
              begin=0, end=-1, step=1, at=[], reps=ALL_AVAIL,
              subPops=ALL_AVAIL, infoFields=[])

        Details:
            Create an operator that executes Python statements stmts using
            individual information fields and population variables as
            variables. If exposeInd is not empty, the individual itself
            will be exposed in the population's local namespace as a
            variable with name specified by exposeInd. One or more Python
            statements (stmts) are executed for each individual.
            Information fields of these individuals are then updated from
            the corresponding variables. For example, a=1 will set
            information field a of all individuals to 1, a=b will set
            information field a of all individuals to information field b
            or a population variable b if b is not an information field but
            a population variable, and a=ind.sex() will set information
            field a of all individuals to its sex (needs exposeInd='ind').
            Parameter usePopVars is obsolete because population variables
            will always be usable.
        """
        # Construct the underlying C++ object and attach it to this proxy.
        _simuPOP_op.InfoExec_swiginit(self, _simuPOP_op.new_InfoExec(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_InfoExec
# Register the proxy class with SWIG's runtime type system.
InfoExec_swigregister = _simuPOP_op.InfoExec_swigregister
InfoExec_swigregister(InfoExec)
class Stat(BaseOperator):
"""
Details:
Operator Stat calculates various statistics of the population
being applied and sets variables in its local namespace. Other
operators or functions can retrieve results from or evalulate
expressions in this local namespace after Stat is applied.
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""
Usage:
Stat(popSize=False, numOfMales=False, numOfAffected=False,
numOfSegSites=[], numOfMutants=[], alleleFreq=[], heteroFreq=[],
homoFreq=[], genoFreq=[], haploFreq=[], haploHeteroFreq=[],
haploHomoFreq=[], sumOfInfo=[], meanOfInfo=[], varOfInfo=[],
maxOfInfo=[], minOfInfo=[], LD=[], association=[],
neutrality=[], structure=[], HWE=[], inbreeding=[],
effectiveSize=[], vars=ALL_AVAIL, suffix="", output="", begin=0,
end=-1, step=1, at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL,
infoFields=[])
Details:
Create a Stat operator that calculates specified statistics of a
population when it is applied to this population. This operator
can be applied to specified replicates (parameter rep) at
specified generations (parameter begin, end, step, and at). This
operator does not produce any output (ignore parameter output)
after statistics are calculated. Instead, it stores results in the
local namespace of the population being applied. Other operators
can retrieve these variables or evalulate expression directly in
this local namespace. Please refer to operator BaseOperator for a
detailed explanation of these common operator parameters. Stat
supports parameter subPops. It usually calculate the same set of
statistics for all subpopulations (subPops=subPopList()). If a
list of (virtual) subpopulations are specified, statistics for
only specified subpopulations will be calculated. However,
different statistics treat this parameter differently and it is
very important to check its reference before you use subPops for
any statistics. Calculated statistics are saved as variables in a
population's local namespace. These variables can be numbers,
lists or dictionaries and can be retrieved using functions
Population.vars() or Population.dvars(). A special default
dictionary (defdict) is used for dictionaries whose keys are
determined dynamically. Accessing elements of such a dictionary
with an invalid key will yield value 0 instead of a KeyError. If
the same variables are calculated for one or more (virtual)
subpopulation, the variables are stored in
vars()['subPop'][sp]['var'] where sp is a subpopulation ID (sp) or
a tuple of virtual subpopulation ID ((sp, vsp)).
Population.vars(sp) and Population.dvars(sp) provide shortcuts to
these variables. Operator Stat outputs a number of most useful
variables for each type of statistic. For example, alleleFreq
calculates both allele counts and allele frequencies and it by
default sets variable alleleFreq (dvars().alleleFreq) for all or
specified subpopulations. If this does not fit your need, you can
use parameter vars to output additional parameters, or limit the
output of existing parameters. More specifically, for this
particular statistic, the available variables are 'alleleFreq',
'alleleNum', 'alleleFreq_sp' ('alleleFreq' in each subpopulation),
and 'alleleNum_sp' ('alleleNum' in each subpopulation). You can
set vars=['alleleNum_sp'] to output only subpopulation specific
allele count. An optional suffix (parameter suffix) can be used to
append a suffix to default parameter names. This parameter can be
used, for example, to calculate and store the same statistics for
different subpopulations (e.g. pairwise Fst). Operator Stat
supports the following statistics: popSize: If popSize=True,
number of individuals in all or specified subpopulations
(parameter subPops) will be set to the following variables:
* popSize (default): Number of individuals in all or specified
subpopulations. Because subPops does not have to cover all
individuals, it may not be the actual population size.
* popSize_sp: Size of (virtual) subpopulation sp.
* subPopSize (default): A list of (virtual) subpopulation sizes.
This variable is easier to use than accessing popSize from each
(virtual) subpopulation.numOfMales: If numOfMales=True, number of
male individuals in all or specified (virtual) subpopulations will
be set to the following variables:
* numOfMales (default): Total number of male individuals in all
or specified (virtual) subpopulations.
* numOfFemales (default): Total number of female individuals in
all or specified (virtual) subpopulations.
* propOfMales: Proportion of male individuals.
* propOfFemales: Proportion of female individuals.
* numOfMales_sp: Number of male individuals in each (virtual)
subpopulation.
* numOfFemales_sp: Number of female individuals in each
(virtual) subpopulation.
* propOfMales_sp: Proportion of male individuals in each
(virtual) subpopulation.
* propOfFemales_sp: Proportion of female individuals in each
(virtual) subpopulation.numOfAffected: If numOfAffected=True,
number of affected individuals in all or specified (virtual)
subpopulations will be set to the following variables:
* numOfAffected (default): Total number of affected individuals
in all or specified (virtual) subpopulations.
* numOfUnaffected (default): Total number of unaffected
individuals in all or specified (virtual) subpopulations.
* propOfAffected: Proportion of affected individuals.
* propOfUnaffected: Proportion of unaffected individuals.
* numOfAffected_sp: Number of affected individuals in each
(virtual) subpopulation.
* numOfUnaffected_sp: Number of unaffected individuals in each
(virtual) subpopulation.
* propOfAffected_sp: Proportion of affected individuals in each
(virtual) subpopulation.
* propOfUnaffected_sp: Proportion of unaffected individuals in
each (virtual) subpopulation.numOfSegSites: Parameter
numOfSegSites accepts a list of loci (loci indexes, names, or
ALL_AVAIL) and count the number of loci with at least two
different alleles (segregating sites) or loci with only one non-
zero allele (no zero allele, not segragating) for individuals in
all or specified (virtual) subpopulations. This parameter sets
variables
* numOfSegSites (default): Number of segregating sites in all or
specified (virtual) subpopulations.
* numOfSegSites_sp: Number of segregating sites in each
(virtual) subpopulation.
* numOfFixedSites: Number of sites with one non-zero allele in
all or specified (virtual) subpopulations.
* numOfFixedSites_sp: Number of sites with one non-zero allele
in in each (virtual) subpopulations.
* segSites: A list of segregating sites in all or specified
(virtual) subpopulations.
* segSites_sp: A list of segregating sites in each (virtual)
subpopulation.
* fixedSites: A list of sites with one non-zero allele in all or
specified (virtual) subpopulations.
* fixedSites_sp: A list of sites with one non-zero allele in in
each (virtual) subpopulations.numOfMutants: Parameter numOfMutants
accepts a list of loci (loci indexes, names, or ALL_AVAIL) and
count the number of mutants (non-zero alleles) for individuals in
all or specified (virtual) subpopulations. It sets variables
* numOfMutants (default): Number of mutants in all or specified
(virtual) subpopulations.
* numOfMutants_sp: Number of mutants in each (virtual)
subpopulations.alleleFreq: This parameter accepts a list of loci
(loci indexes, names, or ALL_AVAIL), at which allele frequencies
will be calculated. This statistic outputs the following
variables, all of which are dictionary (with loci indexes as keys)
of default dictionaries (with alleles as keys). For example,
alleleFreq[loc][a] returns 0 if allele a does not exist.
* alleleFreq (default): alleleFreq[loc][a] is the frequency of
allele a at locus for all or specified (virtual) subpopulations.
* alleleNum (default): alleleNum[loc][a] is the number of allele
a at locus for all or specified (virtual) subpopulations.
* alleleFreq_sp: Allele frequency in each (virtual)
subpopulation.
* alleleNum_sp: Allele count in each (virtual)
subpopulation.heteroFreq and homoFreq: These parameters accept a
list of loci (by indexes or names), at which the number and
frequency of homozygotes and/or heterozygotes will be calculated.
These statistics are only available for diploid populations. The
following variables will be outputted:
* heteroFreq (default for parameter heteroFreq): A dictionary of
proportion of heterozygotes in all or specified (virtual)
subpopulations, with loci indexes as dictionary keys.
* homoFreq (default for parameter homoFreq): A dictionary of
proportion of homozygotes in all or specified (virtual)
subpopulations.
* heteroNum: A dictionary of number of heterozygotes in all or
specified (virtual) subpopulations.
* homoNum: A dictionary of number of homozygotes in all or
specified (virtual) subpopulations.
* heteroFreq_sp: A dictionary of proportion of heterozygotes in
each (virtual) subpopulation.
* homoFreq_sp: A dictionary of proportion of homozygotes in each
(virtual) subpopulation.
* heteroNum_sp: A dictionary of number of heterozygotes in each
(virtual) subpopulation.
* homoNum_sp: A dictionary of number of homozygotes in each
(virtual) subpopulation.genoFreq: This parameter accept a list of
loci (by indexes or names) at which number and frequency of all
genotypes are outputed as a dictionary (indexed by loci indexes)
of default dictionaries (indexed by tuples of possible indexes).
This statistic is available for all population types with genotype
defined as ordered alleles at a locus. The length of genotype
equals the number of homologous copies of chromosomes (ploidy) of
a population. Genotypes for males or females on sex chromosomes or
in haplodiploid populations will have different length. Because
genotypes are ordered, (1, 0) and (0, 1) (two possible genotypes
in a diploid population) are considered as different genotypes.
This statistic outputs the following variables:
* genoFreq (default): A dictionary (by loci indexes) of default
dictionaries (by genotype) of genotype frequencies. For example,
genoFreq[1][(1, 0)] is the frequency of genotype (1, 0) at locus
1.
* genoNum (default): A dictionary of default dictionaries of
genotype counts of all or specified (virtual) subpopulations.
* genoFreq_sp: genotype frequency in each specified (virtual)
subpopulation.
* genoFreq_sp: genotype count in each specified (virtual)
subpopulation.haploFreq: This parameter accepts one or more lists
of loci (by index) at which number and frequency of haplotypes are
outputted as default dictionaries. [(1,2)] can be abbreviated to
(1,2). For example, using parameter haploFreq=(1,2,4), all
haplotypes at loci 1, 2 and 4 are counted. This statistic saves
results to dictionary (with loci index as keys) of default
dictionaries (with haplotypes as keys) such as
haploFreq[(1,2,4)][(1,1,0)] (frequency of haplotype (1,1,0) at
loci (1,2,3)). This statistic works for all population types.
Number of haplotypes for each individual equals to his/her ploidy
number. Haplodiploid populations are supported in the sense that
the second homologous copy of the haplotype is not counted for
male individuals. This statistic outputs the following variables:
* haploFreq (default): A dictionary (with tuples of loci indexes
as keys) of default dictionaries of haplotype frequencies. For
example, haploFreq[(0, 1)][(1,1)] records the frequency of
haplotype (1,1) at loci (0, 1) in all or specified (virtual)
subpopulations.
* haploNum (default): A dictionary of default dictionaries of
haplotype counts in all or specified (virtual) subpopulations.
* haploFreq_sp: Haplotype frequencies in each (virtual)
subpopulation.
* haploNum_sp: Haplotype count in each (virtual)
subpopulation.haploHeteroFreq and haploHomoFreq: These parameters
accept a list of haplotypes (list of loci), at which the number
and frequency of haplotype homozygotes and/or heterozygotes will
be calculated. Note that these statistics are observed count of
haplotype heterozygote. The following variables will be outputted:
* haploHeteroFreq (default for parameter haploHeteroFreq): A
dictionary of proportion of haplotype heterozygotes in all or
specified (virtual) subpopulations, with haplotype indexes as
dictionary keys.
* haploHomoFreq (default for parameter haploHomoFreq): A
dictionary of proportion of homozygotes in all or specified
(virtual) subpopulations.
* haploHeteroNum: A dictionary of number of heterozygotes in all
or specified (virtual) subpopulations.
* haploHomoNum: A dictionary of number of homozygotes in all or
specified (virtual) subpopulations.
* haploHeteroFreq_sp: A dictionary of proportion of
heterozygotes in each (virtual) subpopulation.
* haploHomoFreq_sp: A dictionary of proportion of homozygotes in
each (virtual) subpopulation.
* haploHeteroNum_sp: A dictionary of number of heterozygotes in
each (virtual) subpopulation.
* haploHomoNum_sp: A dictionary of number of homozygotes in each
(virtual) subpopulation.sumOfinfo, meanOfInfo, varOfInfo,
maxOfInfo and minOfInfo: Each of these five parameters accepts a
list of information fields. For each information field, the sum,
mean, variance, maximum or minimal (depending on the specified
parameter(s)) of this information field of individuals in all or
specified (virtual) subpopulations will be calculated. The results
will be put into the following population variables:
* sumOfInfo (default for sumOfInfo): A dictionary of the sum of
specified information fields of individuals in all or specified
(virtual) subpopulations. This dictionary is indexed by names of
information fields.
* meanOfInfo (default for meanOfInfo): A dictionary of the mean
of information fields of all individuals.
* varOfInfo (default for varOfInfo): A dictionary of the sample
variance of information fields of all individuals.
* maxOfInfo (default for maxOfInfo): A dictionary of the maximum
value of information fields of all individuals.
* minOfInfo (default for minOfInfo): A dictionary of the minimal
value of information fields of all individuals.
* sumOfInfo_sp: A dictionary of the sum of information fields of
individuals in each subpopulation.
* meanOfInfo_sp: A dictionary of the mean of information fields
of individuals in each subpopulation.
* varOfInfo_sp: A dictionary of the sample variance of
information fields of individuals in each subpopulation.
* maxOfInfo_sp: A dictionary of the maximum value of information
fields of individuals in each subpopulation.
* minOfInfo_sp: A dictionary of the minimal value of information
fields of individuals in each subpopulation.LD: Parameter LD
accepts one or a list of loci pairs (e.g. LD=[[0,1], [2,3]]) with
optional primary alleles at both loci (e.g. LD=[0,1,0,0]). For
each pair of loci, this operator calculates linkage disequilibrium
and optional association statistics between two loci. When primary
alleles are specified, signed linkage disequilibrium values are
calculated with non-primary alleles are combined. Otherwise,
absolute values of diallelic measures are combined to yield
positive measure of LD. Association measures are calculated from a
m by n contingency table of haplotype counts (m=n=2 if primary alleles
are specified). Please refer to the simuPOP user's guide for
detailed information. This statistic sets the following variables:
* LD (default) Basic LD measure for haplotypes in all or
specified (virtual) subpopulations. Signed if primary alleles are
specified.
* LD_prime (default) Lewontin's D' measure for haplotypes in all
or specified (virtual) subpopulations. Signed if primary alleles
are specified.
* R2 (default) Correlation LD measure for haplotypes in all or
specified (virtual) subpopulations.
* LD_ChiSq ChiSq statistics for a contingency table with
frequencies of haplotypes in all or specified (virtual)
subpopulations.
* LD_ChiSq_p Single side p-value for the ChiSq statistic.
Degrees of freedom is determined by number of alleles at both loci
and the specification of primary alleles.
* CramerV Normalized ChiSq statistics.
* LD_sp Basic LD measure for haplotypes in each (virtual)
subpopulation.
* LD_prime_sp Lewontin's D' measure for haplotypes in each
(virtual) subpopulation.
* R2_sp R2 measure for haplotypes in each (virtual)
subpopulation.
* LD_ChiSq_sp ChiSq statistics for each (virtual) subpopulation.
* LD_ChiSq_p_sp p value for the ChiSq statistics for each
(virtual) subpopulation.
* CramerV_sp Cramer V statistics for each (virtual)
subpopulation.association: Parameter association accepts a list of
loci, which can be a list of indexes, names, or ALL_AVAIL. At each
locus, one or more statistical tests will be performed to test
association between this locus and individual affection status.
Currently, simuPOP provides the following tests:
* An allele-based Chi-square test using alleles counts. This
test can be applied to loci with more than two alleles, and to
haploid populations.
* A genotype-based Chi-square test using genotype counts. This
test can be applied to loci with more than two alleles (more than
3 genotypes) in diploid populations. aA and Aa are considered to
be the same genotype.
* A genotype-based Cochran-Armitage trend test. This test can
only be applied to diallelic loci in diploid populations. A
codominant model is assumed. This statistic sets the following
variables:
* Allele_ChiSq A dictionary of allele-based Chi-Square
statistics for each locus, using cases and controls in all or
specified (virtual) subpopulations.
* Allele_ChiSq_p (default) A dictionary of p-values of the
corresponding Chi-square statistics.
* Geno_ChiSq A dictionary of genotype-based Chi-Square
statistics for each locus, using cases and controls in all or
specified (virtual) subpopulations.
* Geno_ChiSq_p A dictionary of p-values of the corresponding
genotype-based Chi-square test.
* Armitage_p A dictionary of p-values of the Cochran-Armitage
tests, using cases and controls in all or specified (virtual)
subpopulations.
* Allele_ChiSq_sp A dictionary of allele-based Chi-Square
statistics for each locus, using cases and controls from each
subpopulation.
* Allele_ChiSq_p_sp A dictionary of p-values of allele-based
Chi-square tests, using cases and controls from each (virtual)
subpopulation.
* Geno_ChiSq_sp A dictionary of genotype-based Chi-Square tests
for each locus, using cases and controls from each subpopulation.
* Geno_ChiSq_p_sp A dictionary of p-values of genotype-based
Chi-Square tests, using cases and controls from each
subpopulation.
* Armitage_p_sp A dictionary of p-values of the Cochran-
Armitage tests, using cases and controls from each
subpopulation.neutrality: This parameter performs neutrality tests
(detection of natural selection) on specified loci, which can be a
list of loci indexes, names or ALL_AVAIL. It currently only
outputs Pi, which is the average number of pairwise difference
between loci. This statistic outputs the following variables:
* Pi Mean pairwise difference between all sequences from all or
specified (virtual) subpopulations.
* Pi_sp Mean pairwise difference between all sequences in each
(virtual) subpopulation.structure: Parameter structure accepts a
list of loci at which statistics that measure population structure
are calculated. structure accepts a list of loci indexes, names or
ALL_AVAIL. This parameter currently supports the following
statistics:
* Weir and Cockerham's Fst (1984). This is the most widely used
estimator of Wright's fixation index and can be used to measure
Population differentiation. However, this method is designed to
estimate Fst from samples of larger populations and might not be
appropriate for the calculation of Fst of large populations.
* Nei's Gst (1973). The Gst estimator is another estimator for
Wright's fixation index but it is extended for multi-allele (more
than two alleles) and multi-loci cases. This statistics should be
used if you would like to obtain a true Fst value of a large
Population. Nei's Gst uses only allele frequency information so it
is available for all population type (haploid, diploid etc). Weir
and Cockerham's Fst uses heterozygosity frequency so it is best
for autosome of diploid populations. For non-diploid population,
sex, and mitochondrial DNAs, simuPOP uses expected heterozygosity
(1 - sum p_i^2) when heterozygosity is needed. These statistics
output the following variables:
* F_st (default) The WC84 Fst statistic estimated for all *
specified loci.
* F_is The WC84 Fis statistic estimated for all specified loci.
* F_it The WC84 Fit statistic estimated for all specified loci.
* f_st A dictionary of locus level WC84 Fst values.
* f_is A dictionary of locus level WC84 Fis values.
* f_it A dictionary of locus level WC84 Fit values.
* G_st Nei's Gst statistic estimated for all specified loci.
* g_st A dictionary of Nei's Gst statistic estimated for each
locus.HWE: Parameter HWE accepts a list of loci at which exact
two-side tests for Hardy-Weinberg equilibrium will be performed.
This statistic is only available for diallelic loci in diploid
populations. HWE can be a list of loci indexes, names or
ALL_AVAIL. This statistic outputs the following variables:
* HWE (default) A dictionary of p-values of HWE tests using
genotypes in all or specified (virtual) subpopulations.
* HWE_sp A dictionary of p-values of HWS tests using genotypes
in each (virtual) subpopulation.inbreeding: Inbreeding measured by
Identitcal by Decent (and by State). This statistics go through
all loci of individuals in a diploid population and calculate the
number and proportions of alleles that are identitcal by decent
and by state. Because ancestral information is only available in
lineage module, variables IBD_freq are always set to zero in other
modules. Loci on sex and mitochondrial chromosomes, and non-
diploid populations are currently not supported. This statistic
outputs the following variables:
* IBD_freq (default) The frequency of IBD pairs among all allele
pairs. To use this statistic, the population must be initialized
by operator InitLineage() to assign each ancestral allele an
unique identify.
* IBS_freq (default) The proportion of IBS pairs among all
allele pairs.
* IBD_freq_sp frequency of IBD in each (virtual) subpopulations.
* IBS_freq_sp frequency of IBS in each (virtual)
subpopulations.effectiveSize: Parameter effectiveSize accepts a
list of loci at which the effective population size for the whole
or specified (virtual) subpopulations is calculated. effectiveSize
can be a list of loci indexes, names or ALL_AVAIL. Parameter
subPops is usually used to define samples from which effective
sizes are estimated. This statistic allows the calculation of true
effective size based on number of gametes each parents transmit to
the offspring population (per-locus before and after mating), and
estimated effective size based on sample genotypes. Due to the
temporal natural of some methods, more than one Stat operators
might be needed to calculate effective size. The vars parameter
specified which method to use and which variable to set.
Acceptable values include:
* Ne_demo_base When this variable is set before mating, it
stores IDs of breeding parents and, more importantly, assign an
unique lineage value to alleles at specified loci of each
individual. This feature is only available for lineage modules and
will change lineage values at specified loci of all individuals.
* Ne_demo_base_sp Pre-mating information for each (virtual)
subpopulation, used by variable Ne_demo_sp.
* Ne_demo A dictionary of locus-specific demographic effective
population size, calculated using number of gemetes each parent
transmits to the offspring population. The method is vased on Crow
& Denniston 1988 (Ne = KN-1/k-1+Vk/k) and need variable
Ne_demo_base set before mating. Effective size estimated from this
formula is model dependent and might not be applicable to your
mating schemes.
* Ne_demo_sp Calculate subpopulation-specific effective size.
* Ne_temporal_base When this variable is set in parameter vars,
the Stat operator saves baseline allele frequencies and other
information in this variable, which are used by temporary methods
to estimate effective population size according to changes in
allele frequency between the baseline and present generations.
This variable could be set repeatedly to change baselines.
* Ne_temporal_base_sp Set baseline information for each
(virtual) subpopulation specified.
* Ne_tempoFS_P1 Effective population size, 2.5% and 97.5%
confidence interval for sampling plan 1 as a list of size 3,
estimated using a temporal method as described in Jorde & Ryman
(2007), and as implemented by software tempoFS
(http://www.zoologi.su.se/~ryman/). This variable is set to census
population size if no baseline has been set, and to the temporal
effective size between the present and the baseline generation
otherwise. This method uses population size or sum of
subpopulation sizes of specified (virtual) subpopulations as
census population size for the calculation based on plan 1.
* Ne_tempoFS_P2 Effective population size, 2.5% and 97.5%
confidence interval for sampling plan 2 as a list of size 6,
estimated using a temporal method as described in Jorde & Ryman
(2007). This variable is set to census population size no baseline
has been set, and to the temporal effective size between the
present and the baseline generation otherwise. This method assumes
that the sample is drawn from an infinitely-sized population.
* Ne_tempoFS deprecated, use Ne_tempoFS_P2 instead.
* Ne_tempoFS_P1_sp Estimate effective size of each (virtual)
subpopulation using method Jorde & Ryman 2007, assuming sampling
plan 1. The census population sizes for sampling plan 1 are the
sizes for each subpopulation that contain the specified (virtual)
subpopulations.
* Ne_tempoFS_P2_sp Estimate effective size of each (virtual)
subpopulation using method Jorde & Ryman 2007, assuming sampling
plan 2.
* Ne_tempoFS_sp deprecated, use Ne_tempoFS_P2_sp instead.
* Ne_waples89_P1 Effective population size, 2.5% and 97.5%
confidence interval for sampling plan 1 as a list of size 6,
estimated using a temporal method as described in Waples 1989,
Genetics. Because this is a temporal method, Ne_waples89 estimates
effective size between the present and the baseline generation set
by variable Ne_temporal_base. Census population size will be
returned if no baseline has been set. This method uses population
size or sum of subpopulation sizes of specified (virtual)
subpopulations as census population size for the calculation based
on plan 1.
* Ne_waples89_P2 Effective population size, 2.5% and 97.5%
confidence interval for sampling plan 2 as a list of size 6,
estimated using a temporal method as described in Waples 1989,
Genetics. Because this is a temporal method, Ne_waples89 estimates
effective size between the present and the baseline generation set
by variable Ne_temporal_base. Census population size will be
returned if no baseline has been set.
* Ne_waples89_P1_sp Estimate effective size for each (virtual)
subpopulation using method Waples 89, assuming sampling plan 1.
The census population sizes are the sizes for each subpopulation
that contain the specified (virtual) subpopulation.
* Ne_waples89_P2_sp Estimate effective size for each (virtual)
subpopulation using method Waples 89, assuming sampling plan 2.
* Ne_waples89_sp deprecated, use Ne_waples89_P2_sp instead.
* Ne_LD Lists of length three for effective population size,
2.5% and 97.% confidence interval for cutoff allele frequency 0.,
0.01, 0.02 and 0.05 (as dictionary keys), using a parametric
method, estimated from linkage disequilibrim information of one
sample, using LD method developed by Waples & Do 2006 (LDNe). This
method assumes unlinked loci and uses LD measured from genotypes
at loci. Because this is a sample based method, it should better
be applied to a random sample of the population. 95% CI is
calculated using a Jackknife estimated effective number of
independent alleles. Please refer to relevant papers and the LDNe
user's guide for details.
* Ne_LD_sp Estimate LD-based effective population size for each
specified (virtual) subpopulation.
* Ne_LD_mono A version of Ne_LD that assumes monogamy (see
Waples 2006 for details.
* Ne_LD_mono_sp Ne_LD_mono calculated for each (virtual)
subpopulation.
"""
_simuPOP_op.Stat_swiginit(self, _simuPOP_op.new_Stat(*args, **kwargs))
__swig_destroy__ = _simuPOP_op.delete_Stat
Stat_swigregister = _simuPOP_op.Stat_swigregister
Stat_swigregister(Stat)
class PyOutput(BaseOperator):
    """Operator that writes a fixed string to an output stream each
    time it is applied to a population.
    """
    # Ownership flag of the underlying SWIG-wrapped C++ object.
    thisown = _swig_property(lambda self: self.this.own(),
                             lambda self, flag: self.this.own(flag),
                             doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """Create a PyOutput operator.

        Usage:
          PyOutput(msg="", output=">", begin=0, end=-1, step=1, at=[],
              reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=[])

        The operator outputs the string msg to output (standard
        terminal output by default) whenever it is applied to a
        population. Refer to class BaseOperator for a detailed
        description of common operator parameters such as stage,
        begin and output.
        """
        # Construct the C++ object first, then bind it to this proxy.
        wrapped = _simuPOP_op.new_PyOutput(*args, **kwargs)
        _simuPOP_op.PyOutput_swiginit(self, wrapped)
    __swig_destroy__ = _simuPOP_op.delete_PyOutput
PyOutput_swigregister = _simuPOP_op.PyOutput_swigregister
PyOutput_swigregister(PyOutput)
class Dumper(BaseOperator):
    """Operator that prints the content of a population in a human
    readable format. The output is unstructured and cannot be read
    back into simuPOP, so this operator is mostly useful for dumping
    a small population to a terminal for demonstration and debugging
    purposes.
    """
    # Ownership flag of the underlying SWIG-wrapped C++ object.
    thisown = _swig_property(lambda self: self.this.own(),
                             lambda self, flag: self.this.own(flag),
                             doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """Create a Dumper operator.

        Usage:
          Dumper(genotype=True, structure=True, ancGens=UNSPECIFIED,
              width=1, max=100, loci=[], output=">", begin=0, end=-1, step=1,
              at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=ALL_AVAIL)

        Dumps the genotype structure (if structure is True) and
        genotype (if genotype is True) to output (standard terminal
        output by default). Because a population can be large, only
        the first 100 (parameter max) individuals of the present
        generation (parameter ancGens) are printed. All loci are
        printed unless parameter loci selects a subset, and values of
        all information fields are printed unless parameter infoFields
        selects a subset. If a list of (virtual) subpopulations is
        given, only individuals in those subpopulations are printed.
        Refer to class BaseOperator for common parameters such as
        output and stage.
        """
        # Construct the C++ object first, then bind it to this proxy.
        wrapped = _simuPOP_op.new_Dumper(*args, **kwargs)
        _simuPOP_op.Dumper_swiginit(self, wrapped)
    __swig_destroy__ = _simuPOP_op.delete_Dumper
Dumper_swigregister = _simuPOP_op.Dumper_swigregister
Dumper_swigregister(Dumper)
class SavePopulation(BaseOperator):
    """Operator that saves populations to specified files when
    applied.
    """
    # Ownership flag of the underlying SWIG-wrapped C++ object.
    thisown = _swig_property(lambda self: self.this.own(),
                             lambda self, flag: self.this.own(flag),
                             doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """Create a SavePopulation operator.

        Usage:
          SavePopulation(output="", begin=0, end=-1, step=1, at=[],
              reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=[])

        Saves a population to output when applied to it. All output
        specifications are supported ('', 'filename', 'filename'
        prefixed by one or more '>' characters, and '!expr'), but
        output from different operators always replaces existing
        files (the '>' specification is effectively ignored).
        Parameter subPops is ignored. Refer to class BaseOperator for
        common operator parameters such as stage and begin.
        """
        # Construct the C++ object first, then bind it to this proxy.
        wrapped = _simuPOP_op.new_SavePopulation(*args, **kwargs)
        _simuPOP_op.SavePopulation_swiginit(self, wrapped)
    __swig_destroy__ = _simuPOP_op.delete_SavePopulation
SavePopulation_swigregister = _simuPOP_op.SavePopulation_swigregister
SavePopulation_swigregister(SavePopulation)
class InitSex(BaseOperator):
    """Operator that initializes the sex of individuals, either
    randomly or from a given list of sexes.
    """
    # Ownership flag of the underlying SWIG-wrapped C++ object.
    thisown = _swig_property(lambda self: self.this.own(),
                             lambda self, flag: self.this.own(flag),
                             doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """Create an InitSex operator.

        Usage:
          InitSex(maleFreq=0.5, maleProp=-1, sex=[], begin=0, end=-1,
              step=1, at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=[])

        Assigns MALE or FEMALE to individuals. By default sex is
        assigned randomly with equal probability of male and female;
        this probability can be adjusted through parameter maleFreq,
        or made an exact proportion with parameter maleProp.
        Alternatively, a fixed sequence of sexes can be supplied: with
        sex=[MALE, FEMALE], individuals are assigned MALE and FEMALE
        successively. maleFreq and maleProp are ignored when sex is
        given. If a list of (virtual) subpopulations is specified in
        parameter subPop, only individuals in those subpopulations are
        initialized. Note that the sex sequence, if used, is applied
        repeatedly regardless of (virtual) subpopulation boundaries,
        so it can cover all individuals in a population.
        """
        # Construct the C++ object first, then bind it to this proxy.
        wrapped = _simuPOP_op.new_InitSex(*args, **kwargs)
        _simuPOP_op.InitSex_swiginit(self, wrapped)
    __swig_destroy__ = _simuPOP_op.delete_InitSex
InitSex_swigregister = _simuPOP_op.InitSex_swigregister
InitSex_swigregister(InitSex)
class InitInfo(BaseOperator):
    """Operator that initializes given information fields from a
    sequence of values or a user-provided function such as
    random.random.
    """
    # Ownership flag of the underlying SWIG-wrapped C++ object.
    thisown = _swig_property(lambda self: self.this.own(),
                             lambda self, flag: self.this.own(flag),
                             doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """Create an InitInfo operator.

        Usage:
          InitInfo(values, begin=0, end=-1, step=1, at=[], reps=ALL_AVAIL,
              subPops=ALL_AVAIL, infoFields=[])

        Initializes individual information fields infoFields using a
        sequence of values or a user-defined function. A list of
        values is used sequentially for all individuals and is reused
        when shorter than the number of individuals; the values are
        assigned repeatedly regardless of subpopulation boundaries. A
        Python function is instead called, without any argument,
        whenever a value is needed. If a list of (virtual)
        subpopulations is specified in parameter subPop, only
        individuals in those subpopulations are initialized.
        """
        # Construct the C++ object first, then bind it to this proxy.
        wrapped = _simuPOP_op.new_InitInfo(*args, **kwargs)
        _simuPOP_op.InitInfo_swiginit(self, wrapped)
    __swig_destroy__ = _simuPOP_op.delete_InitInfo
InitInfo_swigregister = _simuPOP_op.InitInfo_swigregister
InitInfo_swigregister(InitInfo)
class InitGenotype(BaseOperator):
    """Assign alleles at all or some loci using frequencies, proportions,
    fixed values, genotypes or haplotypes.  All chromosomes are
    initialized, including unused genotype locations and customized
    chromosomes."""
    # SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(
        lambda obj: obj.this.own(),
        lambda obj, flag: obj.this.own(flag),
        doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
          InitGenotype(freq=[], genotype=[], prop=[], haplotypes=[],
            genotypes=[], loci=ALL_AVAIL, ploidy=ALL_AVAIL, begin=0, end=1,
            step=1, at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=[])

        Without genotypes or haplotypes, freq gives the frequencies of
        alleles 0, 1, 2, ... and prop gives their exact proportions
        (alleles with small proportions might not be assigned at all).
        Values of freq or prop should add up to 1.  Either may also be a
        function accepting optional parameters loc, subPop or vsp
        (virtual subpopulation, passed as a tuple) that returns a list of
        frequencies for alleles 0, 1, etc, or, as a special case, a
        single number for the frequency of allele 0, for each locus or
        (virtual) subpopulation.

        If genotypes is given, it lists genotypes (alleles on different
        strands) each with length equal to population ploidy, and freq or
        prop give frequencies or proportions per genotype; in function
        form these may vary by subpopulation but not by locus.  If
        haplotypes is given, it lists haplotypes (alleles on the same
        strand) with freq or prop per haplotype; haplotypes shorter than
        the loci to fill are reused, and haplotypes given without freq or
        prop are used with equal probability.

        Parameters loci (indexes, names or ALL_AVAIL), ploidy and subPops
        restrict initialization to the specified loci, ploidy and
        (virtual) subpopulations.  Finally, a sequence passed through
        genotype (not genotypes) is used repeatedly to initialize all
        alleles sequentially, similar to Population.setGenotype() but
        restrictable to certain loci and ploidy.
        """
        cxx_obj = _simuPOP_op.new_InitGenotype(*args, **kwargs)
        _simuPOP_op.InitGenotype_swiginit(self, cxx_obj)

    __swig_destroy__ = _simuPOP_op.delete_InitGenotype

# Register the proxy class with the SWIG runtime type system.
InitGenotype_swigregister = _simuPOP_op.InitGenotype_swigregister
InitGenotype_swigregister(InitGenotype)
class InitLineage(BaseOperator):
    """Assign lineage values at all or some loci.  All chromosomes are
    initialized, including unused lineage locations and customized
    chromosomes."""
    # SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(
        lambda obj: obj.this.own(),
        lambda obj, flag: obj.this.own(flag),
        doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
          InitLineage(lineage=[], mode=PER_ALLELE, loci=ALL_AVAIL,
            ploidy=ALL_AVAIL, begin=0, end=1, step=1, at=[], reps=ALL_AVAIL,
            subPops=ALL_AVAIL, infoFields="ind_id")

        Initialize lineages either from an explicit list of values or
        from an information field (default "ind_id") whose value is saved
        as the lineage of modified alleles.  With a list, each value is
        applied to one or more alleles so that each allele (PER_ALLELE,
        the default), alleles on each chromosome (PER_CHROMOSOME),
        chromosomes of each ploidy (PER_PLOIDY), or each individual
        (PER_INDIVIDUAL) share the same lineage; a single value is
        allowed and values are reused if not enough are provided.  An
        empty list yields values 1, 2, 3, ... as unique identifiers.
        With a valid field, its value is used for all alleles of each
        individual when mode is FROM_INFO, or adjusted to positive values
        for alleles on the first ploidy, negative for the second (and so
        on) when mode is FROM_INFO_SIGNED.  Parameters loci, ploidy and
        subPops restrict initialization to the specified loci, ploidy and
        (virtual) subpopulations.
        """
        cxx_obj = _simuPOP_op.new_InitLineage(*args, **kwargs)
        _simuPOP_op.InitLineage_swiginit(self, cxx_obj)

    __swig_destroy__ = _simuPOP_op.delete_InitLineage

# Register the proxy class with the SWIG runtime type system.
InitLineage_swigregister = _simuPOP_op.InitLineage_swigregister
InitLineage_swigregister(InitLineage)
class IdTagger(BaseOperator):
    """Give each individual it is applied to a unique ID, usable to
    identify individuals across generations and to reconstruct a
    Pedigree.  A single global counter is shared by all IdTaggers to
    guarantee uniqueness across populations; IDs start at 1 because most
    software uses 0 as a missing parent value (call reset(startID) to
    restart the sequence).  Usually applied during mating to tag
    offspring, but when applied directly to a population it assigns IDs
    to all its individuals, e.g. via the preOps parameter of
    Simulator.evolve."""
    # SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(
        lambda obj: obj.this.own(),
        lambda obj, flag: obj.this.own(flag),
        doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
          IdTagger(begin=0, end=-1, step=1, at=[], reps=ALL_AVAIL,
            subPops=ALL_AVAIL, output="", infoFields="ind_id")

        Create an IdTagger that assigns a unique, sequentially created ID
        to each individual it is applied to, stored in the information
        field given by infoFields (default "ind_id").  Although a
        during-mating operator, it can set IDs for a whole population
        when applied to it directly.
        """
        cxx_obj = _simuPOP_op.new_IdTagger(*args, **kwargs)
        _simuPOP_op.IdTagger_swiginit(self, cxx_obj)

    __swig_destroy__ = _simuPOP_op.delete_IdTagger

    def reset(self, startID: 'ULONG'=1) -> "void":
        """
        Usage:
          x.reset(startID=1)

        Reset the global individual ID counter so that IdTaggers start
        numbering from startID (default 1) again.
        """
        return _simuPOP_op.IdTagger_reset(self, startID)

# Bind the C-level method and register the proxy class with SWIG.
IdTagger.reset = new_instancemethod(_simuPOP_op.IdTagger_reset, None, IdTagger)
IdTagger_swigregister = _simuPOP_op.IdTagger_swigregister
IdTagger_swigregister(IdTagger)
class InheritTagger(BaseOperator):
    """Pass values of parental information field(s) to the corresponding
    fields of offspring; when two parental values exist (sexual mating),
    parameter mode decides how they are combined."""
    # SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(
        lambda obj: obj.this.own(),
        lambda obj, flag: obj.this.own(flag),
        doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
          InheritTagger(mode=PATERNAL, begin=0, end=-1, step=1, at=[],
            reps=ALL_AVAIL, subPops=ALL_AVAIL, output="", infoFields=[])

        Copy the values of the information fields listed in infoFields
        from parent(s) to offspring.  With a single parent the values are
        copied directly; with two parents, mode selects the value passed:

        * mode=MATERNAL        the mother's value
        * mode=PATERNAL        the father's value
        * mode=MEAN            the average of the two values
        * mode=MAXIMUM         the larger of the two values
        * mode=MINIMUM         the smaller of the two values
        * mode=SUMMATION       the sum of the two values
        * mode=MULTIPLICATION  the product of the two values

        A RuntimeError is raised if any of the parents does not exist.
        This operator does not support parameter subPops and does not
        produce any output.
        """
        cxx_obj = _simuPOP_op.new_InheritTagger(*args, **kwargs)
        _simuPOP_op.InheritTagger_swiginit(self, cxx_obj)

    __swig_destroy__ = _simuPOP_op.delete_InheritTagger

# Register the proxy class with the SWIG runtime type system.
InheritTagger_swigregister = _simuPOP_op.InheritTagger_swigregister
InheritTagger_swigregister(InheritTagger)
class SummaryTagger(BaseOperator):
    """Summarize one or more parental information fields into an
    information field of an offspring; sexual mating contributes two
    sets of parental values."""
    # SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(
        lambda obj: obj.this.own(),
        lambda obj, flag: obj.this.own(flag),
        doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
          SummaryTagger(mode=MEAN, begin=0, end=-1, step=1, at=[],
            reps=ALL_AVAIL, subPops=ALL_AVAIL, output="", infoFields=[])

        Summarize the parental fields infoFields[:-1] into the offspring
        field infoFields[-1].  Parameter mode selects the summary:

        * mode=MEAN            the average of the values
        * mode=MAXIMUM         the maximum of the values
        * mode=MINIMUM         the minimum of the values
        * mode=SUMMATION       the sum of the values
        * mode=MULTIPLICATION  the product of the values

        This operator does not support parameter subPops and does not
        produce any output.
        """
        cxx_obj = _simuPOP_op.new_SummaryTagger(*args, **kwargs)
        _simuPOP_op.SummaryTagger_swiginit(self, cxx_obj)

    __swig_destroy__ = _simuPOP_op.delete_SummaryTagger

# Register the proxy class with the SWIG runtime type system.
SummaryTagger_swigregister = _simuPOP_op.SummaryTagger_swigregister
SummaryTagger_swigregister(SummaryTagger)
class ParentsTagger(BaseOperator):
    """Record the indexes of each offspring's parents (relative to the
    parental generation) in information fields, default "father_idx" and
    "mother_idx".  Specify a single field for asexual mating schemes
    where each offspring has one parent.  The recorded values are meant
    for looking up parents in a multi-generational Population."""
    # SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(
        lambda obj: obj.this.own(),
        lambda obj, flag: obj.this.own(flag),
        doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
          ParentsTagger(begin=0, end=-1, step=1, at=[], reps=ALL_AVAIL,
            subPops=ALL_AVAIL, output="", infoFields=["father_idx",
            "mother_idx"])

        Record parental indexes when applied to an offspring during
        mating.  With two fields (the default ['father_idx',
        'mother_idx']) the father's and mother's indexes are stored, with
        -1 for a missing parent.  With a single field, the index of the
        first valid parent (the father when both are valid) is stored.
        Parameters output and subPops are ignored.
        """
        cxx_obj = _simuPOP_op.new_ParentsTagger(*args, **kwargs)
        _simuPOP_op.ParentsTagger_swiginit(self, cxx_obj)

    __swig_destroy__ = _simuPOP_op.delete_ParentsTagger

# Register the proxy class with the SWIG runtime type system.
ParentsTagger_swigregister = _simuPOP_op.ParentsTagger_swigregister
ParentsTagger_swigregister(ParentsTagger)
class OffspringTagger(BaseOperator):
    """Record each offspring's index within its family (offspring sharing
    the same parent or parents) in an information field, default
    "offspring_idx".  Can be used to control the number of offspring
    during mating."""
    # SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(
        lambda obj: obj.this.own(),
        lambda obj, flag: obj.this.own(flag),
        doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
          OffspringTagger(begin=0, end=-1, step=1, at=[], reps=ALL_AVAIL,
            subPops=ALL_AVAIL, output="", infoFields=ALL_AVAIL)

        Record the index of offspring within a family.  The index counts
        successful offspring productions during a mating event, so it is
        not advanced for discarded offspring, and it resets even when
        adjacent families share the same parents.  Parameters stage,
        output, and subPops are ignored.
        """
        cxx_obj = _simuPOP_op.new_OffspringTagger(*args, **kwargs)
        _simuPOP_op.OffspringTagger_swiginit(self, cxx_obj)

    __swig_destroy__ = _simuPOP_op.delete_OffspringTagger

# Register the proxy class with the SWIG runtime type system.
OffspringTagger_swigregister = _simuPOP_op.OffspringTagger_swigregister
OffspringTagger_swigregister(OffspringTagger)
class PedigreeTagger(BaseOperator):
    """Record the IDs of each offspring's parents in information fields,
    default "father_id" and "mother_id".  Specify a single field for
    asexual mating schemes where each offspring has one parent.  The
    recorded values are meant to capture full pedigree information of an
    evolutionary process."""
    # SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(
        lambda obj: obj.this.own(),
        lambda obj, flag: obj.this.own(flag),
        doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
          PedigreeTagger(idField="ind_id", output="", outputFields=[],
            outputLoci=[], begin=0, end=-1, step=1, at=[], reps=ALL_AVAIL,
            subPops=ALL_AVAIL, infoFields=["father_id", "mother_id"])

        Record parental IDs when applied to an offspring during mating.
        With two fields (the default ['father_id', 'mother_id']), the
        IDs of father and mother, as stored in their idField (default
        "ind_id"), are recorded, with -1 for a missing parent.  With a
        single field, the ID of the first valid parent (the father when
        both are valid) is recorded.

        By default no output is produced.  Given a valid output stream
        (of the form '>>filename' so output is concatenated), each
        offspring is written as: off_id father_id mother_id M/F A/U
        fields genotype, where fields and genotype come from
        outputFields and outputLoci; father_id or mother_id is omitted
        when only one parent is involved.  The file can be loaded with
        function loadPedigree.  Because only offspring are written,
        individuals in the top-most ancestral generation are not;
        loadPedigree reconstructs those that have offspring, albeit with
        missing information fields and genotype.  To write a complete
        pedigree, apply this operator before evolution via the initOps
        parameter of Population.evolve or Simulator.evolve, which writes
        the whole initial population in the same format.  Since sex,
        affection status and genotype can be changed by other operators,
        this operator should usually be applied after all of them.
        """
        cxx_obj = _simuPOP_op.new_PedigreeTagger(*args, **kwargs)
        _simuPOP_op.PedigreeTagger_swiginit(self, cxx_obj)

    __swig_destroy__ = _simuPOP_op.delete_PedigreeTagger

# Register the proxy class with the SWIG runtime type system.
PedigreeTagger_swigregister = _simuPOP_op.PedigreeTagger_swigregister
PedigreeTagger_swigregister(PedigreeTagger)
class PyTagger(BaseOperator):
    """Hybrid tagger: feed selected parental information fields to a
    user-provided Python function and store its return values in the
    corresponding offspring fields."""
    # SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(
        lambda obj: obj.this.own(),
        lambda obj, flag: obj.this.own(flag),
        doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
          PyTagger(func=None, begin=0, end=-1, step=1, at=[],
            reps=ALL_AVAIL, subPops=ALL_AVAIL, output="", infoFields=[])

        Pass parental values of the information fields named by the
        parameters of func to that function, and assign its return
        values to the same fields of each offspring.  With two parents,
        maternal values follow paternal values: for func(A, B), two
        tuples with parental values of fields 'A' and 'B' are passed and
        the return values are stored in fields 'A' and 'B'.  The return
        value should be a list, although a single value is accepted when
        only one field is specified.  Parameters stage, output and
        subPops are ignored.
        """
        cxx_obj = _simuPOP_op.new_PyTagger(*args, **kwargs)
        _simuPOP_op.PyTagger_swiginit(self, cxx_obj)

    __swig_destroy__ = _simuPOP_op.delete_PyTagger

# Register the proxy class with the SWIG runtime type system.
PyTagger_swigregister = _simuPOP_op.PyTagger_swigregister
PyTagger_swigregister(PyTagger)
class Migrator(BaseOperator):
    """Migrate individuals from (virtual) subpopulations to other
    subpopulations, either according to a destination subpopulation
    pre-stored in an information field (default "migrate_to"), or
    randomly according to a migration matrix.

    In the information-field case, each individual's field value is its
    destination subpopulation ID; negative values are not allowed.  If
    subPops is given, only individuals in those (virtual) subpopulations
    migrate and everyone else stays put.

    In the matrix case, element (i, j) is the probability, proportion or
    count of migration from subpopulation i to j.  The matrix is m by m
    by default for m subpopulations; if subPops (length m) and/or
    toSubPops (length n) are given, it is m by n for the specified
    source and destination subpopulations.  Sources may be virtual
    subpopulations (e.g. to migrate males and females at different
    rates); a destination in toSubPops that does not exist is created,
    and a subpopulation emptied by migration is kept.

    By probability, each row gives the chances of migrating to each
    destination; each individual's destination is drawn randomly, and
    the probability of staying is computed automatically (the
    corresponding matrix elements are ignored).  By proportion, each row
    gives the fraction of the source to move to each destination; the
    number of migrants is fixed before individuals are chosen at random.
    By counts, each row gives the exact number of individuals to move to
    each destination, chosen at random.

    The operator walks all source (virtual) subpopulations and writes
    each individual's destination to the information field.  Results may
    be unexpected when individuals migrate from overlapping virtual
    subpopulations."""
    # SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(
        lambda obj: obj.this.own(),
        lambda obj, flag: obj.this.own(flag),
        doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
          Migrator(rate=[], mode=BY_PROBABILITY, toSubPops=ALL_AVAIL,
            begin=0, end=-1, step=1, at=[], reps=ALL_AVAIL,
            subPops=ALL_AVAIL, infoFields="migrate_to")

        Move individuals from source (virtual) subpopulations subPops
        (default: all) to destination subpopulations toSubPops (default:
        all), either according to existing values in information field
        infoFields[0], or randomly according to migration matrix rate,
        whose size must match the number of source and destination
        subpopulations.  Parameter mode interprets the matrix elements
        as probabilities to migrate (BY_PROBABILITY), proportions of the
        source to migrate (BY_PROPORTION), numbers of migrants
        (BY_COUNTS), or ignores the matrix entirely (BY_IND_INFO).  In
        the last case subPops is respected (only individuals in the
        specified (virtual) subpopulations migrate) but toSubPops is
        ignored.  See operator BaseOperator for the remaining
        parameters.
        """
        cxx_obj = _simuPOP_op.new_Migrator(*args, **kwargs)
        _simuPOP_op.Migrator_swiginit(self, cxx_obj)

    __swig_destroy__ = _simuPOP_op.delete_Migrator

# Register the proxy class with the SWIG runtime type system.
Migrator_swigregister = _simuPOP_op.Migrator_swigregister
Migrator_swigregister(Migrator)
class BackwardMigrator(BaseOperator):
    """Migrate individuals between all available or specified
    subpopulations according to a backward migration matrix.

    Unlike Migrator, element $m_{ij}$ here is the probability
    (proportion or count) of individuals in subpopulation i having
    originated from subpopulation j, i.e. migration from j to i.  Due to
    the limits of this model the operator does not support migration by
    information field, migration by count (mode = BY_COUNT), migration
    from virtual subpopulations, migration between different numbers of
    subpopulations, or creation of new subpopulations.

    By probability, each row of the matrix (for a destination
    subpopulation) gives the chances of originating from each source;
    each individual's source is drawn randomly, and the probability of
    originating from the present subpopulation is computed automatically
    (the corresponding elements are ignored).  By proportion, each row
    gives the fraction originating from each source, and the number of
    migrants per source is fixed before individuals are chosen at
    random.

    A backward matrix describes the result of migration, not how to
    perform it, so the corresponding forward matrix is derived from the
    backward matrix and current population sizes at each generation (in
    theory it differs across generations).  This derivation is not
    always feasible; an error is raised when no valid ending population
    size or forward matrix can be determined.  See the simuPOP user's
    guide for the theory behind forward and backward migration
    matrices."""
    # SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(
        lambda obj: obj.this.own(),
        lambda obj, flag: obj.this.own(flag),
        doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
          BackwardMigrator(rate=[], mode=BY_PROBABILITY, begin=0, end=-1,
            step=1, at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL,
            infoFields="migrate_to")

        Move individuals between subPop subpopulations randomly
        according to backward migration matrix rate, whose size must
        match the number of subpopulations.  Parameter mode interprets
        the matrix elements as probabilities to originate from source
        subpopulations (BY_PROBABILITY) or proportions of individuals
        originating from the source (virtual) subpopulations
        (BY_PROPORTION); migration by count is not supported.  See
        operator BaseOperator for the remaining parameters.
        """
        cxx_obj = _simuPOP_op.new_BackwardMigrator(*args, **kwargs)
        _simuPOP_op.BackwardMigrator_swiginit(self, cxx_obj)

    __swig_destroy__ = _simuPOP_op.delete_BackwardMigrator

# Register the proxy class with the SWIG runtime type system.
BackwardMigrator_swigregister = _simuPOP_op.BackwardMigrator_swigregister
BackwardMigrator_swigregister(BackwardMigrator)
class SplitSubPops(BaseOperator):
    """Split the given subpopulations according to sizes of the resulting
    subpopulations, proportions of individuals, or an information field.
    The resulting subpopulations keep the original subpopulation's
    name."""
    # SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(
        lambda obj: obj.this.own(),
        lambda obj, flag: obj.this.own(flag),
        doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
          SplitSubPops(subPops=ALL_AVAIL, sizes=[], proportions=[],
            names=[], randomize=True, begin=0, end=-1, step=1, at=[],
            reps=ALL_AVAIL, infoFields=[])

        Split the subpopulations in subPops into finer ones.  A single
        subpopulation is acceptable but virtual subpopulations are not;
        all subpopulations are split when subPops is unspecified.  Three
        ways to split:

        * With sizes, each subpopulation is split into pieces of the
          given sizes, which should add up to the size of all original
          subpopulations.
        * With proportions, each subpopulation is split into pieces with
          the given fractions of individuals; proportions should add up
          to 1.
        * With an information field (infoFields), individuals sharing a
          value of that field are grouped into one subpopulation, so the
          number of results equals the number of distinct values.

        When randomize is True (the default), individuals are shuffled
        before a split; this removes artificial ordering introduced by,
        for example, some non-random mating schemes.  The original order
        is, however, not guaranteed even with randomize=False.  Unless
        the last subpopulation is split, indexes of existing
        subpopulations change.  A named subpopulation passes its name to
        all subpopulations split from it; alternatively, names assigns
        names to the new subpopulations — since the same set of names is
        reused for every split subpopulation, this is not recommended
        when several are split.  See operator BaseOperator for the
        remaining parameters.

        Note:
          Unlike operator Migrator, this operator does not require an
          information field such as migrate_to.
        """
        cxx_obj = _simuPOP_op.new_SplitSubPops(*args, **kwargs)
        _simuPOP_op.SplitSubPops_swiginit(self, cxx_obj)

    __swig_destroy__ = _simuPOP_op.delete_SplitSubPops

# Register the proxy class with the SWIG runtime type system.
SplitSubPops_swigregister = _simuPOP_op.SplitSubPops_swigregister
SplitSubPops_swigregister(SplitSubPops)
class MergeSubPops(BaseOperator):
    """Merge subpopulations subPops into a single subpopulation; all
    subpopulations are merged when subPops is unspecified.  Virtual
    subpopulations are not allowed in subPops."""
    # SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(
        lambda obj: obj.this.own(),
        lambda obj, flag: obj.this.own(flag),
        doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
          MergeSubPops(subPops=ALL_AVAIL, name="", begin=0, end=-1,
            step=1, at=[], reps=ALL_AVAIL, infoFields=[])

        Merge the subpopulations listed in subPops (default: all) into
        one subpopulation, which takes the name of the first merged
        subpopulation unless a new name is given.  See operator
        BaseOperator for the remaining parameters.
        """
        cxx_obj = _simuPOP_op.new_MergeSubPops(*args, **kwargs)
        _simuPOP_op.MergeSubPops_swiginit(self, cxx_obj)

    __swig_destroy__ = _simuPOP_op.delete_MergeSubPops

# Register the proxy class with the SWIG runtime type system.
MergeSubPops_swigregister = _simuPOP_op.MergeSubPops_swigregister
MergeSubPops_swigregister(MergeSubPops)
class ResizeSubPops(BaseOperator):
    """Resize subpopulations to specified sizes, adding or removing
    individuals as the new sizes require."""
    # SWIG ownership flag for the wrapped C++ object.
    thisown = _swig_property(
        lambda obj: obj.this.own(),
        lambda obj, flag: obj.this.own(flag),
        doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
          ResizeSubPops(subPops=ALL_AVAIL, sizes=[], proportions=[],
            propagate=True, begin=0, end=-1, step=1, at=[], reps=ALL_AVAIL,
            infoFields=[])

        Resize the subpopulations in subPops (default: all) either to
        the new sizes given in sizes or to sizes proportional to the
        originals (proportions).  Shrinking a subpopulation removes the
        extra individuals; growing one inserts individuals with empty
        genotype, unless propagate is True (the default), in which case
        existing individuals are copied sequentially, repeatedly if
        needed.  See operator BaseOperator for the remaining parameters.
        """
        cxx_obj = _simuPOP_op.new_ResizeSubPops(*args, **kwargs)
        _simuPOP_op.ResizeSubPops_swiginit(self, cxx_obj)

    __swig_destroy__ = _simuPOP_op.delete_ResizeSubPops

# Register the proxy class with the SWIG runtime type system.
ResizeSubPops_swigregister = _simuPOP_op.ResizeSubPops_swigregister
ResizeSubPops_swigregister(ResizeSubPops)
class BaseMutator(BaseOperator):
    """
    Details:
        Class mutator is the base class of all mutators. It handles all
        the work of picking an allele at specified loci from certain
        (virtual) subpopulation with certain probability, and calling a
        derived mutator to mutate the allele. Alleles can be changed
        before and after mutation if existing allele numbers do not match
        those of a mutation model.
    """
    # SWIG proxy boilerplate: 'thisown' flags Python-side ownership of the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            BaseMutator(rates=[], loci=ALL_AVAIL, mapIn=[], mapOut=[],
              context=0, output="", begin=0, end=-1, step=1, at=[],
              reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields="ind_id",
              lineageMode=FROM_INFO)
        Details:
            A mutator mutates alleles from one state to another with given
            probability. This base mutator does not perform any mutation but
            it defines common behaviors of all mutators. By default, a
            mutator mutates all alleles in all populations of a simulator at
            all generations. A number of parameters can be used to restrict
            mutations to certain generations (parameters begin, end, step and
            at), replicate populations (parameter rep), (virtual)
            subpopulations (parameter subPops) and loci (parameter loci).
            Parameter loci can be a list of loci indexes, names, list of
            chromosome position pairs, ALL_AVAIL, or a function with optional
            parameter pop that will be called at each generation to determine
            indexes of loci. Please refer to class BaseOperator for a detailed
            explanation of these parameters. Parameter rate or its
            equivalence specifies the probability that a mutation event
            happens. The exact form and meaning of rate is mutator-specific.
            If a single rate is specified, it will be applied to all loci. If
            a list of mutation rates are given, they will be applied to each
            locus specified in parameter loci. Note that not all mutators
            allow specification of multiple mutation rate, especially when the
            mutation rate itself is a list or matrix. Alleles at a locus are
            non-negative numbers 0, 1, ... up to the maximum allowed allele
            for the loaded module (1 for binary, 255 for short and 65535 for
            long modules). Whereas some general mutation models treat alleles
            as numbers, other models assume specific interpretation of
            alleles. For example, an AcgtMutator assumes alleles 0, 1, 2 and 3
            as nucleotides A, C, G and T. Using a mutator that is incompatible
            with your simulation will certainly yield erroneous results. If
            your simulation assumes different alleles with a mutation model,
            you can map an allele to the allele used in the model and map the
            mutated allele back. This is achieved using a mapIn list with its
            i-th item being the corresponding allele of real allele i, and a
            mapOut list with its i-th item being the real allele of allele i
            assumed in the model. For example mapIn=[0, 0, 1] and mapOut=[1,
            2] would allow the use of a SNPMutator to mutate between alleles 1
            and 2, instead of 0 and 1. Parameters mapIn and mapOut also accept
            a user-defined Python function that returns a corresponding allele
            for a given allele. This allows easier mapping between a large
            number of alleles and advanced models such as random emission of
            alleles. If a valid information field is specified for parameter
            infoFields (default to ind_id) for modules with lineage allele
            type, the lineage of the mutated alleles will be the ID (stored in
            the first field of infoFields) of individuals that harbor the
            mutated alleles if lineageMode is set to FROM_INFO (default). If
            lineageMode is set to FROM_INFO_SIGNED, the IDs will be assigned a
            sign depending on the ploidy the mutation happens (1 for ploidy 0,
            -1 for ploidy 1, etc). The lineage information will be transmitted
            along with the alleles so this feature allows you to track the
            source of mutants during evolution. A mutator by default does
            not produce any output. However, if an non-empty output is
            specified, the operator will output generation number, locus,
            ploidy, original allele, mutant, and values of all information
            field specified by parameter infoFields (e.g. individual ID if
            ind_id is specified). Some mutation models are context dependent.
            Namely, how an allele mutates will depend on its adjacent alleles.
            Whereas most simuPOP mutators are context independent, some of
            them accept a parameter context which is the number of alleles to
            the left and right of the mutated allele. For example context=1
            will make the alleles to the immediate left and right to a mutated
            allele available to a mutator. These alleles will be mapped in if
            parameter mapIn is defined. How exactly a mutator makes use of
            these information is mutator dependent.
        """
        # Construct the underlying C++ object and bind it to this proxy instance.
        _simuPOP_op.BaseMutator_swiginit(self, _simuPOP_op.new_BaseMutator(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_BaseMutator
# Register the proxy class with the SWIG runtime type system.
BaseMutator_swigregister = _simuPOP_op.BaseMutator_swigregister
BaseMutator_swigregister(BaseMutator)
class MatrixMutator(BaseMutator):
    """
    Details:
        A matrix mutator mutates alleles 0, 1, ..., n-1 using a n by n
        matrix, which specifies the probability at which each allele
        mutates to another. Conceptually speaking, this mutator goes
        through all mutable allele and mutate it to another state
        according to probabilities in the corresponding row of the rate
        matrix. Only one mutation rate matrix can be specified which will
        be used for all specified loci.
    """
    # SWIG proxy boilerplate: 'thisown' flags Python-side ownership of the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            MatrixMutator(rate, loci=ALL_AVAIL, mapIn=[], mapOut=[],
              output="", begin=0, end=-1, step=1, at=[], reps=ALL_AVAIL,
              subPops=ALL_AVAIL, infoFields="ind_id", lineageMode=FROM_INFO)
        Details:
            Create a mutator that mutates alleles 0, 1, ..., n-1 using a n by
            n matrix rate. Item (i,j) of this matrix specifies the probability
            at which allele i mutates to allele j. Diagonal items (i, i) are
            ignored because they are automatically determined by other
            probabilities. Only one mutation rate matrix can be specified
            which will be used for all loci in the applied population, or loci
            specified by parameter loci. If alleles other than 0, 1, ..., n-1
            exist in the population, they will not be mutated although a
            warning message will be given if debugging code DBG_WARNING is
            turned on. Please refer to classes mutator and BaseOperator for
            detailed explanation of other parameters.
        """
        # Construct the underlying C++ object and bind it to this proxy instance.
        _simuPOP_op.MatrixMutator_swiginit(self, _simuPOP_op.new_MatrixMutator(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_MatrixMutator
# Register the proxy class with the SWIG runtime type system.
MatrixMutator_swigregister = _simuPOP_op.MatrixMutator_swigregister
MatrixMutator_swigregister(MatrixMutator)
class KAlleleMutator(BaseMutator):
    """
    Details:
        This mutator implements a k-allele mutation model that assumes k
        allelic states (alleles 0, 1, 2, ..., k-1) at each locus. When a
        mutation event happens, it mutates an allele to any other states
        with equal probability.
    """
    # SWIG proxy boilerplate: 'thisown' flags Python-side ownership of the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            KAlleleMutator(k, rates=[], loci=ALL_AVAIL, mapIn=[], mapOut=[],
              output="", begin=0, end=-1, step=1, at=[], reps=ALL_AVAIL,
              subPops=ALL_AVAIL, infoFields="ind_id", lineageMode=FROM_INFO)
        Details:
            Create a k-allele mutator that mutates alleles to one of the other
            k-1 alleles with equal probability. This mutator by default
            applies to all loci unless parameter loci is specified. A single
            mutation rate will be used for all loci if a single value of
            parameter rates is given. Otherwise, a list of mutation rates can
            be specified for each locus in parameter loci. If the mutated
            allele is larger than or equal to k, it will not be mutated. A
            warning message will be displayed if debugging code DBG_WARNING is
            turned on. Please refer to classes mutator and BaseOperator for
            descriptions of other parameters.
        """
        # Construct the underlying C++ object and bind it to this proxy instance.
        _simuPOP_op.KAlleleMutator_swiginit(self, _simuPOP_op.new_KAlleleMutator(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_KAlleleMutator
# Register the proxy class with the SWIG runtime type system.
KAlleleMutator_swigregister = _simuPOP_op.KAlleleMutator_swigregister
KAlleleMutator_swigregister(KAlleleMutator)
class StepwiseMutator(BaseMutator):
    """
    Details:
        A stepwise mutation model treats alleles at a locus as the number
        of tandem repeats of microsatellite or minisatellite markers. When
        a mutation event happens, the number of repeats (allele) either
        increase or decrease. A standard stepwise mutation model increases
        or decreases an allele by 1 with equal probability. More complex
        models (generalized stepwise mutation model) are also allowed.
        Note that an allele cannot be mutated beyond boundaries (0 and
        maximum allowed allele).
    """
    # SWIG proxy boilerplate: 'thisown' flags Python-side ownership of the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            StepwiseMutator(rates=[], loci=ALL_AVAIL, incProb=0.5,
              maxAllele=0, mutStep=[], mapIn=[], mapOut=[], output="",
              begin=0, end=-1, step=1, at=[], reps=ALL_AVAIL,
              subPops=ALL_AVAIL, infoFields="ind_id", lineageMode=FROM_INFO)
        Details:
            Create a stepwise mutation mutator that mutates an allele by
            increasing or decreasing it. This mutator by default applies to
            all loci unless parameter loci is specified. A single mutation
            rate will be used for all loci if a single value of parameter
            rates is given. Otherwise, a list of mutation rates can be
            specified for each locus in parameter loci. When a mutation event
            happens, this operator increases or decreases an allele by mutStep
            steps. Acceptable input of parameter mutStep include
            * A number: This is the default mode with default value 1.
            * (GEOMETRIC_DISTRIBUTION, p): The number of steps follows a
            geometric distribution with parameter p.
            * A Python function: This user defined function accepts the
            allele being mutated and return the steps to mutate. The mutation
            process is usually neutral in the sense that mutating up and down
            is equally likely. You can adjust parameter incProb to change this
            behavior. If you need to use other generalized stepwise mutation
            models, you can implement them using a PyMutator. If performance
            becomes a concern, I may add them to this operator if provided
            with a reliable reference.
        """
        # Construct the underlying C++ object and bind it to this proxy instance.
        _simuPOP_op.StepwiseMutator_swiginit(self, _simuPOP_op.new_StepwiseMutator(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_StepwiseMutator
# Register the proxy class with the SWIG runtime type system.
StepwiseMutator_swigregister = _simuPOP_op.StepwiseMutator_swigregister
StepwiseMutator_swigregister(StepwiseMutator)
class PyMutator(BaseMutator):
    """
    Details:
        This hybrid mutator accepts a Python function that determines how
        to mutate an allele when an mutation event happens.
    """
    # SWIG proxy boilerplate: 'thisown' flags Python-side ownership of the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            PyMutator(rates=[], loci=ALL_AVAIL, func=None, context=0,
              mapIn=[], mapOut=[], output="", begin=0, end=-1, step=1, at=[],
              reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields="ind_id",
              lineageMode=FROM_INFO)
        Details:
            Create a hybrid mutator that uses a user-provided function to
            mutate an allele when a mutation event happens. This function
            (parameter func) accepts the allele to be mutated as parameter
            allele, locus index locus, and optional array of alleles as
            parameter context, which are context alleles to the left and right
            of the mutated allele. Invalid context alleles (e.g. left allele
            to the first locus of a chromosome) will be marked by -1. The
            return value of this function will be used to mutate the passed
            allele. The passed, returned and context alleles might be altered
            if parameter mapIn and mapOut are used. This mutator by default
            applies to all loci unless parameter loci is specified. A single
            mutation rate will be used for all loci if a single value of
            parameter rates is given. Otherwise, a list of mutation rates can
            be specified for each locus in parameter loci. Please refer to
            classes mutator and BaseOperator for descriptions of other
            parameters.
        """
        # Construct the underlying C++ object and bind it to this proxy instance.
        _simuPOP_op.PyMutator_swiginit(self, _simuPOP_op.new_PyMutator(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_PyMutator
# Register the proxy class with the SWIG runtime type system.
PyMutator_swigregister = _simuPOP_op.PyMutator_swigregister
PyMutator_swigregister(PyMutator)
class MixedMutator(BaseMutator):
    """
    Details:
        This mixed mutator accepts a list of mutators and use one of them
        to mutate an allele when an mutation event happens.
    """
    # SWIG proxy boilerplate: 'thisown' flags Python-side ownership of the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            MixedMutator(rates=[], loci=ALL_AVAIL, mutators=[], prob=[],
              mapIn=[], mapOut=[], context=0, output="", begin=0, end=-1,
              step=1, at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL,
              infoFields="ind_id", lineageMode=FROM_INFO)
        Details:
            Create a mutator that randomly chooses one of the specified
            mutators to mutate an allele when a mutation event happens. The
            mutators are chosen according to a list of probabilities ( prob)
            that should add up to 1. The passed and returned alleles might be
            changed if parameters mapIn and mapOut are used. Most parameters,
            including loci, mapIn, mapOut, rep, and subPops of mutators
            specified in parameter mutators are ignored. This mutator by
            default applies to all loci unless parameter loci is specified.
            Please refer to classes mutator and BaseOperator for descriptions
            of other parameters.
        """
        # Construct the underlying C++ object and bind it to this proxy instance.
        _simuPOP_op.MixedMutator_swiginit(self, _simuPOP_op.new_MixedMutator(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_MixedMutator
# Register the proxy class with the SWIG runtime type system.
MixedMutator_swigregister = _simuPOP_op.MixedMutator_swigregister
MixedMutator_swigregister(MixedMutator)
class ContextMutator(BaseMutator):
    """
    Details:
        This context-dependent mutator accepts a list of mutators and use
        one of them to mutate an allele depending on the context of the
        mutated allele.
    """
    # SWIG proxy boilerplate: 'thisown' flags Python-side ownership of the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            ContextMutator(rates=[], loci=ALL_AVAIL, mutators=[],
              contexts=[], mapIn=[], mapOut=[], output="", begin=0, end=-1,
              step=1, at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL,
              infoFields="ind_id", lineageMode=FROM_INFO)
        Details:
            Create a mutator that choose one of the specified mutators to
            mutate an allele when a mutation event happens. The mutators are
            chosen according to the context of the mutated allele, which is
            specified as a list of alleles to the left and right of an allele
            ( contexts). For example, contexts=[(0,0), (0,1), (1,1)] indicates
            which mutators should be used to mutate allele X in the context of
            0X0, 0X1, and 1X1. A context can include more than one alleles at
            both left and right sides of a mutated allele but all contexts
            should have the same (even) number of alleles. If an allele does
            not have full context (e.g. when a locus is the first locus on a
            chromosome), unavailable alleles will be marked as -1. There
            should be a mutator for each context but an additional mutator can
            be specified as the default mutator for unmatched contexts. If
            parameters mapIn is specified, both mutated allele and its context
            alleles will be mapped. Most parameters, including loci, mapIn,
            mapOut, rep, and subPops of mutators specified in parameter
            mutators are ignored. This mutator by default applies to all loci
            unless parameter loci is specified. Please refer to classes
            mutator and BaseOperator for descriptions of other parameters.
        """
        # Construct the underlying C++ object and bind it to this proxy instance.
        _simuPOP_op.ContextMutator_swiginit(self, _simuPOP_op.new_ContextMutator(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_ContextMutator
# Register the proxy class with the SWIG runtime type system.
ContextMutator_swigregister = _simuPOP_op.ContextMutator_swigregister
ContextMutator_swigregister(ContextMutator)
class PointMutator(BaseOperator):
    """
    Details:
        A point mutator is different from all other mutators because
        mutations in this mutator do not happen randomly. Instead, it
        happens to specific loci and mutate an allele to a specific state,
        regardless of its original state. This mutator is usually used to
        introduce a mutant to a population.
    """
    # SWIG proxy boilerplate: 'thisown' flags Python-side ownership of the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            PointMutator(loci, allele, ploidy=0, inds=[], output="",
              begin=0, end=-1, step=1, at=[], reps=ALL_AVAIL, subPops=0,
              infoFields="ind_id", lineageMode=FROM_INFO)
        Details:
            Create a point mutator that mutates alleles at specified loci to a
            given allele of individuals inds. If there are multiple alleles at
            a locus (e.g. individuals in a diploid population), only the first
            allele is mutated unless indexes of alleles are listed in
            parameter ploidy. This operator is by default applied to
            individuals in the first subpopulation but you can apply it to a
            different or more than one (virtual) subpopulations using
            parameter subPops (AllAvail is also accepted). Please refer to
            class BaseOperator for detailed descriptions of other parameters.
        """
        # Construct the underlying C++ object and bind it to this proxy instance.
        _simuPOP_op.PointMutator_swiginit(self, _simuPOP_op.new_PointMutator(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_PointMutator
# Register the proxy class with the SWIG runtime type system.
PointMutator_swigregister = _simuPOP_op.PointMutator_swigregister
PointMutator_swigregister(PointMutator)
class RevertFixedSites(BaseOperator):
    """
    Details:
        This operator checks all or specified loci of a population and
        revert all mutants at loci to wildtype alleles if they are fixed
        in the population. If a list of (virtual) subpopulations are
        specified, alleles are reverted if they are fixed in each
        subpopulation, regardless if the alleles are fixed in other
        subpopulations.
    """
    # SWIG proxy boilerplate: 'thisown' flags Python-side ownership of the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            RevertFixedSites(loci=ALL_AVAIL, output="", begin=0, end=-1,
              step=1, at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL,
              infoFields="ind_id")
        Details:
            Create an operator to set all alleles to zero at specified
            (parameter loci) or all loci if they are fixed (having one non-
            zero allele) at these loci. If parameter subPops are specified,
            only individuals in these subpopulations are considered.
        """
        # Construct the underlying C++ object and bind it to this proxy instance.
        _simuPOP_op.RevertFixedSites_swiginit(self, _simuPOP_op.new_RevertFixedSites(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_RevertFixedSites
# Register the proxy class with the SWIG runtime type system.
RevertFixedSites_swigregister = _simuPOP_op.RevertFixedSites_swigregister
RevertFixedSites_swigregister(RevertFixedSites)
class FiniteSitesMutator(BaseOperator):
    """
    Details:
        This is an infinite-sites mutation model in mutational space. The
        alleles in the population is assumed to be locations of mutants. A
        mutation rate is given that mutate alleles in 'regions'. If number
        of mutants for an individual exceed the number of loci, 10 loci
        will be added to everyone in the population.
    """
    # SWIG proxy boilerplate: 'thisown' flags Python-side ownership of the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            FiniteSitesMutator(rate, ranges, model=1, output="", begin=0,
              end=-1, step=1, at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL,
              infoFields="ind_id", lineageMode=FROM_INFO)
        Details:
            This operator accepts a list of ranges which is the 'real range'
            of each chromosome. Mutation happens with mutation rate rate and
            mutants will be recorded to the population (instead of alleles).
            By default, this mutator assumes a finite-allele model where all
            mutations are allowed and if a mutant (allele 1) is mutated, it
            will be mutated to allele 0 (back mutation). Alternatively (model
            = 2), an infinite-sites mutation model can be used where mutations
            can happen only at a new locus. Mutations happen at a locus with
            existing mutants will be moved to a random locus without existing
            mutant. A warning message will be printed if there is no vacant
            locus available. If a valid output is given, mutants will be
            outputted in the format of "gen mutant ind type" where type is 0
            for forward (0->1), 1 for backward (1->0), 2 for relocated
            mutations, and 3 for ignored mutation because no vacant locus is
            available. The second mode has the advantage that all mutants in
            the simulated population can be traced to a single mutation event.
            If the regions are reasonably wide and mutation rates are low,
            these two mutation models should yield similar results.
        """
        # Construct the underlying C++ object and bind it to this proxy instance.
        _simuPOP_op.FiniteSitesMutator_swiginit(self, _simuPOP_op.new_FiniteSitesMutator(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_FiniteSitesMutator
# Register the proxy class with the SWIG runtime type system.
FiniteSitesMutator_swigregister = _simuPOP_op.FiniteSitesMutator_swigregister
FiniteSitesMutator_swigregister(FiniteSitesMutator)
class GenoTransmitter(BaseOperator):
    """
    Details:
        This during mating operator is the base class of all genotype
        transmitters. It is made available to users because it provides a
        few member functions that can be used by derived transmitters, and
        by customized Python during mating operators.
    """
    # SWIG proxy boilerplate: 'thisown' flags Python-side ownership of the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            GenoTransmitter(output="", begin=0, end=-1, step=1, at=[],
              reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=[])
        Details:
            Create a base genotype transmitter.
        """
        # Construct the underlying C++ object and bind it to this proxy instance.
        _simuPOP_op.GenoTransmitter_swiginit(self, _simuPOP_op.new_GenoTransmitter(*args, **kwargs))

    def clearChromosome(self, ind: 'Individual', ploidy: 'int', chrom: 'size_t') -> "void":
        """
        Usage:
            x.clearChromosome(ind, ploidy, chrom)
        Details:
            Clear (set alleles to zero) chromosome chrom on the ploidy-th
            homologous set of chromosomes of individual ind. It is equivalent
            to ind.setGenotype([0], ploidy, chrom), except that it also clears
            allele lineage if it is executed in a module with lineage allele
            type.
        """
        return _simuPOP_op.GenoTransmitter_clearChromosome(self, ind, ploidy, chrom)

    def copyChromosome(self, parent: 'Individual', parPloidy: 'int', offspring: 'Individual', ploidy: 'int', chrom: 'size_t') -> "void":
        """
        Usage:
            x.copyChromosome(parent, parPloidy, offspring, ploidy, chrom)
        Details:
            Transmit chromosome chrom on the parPloidy set of homologous
            chromosomes from parent to the ploidy set of homologous
            chromosomes of offspring. It is equivalent to
            offspring.setGenotype(parent.genotype(parPloidy, chrom), ploidy,
            chrom), except that it also copies allelic lineage when it is
            executed in a module with lineage allele type.
        """
        return _simuPOP_op.GenoTransmitter_copyChromosome(self, parent, parPloidy, offspring, ploidy, chrom)

    def copyChromosomes(self, parent: 'Individual', parPloidy: 'int', offspring: 'Individual', ploidy: 'int') -> "void":
        """
        Usage:
            x.copyChromosomes(parent, parPloidy, offspring, ploidy)
        Details:
            Transmit the parPloidy set of homologous chromosomes from parent
            to the ploidy set of homologous chromosomes of offspring.
            Customized chromosomes are not copied. It is equivalent to
            offspring.setGenotype(parent.genotype(parPloidy), ploidy), except
            that it also copies allelic lineage when it is executed in a
            module with lineage allele type.
        """
        return _simuPOP_op.GenoTransmitter_copyChromosomes(self, parent, parPloidy, offspring, ploidy)
    __swig_destroy__ = _simuPOP_op.delete_GenoTransmitter
# Rebind the proxy methods to the C-level implementations (SWIG dispatch optimization).
GenoTransmitter.clearChromosome = new_instancemethod(_simuPOP_op.GenoTransmitter_clearChromosome, None, GenoTransmitter)
GenoTransmitter.copyChromosome = new_instancemethod(_simuPOP_op.GenoTransmitter_copyChromosome, None, GenoTransmitter)
GenoTransmitter.copyChromosomes = new_instancemethod(_simuPOP_op.GenoTransmitter_copyChromosomes, None, GenoTransmitter)
# Register the proxy class with the SWIG runtime type system.
GenoTransmitter_swigregister = _simuPOP_op.GenoTransmitter_swigregister
GenoTransmitter_swigregister(GenoTransmitter)
class CloneGenoTransmitter(GenoTransmitter):
    """
    Details:
        This during mating operator copies parental genotype directly to
        offspring. This operator works for all mating schemes when one or
        two parents are involved. If both parents are passed, maternal
        genotype are copied. In addition to genotypes on all non-
        customized or specified chromosomes, sex and information fields
        are by default also copied from parent to offspring.
    """
    # SWIG proxy boilerplate: 'thisown' flags Python-side ownership of the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            CloneGenoTransmitter(output="", chroms=ALL_AVAIL, begin=0,
              end=-1, step=1, at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL,
              infoFields=ALL_AVAIL)
        Details:
            Create a clone genotype transmitter (a during-mating operator)
            that copies genotypes from parents to offspring. If two parents
            are specified, genotypes are copied maternally. After genotype
            transmission, offspring sex and affection status is copied from
            the parent even if sex has been determined by an offspring
            generator. All or specified information fields (parameter
            infoFields, default to ALL_AVAIL) will also be copied from parent
            to offspring. Parameters subPops is ignored. This operator by
            default copies genotypes on all autosome and sex chromosomes
            (excluding customized chromosomes), unless a parameter chroms is
            used to specify which chromosomes to copy. This operator also
            copies allelic lineage when it is executed in a module with
            lineage allele type.
        """
        # Construct the underlying C++ object and bind it to this proxy instance.
        _simuPOP_op.CloneGenoTransmitter_swiginit(self, _simuPOP_op.new_CloneGenoTransmitter(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_CloneGenoTransmitter
# Register the proxy class with the SWIG runtime type system.
CloneGenoTransmitter_swigregister = _simuPOP_op.CloneGenoTransmitter_swigregister
CloneGenoTransmitter_swigregister(CloneGenoTransmitter)
class MendelianGenoTransmitter(GenoTransmitter):
    """
    Details:
        This Mendelian offspring generator accepts two parents and pass
        their genotypes to an offspring following Mendel's laws. Sex
        chromosomes are handled according to the sex of the offspring,
        which is usually determined in advance by an offspring generator.
        Customized chromosomes are not handled.
    """
    # SWIG proxy boilerplate: 'thisown' flags Python-side ownership of the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            MendelianGenoTransmitter(output="", begin=0, end=-1, step=1,
              at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=[])
        Details:
            Create a Mendelian genotype transmitter (a during-mating operator)
            that transmits genotypes from parents to offspring following
            Mendel's laws. Autosomes and sex chromosomes are handled but
            customized chromosomes are ignored. Parameters subPops and
            infoFields are ignored. This operator also copies allelic lineage
            when it is executed in a module with lineage allele type.
        """
        # Construct the underlying C++ object and bind it to this proxy instance.
        _simuPOP_op.MendelianGenoTransmitter_swiginit(self, _simuPOP_op.new_MendelianGenoTransmitter(*args, **kwargs))

    def transmitGenotype(self, parent: 'Individual', offspring: 'Individual', ploidy: 'int') -> "void":
        """
        Usage:
            x.transmitGenotype(parent, offspring, ploidy)
        Details:
            Transmit genotype from parent to offspring, and fill the ploidy
            homologous set of chromosomes. This function does not set
            genotypes of customized chromosomes and handles sex chromosomes
            properly, according to offspring sex and ploidy.
        """
        return _simuPOP_op.MendelianGenoTransmitter_transmitGenotype(self, parent, offspring, ploidy)
    __swig_destroy__ = _simuPOP_op.delete_MendelianGenoTransmitter
# Rebind the proxy method to the C-level implementation (SWIG dispatch optimization).
MendelianGenoTransmitter.transmitGenotype = new_instancemethod(_simuPOP_op.MendelianGenoTransmitter_transmitGenotype, None, MendelianGenoTransmitter)
# Register the proxy class with the SWIG runtime type system.
MendelianGenoTransmitter_swigregister = _simuPOP_op.MendelianGenoTransmitter_swigregister
MendelianGenoTransmitter_swigregister(MendelianGenoTransmitter)
class SelfingGenoTransmitter(MendelianGenoTransmitter):
    """
    Details:
        A genotype transmitter (during-mating operator) that transmits
        parental genotype of a parent through self-fertilization. That is
        to say, the offspring genotype is formed according to Mendel's
        laws, only that a parent serves as both maternal and paternal
        parents.
    """
    # SWIG proxy boilerplate: 'thisown' flags Python-side ownership of the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            SelfingGenoTransmitter(output="", begin=0, end=-1, step=1,
              at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=[])
        Details:
            Create a self-fertilization genotype transmitter that transmits
            genotypes of a parent to an offspring through self-fertilization.
            Customized chromosomes are not handled. Parameters subPops and
            infoFields are ignored. This operator also copies allelic lineage
            when it is executed in a module with lineage allele type.
        """
        # Construct the underlying C++ object and bind it to this proxy instance.
        _simuPOP_op.SelfingGenoTransmitter_swiginit(self, _simuPOP_op.new_SelfingGenoTransmitter(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_SelfingGenoTransmitter
# Register the proxy class with the SWIG runtime type system.
SelfingGenoTransmitter_swigregister = _simuPOP_op.SelfingGenoTransmitter_swigregister
SelfingGenoTransmitter_swigregister(SelfingGenoTransmitter)
class HaplodiploidGenoTransmitter(MendelianGenoTransmitter):
    """
    Details:
        A genotype transmitter (during-mating operator) for haplodiploid
        populations. The female parent is considered as diploid and the
        male parent is considered as haploid (only the first homologous
        copy is valid). If the offspring is FEMALE, she will get a random
        copy of two homologous chromosomes of her mother, and get the only
        paternal copy from her father. If the offspring is MALE, he will
        only get a set of chromosomes from his mother.
    """
    # SWIG proxy boilerplate: 'thisown' flags Python-side ownership of the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            HaplodiploidGenoTransmitter(output="", begin=0, end=-1, step=1,
              at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=[])
        Details:
            Create a haplodiploid genotype transmitter (during-mating
            operator) that transmit parental genotypes from parents to
            offspring in a haplodiploid population. Parameters subPops and
            infoFields are ignored. This operator also copies allelic lineage
            when it is executed in a module with lineage allele type.
        """
        # Construct the underlying C++ object and bind it to this proxy instance.
        _simuPOP_op.HaplodiploidGenoTransmitter_swiginit(self, _simuPOP_op.new_HaplodiploidGenoTransmitter(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_HaplodiploidGenoTransmitter
# Register the proxy class with the SWIG runtime type system.
HaplodiploidGenoTransmitter_swigregister = _simuPOP_op.HaplodiploidGenoTransmitter_swigregister
HaplodiploidGenoTransmitter_swigregister(HaplodiploidGenoTransmitter)
class MitochondrialGenoTransmitter(GenoTransmitter):
    """
    Details:
        This geno transmitter transmits the first homologous copy of a
        Mitochondrial chromosome. If no mitochondrial chromosome is
        present, it assumes that the first homologous copy of several (or
        all) Customized chromosomes are copies of mitochondrial
        chromosomes. This operator transmits the mitochondrial chromosome
        from the female parent to offspring for sexual reproduction, and
        any parent to offspring for asexual reproduction. If there are
        multiple chromosomes, the organelles are selected randomly. If
        this transmitter is applied to populations with more than one
        homologous copies of chromosomes, it transmits the first
        homologous copy of chromosomes and clears alleles (set to zero) on
        other homologous copies.
    """
    # SWIG proxy boilerplate: 'thisown' flags Python-side ownership of the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            MitochondrialGenoTransmitter(output="", chroms=ALL_AVAIL,
              begin=0, end=-1, step=1, at=[], reps=ALL_AVAIL,
              subPops=ALL_AVAIL, infoFields=[])
        Details:
            Create a mitochondrial genotype transmitter that treats the
            Mitochondrial chromosome, or Customized chromosomes if no
            Mitochondrial chromosome is specified, or a list of chromosomes
            specified by chroms, as human mitochondrial chromosomes. These
            chromosomes should have the same length and the same number of
            loci. This operator transmits these chromosomes randomly from the
            female parent to offspring of both sexes. It also copies allelic
            lineage when it is executed in a module with lineage allele type.
        """
        # Construct the underlying C++ object and bind it to this proxy instance.
        _simuPOP_op.MitochondrialGenoTransmitter_swiginit(self, _simuPOP_op.new_MitochondrialGenoTransmitter(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_MitochondrialGenoTransmitter
# Register the proxy class with the SWIG runtime type system.
MitochondrialGenoTransmitter_swigregister = _simuPOP_op.MitochondrialGenoTransmitter_swigregister
MitochondrialGenoTransmitter_swigregister(MitochondrialGenoTransmitter)
# NOTE: SWIG-generated proxy class; the statement sequence below mirrors the
# C++ binding and must not be reordered.
class Recombinator(GenoTransmitter):
    """
    Details:
    A genotype transmitter (during-mating operator) that transmits
    parental chromosomes to offspring, subject to recombination and
    gene conversion. This can be used to replace
    MendelianGenoTransmitter and SelfingGenoTransmitter. It does not
    work in haplodiploid populations, although a customized genotype
    transmitter that makes uses this operator could be defined. Please
    refer to the simuPOP user's guide or online cookbook for details.
    Recombination could be applied to all adjacent markers or after
    specified loci. Recombination rate between two adjacent markers
    could be specified directly, or calculated using physical distance
    between them. In the latter case, a recombination intensity is
    multiplied by physical distance between markers. Gene conversion
    is interpreted as double-recombination events. That is to say, if
    a recombination event happens, it has a certain probability (can
    be 1) to become a conversion event, namely triggering another
    recombination event down the chromosome. The length of the
    converted chromosome can be controlled in a number of ways.
    Note:
    simuPOP does not assume any unit to loci positions so
    recombination intensity could be explained differently (e.g. cM/Mb,
    Morgan/Mb) depending on your interpretation of loci positions. For
    example, if basepair is used for loci position, intensity=10^-8
    indicates 10^-8 per basepair, which is equivalent to 10^-2 per Mb
    or 1 cM/Mb. If Mb is used for physical positions, the same
    recombination intensity could be achieved by intensity=0.01.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
        Recombinator(rates=[], intensity=-1, loci=ALL_AVAIL,
        convMode=NO_CONVERSION, output="", begin=0, end=-1, step=1,
        at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=[])
        Details:
        Create a Recombinator (a mendelian genotype transmitter with
        recombination and gene conversion) that passes genotypes from
        parents (or a parent in case of self-fertilization) to offspring.
        Recombination happens by default between all adjacent markers but
        can be limited to a given set of loci, which can be a list of loci
        indexes, names, list of chromosome position pairs, ALL_AVAIL, or a
        function with optional parameter pop that will be called at each
        generation to determine indexes of loci. Each locus in this list
        specifies a recombination point between the locus and the locus
        immediately after it. Loci that are the last locus on each
        chromosome are ignored. If a single recombination rate (parameter
        rates) is specified, it will be used for all loci (all loci or loci
        specified by parameter loci), regardless of physical distances
        between adjacent loci. If a list of recombination rates are
        specified in rates, different recombination rates could be applied
        after a list of specified loci (between loci and their immediate
        neighbor to the right). The loci should be specified by parameter
        loci as a list with the same length as rates, or ALL_AVAIL
        (default) in which case the length of rates should equal to the
        total number of loci. Note that recombination rates specified for
        the last locus on each chromosome are ignored because simuPOP
        assumes free recombination between chromosomes. A recombination
        intensity (intensity) can be used to specify recombination rates
        that are proportional to physical distances between adjacent
        markers. If the physical distance between two markers is d, the
        recombination rate between them will be intensity * d. No unit is
        assumed for loci position and recombination intensity. Gene
        conversion is controlled using parameter convMode, which can be
        * NoConversion: no gene conversion (default).
        * (NUM_MARKERS, prob, n): With probability prob, convert a fixed
        number (n) of markers if a recombination event happens.
        * (GEOMETRIC_DISTRIBUTION, prob, p): With probability prob,
        convert a random number of markers if a recombination event
        happens. The number of markers converted follows a geometric
        distribution with probability p.
        * (TRACT_LENGTH, prob, n): With probability prob, convert a
        region of fixed tract length (n) if a recombination event happens.
        The actual number of markers converted depends on loci positions
        of surrounding loci. The starting position of this tract is the
        middle of two adjacent markers. For example, if four loci are
        located at 0, 1, 2, 3 respectively, a conversion event happens
        between 0 and 1, with a tract length 2 will start at 0.5 and end
        at 2.5, covering the second and third loci.
        * (EXPONENTIAL_DISTRIBUTION, prob, p): With probability prob,
        convert a region of random tract length if a recombination event
        happens. The distribution of tract length follows a exponential
        distribution with probability p. The actual number of markers
        converted depends on loci positions of surrounding loci. simuPOP
        uses this probabilistic model of gene conversion because when a
        recombination event happens, it may become a recombination event
        if the Holliday junction is resolved/repaired successfully,
        or a conversion event if the junction is not resolved/repaired.
        The probability, however, is more commonly denoted by the ratio of
        conversion to recombination events in the literature. This ratio
        varies greatly from study to study, ranging from 0.1 to 15 (Chen
        et al, Nature Review Genetics, 2007). This translate to
        0.1/0.9~0.1 to 15/16~0.94 of the gene conversion probability. A
        Recombinator usually does not send any output. However, if an
        information field is given (parameter infoFields), this operator
        will treat this information field as an unique ID of parents and
        offspring and output all recombination events in the format of
        offspring_id parent_id starting_ploidy loc1 loc2 ... where
        starting_ploidy indicates which homologous copy genotype
        replication starts from (0 or 1), loc1, loc2 etc are loci after
        which recombination events happens. If there are multiple
        chromosomes on the genome, you will see a lot of (fake)
        recombination events because of independent segregation of
        chromosomes. Such a record will be generated for each set of
        homologous chromosomes so a diploid offspring will have two lines
        of output. Note that individual IDs need to be set (using a
        IdTagger operator) before this Recombinator is applied. In
        addition to genotypes, this operator also copies allelic lineage
        if it is executed in a module with lineage allele type.
        Note:
        There is no recombination between sex chromosomes (Chromosomes X
        and Y), although recombination is possible between pseudoautosomal
        regions on these chromosomes. If such a feature is required, you
        will have to simulate the pseudoautosomal regions as separate
        chromosomes.
        """
        _simuPOP_op.Recombinator_swiginit(self, _simuPOP_op.new_Recombinator(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_Recombinator
    def transmitGenotype(self, parent: 'Individual', offspring: 'Individual', ploidy: 'int') -> "void":
        """
        Usage:
        x.transmitGenotype(parent, offspring, ploidy)
        Details:
        This function transmits genotypes from a parent to the ploidy-th
        homologous set of chromosomes of an offspring. It can be used, for
        example, by a customized genotype transmitter to use sex-specific
        recombination rates to transmit parental genotypes to offspring.
        """
        return _simuPOP_op.Recombinator_transmitGenotype(self, parent, offspring, ploidy)
# Bind the C implementation as an instance method, then register the proxy
# class with SWIG's runtime type system.
Recombinator.transmitGenotype = new_instancemethod(_simuPOP_op.Recombinator_transmitGenotype, None, Recombinator)
Recombinator_swigregister = _simuPOP_op.Recombinator_swigregister
Recombinator_swigregister(Recombinator)
# NOTE: SWIG-generated proxy class; the statement sequence below mirrors the
# C++ binding and must not be reordered.
class BaseSelector(BaseOperator):
    """
    Details:
    This class is the base class to all selectors, namely operators
    that perform natural selection. It defines a common interface for
    all selectors. A selector can be applied before mating or during
    mating. If a selector is applied to one or more (virtual)
    subpopulations of a parental population before mating, it sets
    individual fitness values to all involved parents to an
    information field (default to fitness). When a mating scheme that
    supports natural selection is applied to the parental population,
    it will select parents with probabilities that are proportional to
    individual fitness stored in an information field (default to
    fitness). Individual fitness is considered relative fitness and
    can be any non-negative number. This simple process has some
    implications that can lead to advanced usages of natural selection
    in simuPOP:
    * It is up to the mating scheme how to handle individual
    fitness. Some mating schemes do not support natural selection at
    all.
    * A mating scheme performs natural selection according to
    fitness values stored in an information field. It does not care
    how these values are set. For example, fitness values can be
    inherited from a parent using a tagging operator, or set directly
    using a Python operator.
    * A mating scheme can treat any information field as fitness
    field. If a specified information field does not exist, or if all
    individuals have the same fitness values (e.g. 0), the mating
    scheme selects parents randomly.
    * Multiple selectors can be applied to the same parental
    generation. individual fitness is determined by the last fitness
    value it is assigned.
    * A selection operator can be applied to virtual subpopulations
    and set fitness values only to part of the individuals.
    * individuals with zero fitness in a subpopulation with anyone
    having a positive fitness value will not be selected to produce
    offspring. This can sometimes lead to unexpected behaviors. For
    example, if you only assign fitness value to part of the
    individuals in a subpopulation, the rest of them will be
    effectively discarded. If you migrate individuals with valid
    fitness values to a subpopulation with all individuals having zero
    fitness, the migrants will be the only mating parents.
    * It is possible to assign multiple fitness values to different
    information fields so that different homogeneous mating schemes
    can react to different fitness schemes when they are used in a
    heterogeneous mating scheme.
    * You can apply a selector to the offspring generation using the
    postOps parameter of Simulator.evolve, these fitness values will
    be used when the offspring generation becomes parental generation
    in the next generation. Alternatively, a selector can be used as a
    during mating operator. In this case, it calculates fitness value
    for each offspring which will be treated as absolute fitness,
    namely the probability for each offspring to survive. This process
    uses the fact that an individual will be discarded when any of the
    during mating operators returns False. It is important to remember
    that:
    * individual fitness needs to be between 0 and 1 in this case.
    * Fitness values are not stored so the population does not need
    an information field fitness.
    * This method applies natural selection to offspring instead of
    parents. These two implementation can be identical or different
    depending on the mating scheme used.
    * Selecting offspring is less efficient than the selecting
    parents, especially when fitness values are low.
    * Parameter subPops are applied to the offspring population and
    is used to judge if an operator should be applied. It thus does
    not make sense to apply a selector to a virtual subpopulation with
    affected individuals.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
        BaseSelector(output="", begin=0, end=-1, step=1, at=[],
        reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=ALL_AVAIL)
        Details:
        Create a base selector object. This operator should not be created
        directly.
        """
        _simuPOP_op.BaseSelector_swiginit(self, _simuPOP_op.new_BaseSelector(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_BaseSelector
# Register the proxy class with SWIG's runtime type system.
BaseSelector_swigregister = _simuPOP_op.BaseSelector_swigregister
BaseSelector_swigregister(BaseSelector)
# NOTE: SWIG-generated proxy class; the statement sequence below mirrors the
# C++ binding and must not be reordered.
class MapSelector(BaseSelector):
    """
    Details:
    This selector assigns individual fitness values using a user-
    specified dictionary. This operator can be applied to populations
    with arbitrary number of homologous chromosomes.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
        MapSelector(loci, fitness, begin=0, end=-1, step=1, at=[],
        reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=ALL_AVAIL)
        Details:
        Create a selector that assigns individual fitness values using a
        dictionary fitness with genotype at loci as keys, and fitness as
        values. Parameter loci can be a list of indexes, loci names, list
        of chromosome position pairs, ALL_AVAIL, or a function with
        optional parameter pop that will be called at each generation to
        determine indexes of loci. For each individual (parents if this
        operator is applied before mating, and offspring if this operator
        is applied during mating), genotypes at loci are collected one by
        one (e.g. p0_loc0, p1_loc0, p0_loc1, p1_loc1... for a diploid
        individual, with number of alleles varying for sex and
        mitochondrial DNAs) and are looked up in the dictionary. If a
        genotype cannot be found, it will be looked up again without phase
        information (e.g. (1,0) will match key (0,1)). If the genotype
        still can not be found, a ValueError will be raised. This operator
        supports sex chromosomes and haplodiploid populations. In these
        cases, only valid genotypes should be used to generate the
        dictionary keys.
        """
        _simuPOP_op.MapSelector_swiginit(self, _simuPOP_op.new_MapSelector(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_MapSelector
# Register the proxy class with SWIG's runtime type system.
MapSelector_swigregister = _simuPOP_op.MapSelector_swigregister
MapSelector_swigregister(MapSelector)
# NOTE: SWIG-generated proxy class; the statement sequence below mirrors the
# C++ binding and must not be reordered.
class MaSelector(BaseSelector):
    """
    Details:
    This operator is called a 'multi-allele' selector because it
    groups multiple alleles into two groups: wildtype and non-wildtype
    alleles. Alleles in each allele group are assumed to have the same
    effect on individual fitness. If we denote all wildtype alleles as
    A, and all non-wildtype alleles a, this operator assign individual
    fitness according to genotype AA, Aa, aa in the diploid case, and
    A and a in the haploid case.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
        MaSelector(loci, fitness, wildtype=0, begin=0, end=-1, step=1,
        at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=ALL_AVAIL)
        Details:
        Creates a multi-allele selector that groups multiple alleles into
        a wildtype group (with alleles wildtype, default to [0]), and a
        non-wildtype group. A list of fitness values is specified through
        parameter fitness, for genotypes at one or more loci. Parameter
        loci can be a list of indexes, loci names, list of chromosome
        position pairs, ALL_AVAIL, or a function with optional parameter
        pop that will be called at each generation to determine indexes
        of loci. If we denote wildtype alleles using capital letters A, B
        ... and non-wildtype alleles using small letters a, b ..., the
        fitness values should be for
        * genotypes A and a for the haploid single-locus case,
        * genotypes AB, Ab, aB and ab for haploid two-locus cases,
        * genotypes AA, Aa and aa for diploid single-locus cases,
        * genotypes AABB, AABb, AAbb, AaBB, AaBb, Aabb, aaBB, aaBb, and
        aabb for diploid two-locus cases,
        * and in general 2**n for diploid and 3**n for haploid cases if
        there are n loci. This operator does not support haplodiploid
        populations, sex and mitochondrial chromosomes.
        """
        _simuPOP_op.MaSelector_swiginit(self, _simuPOP_op.new_MaSelector(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_MaSelector
# Register the proxy class with SWIG's runtime type system.
MaSelector_swigregister = _simuPOP_op.MaSelector_swigregister
MaSelector_swigregister(MaSelector)
# NOTE: SWIG-generated proxy class; the statement sequence below mirrors the
# C++ binding and must not be reordered.
class MlSelector(BaseSelector):
    """
    Details:
    This selector is created by a list of selectors. When it is
    applied to an individual, it applies these selectors to the
    individual, obtain a list of fitness values, and compute a
    combined fitness value from them. ADDITIVE, multiplicative, and a
    heterogeneous multi-locus model are supported.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
        MlSelector(ops, mode=MULTIPLICATIVE, begin=0, end=-1, step=1,
        at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=ALL_AVAIL)
        Details:
        Create a multiple-locus selector from a list of selection
        operators. When this operator is applied to an individual (parents
        when used before mating and offspring when used during mating), it
        applies these operators to the individual and obtain a list of
        (usually single-locus) fitness values. These fitness values are
        combined to a single fitness value using
        * Prod(f_i), namely the product of individual fitness if mode =
        MULTIPLICATIVE,
        * 1-sum(1 - f_i) if mode = ADDITIVE,
        * 1-Prod(1 - f_i) if mode = HETEROGENEITY, and
        * exp(- sum(1 - f_i)) if mode = EXPONENTIAL, zero will be
        returned if the combined fitness value is less than zero.
        Applicability parameters (begin, end, step, at, reps, subPops)
        could be used in both MlSelector and selectors in parameter ops,
        but parameters in MlSelector will be interpreted first.
        """
        _simuPOP_op.MlSelector_swiginit(self, _simuPOP_op.new_MlSelector(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_MlSelector
# Register the proxy class with SWIG's runtime type system.
MlSelector_swigregister = _simuPOP_op.MlSelector_swigregister
MlSelector_swigregister(MlSelector)
# NOTE: SWIG-generated proxy class; the statement sequence below mirrors the
# C++ binding and must not be reordered.
class PySelector(BaseSelector):
    """
    Details:
    This selector assigns fitness values by calling a user provided
    function. It accepts a list of loci (parameter loci) and a Python
    function func which should be defined with one or more of
    parameters geno, mut, gen, ind, pop or names of information
    fields. Parameter loci can be a list of loci indexes, names, list
    of chromosome position pairs, ALL_AVAIL, or a function with
    optional parameter pop that will be called at each generation to
    determine indexes of loci. When this operator is applied to a
    population, it passes genotypes or mutants at specified loci,
    generation number, a reference to an individual, a reference to
    the current population (usually used to retrieve population
    variable), and values at specified information fields to
    respective parameters of this function. Genotypes are passed as a
    tuple of alleles arranged locus by locus (in the order of
    A1,A2,B1,B2 for loci A and B). Mutants are passed as a default
    dictionary of loci index (with respect to all genotype of
    individuals, not just the first ploidy) and alleles. The returned
    value will be used to determine the fitness of each individual.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
        PySelector(func, loci=[], begin=0, end=-1, step=1, at=[],
        reps=ALL_AVAIL, output="", subPops=ALL_AVAIL,
        infoFields=ALL_AVAIL)
        Details:
        Create a Python hybrid selector that passes genotype at specified
        loci, values at specified information fields (if requested) and a
        generation number to a user-defined function func. The return
        value will be treated as individual fitness.
        """
        _simuPOP_op.PySelector_swiginit(self, _simuPOP_op.new_PySelector(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_PySelector
# Register the proxy class with SWIG's runtime type system.
PySelector_swigregister = _simuPOP_op.PySelector_swigregister
PySelector_swigregister(PySelector)
# NOTE: SWIG-generated proxy class; the statement sequence below mirrors the
# C++ binding and must not be reordered.
class PyMlSelector(BaseSelector):
    """
    Details:
    This selector is a multi-locus Python selector that assigns
    fitness to individuals by combining locus and genotype specific
    fitness values. It differs from a PySelector in that the python
    function is responsible for assigning fitness values for each
    genotype type at each locus, which can potentially be random, and
    locus or genotype-specific.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
        PyMlSelector(func, mode=EXPONENTIAL, loci=ALL_AVAIL, output="",
        begin=0, end=-1, step=1, at=[], reps=ALL_AVAIL,
        subPops=ALL_AVAIL, infoFields=ALL_AVAIL)
        Details:
        Create a selector that assigns individual fitness values by
        combining locus-specific fitness values that are determined by a
        Python call-back function. The callback function accepts parameter
        loc, alleles (both optional) and returns location- or genotype-
        specific fitness values that can be constant or random. The
        fitness values for each genotype will be cached so the same
        fitness values will be assigned to genotypes with previously
        assigned values. Note that a function that does not examine the
        genotype naturally assumes a dominant model where genotypes with
        one or two mutants have the same fitness effect. Because genotypes
        at a locus are passed separately and in no particular order, this
        function is also responsible for assigning consistent fitness
        values for genotypes at the same locus (a class is usually used).
        This operator currently ignores chromosome types so unused alleles
        will be passed for loci on sex or mitochondrial chromosomes. It
        also ignores phase of genotype so it will use the same fitness
        value for genotype (a,b) and (b,a). Individual fitness will be
        combined in ADDITIVE, MULTIPLICATIVE, HETEROGENEITY, or
        EXPONENTIAL mode from fitness values of loci with at least one
        non-zero allele (See MlSelector for details). If an output is
        given, location, genotype, fitness and generation at which the new
        genotype is assigned the value will be written to the output, in
        the format of 'loc a1 a2 fitness gen' for loci on autosomes of
        diploid populations.
        """
        _simuPOP_op.PyMlSelector_swiginit(self, _simuPOP_op.new_PyMlSelector(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_PyMlSelector
# Register the proxy class with SWIG's runtime type system.
PyMlSelector_swigregister = _simuPOP_op.PyMlSelector_swigregister
PyMlSelector_swigregister(PyMlSelector)
# NOTE: SWIG-generated proxy class; the statement sequence below mirrors the
# C++ binding and must not be reordered.
class BaseQuanTrait(BaseOperator):
    """
    Details:
    A quantitative trait in simuPOP is simply an information field. A
    quantitative trait model simply assigns values to one or more
    information fields (called trait fields) of each individual
    according to its genetic (genotype) and environmental (information
    field) factors. It can be applied at any stage of an evolutionary
    cycle. If a quantitative trait operator is applied before or after
    mating, it will set the trait fields of all parents and offspring.
    If it is applied during mating, it will set the trait fields of
    each offspring. When a quantitative trait operator is applied to
    a population, it is only applied to the current generation. You
    can, however, use parameter ancGen=-1 to set the trait field of
    all ancestral generations, or a generation index to apply to only
    ancestral generation younger than ancGen. Note that this parameter
    is ignored if the operator is applied during mating.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
        BaseQuanTrait(ancGens=UNSPECIFIED, begin=0, end=-1, step=1,
        at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=[])
        Details:
        Create a base quantitative trait operator. This operator assigns
        one or more quantitative traits to trait fields in the present
        generation (default). If ALL_AVAIL or a list of ancestral
        generations are specified, this operator will be applied to
        individuals in these generations as well. A quantitative trait
        operator can be applied to specified (virtual) subpopulations
        (parameter subPops) and replicates (parameter reps).
        """
        _simuPOP_op.BaseQuanTrait_swiginit(self, _simuPOP_op.new_BaseQuanTrait(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_BaseQuanTrait
# Register the proxy class with SWIG's runtime type system.
BaseQuanTrait_swigregister = _simuPOP_op.BaseQuanTrait_swigregister
BaseQuanTrait_swigregister(BaseQuanTrait)
# NOTE: SWIG-generated proxy class; the statement sequence below mirrors the
# C++ binding and must not be reordered.
class PyQuanTrait(BaseQuanTrait):
    """
    Details:
    This quantitative trait operator assigns a trait field by calling
    a user provided function. It accepts a list of loci (parameter
    loci), and a Python function func which should be defined with one
    or more of parameters geno, mut, gen, ind, or names of information
    fields. When this operator is applied to a population, it passes
    genotypes or mutants (non-zero alleles) of each individual at
    specified loci, generation number, a reference to an individual,
    and values at specified information fields to respective
    parameters of this function. Genotypes of each individual are
    passed as a tuple of alleles arranged locus by locus (in the order
    of A1,A2,B1,B2 for loci A and B). Mutants are passed as a default
    dictionary of loci index (with respect to all genotype of
    individuals, not just the first ploidy) and alleles. The return
    values will be assigned to specified trait fields.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
        PyQuanTrait(func, loci=[], ancGens=UNSPECIFIED, begin=0, end=-1,
        step=1, at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=[])
        Details:
        Create a Python hybrid quantitative trait operator that passes
        genotype at specified loci, optional values at specified
        information fields (if requested), and an optional generation
        number to a user-defined function func. Parameter loci can be a
        list of loci indexes, names, or ALL_AVAIL. The return value will
        be assigned to specified trait fields (infoField). If only one
        trait field is specified, a number or a sequence of one element is
        acceptable. Otherwise, a sequence of values will be accepted and
        be assigned to each trait field.
        """
        _simuPOP_op.PyQuanTrait_swiginit(self, _simuPOP_op.new_PyQuanTrait(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_PyQuanTrait
# Register the proxy class with SWIG's runtime type system.
PyQuanTrait_swigregister = _simuPOP_op.PyQuanTrait_swigregister
PyQuanTrait_swigregister(PyQuanTrait)
# NOTE: SWIG-generated proxy class; the statement sequence below mirrors the
# C++ binding and must not be reordered.
class BasePenetrance(BaseOperator):
    """
    Details:
    A penetrance model models the probability that an individual has a
    certain disease provided that he or she has certain genetic
    (genotype) and environmental (information field) risk factors. A
    penetrance operator calculates this probability according to
    provided information and set his or her affection status randomly.
    For example, an individual will have probability 0.8 to be
    affected if the penetrance is 0.8. This class is the base class to
    all penetrance operators and defines a common interface for all
    penetrance operators. A penetrance operator can be applied at any
    stage of an evolutionary cycle. If it is applied before or after
    mating, it will set affection status of all parents and offspring,
    respectively. If it is applied during mating, it will set the
    affection status of each offspring. You can also apply a
    penetrance operator to an individual using its applyToIndividual
    member function. By default, a penetrance operator assigns
    affection status of individuals but does not save the actual
    penetrance value. However, if an information field is specified,
    penetrance values will be saved to this field for future analysis.
    When a penetrance operator is applied to a population, it is only
    applied to the current generation. You can, however, use parameter
    ancGens to set affection status for all ancestral generations
    (ALL_AVAIL), or individuals in specified generations if a list of
    ancestral generations is specified. Note that this parameter is
    ignored if the operator is applied during mating.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
        BasePenetrance(ancGens=UNSPECIFIED, begin=0, end=-1, step=1,
        at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=[])
        Details:
        Create a base penetrance operator. This operator assign individual
        affection status in the present generation (default). If ALL_AVAIL
        or a list of ancestral generations are specified in parameter
        ancGens, individuals in specified ancestral generations will be
        processed. A penetrance operator can be applied to specified
        (virtual) subpopulations (parameter subPops) and replicates
        (parameter reps). If an information field is given, penetrance
        value will be stored in this information field of each individual.
        """
        _simuPOP_op.BasePenetrance_swiginit(self, _simuPOP_op.new_BasePenetrance(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_BasePenetrance
    def applyToIndividual(self, ind: 'Individual', pop: 'Population'=None) -> "bool":
        """
        Usage:
        x.applyToIndividual(ind, pop=None)
        Details:
        Apply the penetrance operator to a single individual ind and set
        his or her affection status. A population reference can be passed
        if the penetrance model depends on population properties such as
        generation number. This function returns the affection status.
        """
        return _simuPOP_op.BasePenetrance_applyToIndividual(self, ind, pop)
# Bind the C implementation as an instance method, then register the proxy
# class with SWIG's runtime type system.
BasePenetrance.applyToIndividual = new_instancemethod(_simuPOP_op.BasePenetrance_applyToIndividual, None, BasePenetrance)
BasePenetrance_swigregister = _simuPOP_op.BasePenetrance_swigregister
BasePenetrance_swigregister(BasePenetrance)
# NOTE: SWIG-generated proxy class; the statement sequence below mirrors the
# C++ binding and must not be reordered.
class MapPenetrance(BasePenetrance):
    """
    Details:
    This penetrance operator assigns individual affection status using
    a user-specified penetrance dictionary.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
        MapPenetrance(loci, penetrance, ancGens=UNSPECIFIED, begin=0,
        end=-1, step=1, at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL,
        infoFields=[])
        Details:
        Create a penetrance operator that get penetrance value from a
        dictionary penetrance with genotype at loci as keys, and
        penetrance as values. For each individual, genotypes at loci are
        collected one by one (e.g. p0_loc0, p1_loc0, p0_loc1, p1_loc1...
        for a diploid individual) and are looked up in the dictionary.
        Parameter loci can be a list of loci indexes, names, list of
        chromosome position pairs, ALL_AVAIL, or a function with optional
        parameter pop that will be called at each generation to determine
        indexes of loci. If a genotype cannot be found, it will be looked
        up again without phase information (e.g. (1,0) will match key
        (0,1)). If the genotype still can not be found, a ValueError will
        be raised. This operator supports sex chromosomes and haplodiploid
        populations. In these cases, only valid genotypes should be used
        to generate the dictionary keys.
        """
        _simuPOP_op.MapPenetrance_swiginit(self, _simuPOP_op.new_MapPenetrance(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_MapPenetrance
# Register the proxy class with SWIG's runtime type system.
MapPenetrance_swigregister = _simuPOP_op.MapPenetrance_swigregister
MapPenetrance_swigregister(MapPenetrance)
# NOTE: SWIG-generated proxy class; the statement sequence below mirrors the
# C++ binding and must not be reordered.
class MaPenetrance(BasePenetrance):
    """
    Details:
    This operator is called a 'multi-allele' penetrance operator
    because it groups multiple alleles into two groups: wildtype and
    non-wildtype alleles. Alleles in each allele group are assumed to
    have the same effect on individual penetrance. If we denote all
    wildtype alleles as A, and all non-wildtype alleles a, this
    operator assign Individual penetrance according to genotype AA,
    Aa, aa in the diploid case, and A and a in the haploid case.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        Usage:
        MaPenetrance(loci, penetrance, wildtype=0, ancGens=UNSPECIFIED,
        begin=0, end=-1, step=1, at=[], reps=ALL_AVAIL,
        subPops=ALL_AVAIL, infoFields=[])
        Details:
        Creates a multi-allele penetrance operator that groups multiple
        alleles into a wildtype group (with alleles wildtype, default to
        [0]), and a non-wildtype group. A list of penetrance values is
        specified through parameter penetrance, for genotypes at one or
        more loci. Parameter loci can be a list of loci indexes, names,
        list of chromosome position pairs, ALL_AVAIL, or a function with
        optional parameter pop that will be called at each generation to
        determine indexes of loci. If we denote wildtype alleles using
        capital letters A, B ... and non-wildtype alleles using small
        letters a, b ..., the penetrance values should be for
        * genotypes A and a for the haploid single-locus case,
        * genotypes AB, Ab, aB and ab for haploid two-locus cases,
        * genotypes AA, Aa and aa for diploid single-locus cases,
        * genotypes AABB, AABb, AAbb, AaBB, AaBb, Aabb, aaBB, aaBb, and
        aabb for diploid two-locus cases,
        * and in general 2**n for diploid and 3**n for haploid cases if
        there are n loci. This operator does not support haplodiploid
        populations and sex chromosomes.
        """
        _simuPOP_op.MaPenetrance_swiginit(self, _simuPOP_op.new_MaPenetrance(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_MaPenetrance
# Register the proxy class with SWIG's runtime type system.
MaPenetrance_swigregister = _simuPOP_op.MaPenetrance_swigregister
MaPenetrance_swigregister(MaPenetrance)
# SWIG-generated proxy class -- do not hand-edit beyond comments.
class MlPenetrance(BasePenetrance):
    """
    Details:
        This penetrance operator is created by a list of penetrance
        operators. When it is applied to an individual, it applies these
        penetrance operators to the individual, obtain a list of
        penetrance values, and compute a combined penetrance value from
        them and assign affection status accordingly. ADDITIVE,
        multiplicative, and a heterogeneour multi-locus model are
        supported. Please refer to Neil Rish (1989) "Linkage Strategies
        for Genetically Complex Traits" for some analysis of these models.
    """
    # SWIG bookkeeping: C++ object ownership flag and generated repr.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            MlPenetrance(ops, mode=MULTIPLICATIVE, ancGens=UNSPECIFIED,
              begin=0, end=-1, step=1, at=[], reps=ALL_AVAIL,
              subPops=ALL_AVAIL, infoFields=[])
        Details:
            Create a multiple-locus penetrance operator from a list penetrance
            operator ops. When this operator is applied to an individual
            (parents when used before mating and offspring when used during
            mating), it applies these operators to the individual and obtain a
            list of (usually single-locus) penetrance values. These penetrance
            values are combined to a single penetrance value using
            * Prod(f_i), namely the product of individual penetrance if mode
            = MULTIPLICATIVE,
            * sum(f_i) if mode = ADDITIVE, and
            * 1-Prod(1 - f_i) if mode = HETEROGENEITY 0 or 1 will be
            returned if the combined penetrance value is less than zero or
            greater than 1. Applicability parameters (begin, end, step, at,
            reps, subPops) could be used in both MlSelector and selectors in
            parameter ops, but parameters in MlSelector will be interpreted
            first.
        """
        # Construction is delegated entirely to the compiled C++ extension.
        _simuPOP_op.MlPenetrance_swiginit(self, _simuPOP_op.new_MlPenetrance(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_MlPenetrance

# Register the proxy class with the C++ runtime type system.
MlPenetrance_swigregister = _simuPOP_op.MlPenetrance_swigregister
MlPenetrance_swigregister(MlPenetrance)
# SWIG-generated proxy class -- do not hand-edit beyond comments.
class PyPenetrance(BasePenetrance):
    """
    Details:
        This penetrance operator assigns penetrance values by calling a
        user provided function. It accepts a list of loci (parameter
        loci), and a Python function func which should be defined with one
        or more of parameters geno, mut, gen, ind, pop, or names of
        information fields. When this operator is applied to a population,
        it passes genotypes or mutants (non-zero alleles) at specified
        loci at specified loci, generation number, a reference to an
        individual, a reference to the current population (usually used to
        retrieve population variables) and values at specified information
        fields to respective parameters of this function. Genotypes of
        each individual are passed as a tuple of alleles arranged locus by
        locus (in the order of A1,A2,B1,B2 for loci A and B). Mutants are
        passed as a default dictionary of loci index (with respect to all
        genotype of individuals, not just the first ploidy) and alleles.
        The returned penetrance values will be used to determine the
        affection status of each individual.
    """
    # SWIG bookkeeping: C++ object ownership flag and generated repr.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            PyPenetrance(func, loci=[], ancGens=UNSPECIFIED, begin=0,
              end=-1, step=1, at=[], reps=ALL_AVAIL, subPops=ALL_AVAIL,
              infoFields=[])
        Details:
            Create a Python hybrid penetrance operator that passes genotype at
            specified loci, values at specified information fields (if
            requested), and a generation number to a user-defined function
            func. Parameter loci can be a list of loci indexes, names, list of
            chromosome position pairs, ALL_AVAIL, or a function with optional
            parameter pop that will be called at each ganeeration to determine
            indexes of loci. The return value will be treated as Individual
            penetrance.
        """
        # Construction is delegated entirely to the compiled C++ extension.
        _simuPOP_op.PyPenetrance_swiginit(self, _simuPOP_op.new_PyPenetrance(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_PyPenetrance

# Register the proxy class with the C++ runtime type system.
PyPenetrance_swigregister = _simuPOP_op.PyPenetrance_swigregister
PyPenetrance_swigregister(PyPenetrance)
# SWIG-generated proxy class -- do not hand-edit beyond comments.
class PyMlPenetrance(BasePenetrance):
    """
    Details:
        This penetrance operator is a multi-locus Python penetrance
        operator that assigns penetrance values by combining locus and
        genotype specific penetrance values. It differs from a
        PyPenetrance in that the python function is responsible for
        penetrance values values for each gentoype type at each locus,
        which can potentially be random, and locus or gentoype-specific.
    """
    # SWIG bookkeeping: C++ object ownership flag and generated repr.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            PyMlPenetrance(func, mode=MULTIPLICATIVE, loci=ALL_AVAIL,
              ancGens=UNSPECIFIED, output="", begin=0, end=-1, step=1, at=[],
              reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=[])
        Details:
            Create a penetrance operator that assigns individual affection
            status according to penetrance values combined from locus-specific
            penetrance values that are determined by a Python call-back
            function. The callback function accepts parameter loc, alleles
            (both optional) and returns location- or genotype-specific
            penetrance values that can be constant or random. The penetrance
            values for each genotype will be cached so the same penetrance
            values will be assigned to genotypes with previously assigned
            values. Note that a function that does not examine the genotype
            naturally assumes a dominant model where genotypes with one or two
            mutants have the same penetrance value. Because genotypes at a
            locus are passed separately and in no particular order, this
            function is also responsible for assigning consistent fitness
            values for genotypes at the same locus (a class is usually used).
            This operator currently ignores chromosome types so unused alleles
            will be passed for loci on sex or mitochondrial chromosomes. This
            operator also ignores the phase of genotype so genotypes (a,b) and
            (b,a) are assumed to have the same fitness effect. Individual
            penetrance will be combined in ADDITIVE, MULTIPLICATIVE, or
            HETEROGENEITY mode from penetrance values of loci with at least
            one non-zero allele (See MlPenetrance for details).
        """
        # Construction is delegated entirely to the compiled C++ extension.
        _simuPOP_op.PyMlPenetrance_swiginit(self, _simuPOP_op.new_PyMlPenetrance(*args, **kwargs))
    __swig_destroy__ = _simuPOP_op.delete_PyMlPenetrance

# Register the proxy class with the C++ runtime type system.
PyMlPenetrance_swigregister = _simuPOP_op.PyMlPenetrance_swigregister
PyMlPenetrance_swigregister(PyMlPenetrance)
# SWIG-generated proxy class -- every method body is a thin delegation to the
# compiled _simuPOP_op extension; do not hand-edit beyond comments.
class Pedigree(Population):
    """
    Details:
        The pedigree class is derived from the population class. Unlike a
        population class that emphasizes on individual properties, the
        pedigree class emphasizes on relationship between individuals. An
        unique ID for all individuals is needed to create a pedigree
        object from a population object. Compared to the Population class,
        a Pedigree object is optimized for access individuals by their
        IDs, regardless of population structure and ancestral generations.
        Note that the implementation of some algorithms rely on the fact
        that parental IDs are smaller than their offspring because
        individual IDs are assigned sequentially during evolution.
        Pedigrees with manually assigned IDs should try to obey such a
        rule.
    """
    # SWIG bookkeeping: C++ object ownership flag and generated repr.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args, **kwargs):
        """
        Usage:
            Pedigree(pop, loci=[], infoFields=[], ancGens=ALL_AVAIL,
              idField="ind_id", fatherField="father_id",
              motherField="mother_id", stealPop=False)
        Details:
            Create a pedigree object from a population, using a subset of loci
            (parameter loci, can be a list of loci indexes, names, or
            ALL_AVAIL, default to no locus), information fields (parameter
            infoFields, default to no information field besides idField,
            fatherField and motherField), and ancestral generations (parameter
            ancGens, default to all ancestral generations). By default,
            information field father_id (parameter fatherField) and mother_id
            (parameter motherField) are used to locate parents identified by
            ind_id (parameter idField), which should store an unique ID for
            all individuals. Multiple individuls with the same ID are allowed
            and will be considered as the same individual, but a warning will
            be given if they actually differ in genotype or information
            fields. Operators IdTagger and PedigreeTagger are usually used to
            assign such IDs, although function sampling.indexToID could be
            used to assign unique IDs and construct parental IDs from index
            based relationship recorded by operator ParentsTagger. A pedigree
            object could be constructed with one or no parent but certain
            functions such as relative tracking will not be available for such
            pedigrees. In case that your are no longer using your population
            object, you could steal the content from the population by setting
            stealPop to True.
        """
        # Construction is delegated entirely to the compiled C++ extension.
        _simuPOP_op.Pedigree_swiginit(self, _simuPOP_op.new_Pedigree(*args, **kwargs))

    def clone(self) -> "simuPOP::Pedigree *":
        """
        Usage:
            x.clone()
        Details:
            Create a cloned copy of a Pedigree.
        """
        return _simuPOP_op.Pedigree_clone(self)

    def save(self, *args, **kwargs) -> "void":
        """
        Usage:
            x.save(filename, infoFields=[], loci=[])
        Details:
            Save a pedigree to file filename. This function goes through all
            individuals of a pedigree and outputs in each line the ID of
            individual, IDs of his or her parents, sex ('M' or 'F'), affection
            status ('A' or 'U'), values of specified information fields
            infoFields and genotypes at specified loci (parameter loci, which
            can be a list of loci indexes, names, or ALL_AVAIL). Allele
            numbers, instead of their names are outputed. Two columns are used
            for each locus if the population is diploid. This file can be
            loaded using function loadPedigree although additional information
            such as names of information fields need to be specified. This
            format differs from a .ped file used in some genetic analysis
            software in that there is no family ID and IDs of all individuals
            have to be unique. Note that parental IDs will be set to zero if
            the parent is not in the pedigree object. Therefore, the parents
            of individuals in the top-most ancestral generation will always be
            zero.
        """
        return _simuPOP_op.Pedigree_save(self, *args, **kwargs)

    def indByID(self, id: 'double') -> "simuPOP::Individual &":
        """
        Usage:
            x.indByID(id)
        Details:
            Return a reference to individual with id. An IndexError will be
            raised if no individual with id is found. An float id is
            acceptable as long as it rounds closely to an integer.
        """
        return _simuPOP_op.Pedigree_indByID(self, id)

    def numParents(self) -> "size_t":
        """Obsolete or undocumented function."""
        return _simuPOP_op.Pedigree_numParents(self)

    def locateRelatives(self, *args, **kwargs) -> "void":
        """
        Usage:
            x.locateRelatives(relType, resultFields=[], sex=ANY_SEX,
              affectionStatus=ANY_AFFECTION_STATUS, ancGens=ALL_AVAIL)
        Details:
            This function locates relatives (of type relType) of each
            individual and store their IDs in information fields relFields.
            The length of relFields determines how many relatives an
            individual can have. Parameter relType specifies what type of
            relative to locate, which can be
            * SPOUSE locate spouses with whom an individual has at least one
            common offspring.
            * OUTBRED_SPOUSE locate non-slibling spouses, namely spouses
            with no shared parent.
            * OFFSPRING all offspring of each individual.
            * COMMON_OFFSPRING common offspring between each individual and
            its spouse (located by SPOUSE or OUTBRED_SPOUSE). relFields should
            consist of an information field for spouse and m-1 fields for
            offspring where m is the number of fields.
            * FULLSIBLING siblings with common father and mother,
            * SIBLING siblings with at least one common parent. Optionally,
            you can specify the sex and affection status of relatives you
            would like to locate, using parameters sex and affectionStatus.
            sex can be ANY_SEX (default), MALE_ONLY, FEMALE_ONLY, SAME_SEX or
            OPPOSITE_SEX, and affectionStatus can be AFFECTED, UNAFFECTED or
            ANY_AFFECTION_STATUS (default). Only relatives with specified
            properties will be located. This function will by default go
            through all ancestral generations and locate relatives for all
            individuals. This can be changed by setting parameter ancGens to
            certain ancestral generations you would like to process.
        """
        return _simuPOP_op.Pedigree_locateRelatives(self, *args, **kwargs)

    def traceRelatives(self, *args, **kwargs) -> "bool":
        """
        Usage:
            x.traceRelatives(fieldPath, sex=[], affectionStatus=[],
              resultFields=[], ancGens=ALL_AVAIL)
        Details:
            Trace a relative path in a population and record the result in the
            given information fields resultFields. This function is used to
            locate more distant relatives based on the relatives located by
            function locateRelatives. For example, after siblings and
            offspring of all individuals are located, you can locate mother's
            sibling's offspring using a relative path, and save their indexes
            in each individuals information fields resultFields. A relative
            path consits of a fieldPath that specifies which information
            fields to look for at each step, a sex specifies sex choices at
            each generation, and a affectionStatus that specifies affection
            status at each generation. fieldPath should be a list of
            information fields, sex and affectionStatus are optional. If
            specified, they should be a list of ANY_SEX, MALE_ONLY,
            FEMALE_ONLY, SAME_SEX and OppsiteSex for parameter sex, and a list
            of UNAFFECTED, AFFECTED and ANY_AFFECTION_STATUS for parameter
            affectionStatus. For example, if fieldPath = [['father_id',
            'mother_id'], ['sib1', 'sib2'], ['off1', 'off2']], and sex =
            [ANY_SEX, MALE_ONLY, FEMALE_ONLY], this function will locate
            father_id and mother_id for each individual, find all individuals
            referred by father_id and mother_id, find informaton fields sib1
            and sib2 from these parents and locate male individuals referred
            by these two information fields. Finally, the information fields
            off1 and off2 from these siblings are located and are used to
            locate their female offspring. The results are father or mother's
            brother's daughters. Their indexes will be saved in each
            individuals information fields resultFields. If a list of
            ancestral generations is given in parameter ancGens is given, only
            individuals in these ancestral generations will be processed.
        """
        return _simuPOP_op.Pedigree_traceRelatives(self, *args, **kwargs)

    def individualsWithRelatives(self, *args, **kwargs) -> "vectoru":
        """
        Usage:
            x.individualsWithRelatives(infoFields, sex=[],
              affectionStatus=[], subPops=ALL_AVAIL, ancGens=ALL_AVAIL)
        Details:
            Return a list of IDs of individuals who have non-negative values
            at information fields infoFields. Additional requirements could be
            specified by parameters sex and affectionStatus. sex can be
            ANY_SEX (default), MALE_ONLY, FEMALE_ONLY, SAME_SEX or
            OPPOSITE_SEX, and affectionStatus can be AFFECTED, UNAFFECTED or
            ANY_AFFECTION_STATUS (default). This function by default check all
            individuals in all ancestral generations, but you could limit the
            search using parameter subPops (a list of (virtual)
            subpopulations) and ancestral generations ancGens. Relatives fall
            out of specified subpopulations and ancestral generaions will be
            considered invalid.
        """
        return _simuPOP_op.Pedigree_individualsWithRelatives(self, *args, **kwargs)

    def identifyFamilies(self, *args, **kwargs) -> "vectoru":
        """
        Usage:
            x.identifyFamilies(pedField="", subPops=ALL_AVAIL,
              ancGens=ALL_AVAIL)
        Details:
            This function goes through all individuals in a pedigree and group
            related individuals into families. If an information field
            pedField is given, indexes of families will be assigned to this
            field of each family member. The return value is a list of family
            sizes corresponding to families 0, 1, 2, ... etc. If a list of
            (virtual) subpopulations (parameter subPops) or ancestral
            generations are specified (parameter ancGens), the search will be
            limited to individuals in these subpopulations and generations.
        """
        return _simuPOP_op.Pedigree_identifyFamilies(self, *args, **kwargs)

    def identifyAncestors(self, *args, **kwargs) -> "vectoru":
        """
        Usage:
            x.identifyAncestors(IDs=ALL_AVAIL, subPops=ALL_AVAIL,
              ancGens=ALL_AVAIL)
        Details:
            If a list of individuals (IDs) is given, this function traces
            backward in time and find all ancestors of these individuals. If
            IDs is ALL_AVAIL, ancestors of all individuals in the present
            generation will be located. If a list of (virtual) subpopulations
            (subPops) or ancestral geneartions (ancGens) is given, the search
            will be limited to individuals in these subpopulations and
            generations. This could be used to, for example, find all fathers
            of IDs. This function returns a list of IDs, which includes valid
            specified IDs. Invalid IDs will be silently ignored. Note that
            parameters subPops and ancGens will limit starting IDs if IDs is
            set to ALL_AVAIL, but specified IDs will not be trimmed according
            to these parameters.
        """
        return _simuPOP_op.Pedigree_identifyAncestors(self, *args, **kwargs)

    def identifyOffspring(self, *args, **kwargs) -> "vectoru":
        """
        Usage:
            x.identifyOffspring(IDs=[], subPops=ALL_AVAIL,
              ancGens=ALL_AVAIL)
        Details:
            This function traces forward in time and find all offspring of
            individuals specified in parameter IDs. If a list of (virtual)
            subpopulations (subPops) or ancestral geneartions (ancGens) is
            given, the search will be limited to individuals in these
            subpopulations and generations. This could be used to, for
            example, find all male offspring of IDs. This function returns a
            list of IDs, which includes valid starting IDs. Invalid IDs are
            silently ignored. Note that parameters subPops and ancGens will
            limit search result but will not be used to trim specified IDs.
        """
        return _simuPOP_op.Pedigree_identifyOffspring(self, *args, **kwargs)

    def removeIndividuals(self, *args, **kwargs) -> "void":
        """Obsolete or undocumented function."""
        return _simuPOP_op.Pedigree_removeIndividuals(self, *args, **kwargs)

    def removeSubPops(self, subPops: 'subPopList') -> "void":
        """Obsolete or undocumented function."""
        return _simuPOP_op.Pedigree_removeSubPops(self, subPops)

    def push(self, pop: 'Population') -> "void":
        """Obsolete or undocumented function."""
        return _simuPOP_op.Pedigree_push(self, pop)

    def addChrom(self, *args, **kwargs) -> "void":
        """Obsolete or undocumented function."""
        return _simuPOP_op.Pedigree_addChrom(self, *args, **kwargs)

    def addChromFrom(self, pop: 'Population') -> "void":
        """Obsolete or undocumented function."""
        return _simuPOP_op.Pedigree_addChromFrom(self, pop)

    def addIndFrom(self, pop: 'Population') -> "void":
        """Obsolete or undocumented function."""
        return _simuPOP_op.Pedigree_addIndFrom(self, pop)

    def mergeSubPops(self, *args, **kwargs) -> "size_t":
        """Obsolete or undocumented function."""
        return _simuPOP_op.Pedigree_mergeSubPops(self, *args, **kwargs)

    def resize(self, sizes: 'uintList', propagate: 'bool'=False) -> "void":
        """Obsolete or undocumented function."""
        return _simuPOP_op.Pedigree_resize(self, sizes, propagate)

    def setSubPopByIndInfo(self, field: 'string const &') -> "void":
        """Obsolete or undocumented function."""
        return _simuPOP_op.Pedigree_setSubPopByIndInfo(self, field)
    __swig_destroy__ = _simuPOP_op.delete_Pedigree

# Rebind the proxy methods to the C-extension implementations
# (SWIG new_instancemethod plumbing).
Pedigree.clone = new_instancemethod(_simuPOP_op.Pedigree_clone, None, Pedigree)
Pedigree.save = new_instancemethod(_simuPOP_op.Pedigree_save, None, Pedigree)
Pedigree.indByID = new_instancemethod(_simuPOP_op.Pedigree_indByID, None, Pedigree)
Pedigree.numParents = new_instancemethod(_simuPOP_op.Pedigree_numParents, None, Pedigree)
Pedigree.locateRelatives = new_instancemethod(_simuPOP_op.Pedigree_locateRelatives, None, Pedigree)
Pedigree.traceRelatives = new_instancemethod(_simuPOP_op.Pedigree_traceRelatives, None, Pedigree)
Pedigree.individualsWithRelatives = new_instancemethod(_simuPOP_op.Pedigree_individualsWithRelatives, None, Pedigree)
Pedigree.identifyFamilies = new_instancemethod(_simuPOP_op.Pedigree_identifyFamilies, None, Pedigree)
Pedigree.identifyAncestors = new_instancemethod(_simuPOP_op.Pedigree_identifyAncestors, None, Pedigree)
Pedigree.identifyOffspring = new_instancemethod(_simuPOP_op.Pedigree_identifyOffspring, None, Pedigree)
Pedigree.removeIndividuals = new_instancemethod(_simuPOP_op.Pedigree_removeIndividuals, None, Pedigree)
Pedigree.removeSubPops = new_instancemethod(_simuPOP_op.Pedigree_removeSubPops, None, Pedigree)
Pedigree.push = new_instancemethod(_simuPOP_op.Pedigree_push, None, Pedigree)
Pedigree.addChrom = new_instancemethod(_simuPOP_op.Pedigree_addChrom, None, Pedigree)
Pedigree.addChromFrom = new_instancemethod(_simuPOP_op.Pedigree_addChromFrom, None, Pedigree)
Pedigree.addIndFrom = new_instancemethod(_simuPOP_op.Pedigree_addIndFrom, None, Pedigree)
Pedigree.mergeSubPops = new_instancemethod(_simuPOP_op.Pedigree_mergeSubPops, None, Pedigree)
Pedigree.resize = new_instancemethod(_simuPOP_op.Pedigree_resize, None, Pedigree)
Pedigree.setSubPopByIndInfo = new_instancemethod(_simuPOP_op.Pedigree_setSubPopByIndInfo, None, Pedigree)

# Register the proxy class with the C++ runtime type system.
Pedigree_swigregister = _simuPOP_op.Pedigree_swigregister
Pedigree_swigregister(Pedigree)
# SWIG-generated wrapper function -- delegates to the compiled extension.
def loadPedigree(*args, **kwargs) -> "simuPOP::Pedigree":
    """
    Usage:
        loadPedigree(file, idField="ind_id", fatherField="father_id",
          motherField="mother_id", ploidy=2, loci=[], chromTypes=[],
          lociPos=[], chromNames=[], alleleNames=[], lociNames=[],
          subPopNames=[], infoFields=[])
    Details:
        Load a pedigree from a file saved by operator PedigreeTagger or
        function Pedigree.save. This file contains the ID of each
        offspring and their parent(s) and optionally sex ('M' or 'F'),
        affection status ('A' or 'U'), values of information fields and
        genotype at some loci. IDs of each individual and their parents
        are loaded to information fields idField, fatherField and
        motherField. Only numeric IDs are allowed, and individual IDs must
        be unique across all generations. Because this file does not
        contain generation information, generations to which offspring
        belong are determined by the parent-offspring relationships.
        Individuals without parents are assumed to be in the top-most
        ancestral generation. This is the case for individuals in the top-
        most ancestral generation if the file is saved by function
        Pedigree.save(), and for individuals who only appear as another
        individual's parent, if the file is saved by operator
        PedigreeTagger. The order at which offsprng is specified is not
        important because this function essentially creates a top-most
        ancestral generation using IDs without parents, and creates the
        next generation using offspring of these parents, and so on until
        all generations are recreated. That is to say, if you have a
        mixture of pedigrees with different generations, they will be
        lined up from the top most ancestral generation. If individual
        sex is not specified, sex of of parents are determined by their
        parental roles (father or mother) but the sex of individuals in
        the last generation can not be determined so they will all be
        males. If additional information fields are given, their names
        have to be specified using parameter infoFields. The rest of the
        columns are assued to be alleles, arranged ploidy consecutive
        columns for each locus. If paraemter loci is not specified, the
        number of loci is calculated by number of columns divided by
        ploidy (default to 2). All loci are assumed to be on one
        chromosome unless parameter loci is used to specified number of
        loci on each chromosome. Additional parameters such as ploidy,
        chromTypes, lociPos, chromNames, alleleNames, lociNames could be
        used to specified the genotype structured of the loaded pedigree.
        Please refer to class Population for details about these
        parameters.
    """
    return _simuPOP_op.loadPedigree(*args, **kwargs)

# Re-export the extension module's defdict type.
defdict = _simuPOP_op.defdict
|
gpl-2.0
|
HPCGISLab/STDataViz
|
WorkingVersion/LibTry/Traits/traits_fileDialog.py
|
1
|
1490
|
#-- Imports --------------------------------------------------------------------
from traits.api \
import HasTraits, File, Button
from traitsui.api \
import View, HGroup, Item
from traitsui.file_dialog \
import open_file, FileInfo, TextInfo, ImageInfo
#-- FileDialogDemo Class -------------------------------------------------------

# Demo-specific file dialog id (persists dialog state across invocations):
demo_id = 'traitsui.demo.standard_editors.file_dialog.multiple_info'

# The list of file dialog extensions (extra information panes) to use:
extensions = [ FileInfo(), TextInfo(), ImageInfo() ]
class FileDialogDemo ( HasTraits ):
    """Minimal TraitsUI demo: a button that opens a file dialog with extra
    info panes and a read-only display of the chosen file name.
    """

    # The name of the selected file:
    file_name = File

    # The button used to display the file dialog:
    open = Button( 'Open...' )

    #-- Traits View Definitions ------------------------------------------------

    view = View(
        HGroup(
            Item( 'open', show_label = False ),
            '_',
            Item( 'file_name', style = 'readonly', springy = True )
        ),
        width = 0.5
    )

    #-- Traits Event Handlers --------------------------------------------------

    def _open_changed ( self ):
        """ Handles the user clicking the 'Open...' button.
        """
        # open_file returns '' when the user cancels, in which case the
        # previously selected file name is kept.
        file_name = open_file( extensions = extensions, id = demo_id )
        if file_name != '':
            self.file_name = file_name
# Create the demo:
demo = FileDialogDemo()

# Run the demo (if invoked from the command line):
if __name__ == '__main__':
    demo.configure_traits()
|
bsd-3-clause
|
zero-ui/miniblink49
|
third_party/WebKit/Source/devtools/scripts/concatenate_application_code.py
|
22
|
11385
|
#!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Release:
- Concatenates autostart modules, application modules' module.json descriptors,
and the application loader into a single script.
- Concatenates all workers' dependencies into individual worker loader scripts.
- Builds app.html referencing the application script.
Debug:
- Copies the module directories into their destinations.
- Copies app.html as-is.
"""
from cStringIO import StringIO
from os import path
from os.path import join
from modular_build import read_file, write_file, bail_error
import copy
import modular_build
import os
import re
import shutil
import sys
try:
    # Prefer the faster simplejson if available; fall back to the stdlib.
    import simplejson as json
except ImportError:
    import json

# Make the rjsmin minifier (which lives in build/scripts, outside this
# package) importable.
rjsmin_path = path.abspath(join(
    path.dirname(__file__),
    '..',
    '..',
    'build',
    'scripts'))
sys.path.append(rjsmin_path)
import rjsmin
def resource_source_url(url):
    """Return a sourceURL comment that maps embedded content back to *url*."""
    return '\n/*# sourceURL=%s */' % url
def minify_js(javascript):
    """Return *javascript* minified with the bundled rjsmin minifier."""
    minified = rjsmin.jsmin(javascript)
    return minified
def concatenated_module_filename(module_name, output_dir):
    """Return the path of the concatenated script for *module_name* under
    *output_dir*."""
    return join(output_dir, '%s_module.js' % module_name)
def symlink_or_copy_file(src, dest, safe=False):
    """Symlink *src* to *dest*, copying instead on platforms without
    symlink support. With safe=True an existing *dest* is removed first.
    """
    if safe and path.exists(dest):
        os.remove(dest)
    link = getattr(os, 'symlink', None)
    if link is not None:
        link(src, dest)
    else:
        shutil.copy(src, dest)
def symlink_or_copy_dir(src, dest):
    """Mirror the directory tree *src* at *dest*, linking (or copying) every
    file via symlink_or_copy_file. Any existing *dest* is removed first.
    """
    if path.exists(dest):
        shutil.rmtree(dest)
    for walk_dir, _subdirs, file_names in os.walk(src):
        relative = path.relpath(walk_dir, src)
        target_dir = path.normpath(join(dest, relative))
        os.mkdir(target_dir)
        for file_name in file_names:
            # Link targets are made absolute so the symlinks resolve
            # regardless of the current working directory.
            symlink_or_copy_file(join(os.getcwd(), walk_dir, file_name),
                                 join(target_dir, file_name))
class AppBuilder:
    """Shared state and helpers for application builders: the application
    name, its module descriptors, and the source/output directories.
    """
    def __init__(self, application_name, descriptors, application_dir, output_dir):
        self.application_name = application_name
        self.descriptors = descriptors
        self.application_dir = application_dir
        self.output_dir = output_dir

    def app_file(self, extension):
        """Return the application's file name with the given *extension*."""
        return '.'.join((self.application_name, extension))

    def core_resource_names(self):
        """Return module-relative paths of every resource belonging to an
        autostart module."""
        names = []
        for module_name in self.descriptors.sorted_modules():
            if self.descriptors.application[module_name].get('type') != 'autostart':
                continue
            for resource in self.descriptors.modules[module_name].get('resources') or []:
                names.append(path.join(module_name, resource))
        return names
# Outputs:
# <app_name>.html
# <app_name>.js
# <module_name>_module.js
class ReleaseBuilder(AppBuilder):
    def __init__(self, application_name, descriptors, application_dir, output_dir):
        """Forward all construction arguments to the AppBuilder base."""
        AppBuilder.__init__(self, application_name, descriptors, application_dir, output_dir)
def build_app(self):
self._build_html()
self._build_app_script()
for module in filter(lambda desc: (not desc.get('type') or desc.get('type') == 'remote'), self.descriptors.application.values()):
self._concatenate_dynamic_module(module['name'])
for module in filter(lambda desc: desc.get('type') == 'worker', self.descriptors.application.values()):
self._concatenate_worker(module['name'])
    def _build_html(self):
        """Copy the application's .html file to the output directory,
        dropping the per-module <script>/<link> tags and injecting single
        references to the concatenated .css and .js bundles just before
        </head>.
        """
        html_name = self.app_file('html')
        output = StringIO()
        with open(join(self.application_dir, html_name), 'r') as app_input_html:
            for line in app_input_html:
                # Per-module includes are replaced by the bundle references.
                if '<script ' in line or '<link ' in line:
                    continue
                if '</head>' in line:
                    output.write(self._generate_include_tag(self.app_file('css')))
                    output.write(self._generate_include_tag(self.app_file('js')))
                output.write(line)
        write_file(join(self.output_dir, html_name), output.getvalue())
        output.close()
def _build_app_script(self):
script_name = self.app_file('js')
output = StringIO()
self._concatenate_application_script(output)
write_file(join(self.output_dir, script_name), minify_js(output.getvalue()))
output.close()
def _generate_include_tag(self, resource_path):
if (resource_path.endswith('.js')):
return ' <script type="text/javascript" src="%s"></script>\n' % resource_path
elif (resource_path.endswith('.css')):
return ' <link rel="stylesheet" type="text/css" href="%s">\n' % resource_path
else:
assert resource_path
def _release_module_descriptors(self):
module_descriptors = self.descriptors.modules
result = []
for name in module_descriptors:
module = copy.copy(module_descriptors[name])
# Clear scripts, as they are not used at runtime
# (only the fact of their presence is important).
resources = module.get('resources', None)
if module.get('scripts') or resources:
module['scripts'] = []
# Resources list is not used at runtime.
if resources is not None:
del module['resources']
condition = self.descriptors.application[name].get('condition')
if condition:
module['condition'] = condition
type = self.descriptors.application[name].get('type')
if type == 'remote':
module['remote'] = True
result.append(module)
return json.dumps(result)
def _write_module_resources(self, resource_names, output):
for resource_name in resource_names:
resource_name = path.normpath(resource_name).replace('\\', '/')
output.write('Runtime.cachedResources["%s"] = "' % resource_name)
resource_content = read_file(path.join(self.application_dir, resource_name)) + resource_source_url(resource_name)
resource_content = resource_content.replace('\\', '\\\\')
resource_content = resource_content.replace('\n', '\\n')
resource_content = resource_content.replace('"', '\\"')
output.write(resource_content)
output.write('";\n')
def _concatenate_autostart_modules(self, output):
    """Append the scripts of every autostart module to *output*.

    Also validates that no autostart module depends on an on-demand
    module; workers are exempt from the dependency check.
    """
    non_autostart = set()
    sorted_module_names = self.descriptors.sorted_modules()
    for name in sorted_module_names:
        desc = self.descriptors.modules[name]
        name = desc['name']
        type = self.descriptors.application[name].get('type')
        if type == 'autostart':
            deps = set(desc.get('dependencies', []))
            non_autostart_deps = deps & non_autostart
            if len(non_autostart_deps):
                bail_error('Non-autostart dependencies specified for the autostarted module "%s": %s' % (name, non_autostart_deps))
            output.write('\n/* Module %s */\n' % name)
            modular_build.concatenate_scripts(desc.get('scripts'), join(self.application_dir, name), self.output_dir, output)
        elif type != 'worker':
            # Modules are visited in sorted (dependency) order, so any
            # non-autostart module recorded here can be detected as an
            # illegal dependency of a later autostart module.
            non_autostart.add(name)
def _concatenate_application_script(self, output):
    """Assemble the monolithic application script into *output*.

    Layout: patched Runtime.js, autostart module scripts, the application
    descriptor assignment, inlined core resources, then the app loader.
    """
    runtime_contents = read_file(join(self.application_dir, 'Runtime.js'))
    # Replace the empty placeholder list in Runtime.js with the release
    # module descriptors (backslashes doubled to survive re.sub).
    runtime_contents = re.sub('var allDescriptors = \[\];', 'var allDescriptors = %s;' % self._release_module_descriptors().replace('\\', '\\\\'), runtime_contents, 1)
    output.write('/* Runtime.js */\n')
    output.write(runtime_contents)
    output.write('\n/* Autostart modules */\n')
    self._concatenate_autostart_modules(output)
    output.write('/* Application descriptor %s */\n' % self.app_file('json'))
    output.write('applicationDescriptor = ')
    output.write(self.descriptors.application_json())
    output.write(';\n/* Core resources */\n')
    self._write_module_resources(self.core_resource_names(), output)
    output.write('\n/* Application loader */\n')
    output.write(read_file(join(self.application_dir, self.app_file('js'))))
def _concatenate_dynamic_module(self, module_name):
    """Concatenate a dynamically-loaded module's scripts and resources.

    The minified result is written to the module's concatenated file in
    the output directory.
    """
    module = self.descriptors.modules[module_name]
    scripts = module.get('scripts')
    resources = self.descriptors.module_resources(module_name)
    module_dir = join(self.application_dir, module_name)
    output = StringIO()
    if scripts:
        modular_build.concatenate_scripts(scripts, module_dir, self.output_dir, output)
    if resources:
        self._write_module_resources(resources, output)
    output_file_path = concatenated_module_filename(module_name, self.output_dir)
    write_file(output_file_path, minify_js(output.getvalue()))
    output.close()
def _concatenate_worker(self, module_name):
    """Build a standalone script bundle for a worker module.

    The bundle contains the scripts of the module's whole dependency
    closure, since a worker cannot rely on the page's module loader.
    Modules without scripts produce no output file.
    """
    descriptor = self.descriptors.modules[module_name]
    scripts = descriptor.get('scripts')
    if not scripts:
        return
    output = StringIO()
    output.write('/* Worker %s */\n' % module_name)
    dep_descriptors = []
    for dep_name in self.descriptors.sorted_dependencies_closure(module_name):
        dep_descriptor = self.descriptors.modules[dep_name]
        dep_descriptors.append(dep_descriptor)
        scripts = dep_descriptor.get('scripts')
        if scripts:
            output.write('\n/* Module %s */\n' % dep_name)
            modular_build.concatenate_scripts(scripts, join(self.application_dir, dep_name), self.output_dir, output)
    output_file_path = concatenated_module_filename(module_name, self.output_dir)
    write_file(output_file_path, minify_js(output.getvalue()))
    output.close()
# Outputs:
#   <app_name>.html as-is
#   <app_name>.js as-is
#   <module_name>/<all_files>
class DebugBuilder(AppBuilder):
    """Debug builder: symlinks (or copies) sources into the output dir unprocessed."""

    def __init__(self, application_name, descriptors, application_dir, output_dir):
        AppBuilder.__init__(self, application_name, descriptors, application_dir, output_dir)

    def build_app(self):
        """Mirror the HTML, the app script and every module directory into output_dir."""
        self._build_html()
        js_name = self.app_file('js')
        src_name = join(os.getcwd(), self.application_dir, js_name)
        symlink_or_copy_file(src_name, join(self.output_dir, js_name), True)
        # The descriptor values are not needed here; only the module names
        # are used to mirror each module directory.  (The previous code
        # fetched `self.descriptors.modules[module_name]` into an unused
        # local.)
        for module_name in self.descriptors.modules:
            input_module_dir = join(self.application_dir, module_name)
            output_module_dir = join(self.output_dir, module_name)
            symlink_or_copy_dir(input_module_dir, output_module_dir)

    def _build_html(self):
        """Symlink/copy the application's HTML file verbatim."""
        html_name = self.app_file('html')
        symlink_or_copy_file(join(os.getcwd(), self.application_dir, html_name), join(self.output_dir, html_name), True)
def build_application(application_name, loader, application_dir, output_dir, release_mode):
    """Build one application in release (minified) or debug (mirrored) mode."""
    descriptors = loader.load_application(application_name + '.json')
    builder_class = ReleaseBuilder if release_mode else DebugBuilder
    builder_class(application_name, descriptors, application_dir, output_dir).build_app()
|
gpl-3.0
|
dtroyer/python-openstacksdk
|
openstack/identity/v3/policy.py
|
1
|
1426
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.identity import identity_service
from openstack import resource
class Policy(resource.Resource):
    """Identity v3 policy resource.

    A policy stores a serialized rule set ('blob') together with the MIME
    media type of that serialization.
    """
    resource_key = 'policy'
    resources_key = 'policies'
    base_path = '/policies'
    service = identity_service.IdentityService()

    # capabilities
    allow_create = True
    allow_get = True
    allow_update = True
    allow_delete = True
    allow_list = True

    # Updates are sent as partial documents.
    update_method = 'PATCH'

    # Properties
    #: The policy rule set itself, as a serialized blob. *Type: string*
    blob = resource.Body('blob')
    #: The links for the policy resource.
    links = resource.Body('links')
    #: The ID for the project.
    project_id = resource.Body('project_id')
    #: The MIME Media Type of the serialized policy blob. *Type: string*
    type = resource.Body('type')
    #: The ID of the user who owns the policy
    user_id = resource.Body('user_id')
|
apache-2.0
|
d-k-b/udacity-deep-learning
|
seq2seq/helper.py
|
1
|
1420
|
import os
def load_data(path):
    """Read the text file at *path* and return its entire contents.

    The file is decoded as UTF-8; undecodable bytes are silently dropped
    (errors='ignore').
    """
    with open(os.path.join(path), "r", encoding='utf-8', errors='ignore') as handle:
        return handle.read()
def extract_vocab(data):
    """Build id<->word lookup tables for whitespace-tokenized *data*.

    Ids 0-3 are reserved for the special tokens (pad, unk, start, end,
    reproduced exactly as spelled in the original); remaining ids follow
    set-iteration order of the corpus vocabulary.
    """
    special_words = ['<pad>', '<unk>', '<s>', '<\s>']
    corpus_words = {word for line in data.split('\n') for word in line.split()}
    int_to_vocab = dict(enumerate(special_words + list(corpus_words)))
    vocab_to_int = {word: idx for idx, word in int_to_vocab.items()}
    return int_to_vocab, vocab_to_int
def pad_id_sequences(source_ids, source_vocab_to_int, target_ids, target_vocab_to_int, sequence_length):
    """Pad every sentence to *sequence_length* with the '<pad>' id.

    Source sentences are additionally reversed after padding (a common
    seq2seq input trick); target sentences keep their order.
    """
    src_pad = source_vocab_to_int['<pad>']
    tgt_pad = target_vocab_to_int['<pad>']
    new_source_ids = []
    for sentence in source_ids:
        padded = sentence + [src_pad] * (sequence_length - len(sentence))
        new_source_ids.append(list(reversed(padded)))
    new_target_ids = [sentence + [tgt_pad] * (sequence_length - len(sentence))
                      for sentence in target_ids]
    return new_source_ids, new_target_ids
def batch_data(source, target, batch_size):
    """Yield aligned (source_batch, target_batch) slices of *batch_size*.

    A trailing partial batch is dropped, matching the original behaviour.
    """
    num_batches = len(source) // batch_size
    for index in range(num_batches):
        begin = index * batch_size
        end = begin + batch_size
        yield source[begin:end], target[begin:end]
|
mit
|
tinchoss/Python_Android
|
python-build/python-libs/gdata/src/atom/data.py
|
136
|
8060
|
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import atom.core
# Namespace templates used to build fully-qualified (Clark-notation) tag names.
ATOM_TEMPLATE = '{http://www.w3.org/2005/Atom}%s'
# The AtomPub namespace changed between the draft (v1) and final (v2) specs,
# so app:* elements are matched under both.
APP_TEMPLATE_V1 = '{http://purl.org/atom/app#}%s'
APP_TEMPLATE_V2 = '{http://www.w3.org/2007/app}%s'
class Name(atom.core.XmlElement):
    """The atom:name element."""
    _qname = ATOM_TEMPLATE % 'name'


class Email(atom.core.XmlElement):
    """The atom:email element."""
    _qname = ATOM_TEMPLATE % 'email'


class Uri(atom.core.XmlElement):
    """The atom:uri element."""
    _qname = ATOM_TEMPLATE % 'uri'


class Person(atom.core.XmlElement):
    """A foundation class which atom:author and atom:contributor extend.

    A person contains information like name, email address, and web page URI for
    an author or contributor to an Atom feed.
    """
    # Class attributes naming element classes declare child elements.
    name = Name
    email = Email
    uri = Uri


class Author(Person):
    """The atom:author element.

    An author is a required element in Feed unless each Entry contains an Author.
    """
    _qname = ATOM_TEMPLATE % 'author'


class Contributor(Person):
    """The atom:contributor element."""
    _qname = ATOM_TEMPLATE % 'contributor'
class Link(atom.core.XmlElement):
    """The atom:link element."""
    _qname = ATOM_TEMPLATE % 'link'
    # String class attributes map the XML attribute of the same name.
    href = 'href'
    rel = 'rel'
    type = 'type'
    hreflang = 'hreflang'
    title = 'title'
    length = 'length'


class Generator(atom.core.XmlElement):
    """The atom:generator element."""
    _qname = ATOM_TEMPLATE % 'generator'
    uri = 'uri'
    version = 'version'
class Text(atom.core.XmlElement):
    """A foundation class from which atom:title, summary, etc. extend.

    This class should never be instantiated.
    """
    # XML 'type' attribute (typically 'text', 'html' or 'xhtml' in Atom).
    type = 'type'


class Title(Text):
    """The atom:title element."""
    _qname = ATOM_TEMPLATE % 'title'


class Subtitle(Text):
    """The atom:subtitle element."""
    _qname = ATOM_TEMPLATE % 'subtitle'


class Rights(Text):
    """The atom:rights element."""
    _qname = ATOM_TEMPLATE % 'rights'


class Summary(Text):
    """The atom:summary element."""
    _qname = ATOM_TEMPLATE % 'summary'


class Content(Text):
    """The atom:content element."""
    _qname = ATOM_TEMPLATE % 'content'
    # Optional 'src' attribute pointing at out-of-line content.
    src = 'src'
class Category(atom.core.XmlElement):
    """The atom:category element."""
    _qname = ATOM_TEMPLATE % 'category'
    term = 'term'
    scheme = 'scheme'
    label = 'label'


class Id(atom.core.XmlElement):
    """The atom:id element."""
    _qname = ATOM_TEMPLATE % 'id'


class Icon(atom.core.XmlElement):
    """The atom:icon element."""
    _qname = ATOM_TEMPLATE % 'icon'


class Logo(atom.core.XmlElement):
    """The atom:logo element."""
    _qname = ATOM_TEMPLATE % 'logo'


class Draft(atom.core.XmlElement):
    """The app:draft element which indicates if this entry should be public."""
    # Tuple qname: element is recognized under both the v1 (draft-spec)
    # and v2 (final) AtomPub namespaces.
    _qname = (APP_TEMPLATE_V1 % 'draft', APP_TEMPLATE_V2 % 'draft')


class Control(atom.core.XmlElement):
    """The app:control element indicating restrictions on publication.

    The APP control element may contain a draft element indicating whether or
    not this entry should be publicly available.
    """
    _qname = (APP_TEMPLATE_V1 % 'control', APP_TEMPLATE_V2 % 'control')
    draft = Draft


class Date(atom.core.XmlElement):
    """A parent class for atom:updated, published, etc."""


class Updated(Date):
    """The atom:updated element."""
    _qname = ATOM_TEMPLATE % 'updated'


class Published(Date):
    """The atom:published element."""
    _qname = ATOM_TEMPLATE % 'published'
class LinkFinder(object):
    """An "interface" providing methods to find link elements.

    Entry elements often contain multiple links which differ in the rel
    attribute or content type. Often, developers are interested in a specific
    type of link so this class provides methods to find specific classes of
    links.

    This class is used as a mixin in Atom entries and feeds; it expects the
    host class to provide a ``link`` attribute (a list of link objects with
    ``rel`` and ``href`` attributes).
    """

    def find_url(self, rel):
        """Returns the URL in a link with the desired rel value."""
        found = self.get_link(rel)
        return found.href if found is not None else None

    FindUrl = find_url

    def get_link(self, rel):
        """Returns a link object which has the desired rel value.

        If you are interested in the URL instead of the link object,
        consider using find_url instead.
        """
        return next((candidate for candidate in self.link
                     if candidate.rel == rel and candidate.href), None)

    GetLink = get_link

    def find_self_link(self):
        """Find the first link with rel set to 'self'.

        Returns:
          A str containing the link's href or None if none of the links had
          rel equal to 'self'.
        """
        return self.find_url('self')

    FindSelfLink = find_self_link

    def get_self_link(self):
        return self.get_link('self')

    GetSelfLink = get_self_link

    def find_edit_link(self):
        return self.find_url('edit')

    FindEditLink = find_edit_link

    def get_edit_link(self):
        return self.get_link('edit')

    GetEditLink = get_edit_link

    def find_edit_media_link(self):
        # Fall back to 'media-edit' since the Picasa API used that rel
        # instead of 'edit-media'.
        return self.find_url('edit-media') or self.find_url('media-edit')

    FindEditMediaLink = find_edit_media_link

    def get_edit_media_link(self):
        return self.get_link('edit-media') or self.get_link('media-edit')

    GetEditMediaLink = get_edit_media_link

    def find_next_link(self):
        return self.find_url('next')

    FindNextLink = find_next_link

    def get_next_link(self):
        return self.get_link('next')

    GetNextLink = get_next_link

    def find_license_link(self):
        return self.find_url('license')

    FindLicenseLink = find_license_link

    def get_license_link(self):
        return self.get_link('license')

    GetLicenseLink = get_license_link

    def find_alternate_link(self):
        return self.find_url('alternate')

    FindAlternateLink = find_alternate_link

    def get_alternate_link(self):
        return self.get_link('alternate')

    GetAlternateLink = get_alternate_link
class FeedEntryParent(atom.core.XmlElement, LinkFinder):
    """A super class for atom:feed and entry, contains shared attributes"""
    # List-wrapped values declare repeatable child elements.
    author = [Author]
    category = [Category]
    contributor = [Contributor]
    id = Id
    link = [Link]
    rights = Rights
    title = Title
    updated = Updated

    def __init__(self, atom_id=None, text=None, *args, **kwargs):
        """Allow the atom id to be supplied at construction time.

        :param atom_id: optional Id instance, assigned to self.id before
            delegating the remaining arguments to XmlElement.__init__.
        """
        if atom_id is not None:
            self.id = atom_id
        atom.core.XmlElement.__init__(self, text=text, *args, **kwargs)
class Source(FeedEntryParent):
    """The atom:source element."""
    _qname = ATOM_TEMPLATE % 'source'
    generator = Generator
    icon = Icon
    logo = Logo
    subtitle = Subtitle


class Entry(FeedEntryParent):
    """The atom:entry element."""
    _qname = ATOM_TEMPLATE % 'entry'
    content = Content
    published = Published
    source = Source
    summary = Summary
    control = Control


class Feed(Source):
    """The atom:feed element; inherits feed-level metadata children from Source."""
    _qname = ATOM_TEMPLATE % 'feed'
    entry = [Entry]
class ExtensionElement(atom.core.XmlElement):
    """Provided for backwards compatibility to the v1 atom.ExtensionElement."""

    def __init__(self, tag=None, namespace=None, attributes=None,
                 children=None, text=None, *args, **kwargs):
        """Build an arbitrary XML element.

        :param tag: local tag name; combined with *namespace* (when given)
            into a Clark-notation qualified name for _qname.
        :param attributes: dict of XML attributes (defaults to empty dict).
        :param children: list of child elements (defaults to empty list).
        :param text: text content of the element.
        """
        # NOTE(review): *args/**kwargs are accepted but never forwarded to
        # XmlElement.__init__ — presumably intentional for v1 compatibility;
        # confirm before relying on them.
        if namespace:
            self._qname = '{%s}%s' % (namespace, tag)
        else:
            self._qname = tag
        self.children = children or []
        self.attributes = attributes or {}
        self.text = text

    # v1-style CamelCase alias for the v2 _become_child method.
    _BecomeChildElement = atom.core.XmlElement._become_child
|
apache-2.0
|
bswartz/cinder
|
cinder/volume/drivers/nexenta/nfs.py
|
1
|
33897
|
# Copyright 2016 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import os
import re
import six
from eventlet import greenthread
from oslo_log import log as logging
from oslo_utils import units
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import interface
from cinder.volume.drivers.nexenta import jsonrpc
from cinder.volume.drivers.nexenta import options
from cinder.volume.drivers.nexenta import utils
from cinder.volume.drivers import nfs
VERSION = '1.3.0'
LOG = logging.getLogger(__name__)
@interface.volumedriver
class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921
"""Executes volume driver commands on Nexenta Appliance.
Version history:
.. code-block:: none
1.0.0 - Initial driver version.
1.1.0 - Auto sharing for enclosing folder.
1.1.1 - Added caching for NexentaStor appliance 'volroot' value.
1.1.2 - Ignore "folder does not exist" error in delete_volume and
delete_snapshot method.
1.1.3 - Redefined volume_backend_name attribute inherited from
RemoteFsDriver.
1.2.0 - Added migrate and retype methods.
1.3.0 - Extend volume method.
"""
driver_prefix = 'nexenta'
volume_backend_name = 'NexentaNfsDriver'
VERSION = VERSION
VOLUME_FILE_NAME = 'volume'
def __init__(self, *args, **kwargs):
    """Initialize the driver and cache frequently used configuration values."""
    super(NexentaNfsDriver, self).__init__(*args, **kwargs)
    if self.configuration:
        self.configuration.append_config_values(
            options.NEXENTA_CONNECTION_OPTS)
        self.configuration.append_config_values(
            options.NEXENTA_NFS_OPTS)
        self.configuration.append_config_values(
            options.NEXENTA_DATASET_OPTS)
        self.configuration.append_config_values(
            options.NEXENTA_RRMGR_OPTS)
    # Shortcuts to configuration values used throughout the driver.
    self.nms_cache_volroot = self.configuration.nexenta_nms_cache_volroot
    self.rrmgr_compression = self.configuration.nexenta_rrmgr_compression
    self.rrmgr_tcp_buf_size = self.configuration.nexenta_rrmgr_tcp_buf_size
    self.rrmgr_connections = self.configuration.nexenta_rrmgr_connections
    self.nfs_mount_point_base = self.configuration.nexenta_mount_point_base
    self.volume_compression = (
        self.configuration.nexenta_dataset_compression)
    self.volume_deduplication = self.configuration.nexenta_dataset_dedup
    self.volume_description = (
        self.configuration.nexenta_dataset_description)
    self.sparsed_volumes = self.configuration.nexenta_sparsed_volumes
    # Caches: NMS connection -> volroot, NFS share -> NMS connection,
    # and NFS share -> NFS protocol version.
    self._nms2volroot = {}
    self.share2nms = {}
    self.nfs_versions = {}
@property
def backend_name(self):
    """Backend name from configuration, falling back to the class name."""
    backend_name = None
    if self.configuration:
        backend_name = self.configuration.safe_get('volume_backend_name')
    if not backend_name:
        backend_name = self.__class__.__name__
    return backend_name
def do_setup(self, context):
    """Set up the driver: load the shares config and mount subfolders.

    :param context: cinder request context (passed through to the base class)
    """
    shares_config = getattr(self.configuration, self.driver_prefix +
                            '_shares_config')
    if shares_config:
        # Make the generic NFS base class use the Nexenta-specific file.
        self.configuration.nfs_shares_config = shares_config
    super(NexentaNfsDriver, self).do_setup(context)
    self._load_shares_config(shares_config)
    self._mount_subfolders()
def check_for_setup_error(self):
    """Verify that the volume for our folder exists.

    :raise: :py:exc:`LookupError` when a configured volume or folder is
        missing on the appliance.
    """
    if not self.share2nms:
        return
    for nfs_share in self.share2nms:
        nms = self.share2nms[nfs_share]
        volume_name, dataset = self._get_share_datasets(nfs_share)
        if not nms.volume.object_exists(volume_name):
            # Interpolate the name into the message with '%'; the original
            # passed it as a second LookupError argument, so the rendered
            # message kept a literal '%s'.
            raise LookupError(_("Volume %s does not exist in Nexenta "
                                "Store appliance") % volume_name)
        folder = '%s/%s' % (volume_name, dataset)
        if not nms.folder.object_exists(folder):
            raise LookupError(_("Folder %s does not exist in Nexenta "
                                "Store appliance") % folder)
        self._share_folder(nms, volume_name, dataset)
def migrate_volume(self, ctxt, volume, host):
    """Migrate if volume and host are managed by Nexenta appliance.

    :param ctxt: context
    :param volume: a dictionary describing the volume to migrate
    :param host: a dictionary describing the host to migrate to
    :returns: tuple of (migrated: bool, model_update or None)
    """
    LOG.debug('Enter: migrate_volume: id=%(id)s, host=%(host)s',
              {'id': volume['id'], 'host': host})
    false_ret = (False, None)
    if volume['status'] not in ('available', 'retyping'):
        LOG.warning(_LW("Volume status must be 'available' or 'retyping'."
                        " Current volume status: %s"), volume['status'])
        return false_ret
    if 'capabilities' not in host:
        LOG.warning(_LW("Unsupported host. No capabilities found"))
        return false_ret
    capabilities = host['capabilities']
    ns_shares = capabilities['ns_shares']
    dst_parts = capabilities['location_info'].split(':')
    dst_host, dst_volume = dst_parts[1:]
    # Destination must be a Nexenta backend of this same driver class
    # with enough free capacity for the volume.
    if (capabilities.get('vendor_name') != 'Nexenta' or
            dst_parts[0] != self.__class__.__name__ or
            capabilities['free_capacity_gb'] < volume['size']):
        return false_ret
    nms = self.share2nms[volume['provider_location']]
    ssh_bindings = nms.appliance.ssh_list_bindings()
    shares = []
    # Candidate destinations: shares that are SSH-bound to the source
    # appliance and large enough to hold the volume.
    for bind in ssh_bindings:
        for share in ns_shares:
            if (share.startswith(ssh_bindings[bind][3]) and
                    ns_shares[share] >= volume['size']):
                shares.append(share)
    if len(shares) == 0:
        LOG.warning(_LW("Remote NexentaStor appliance at %s should be "
                        "SSH-bound."), share)
        return false_ret
    # Pick the candidate with the most free space.
    share = sorted(shares, key=ns_shares.get, reverse=True)[0]
    snapshot = {
        'volume_name': volume['name'],
        'volume_id': volume['id'],
        'name': utils.get_migrate_snapshot_name(volume)
    }
    self.create_snapshot(snapshot)
    location = volume['provider_location']
    src = '%(share)s/%(volume)s@%(snapshot)s' % {
        'share': location.split(':')[1].split('volumes/')[1],
        'volume': volume['name'],
        'snapshot': snapshot['name']
    }
    dst = ':'.join([dst_host, dst_volume.split('/volumes/')[1]])
    try:
        # Replicate the snapshot to the destination via rrmgr.
        nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst))
    except exception.NexentaException as exc:
        LOG.warning(_LW("Cannot send source snapshot %(src)s to "
                        "destination %(dst)s. Reason: %(exc)s"),
                    {'src': src, 'dst': dst, 'exc': exc})
        return false_ret
    finally:
        # Best-effort cleanup of the temporary migration snapshot on the
        # source appliance.
        try:
            self.delete_snapshot(snapshot)
        except exception.NexentaException as exc:
            LOG.warning(_LW("Cannot delete temporary source snapshot "
                            "%(src)s on NexentaStor Appliance: %(exc)s"),
                        {'src': src, 'exc': exc})
    try:
        self.delete_volume(volume)
    except exception.NexentaException as exc:
        LOG.warning(_LW("Cannot delete source volume %(volume)s on "
                        "NexentaStor Appliance: %(exc)s"),
                    {'volume': volume['name'], 'exc': exc})
    # Drop the replicated snapshot on the destination as well.
    dst_nms = self._get_nms_for_url(capabilities['nms_url'])
    dst_snapshot = '%s/%s@%s' % (dst_volume.split('volumes/')[1],
                                 volume['name'], snapshot['name'])
    try:
        dst_nms.snapshot.destroy(dst_snapshot, '')
    except exception.NexentaException as exc:
        LOG.warning(_LW("Cannot delete temporary destination snapshot "
                        "%(dst)s on NexentaStor Appliance: %(exc)s"),
                    {'dst': dst_snapshot, 'exc': exc})
    return True, {'provider_location': share}
def _get_zfs_send_recv_cmd(self, src, dst):
    """Returns rrmgr command for source and destination.

    :param src: source snapshot path (share-relative, with @snapshot suffix)
    :param dst: destination in host:path form
    """
    return utils.get_rrmgr_cmd(src, dst,
                               compression=self.rrmgr_compression,
                               tcp_buf_size=self.rrmgr_tcp_buf_size,
                               connections=self.rrmgr_connections)
def initialize_connection(self, volume, connector):
    """Allow connection to connector and return connection info.

    :param volume: volume reference
    :param connector: connector reference
    :returns: dict with driver_volume_type and NFS export data
    """
    export = '%s/%s' % (volume['provider_location'], volume['name'])
    data = {'export': export, 'name': 'volume'}
    if volume['provider_location'] in self.shares:
        # Pass per-share mount options through to the connector.
        data['options'] = self.shares[volume['provider_location']]
    return {
        'driver_volume_type': self.driver_volume_type,
        'data': data
    }
def retype(self, context, volume, new_type, diff, host):
    """Convert the volume to be of the new type.

    :param context: Context
    :param volume: A dictionary describing the volume to migrate
    :param new_type: A dictionary describing the volume type to convert to
    :param diff: A dictionary with the difference between the two types
    :param host: A dictionary describing the host to migrate to, where
                 host['host'] is its name, and host['capabilities'] is a
                 dictionary of its reported capabilities.
    :returns: tuple of (retyped or migrated: bool, model_update)
    """
    LOG.debug('Retype volume request %(vol)s to be %(type)s '
              '(host: %(host)s), diff %(diff)s.',
              {'vol': volume['name'],
               'type': new_type,
               'host': host,
               'diff': diff})
    # Map retypable extra-spec keys to folder property names on the NMS.
    options = dict(
        compression='compression',
        dedup='dedup',
        description='nms:description'
    )
    retyped = False
    migrated = False
    model_update = None
    src_backend = self.__class__.__name__
    dst_backend = host['capabilities']['location_info'].split(':')[0]
    if src_backend != dst_backend:
        LOG.warning(_LW('Cannot retype from %(src_backend)s to '
                        '%(dst_backend)s.'),
                    {
                        'src_backend': src_backend,
                        'dst_backend': dst_backend
                    })
        return False
    hosts = (volume['host'], host['host'])
    old, new = hosts
    if old != new:
        # Different host: migrate first, then retype on the destination.
        migrated, provider_location = self.migrate_volume(
            context, volume, host)
    if not migrated:
        provider_location = volume['provider_location']
        nms = self.share2nms[provider_location]
    else:
        # After migration the NMS connection and provider_location come
        # from the destination backend's capabilities.
        nms_url = host['capabilities']['nms_url']
        nms = self._get_nms_for_url(nms_url)
        model_update = provider_location
        provider_location = provider_location['provider_location']
    share = provider_location.split(':')[1].split('volumes/')[1]
    folder = '%(share)s/%(volume)s' % {
        'share': share,
        'volume': volume['name']
    }
    # Apply each changed extra-spec as a folder property.
    for opt in options:
        old, new = diff.get('extra_specs').get(opt, (False, False))
        if old != new:
            LOG.debug('Changing %(opt)s from %(old)s to %(new)s.',
                      {'opt': opt, 'old': old, 'new': new})
            try:
                nms.folder.set_child_prop(
                    folder, options[opt], new)
                retyped = True
            except exception.NexentaException:
                LOG.error(_LE('Error trying to change %(opt)s'
                              ' from %(old)s to %(new)s'),
                          {'opt': opt, 'old': old, 'new': new})
                return False, None
    return retyped or migrated, model_update
def _do_create_volume(self, volume):
    """Create the backing folder and volume file on the appliance.

    :param volume: volume reference (provider_location must be set)
    """
    nfs_share = volume['provider_location']
    nms = self.share2nms[nfs_share]
    vol, dataset = self._get_share_datasets(nfs_share)
    folder = '%s/%s' % (dataset, volume['name'])
    LOG.debug('Creating folder on Nexenta Store %s', folder)
    nms.folder.create_with_props(
        vol, folder,
        {'compression': self.configuration.nexenta_dataset_compression}
    )
    volume_path = self.remote_path(volume)
    volume_size = volume['size']
    try:
        self._share_folder(nms, vol, folder)
        if getattr(self.configuration,
                   self.driver_prefix + '_sparsed_volumes'):
            self._create_sparsed_file(nms, volume_path, volume_size)
        else:
            folder_path = '%s/%s' % (vol, folder)
            compression = nms.folder.get_child_prop(
                folder_path, 'compression')
            if compression != 'off':
                # Disable compression, because otherwise will not use space
                # on disk.
                nms.folder.set_child_prop(
                    folder_path, 'compression', 'off')
            try:
                self._create_regular_file(nms, volume_path, volume_size)
            finally:
                if compression != 'off':
                    # Backup default compression value if it was changed.
                    nms.folder.set_child_prop(
                        folder_path, 'compression', compression)
        self._set_rw_permissions_for_all(nms, volume_path)
        # Pre-v4 NFS: mount the per-volume subfolder explicitly —
        # presumably because older NFS cannot traverse into the nested
        # dataset from the parent mount; confirm against _mount_subfolders.
        if self._get_nfs_server_version(nfs_share) < 4:
            sub_share, mnt_path = self._get_subshare_mount_point(nfs_share,
                                                                volume)
            self._ensure_share_mounted(sub_share, mnt_path)
    except exception.NexentaException:
        # Roll back the created folder on any failure so datasets do not leak.
        try:
            nms.folder.destroy('%s/%s' % (vol, folder))
        except exception.NexentaException:
            LOG.warning(_LW("Cannot destroy created folder: "
                            "%(vol)s/%(folder)s"),
                        {'vol': vol, 'folder': folder})
        raise
def create_volume_from_snapshot(self, volume, snapshot):
    """Create new volume from other's snapshot on appliance.

    :param volume: reference of volume to be created
    :param snapshot: reference of source snapshot
    :returns: model update dict carrying the provider_location
    """
    self._ensure_shares_mounted()
    snapshot_vol = self._get_snapshot_volume(snapshot)
    nfs_share = snapshot_vol['provider_location']
    # The clone lives on the same share as the snapshot's source volume.
    volume['provider_location'] = nfs_share
    nms = self.share2nms[nfs_share]
    vol, dataset = self._get_share_datasets(nfs_share)
    snapshot_name = '%s/%s/%s@%s' % (vol, dataset, snapshot['volume_name'],
                                     snapshot['name'])
    folder = '%s/%s' % (dataset, volume['name'])
    nms.folder.clone(snapshot_name, '%s/%s' % (vol, folder))
    try:
        self._share_folder(nms, vol, folder)
    except exception.NexentaException:
        # Sharing failed: destroy the clone so it does not leak.
        try:
            nms.folder.destroy('%s/%s' % (vol, folder), '')
        except exception.NexentaException:
            LOG.warning(_LW("Cannot destroy cloned folder: "
                            "%(vol)s/%(folder)s"),
                        {'vol': vol, 'folder': folder})
        raise
    if self._get_nfs_server_version(nfs_share) < 4:
        sub_share, mnt_path = self._get_subshare_mount_point(nfs_share,
                                                            volume)
        self._ensure_share_mounted(sub_share, mnt_path)
    if (('size' in volume) and (
            volume['size'] > snapshot['volume_size'])):
        # Grow the clone when the requested size exceeds the snapshot's.
        self.extend_volume(volume, volume['size'])
    return {'provider_location': volume['provider_location']}
def create_cloned_volume(self, volume, src_vref):
    """Creates a clone of the specified volume.

    :param volume: new volume reference
    :param src_vref: source volume reference
    :returns: model update from create_volume_from_snapshot
    """
    LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
    snapshot = {'volume_name': src_vref['name'],
                'volume_id': src_vref['id'],
                'volume_size': src_vref['size'],
                'name': self._get_clone_snapshot_name(volume)}
    # We don't delete this snapshot, because this snapshot will be origin
    # of new volume. This snapshot will be automatically promoted by NMS
    # when user will delete its origin.
    self.create_snapshot(snapshot)
    try:
        return self.create_volume_from_snapshot(volume, snapshot)
    except exception.NexentaException:
        # Clone creation failed: remove the snapshot we just took.
        LOG.error(_LE('Volume creation failed, deleting created snapshot '
                      '%(volume_name)s@%(name)s'), snapshot)
        try:
            self.delete_snapshot(snapshot)
        except (exception.NexentaException, exception.SnapshotIsBusy):
            LOG.warning(_LW('Failed to delete zfs snapshot '
                            '%(volume_name)s@%(name)s'), snapshot)
        raise
def delete_volume(self, volume):
    """Deletes a logical volume.

    Unmounts the volume's subfolder (when mounted), destroys the backing
    folder recursively, and removes the clone-origin snapshot when the
    volume was created as a clone.

    :param volume: volume reference
    """
    nfs_share = volume.get('provider_location')
    if not nfs_share:
        return
    nms = self.share2nms[nfs_share]
    vol, parent_folder = self._get_share_datasets(nfs_share)
    folder = '%s/%s/%s' % (vol, parent_folder, volume['name'])
    # Drop the trailing '/<volume-file>' path component to get the mount
    # path.  The previous code used str.strip('/%s' % VOLUME_FILE_NAME),
    # which removes any of those *characters* from both ends and mangled
    # the path (e.g. it also ate the leading '/volumes' prefix), so the
    # umount below could never trigger.
    volume_path = self.remote_path(volume)
    suffix = '/%s' % self.VOLUME_FILE_NAME
    if volume_path.endswith(suffix):
        mount_path = volume_path[:-len(suffix)]
    else:
        mount_path = volume_path
    if mount_path in self._remotefsclient._read_mounts():
        self._execute('umount', mount_path, run_as_root=True)
    try:
        # Fetch the clone origin BEFORE destroying the folder.
        props = nms.folder.get_child_props(folder, 'origin') or {}
        nms.folder.destroy(folder, '-r')
    except exception.NexentaException as exc:
        if 'does not exist' in exc.args[0]:
            LOG.info(_LI('Folder %s does not exist, it was '
                         'already deleted.'), folder)
            return
        raise
    origin = props.get('origin')
    if origin and self._is_clone_snapshot_name(origin):
        try:
            nms.snapshot.destroy(origin, '')
        except exception.NexentaException as exc:
            if 'does not exist' in exc.args[0]:
                LOG.info(_LI('Snapshot %s does not exist, it was '
                             'already deleted.'), origin)
                return
            raise
def extend_volume(self, volume, new_size):
    """Extend an existing volume.

    :param volume: volume reference
    :param new_size: volume new size in GB
    """
    LOG.info(_LI('Extending volume: %(id)s New size: %(size)s GB'),
             {'id': volume['id'], 'size': new_size})
    nfs_share = volume['provider_location']
    nms = self.share2nms[nfs_share]
    volume_path = self.remote_path(volume)
    if getattr(self.configuration,
               self.driver_prefix + '_sparsed_volumes'):
        # Sparse volume: just grow the backing file with truncate.
        self._create_sparsed_file(nms, volume_path, new_size)
    else:
        # Regular volume: append zeroes for the added capacity via dd.
        block_size_mb = 1
        block_count = ((new_size - volume['size']) * units.Gi /
                       (block_size_mb * units.Mi))
        # NOTE(review): dd's seek= counts bs-sized blocks, but this
        # expression (size * Gi / block_size_mb) looks like a byte count —
        # verify the intended seek offset against the appliance behavior.
        nms.appliance.execute(
            'dd if=/dev/zero seek=%(seek)d of=%(path)s'
            ' bs=%(bs)dM count=%(count)d' % {
                'seek': volume['size'] * units.Gi / block_size_mb,
                'path': volume_path,
                'bs': block_size_mb,
                'count': block_count
            }
        )
def create_snapshot(self, snapshot):
    """Creates a snapshot.

    :param snapshot: snapshot reference
    """
    volume = self._get_snapshot_volume(snapshot)
    nfs_share = volume['provider_location']
    nms = self.share2nms[nfs_share]
    vol, dataset = self._get_share_datasets(nfs_share)
    folder = '%s/%s/%s' % (vol, dataset, volume['name'])
    # '-r' takes the snapshot recursively over child datasets.
    nms.folder.create_snapshot(folder, snapshot['name'], '-r')
def delete_snapshot(self, snapshot):
    """Deletes a snapshot.

    Tolerates an already-deleted snapshot and one that still has
    dependent clones (NMS removes it later); any other backend failure
    is propagated.

    :param snapshot: snapshot reference
    """
    volume = self._get_snapshot_volume(snapshot)
    nfs_share = volume['provider_location']
    nms = self.share2nms[nfs_share]
    vol, dataset = self._get_share_datasets(nfs_share)
    folder = '%s/%s/%s' % (vol, dataset, volume['name'])
    try:
        nms.snapshot.destroy('%s@%s' % (folder, snapshot['name']), '')
    except exception.NexentaException as exc:
        if 'does not exist' in exc.args[0]:
            LOG.info(_LI('Snapshot %(folder)s@%(snapshot)s does not '
                         'exist, it was already deleted.'),
                     {
                         'folder': folder,
                         # Log the name, not the whole snapshot dict.
                         'snapshot': snapshot['name'],
                     })
            return
        if 'has dependent clones' in exc.args[0]:
            LOG.info(_LI('Snapshot %(folder)s@%(snapshot)s has dependent '
                         'clones, it will be deleted later.'),
                     {
                         'folder': folder,
                         'snapshot': snapshot['name'],
                     })
            return
        # Any other backend failure must not be silently swallowed (the
        # original handler fell off the end here and hid the error).
        raise
def _create_sparsed_file(self, nms, path, size):
    """Creates file with 0 disk usage.

    :param nms: nms object
    :param path: path to new file
    :param size: size of file in GB (passed to truncate as %dG)
    """
    nms.appliance.execute(
        'truncate --size %(size)dG %(path)s' % {
            'path': path,
            'size': size
        }
    )
def _create_regular_file(self, nms, path, size):
    """Creates regular file of given size.

    Takes a lot of time for large files.

    :param nms: nms object
    :param path: path to new file
    :param size: size of file in GB
    """
    # Write the file as `size` GiB worth of 1 MiB zero blocks.
    block_size_mb = 1
    block_count = size * units.Gi / (block_size_mb * units.Mi)
    LOG.info(_LI('Creating regular file: %s.'
                 'This may take some time.'), path)
    nms.appliance.execute(
        'dd if=/dev/zero of=%(path)s bs=%(bs)dM count=%(count)d' % {
            'path': path,
            'bs': block_size_mb,
            'count': block_count
        }
    )
    LOG.info(_LI('Regular file: %s created.'), path)
    def _set_rw_permissions_for_all(self, nms, path):
        """Make the path world read/writable (equivalent of mode 666).

        :param nms: nms object
        :param path: path to file on the appliance
        """
        nms.appliance.execute('chmod ugo+rw %s' % path)
    def local_path(self, volume):
        """Get volume path (mounted locally fs path) for given volume.

        :param volume: volume reference
        """
        nfs_share = volume['provider_location']
        # On-share layout: <mount point>/<volume name>/volume — each volume
        # is a folder containing a single data file named 'volume'.
        return os.path.join(self._get_mount_point_for_share(nfs_share),
                            volume['name'], 'volume')
    def _get_mount_point_for_share(self, nfs_share):
        """Returns path to mount point NFS share.

        :param nfs_share: example 172.18.194.100:/var/nfs
        """
        # Encode before hashing: md5 needs bytes. The digest only provides a
        # stable, filesystem-safe directory name (not used for security).
        nfs_share = nfs_share.encode('utf-8')
        return os.path.join(self.configuration.nexenta_mount_point_base,
                            hashlib.md5(nfs_share).hexdigest())
def remote_path(self, volume):
"""Get volume path (mounted remotely fs path) for given volume.
:param volume: volume reference
"""
nfs_share = volume['provider_location']
share = nfs_share.split(':')[1].rstrip('/')
return '%s/%s/volume' % (share, volume['name'])
    def _share_folder(self, nms, volume, folder):
        """Share NFS folder on NexentaStor Appliance.

        :param nms: nms object
        :param volume: volume (dataset) name on the appliance
        :param folder: folder name inside the volume
        """
        path = '%s/%s' % (volume, folder.lstrip('/'))
        # World read/write, root squashed to 'nobody'; 'anon=0' maps
        # anonymous access to uid 0 — presumably required so the Cinder host
        # can write as root. TODO confirm against NexentaStor NFS docs.
        share_opts = {
            'read_write': '*',
            'read_only': '',
            'root': 'nobody',
            'extra_options': 'anon=0',
            'recursive': 'true',
            'anonymous_rw': 'true',
        }
        LOG.debug('Sharing folder %s on Nexenta Store', folder)
        nms.netstorsvc.share_folder('svc:/network/nfs/server:default', path,
                                    share_opts)
    def _load_shares_config(self, share_file):
        """Parse the shares config file into self.shares / self.share2nms.

        Each non-comment line maps an NFS export to the NMS management URL
        of the appliance that serves it, optionally followed by mount
        options passed verbatim to the mount command.
        """
        self.shares = {}
        self.share2nms = {}
        for share in self._read_config_file(share_file):
            # A configuration line may be either:
            #   host:/share_name  http://user:pass@host:[port]/
            # or
            #   host:/share_name  http://user:pass@host:[port]/
            #                     -o options=123,rw --other
            if not share.strip():
                continue
            if share.startswith('#'):
                continue
            # maxsplit=2: everything after the second field is mount options.
            share_info = re.split(r'\s+', share, 2)
            share_address = share_info[0].strip()
            nms_url = share_info[1].strip()
            share_opts = share_info[2].strip() if len(share_info) > 2 else None
            if not re.match(r'.+:/.+', share_address):
                LOG.warning(_LW("Share %s ignored due to invalid format. "
                                "Must be of form address:/export."),
                            share_address)
                continue
            self.shares[share_address] = share_opts
            self.share2nms[share_address] = self._get_nms_for_url(nms_url)
        LOG.debug('Shares loaded: %s', self.shares)
    def _get_subshare_mount_point(self, nfs_share, volume):
        """Return (sub_share, mnt_path) for a volume's per-folder share.

        Used for NFSv3 appliances, where each volume folder must be mounted
        as its own share.
        """
        mnt_path = '%s/%s' % (
            self._get_mount_point_for_share(nfs_share), volume['name'])
        sub_share = '%s/%s' % (nfs_share, volume['name'])
        return sub_share, mnt_path
    def _ensure_share_mounted(self, nfs_share, mount_path=None):
        """Ensure that NFS share is mounted on the host.

        Unlike the parent method this one accepts mount_path as an optional
        parameter and uses it as a mount point if provided.

        Retries up to ``nfs_mount_attempts`` times with a 1s pause between
        attempts; raises NfsException after the final failure.

        :param nfs_share: NFS share name
        :param mount_path: mount path on the host
        """
        mnt_flags = []
        if self.shares.get(nfs_share) is not None:
            mnt_flags = self.shares[nfs_share].split()
        # Guard against a misconfigured value of 0 or negative attempts.
        num_attempts = max(1, self.configuration.nfs_mount_attempts)
        for attempt in range(num_attempts):
            try:
                if mount_path is None:
                    # Default path: let remotefs pick the mount point.
                    self._remotefsclient.mount(nfs_share, mnt_flags)
                else:
                    # Explicit mount point: skip if already mounted,
                    # otherwise create the directory and mount there.
                    if mount_path in self._remotefsclient._read_mounts():
                        LOG.info(_LI('Already mounted: %s'), mount_path)
                        return
                    self._execute('mkdir', '-p', mount_path,
                                  check_exit_code=False)
                    self._remotefsclient._mount_nfs(nfs_share, mount_path,
                                                    mnt_flags)
                return
            except Exception as e:
                # Broad catch is deliberate: any mount failure is retried.
                if attempt == (num_attempts - 1):
                    LOG.error(_LE('Mount failure for %(share)s after '
                                  '%(count)d attempts.'), {
                              'share': nfs_share,
                              'count': num_attempts})
                    raise exception.NfsException(six.text_type(e))
                LOG.warning(
                    _LW('Mount attempt %(attempt)d failed: %(error)s. '
                        'Retrying mount ...'), {
                        'attempt': attempt,
                        'error': e})
                greenthread.sleep(1)
    def _mount_subfolders(self):
        """Mount per-volume subshares for volumes on NFSv3 appliances.

        NFSv4 exposes nested folders through the parent mount; for servers
        capped below v4 each volume folder must be mounted individually.
        """
        ctxt = context.get_admin_context()
        vol_entries = self.db.volume_get_all_by_host(ctxt, self.host)
        for vol in vol_entries:
            nfs_share = vol['provider_location']
            if ((nfs_share in self.shares) and
                    (self._get_nfs_server_version(nfs_share) < 4)):
                sub_share, mnt_path = self._get_subshare_mount_point(
                    nfs_share, vol)
                self._ensure_share_mounted(sub_share, mnt_path)
    def _get_nfs_server_version(self, share):
        """Return the max NFS protocol version of the share's server.

        Results are memoized in self.nfs_versions per share.
        """
        if not self.nfs_versions.get(share):
            nms = self.share2nms[share]
            nfs_opts = nms.netsvc.get_confopts(
                'svc:/network/nfs/server:default', 'configure')
            # Different appliance versions expose the option under different
            # keys, hence the KeyError fallback.
            try:
                self.nfs_versions[share] = int(
                    nfs_opts['nfs_server_versmax']['current'])
            except KeyError:
                self.nfs_versions[share] = int(
                    nfs_opts['server_versmax']['current'])
        return self.nfs_versions[share]
    def _get_capacity_info(self, nfs_share):
        """Calculate available space on the NFS share.

        :param nfs_share: example 172.18.194.100:/var/nfs
        :returns: tuple (total, free, allocated) in bytes
        """
        nms = self.share2nms[nfs_share]
        ns_volume, ns_folder = self._get_share_datasets(nfs_share)
        folder_props = nms.folder.get_child_props('%s/%s' % (ns_volume,
                                                             ns_folder),
                                                  'used|available')
        free = utils.str2size(folder_props['available'])
        allocated = utils.str2size(folder_props['used'])
        # Total is derived, not reported: free + allocated.
        return free + allocated, free, allocated
    def _get_nms_for_url(self, url):
        """Returns initialized nms object for url."""
        # The URL carries the full connection spec, including credentials
        # and whether to auto-detect the scheme.
        auto, scheme, user, password, host, port, path = (
            utils.parse_nms_url(url))
        return jsonrpc.NexentaJSONProxy(scheme, host, port, path, user,
                                        password, auto=auto)
    def _get_snapshot_volume(self, snapshot):
        """Return the DB volume record the snapshot belongs to."""
        ctxt = context.get_admin_context()
        return db.volume_get(ctxt, snapshot['volume_id'])
    def _get_volroot(self, nms):
        """Returns volroot property value from NexentaStor appliance.

        When nms_cache_volroot is enabled the value is fetched once per
        appliance and memoized in self._nms2volroot.
        """
        if not self.nms_cache_volroot:
            return nms.server.get_prop('volroot')
        if nms not in self._nms2volroot:
            self._nms2volroot[nms] = nms.server.get_prop('volroot')
        return self._nms2volroot[nms]
    def _get_share_datasets(self, nfs_share):
        """Split a share's export path into (volume_name, folder_name).

        Strips the appliance's volroot prefix from the export path; the
        first remaining component is the ZFS volume, the rest the folder.
        """
        nms = self.share2nms[nfs_share]
        volroot = self._get_volroot(nms)
        path = nfs_share.split(':')[1][len(volroot):].strip('/')
        volume_name = path.split('/')[0]
        folder_name = '/'.join(path.split('/')[1:])
        return volume_name, folder_name
def _get_clone_snapshot_name(self, volume):
"""Return name for snapshot that will be used to clone the volume."""
return 'cinder-clone-snapshot-%(id)s' % volume
def _is_clone_snapshot_name(self, snapshot):
"""Check if snapshot is created for cloning."""
name = snapshot.split('@')[-1]
return name.startswith('cinder-clone-snapshot-')
    def _update_volume_stats(self):
        """Retrieve stats info for NexentaStor appliance.

        Aggregates capacity across all mounted shares; total/free report the
        per-share maxima (not sums), and location_info points at the share
        with the most free space.
        """
        LOG.debug('Updating volume stats')
        total_space = 0
        free_space = 0
        shares_with_capacities = {}
        for mounted_share in self._mounted_shares:
            total, free, allocated = self._get_capacity_info(mounted_share)
            shares_with_capacities[mounted_share] = utils.str2gib_size(total)
            if total_space < utils.str2gib_size(total):
                total_space = utils.str2gib_size(total)
            if free_space < utils.str2gib_size(free):
                free_space = utils.str2gib_size(free)
                # 'share' tracks the share with the largest free space.
                share = mounted_share
        # NOTE(review): if _mounted_shares is empty, or every share reports
        # 0 GiB free, 'share' is never bound and the next line raises
        # NameError — TODO confirm callers guarantee a non-empty share list.
        location_info = '%(driver)s:%(share)s' % {
            'driver': self.__class__.__name__,
            'share': share
        }
        nms_url = self.share2nms[share].url
        self._stats = {
            'vendor_name': 'Nexenta',
            'dedup': self.volume_deduplication,
            'compression': self.volume_compression,
            'description': self.volume_description,
            'nms_url': nms_url,
            'ns_shares': shares_with_capacities,
            'driver_version': self.VERSION,
            'storage_protocol': 'NFS',
            'total_capacity_gb': total_space,
            'free_capacity_gb': free_space,
            'reserved_percentage': self.configuration.reserved_percentage,
            'QoS_support': False,
            'location_info': location_info,
            'volume_backend_name': self.backend_name,
            'nfs_mount_point_base': self.nfs_mount_point_base
        }
|
apache-2.0
|
mhild/Sick-Beard
|
lib/hachoir_parser/misc/pifv.py
|
90
|
8492
|
"""
EFI Platform Initialization Firmware Volume parser.
Author: Alexandre Boeglin
Creation date: 08 jul 2007
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet,
UInt8, UInt16, UInt24, UInt32, UInt64, Enum,
CString, String, PaddingBytes, RawBytes, NullBytes)
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.tools import paddingSize, humanFilesize
from lib.hachoir_parser.common.win32 import GUID
# Section type codes from the PI (Platform Initialization) specification.
EFI_SECTION_COMPRESSION = 0x1
EFI_SECTION_GUID_DEFINED = 0x2
EFI_SECTION_PE32 = 0x10
EFI_SECTION_PIC = 0x11
EFI_SECTION_TE = 0x12
EFI_SECTION_DXE_DEPEX = 0x13
EFI_SECTION_VERSION = 0x14
EFI_SECTION_USER_INTERFACE = 0x15
EFI_SECTION_COMPATIBILITY16 = 0x16
EFI_SECTION_FIRMWARE_VOLUME_IMAGE = 0x17
EFI_SECTION_FREEFORM_SUBTYPE_GUID = 0x18
EFI_SECTION_RAW = 0x19
EFI_SECTION_PEI_DEPEX = 0x1b
# Human-readable descriptions for the section types above.
EFI_SECTION_TYPE = {
    EFI_SECTION_COMPRESSION: "Encapsulation section where other sections" \
        + " are compressed",
    EFI_SECTION_GUID_DEFINED: "Encapsulation section where other sections" \
        + " have format defined by a GUID",
    EFI_SECTION_PE32: "PE32+ Executable image",
    EFI_SECTION_PIC: "Position-Independent Code",
    EFI_SECTION_TE: "Terse Executable image",
    EFI_SECTION_DXE_DEPEX: "DXE Dependency Expression",
    EFI_SECTION_VERSION: "Version, Text and Numeric",
    EFI_SECTION_USER_INTERFACE: "User-Friendly name of the driver",
    EFI_SECTION_COMPATIBILITY16: "DOS-style 16-bit EXE",
    EFI_SECTION_FIRMWARE_VOLUME_IMAGE: "PI Firmware Volume image",
    EFI_SECTION_FREEFORM_SUBTYPE_GUID: "Raw data with GUID in header to" \
        + " define format",
    EFI_SECTION_RAW: "Raw data",
    EFI_SECTION_PEI_DEPEX: "PEI Dependency Expression",
}
# Firmware volume file type codes.
EFI_FV_FILETYPE_RAW = 0x1
EFI_FV_FILETYPE_FREEFORM = 0x2
EFI_FV_FILETYPE_SECURITY_CORE = 0x3
EFI_FV_FILETYPE_PEI_CORE = 0x4
EFI_FV_FILETYPE_DXE_CORE = 0x5
EFI_FV_FILETYPE_PEIM = 0x6
EFI_FV_FILETYPE_DRIVER = 0x7
EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER = 0x8
EFI_FV_FILETYPE_APPLICATION = 0x9
EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE = 0xb
EFI_FV_FILETYPE_FFS_PAD = 0xf0
EFI_FV_FILETYPE = {
    EFI_FV_FILETYPE_RAW: "Binary data",
    EFI_FV_FILETYPE_FREEFORM: "Sectioned data",
    EFI_FV_FILETYPE_SECURITY_CORE: "Platform core code used during the SEC" \
        + " phase",
    EFI_FV_FILETYPE_PEI_CORE: "PEI Foundation",
    EFI_FV_FILETYPE_DXE_CORE: "DXE Foundation",
    EFI_FV_FILETYPE_PEIM: "PEI module (PEIM)",
    EFI_FV_FILETYPE_DRIVER: "DXE driver",
    EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER: "Combined PEIM/DXE driver",
    EFI_FV_FILETYPE_APPLICATION: "Application",
    EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE: "Firmware volume image",
    EFI_FV_FILETYPE_FFS_PAD: "Pad File For FFS",
}
# Reserved ranges get generic labels (per the PI spec's type partitioning).
for x in xrange(0xc0, 0xe0):
    EFI_FV_FILETYPE[x] = "OEM File"
for x in xrange(0xe0, 0xf0):
    EFI_FV_FILETYPE[x] = "Debug/Test File"
for x in xrange(0xf1, 0x100):
    EFI_FV_FILETYPE[x] = "Firmware File System Specific File"
class BlockMap(FieldSet):
    """One 8-byte firmware-volume block map entry:
    a run of num_blocks blocks, each len bytes long."""
    static_size = 8*8
    def createFields(self):
        yield UInt32(self, "num_blocks")
        yield UInt32(self, "len")
    def createDescription(self):
        return "%d blocks of %s" % (
            self["num_blocks"].value, humanFilesize(self["len"].value))
class FileSection(FieldSet):
    """One section of a firmware file; may recursively contain sections
    or an entire nested firmware volume."""
    COMPRESSION_TYPE = {
        0: 'Not Compressed',
        1: 'Standard Compression',
    }
    def __init__(self, *args, **kw):
        FieldSet.__init__(self, *args, **kw)
        # Section size is self-described by the 24-bit 'size' header field.
        self._size = self["size"].value * 8
        section_type = self["type"].value
        if section_type in (EFI_SECTION_DXE_DEPEX, EFI_SECTION_PEI_DEPEX):
            # These sections can sometimes be longer than what their size
            # claims! It's so nice to have so detailled specs and not follow
            # them ...
            if self.stream.readBytes(self.absolute_address +
                self._size, 1) == '\0':
                self._size = self._size + 16
    def createFields(self):
        # Header
        yield UInt24(self, "size")
        yield Enum(UInt8(self, "type"), EFI_SECTION_TYPE)
        # Type-specific extra header fields.
        section_type = self["type"].value
        if section_type == EFI_SECTION_COMPRESSION:
            yield UInt32(self, "uncomp_len")
            yield Enum(UInt8(self, "comp_type"), self.COMPRESSION_TYPE)
        elif section_type == EFI_SECTION_FREEFORM_SUBTYPE_GUID:
            yield GUID(self, "sub_type_guid")
        elif section_type == EFI_SECTION_GUID_DEFINED:
            yield GUID(self, "section_definition_guid")
            yield UInt16(self, "data_offset")
            yield UInt16(self, "attributes")
        elif section_type == EFI_SECTION_USER_INTERFACE:
            yield CString(self, "file_name", charset="UTF-16-LE")
        elif section_type == EFI_SECTION_VERSION:
            yield UInt16(self, "build_number")
            yield CString(self, "version", charset="UTF-16-LE")
        # Content
        content_size = (self.size - self.current_size) // 8
        if content_size == 0:
            return
        if section_type == EFI_SECTION_COMPRESSION:
            compression_type = self["comp_type"].value
            if compression_type == 1:
                # Compressed payload is left opaque (not decompressed).
                while not self.eof:
                    yield RawBytes(self, "compressed_content", content_size)
            else:
                # "Not compressed": parse nested sections directly.
                while not self.eof:
                    yield FileSection(self, "section[]")
        elif section_type == EFI_SECTION_FIRMWARE_VOLUME_IMAGE:
            yield FirmwareVolume(self, "firmware_volume")
        else:
            yield RawBytes(self, "content", content_size,
                EFI_SECTION_TYPE.get(self["type"].value,
                    "Unknown Section Type"))
    def createDescription(self):
        return EFI_SECTION_TYPE.get(self["type"].value,
            "Unknown Section Type")
class File(FieldSet):
    """One FFS file inside a firmware volume: fixed header followed by
    sections until the self-declared size is exhausted."""
    def __init__(self, *args, **kw):
        FieldSet.__init__(self, *args, **kw)
        # Total file size (header included) comes from the 24-bit field.
        self._size = self["size"].value * 8
    def createFields(self):
        # Header
        yield GUID(self, "name")
        yield UInt16(self, "integrity_check")
        yield Enum(UInt8(self, "type"), EFI_FV_FILETYPE)
        yield UInt8(self, "attributes")
        yield UInt24(self, "size")
        yield UInt8(self, "state")
        # Content
        while not self.eof:
            yield FileSection(self, "section[]")
    def createDescription(self):
        return "%s: %s containing %d section(s)" % (
            self["name"].value,
            self["type"].display,
            len(self.array("section")))
class FirmwareVolume(FieldSet):
    """A PI firmware volume: header, block map, then padded FFS files."""
    def __init__(self, *args, **kw):
        FieldSet.__init__(self, *args, **kw)
        # When not constrained by the parent, size ourselves from the
        # header's volume_len field.
        if not self._size:
            self._size = self["volume_len"].value * 8
    def createFields(self):
        # Header
        yield NullBytes(self, "zero_vector", 16)
        yield GUID(self, "fs_guid")
        yield UInt64(self, "volume_len")
        yield String(self, "signature", 4)
        yield UInt32(self, "attributes")
        yield UInt16(self, "header_len")
        yield UInt16(self, "checksum")
        yield UInt16(self, "ext_header_offset")
        yield UInt8(self, "reserved")
        yield UInt8(self, "revision")
        # Block map is terminated by an all-zero entry.
        while True:
            bm = BlockMap(self, "block_map[]")
            yield bm
            if bm['num_blocks'].value == 0 and bm['len'].value == 0:
                break
        # TODO must handle extended header
        # Content
        while not self.eof:
            # Files are aligned on 8-byte boundaries.
            padding = paddingSize(self.current_size // 8, 8)
            if padding:
                yield PaddingBytes(self, "padding[]", padding)
            yield File(self, "file[]")
    def createDescription(self):
        return "Firmware Volume containing %d file(s)" % len(self.array("file"))
class PIFVFile(Parser):
    """Top-level parser: a file is a sequence of firmware volumes."""
    endian = LITTLE_ENDIAN
    MAGIC = '_FVH'
    PARSER_TAGS = {
        "id": "pifv",
        "category": "program",
        "file_ext": ("bin", ""),
        "min_size": 64*8, # smallest possible header
        "magic_regex": (("\0{16}.{24}%s" % MAGIC, 0), ),
        "description": "EFI Platform Initialization Firmware Volume",
    }
    def validate(self):
        # '_FVH' signature sits at byte offset 40 of the volume header.
        if self.stream.readBytes(40*8, 4) != self.MAGIC:
            return "Invalid magic number"
        # The header starts with a 16-byte zero vector.
        if self.stream.readBytes(0, 16) != "\0"*16:
            return "Invalid zero vector"
        return True
    def createFields(self):
        while not self.eof:
            yield FirmwareVolume(self, "firmware_volume[]")
|
gpl-3.0
|
Chenmxs/scrapy
|
tests/test_linkextractors_deprecated.py
|
55
|
9518
|
import unittest
from scrapy.linkextractors.regex import RegexLinkExtractor
from scrapy.http import HtmlResponse
from scrapy.link import Link
from scrapy.linkextractors.htmlparser import HtmlParserLinkExtractor
from scrapy.linkextractors.sgml import SgmlLinkExtractor, BaseSgmlLinkExtractor
from tests import get_testdata
from tests.test_linkextractors import Base
class BaseSgmlLinkExtractorTestCase(unittest.TestCase):
    """Tests for the deprecated BaseSgmlLinkExtractor: default extraction,
    <base href> resolution, bad-encoding recovery and URL matching."""
    # XXX: should we move some of these tests to base link extractor tests?
    def test_basic(self):
        html = """<html><head><title>Page title<title>
        <body><p><a href="item/12.html">Item 12</a></p>
        <p><a href="/about.html">About us</a></p>
        <img src="/logo.png" alt="Company logo (not a link)" />
        <p><a href="../othercat.html">Other category</a></p>
        <p><a href="/">>></a></p>
        <p><a href="/" /></p>
        </body></html>"""
        response = HtmlResponse("http://example.org/somepage/index.html", body=html)
        lx = BaseSgmlLinkExtractor()  # default: tag=a, attr=href
        self.assertEqual(lx.extract_links(response),
                         [Link(url='http://example.org/somepage/item/12.html', text='Item 12'),
                          Link(url='http://example.org/about.html', text='About us'),
                          Link(url='http://example.org/othercat.html', text='Other category'),
                          Link(url='http://example.org/', text='>>'),
                          Link(url='http://example.org/', text='')])
    def test_base_url(self):
        html = """<html><head><title>Page title<title><base href="http://otherdomain.com/base/" />
        <body><p><a href="item/12.html">Item 12</a></p>
        </body></html>"""
        response = HtmlResponse("http://example.org/somepage/index.html", body=html)
        lx = BaseSgmlLinkExtractor()  # default: tag=a, attr=href
        self.assertEqual(lx.extract_links(response),
                         [Link(url='http://otherdomain.com/base/item/12.html', text='Item 12')])
        # base url is an absolute path and relative to host
        html = """<html><head><title>Page title<title><base href="/" />
        <body><p><a href="item/12.html">Item 12</a></p></body></html>"""
        response = HtmlResponse("https://example.org/somepage/index.html", body=html)
        self.assertEqual(lx.extract_links(response),
                         [Link(url='https://example.org/item/12.html', text='Item 12')])
        # base url has no scheme
        html = """<html><head><title>Page title<title><base href="//noschemedomain.com/path/to/" />
        <body><p><a href="item/12.html">Item 12</a></p></body></html>"""
        response = HtmlResponse("https://example.org/somepage/index.html", body=html)
        self.assertEqual(lx.extract_links(response),
                         [Link(url='https://noschemedomain.com/path/to/item/12.html', text='Item 12')])
    def test_link_text_wrong_encoding(self):
        # \xed is invalid UTF-8; the extractor must substitute U+FFFD.
        html = """<body><p><a href="item/12.html">Wrong: \xed</a></p></body></html>"""
        response = HtmlResponse("http://www.example.com", body=html, encoding='utf-8')
        lx = BaseSgmlLinkExtractor()
        self.assertEqual(lx.extract_links(response), [
            Link(url='http://www.example.com/item/12.html', text=u'Wrong: \ufffd'),
        ])
    def test_extraction_encoding(self):
        body = get_testdata('link_extractor', 'linkextractor_noenc.html')
        response_utf8 = HtmlResponse(url='http://example.com/utf8', body=body, headers={'Content-Type': ['text/html; charset=utf-8']})
        response_noenc = HtmlResponse(url='http://example.com/noenc', body=body)
        body = get_testdata('link_extractor', 'linkextractor_latin1.html')
        response_latin1 = HtmlResponse(url='http://example.com/latin1', body=body)
        lx = BaseSgmlLinkExtractor()
        # URLs must be percent-encoded in the response's detected encoding.
        self.assertEqual(lx.extract_links(response_utf8), [
            Link(url='http://example.com/sample_%C3%B1.html', text=''),
            Link(url='http://example.com/sample_%E2%82%AC.html', text='sample \xe2\x82\xac text'.decode('utf-8')),
        ])
        self.assertEqual(lx.extract_links(response_noenc), [
            Link(url='http://example.com/sample_%C3%B1.html', text=''),
            Link(url='http://example.com/sample_%E2%82%AC.html', text='sample \xe2\x82\xac text'.decode('utf-8')),
        ])
        self.assertEqual(lx.extract_links(response_latin1), [
            Link(url='http://example.com/sample_%F1.html', text=''),
            Link(url='http://example.com/sample_%E1.html', text='sample \xe1 text'.decode('latin1')),
        ])
    def test_matches(self):
        # With no allow/deny patterns the extractor matches any URL.
        url1 = 'http://lotsofstuff.com/stuff1/index'
        url2 = 'http://evenmorestuff.com/uglystuff/index'
        lx = BaseSgmlLinkExtractor()
        self.assertEqual(lx.matches(url1), True)
        self.assertEqual(lx.matches(url2), True)
class HtmlParserLinkExtractorTestCase(unittest.TestCase):
    """Tests for the deprecated HtmlParserLinkExtractor."""
    def setUp(self):
        body = get_testdata('link_extractor', 'sgml_linkextractor.html')
        self.response = HtmlResponse(url='http://example.com/index', body=body)
    def test_extraction(self):
        # Default arguments
        lx = HtmlParserLinkExtractor()
        self.assertEqual(lx.extract_links(self.response),
                         [Link(url='http://example.com/sample2.html', text=u'sample 2'),
                          Link(url='http://example.com/sample3.html', text=u'sample 3 text'),
                          Link(url='http://example.com/sample3.html', text=u'sample 3 repetition'),
                          Link(url='http://www.google.com/something', text=u''),
                          Link(url='http://example.com/innertag.html', text=u'inner tag'),])
    def test_link_wrong_href(self):
        # The malformed URL (unbalanced '[') must be skipped, not raised on.
        html = """
        <a href="http://example.org/item1.html">Item 1</a>
        <a href="http://[example.org/item2.html">Item 2</a>
        <a href="http://example.org/item3.html">Item 3</a>
        """
        response = HtmlResponse("http://example.org/index.html", body=html)
        lx = HtmlParserLinkExtractor()
        self.assertEqual([link for link in lx.extract_links(response)], [
            Link(url='http://example.org/item1.html', text=u'Item 1', nofollow=False),
            Link(url='http://example.org/item3.html', text=u'Item 3', nofollow=False),
        ])
class SgmlLinkExtractorTestCase(Base.LinkExtractorTestCase):
    """Runs the shared LinkExtractorTestCase suite against the deprecated
    SgmlLinkExtractor, plus a few extractor-specific cases."""
    extractor_cls = SgmlLinkExtractor
    def test_deny_extensions(self):
        html = """<a href="page.html">asd</a> and <a href="photo.jpg">"""
        response = HtmlResponse("http://example.org/", body=html)
        lx = SgmlLinkExtractor(deny_extensions="jpg")
        self.assertEqual(lx.extract_links(response), [
            Link(url='http://example.org/page.html', text=u'asd'),
        ])
    def test_attrs_sgml(self):
        # Only the 'href' attribute is considered; 'ref' must be ignored.
        html = """<html><area href="sample1.html"></area>
        <a ref="sample2.html">sample text 2</a></html>"""
        response = HtmlResponse("http://example.com/index.html", body=html)
        lx = SgmlLinkExtractor(attrs="href")
        self.assertEqual(lx.extract_links(response), [
            Link(url='http://example.com/sample1.html', text=u''),
        ])
    def test_link_nofollow(self):
        # rel="nofollow" must be detected even among other rel tokens.
        html = """
        <a href="page.html?action=print" rel="nofollow">Printer-friendly page</a>
        <a href="about.html">About us</a>
        <a href="http://google.com/something" rel="external nofollow">Something</a>
        """
        response = HtmlResponse("http://example.org/page.html", body=html)
        lx = SgmlLinkExtractor()
        self.assertEqual([link for link in lx.extract_links(response)], [
            Link(url='http://example.org/page.html?action=print', text=u'Printer-friendly page', nofollow=True),
            Link(url='http://example.org/about.html', text=u'About us', nofollow=False),
            Link(url='http://google.com/something', text=u'Something', nofollow=True),
        ])
class RegexLinkExtractorTestCase(unittest.TestCase):
    """Tests for RegexLinkExtractor (regex-based variant)."""
    # XXX: RegexLinkExtractor is not deprecated yet, but it must be rewritten
    # not to depend on SgmlLinkExractor. Its speed is also much worse
    # than it should be.
    def setUp(self):
        body = get_testdata('link_extractor', 'sgml_linkextractor.html')
        self.response = HtmlResponse(url='http://example.com/index', body=body)
    def test_extraction(self):
        # Default arguments
        # NOTE: unlike HtmlParserLinkExtractor, duplicate URLs are collapsed
        # (no 'sample 3 repetition' entry expected here).
        lx = RegexLinkExtractor()
        self.assertEqual(lx.extract_links(self.response),
                         [Link(url='http://example.com/sample2.html', text=u'sample 2'),
                          Link(url='http://example.com/sample3.html', text=u'sample 3 text'),
                          Link(url='http://www.google.com/something', text=u''),
                          Link(url='http://example.com/innertag.html', text=u'inner tag'),])
    def test_link_wrong_href(self):
        # The malformed URL (unbalanced '[') must be skipped, not raised on.
        html = """
        <a href="http://example.org/item1.html">Item 1</a>
        <a href="http://[example.org/item2.html">Item 2</a>
        <a href="http://example.org/item3.html">Item 3</a>
        """
        response = HtmlResponse("http://example.org/index.html", body=html)
        lx = RegexLinkExtractor()
        self.assertEqual([link for link in lx.extract_links(response)], [
            Link(url='http://example.org/item1.html', text=u'Item 1', nofollow=False),
            Link(url='http://example.org/item3.html', text=u'Item 3', nofollow=False),
        ])
|
bsd-3-clause
|
abadger/Bento
|
bento/private/_yaku/examples/fortran/conf_fortran.py
|
3
|
1061
|
import sys
from yaku.scheduler \
import \
run_tasks
from yaku.context \
import \
get_bld, get_cfg
from yaku.conftests.fconftests \
import \
check_fcompiler, check_fortran_verbose_flag, \
check_fortran_runtime_flags, check_fortran_dummy_main, \
check_fortran_mangling
def configure(ctx):
    """Configure C/C++/Fortran toolchains and probe Fortran capabilities.

    Candidate compiler list is restricted to gfortran; the check_* calls
    record verbose/runtime flags, dummy-main and name-mangling results in
    the configure context for the build step.
    """
    ctx.use_tools(["ctasks", "cxxtasks"])
    ctx.load_tool("fortran")
    ctx.builders["fortran"].configure(candidates=["gfortran"])
    check_fcompiler(ctx)
    check_fortran_verbose_flag(ctx)
    check_fortran_runtime_flags(ctx)
    check_fortran_dummy_main(ctx)
    check_fortran_mangling(ctx)
def build(ctx):
    """Build the same example program from Fortran, C and C++ sources."""
    builder = ctx.builders["fortran"]
    builder.program("fbar", ["src/bar.f"])
    builder = ctx.builders["ctasks"]
    builder.program("cbar", ["src/bar.c"])
    builder = ctx.builders["cxxtasks"]
    builder.program("cxxbar", ["src/bar.cxx"])
if __name__ == "__main__":
    # Two-phase driver: run (and persist) configuration, then build.
    ctx = get_cfg()
    configure(ctx)
    ctx.store()
    ctx = get_bld()
    build(ctx)
    run_tasks(ctx)
    ctx.store()
|
bsd-3-clause
|
sivaprakashniet/push_pull
|
p2p/lib/python2.7/site-packages/django/contrib/auth/models.py
|
104
|
17967
|
from __future__ import unicode_literals
from django.contrib import auth
from django.contrib.auth.hashers import (
check_password, is_password_usable, make_password,
)
from django.contrib.auth.signals import user_logged_in
from django.contrib.contenttypes.models import ContentType
from django.core import validators
from django.core.exceptions import PermissionDenied
from django.core.mail import send_mail
from django.db import models
from django.db.models.manager import EmptyManager
from django.utils import six, timezone
from django.utils.crypto import get_random_string, salted_hmac
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
def update_last_login(sender, user, **kwargs):
    """
    A signal receiver which updates the last_login date for
    the user logging in.
    """
    user.last_login = timezone.now()
    # update_fields avoids rewriting the whole row on every login.
    user.save(update_fields=['last_login'])
# Registered at import time so every successful login stamps last_login.
user_logged_in.connect(update_last_login)
class PermissionManager(models.Manager):
    # Keep this manager available in historical migrations.
    use_in_migrations = True
    def get_by_natural_key(self, codename, app_label, model):
        # Natural key = (codename, content type natural key); resolves the
        # ContentType on the same database alias as this manager.
        return self.get(
            codename=codename,
            content_type=ContentType.objects.db_manager(self.db).get_by_natural_key(app_label, model),
        )
@python_2_unicode_compatible
class Permission(models.Model):
    """
    The permissions system provides a way to assign permissions to specific
    users and groups of users.

    The permission system is used by the Django admin site, but may also be
    useful in your own code. The Django admin site uses permissions as follows:

        - The "add" permission limits the user's ability to view the "add" form
          and add an object.
        - The "change" permission limits a user's ability to view the change
          list, view the "change" form and change an object.
        - The "delete" permission limits the ability to delete an object.

    Permissions are set globally per type of object, not per specific object
    instance. It is possible to say "Mary may change news stories," but it's
    not currently possible to say "Mary may change news stories, but only the
    ones she created herself" or "Mary may only change news stories that have a
    certain status or publication date."

    Three basic permissions -- add, change and delete -- are automatically
    created for each Django model.
    """
    name = models.CharField(_('name'), max_length=255)
    content_type = models.ForeignKey(ContentType)
    codename = models.CharField(_('codename'), max_length=100)
    objects = PermissionManager()
    class Meta:
        verbose_name = _('permission')
        verbose_name_plural = _('permissions')
        # A codename is unique per model, not globally.
        unique_together = (('content_type', 'codename'),)
        ordering = ('content_type__app_label', 'content_type__model',
                    'codename')
    def __str__(self):
        return "%s | %s | %s" % (
            six.text_type(self.content_type.app_label),
            six.text_type(self.content_type),
            six.text_type(self.name))
    def natural_key(self):
        return (self.codename,) + self.content_type.natural_key()
    # Serialization must dump ContentType rows before Permission rows.
    natural_key.dependencies = ['contenttypes.contenttype']
class GroupManager(models.Manager):
    """
    The manager for the auth's Group model.
    """
    use_in_migrations = True
    def get_by_natural_key(self, name):
        # Group's natural key is simply its unique name.
        return self.get(name=name)
@python_2_unicode_compatible
class Group(models.Model):
    """
    Groups are a generic way of categorizing users to apply permissions, or
    some other label, to those users. A user can belong to any number of
    groups.

    A user in a group automatically has all the permissions granted to that
    group. For example, if the group Site editors has the permission
    can_edit_home_page, any user in that group will have that permission.

    Beyond permissions, groups are a convenient way to categorize users to
    apply some label, or extended functionality, to them. For example, you
    could create a group 'Special users', and you could write code that would
    do special things to those users -- such as giving them access to a
    members-only portion of your site, or sending them members-only email
    messages.
    """
    name = models.CharField(_('name'), max_length=80, unique=True)
    permissions = models.ManyToManyField(Permission,
        verbose_name=_('permissions'), blank=True)
    objects = GroupManager()
    class Meta:
        verbose_name = _('group')
        verbose_name_plural = _('groups')
    def __str__(self):
        return self.name
    def natural_key(self):
        return (self.name,)
class BaseUserManager(models.Manager):
    # Base manager for custom user models: email normalization, random
    # passwords, and natural-key lookup via USERNAME_FIELD.
    @classmethod
    def normalize_email(cls, email):
        """
        Normalize the address by lowercasing the domain part of the email
        address.
        """
        email = email or ''
        try:
            # rsplit with maxsplit=1 so '@' inside the local part (quoted
            # addresses) is preserved.
            email_name, domain_part = email.strip().rsplit('@', 1)
        except ValueError:
            # No '@' at all: return the (stripped-or-empty) input unchanged.
            pass
        else:
            email = '@'.join([email_name, domain_part.lower()])
        return email
    def make_random_password(self, length=10,
                             allowed_chars='abcdefghjkmnpqrstuvwxyz'
                                           'ABCDEFGHJKLMNPQRSTUVWXYZ'
                                           '23456789'):
        """
        Generates a random password with the given length and given
        allowed_chars. Note that the default value of allowed_chars does not
        have "I" or "O" or letters and digits that look similar -- just to
        avoid confusion.
        """
        return get_random_string(length, allowed_chars)
    def get_by_natural_key(self, username):
        return self.get(**{self.model.USERNAME_FIELD: username})
class UserManager(BaseUserManager):
    use_in_migrations = True
    def _create_user(self, username, email, password,
                     is_staff, is_superuser, **extra_fields):
        """
        Creates and saves a User with the given username, email and password.
        """
        now = timezone.now()
        if not username:
            raise ValueError('The given username must be set')
        email = self.normalize_email(email)
        user = self.model(username=username, email=email,
                          is_staff=is_staff, is_active=True,
                          is_superuser=is_superuser,
                          date_joined=now, **extra_fields)
        # set_password hashes; never store the raw password.
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_user(self, username, email=None, password=None, **extra_fields):
        # Regular user: not staff, not superuser.
        return self._create_user(username, email, password, False, False,
                                 **extra_fields)
    def create_superuser(self, username, email, password, **extra_fields):
        # Superuser: staff and superuser flags both set.
        return self._create_user(username, email, password, True, True,
                                 **extra_fields)
@python_2_unicode_compatible
class AbstractBaseUser(models.Model):
    """Minimal abstract user: password storage/checking and identity only.
    Concrete subclasses define USERNAME_FIELD and the actual fields."""
    password = models.CharField(_('password'), max_length=128)
    last_login = models.DateTimeField(_('last login'), blank=True, null=True)
    # Class attribute default; concrete users typically override this with
    # a real model field.
    is_active = True
    REQUIRED_FIELDS = []
    class Meta:
        abstract = True
    def get_username(self):
        "Return the identifying username for this User"
        return getattr(self, self.USERNAME_FIELD)
    def __str__(self):
        return self.get_username()
    def natural_key(self):
        return (self.get_username(),)
    def is_anonymous(self):
        """
        Always returns False. This is a way of comparing User objects to
        anonymous users.
        """
        return False
    def is_authenticated(self):
        """
        Always return True. This is a way to tell if the user has been
        authenticated in templates.
        """
        return True
    def set_password(self, raw_password):
        self.password = make_password(raw_password)
    def check_password(self, raw_password):
        """
        Returns a boolean of whether the raw_password was correct. Handles
        hashing formats behind the scenes.
        """
        def setter(raw_password):
            # Called on a correct password stored with an outdated hasher:
            # re-hash with the current scheme and persist only that field.
            self.set_password(raw_password)
            self.save(update_fields=["password"])
        return check_password(raw_password, self.password, setter)
    def set_unusable_password(self):
        # Sets a value that will never be a valid hash
        self.password = make_password(None)
    def has_usable_password(self):
        return is_password_usable(self.password)
    def get_full_name(self):
        raise NotImplementedError('subclasses of AbstractBaseUser must provide a get_full_name() method')
    def get_short_name(self):
        raise NotImplementedError('subclasses of AbstractBaseUser must provide a get_short_name() method.')
    def get_session_auth_hash(self):
        """
        Returns an HMAC of the password field.
        """
        # Changing the password invalidates existing sessions keyed on this.
        key_salt = "django.contrib.auth.models.AbstractBaseUser.get_session_auth_hash"
        return salted_hmac(key_salt, self.password).hexdigest()
# A few helper functions for common logic between User and AnonymousUser.
def _user_get_all_permissions(user, obj):
    # Union of permissions reported by every auth backend that implements
    # get_all_permissions.
    permissions = set()
    for backend in auth.get_backends():
        if hasattr(backend, "get_all_permissions"):
            permissions.update(backend.get_all_permissions(user, obj))
    return permissions
def _user_has_perm(user, perm, obj):
    """
    A backend can raise `PermissionDenied` to short-circuit permission checking.
    """
    for backend in auth.get_backends():
        check = getattr(backend, 'has_perm', None)
        if check is None:
            continue
        try:
            granted = check(user, perm, obj)
        except PermissionDenied:
            # A denying backend vetoes the permission outright.
            return False
        if granted:
            return True
    return False
def _user_has_module_perms(user, app_label):
    """
    A backend can raise `PermissionDenied` to short-circuit permission checking.
    """
    for backend in auth.get_backends():
        check = getattr(backend, 'has_module_perms', None)
        if check is None:
            continue
        try:
            allowed = check(user, app_label)
        except PermissionDenied:
            # A denying backend vetoes access to the whole app label.
            return False
        if allowed:
            return True
    return False
class PermissionsMixin(models.Model):
    """
    A mixin class that adds the fields and methods necessary to support
    Django's Group and Permission model using the ModelBackend.
    """
    is_superuser = models.BooleanField(_('superuser status'), default=False,
        help_text=_('Designates that this user has all permissions without '
                    'explicitly assigning them.'))
    groups = models.ManyToManyField(Group, verbose_name=_('groups'),
        blank=True, help_text=_('The groups this user belongs to. A user will '
                                'get all permissions granted to each of '
                                'their groups.'),
        related_name="user_set", related_query_name="user")
    user_permissions = models.ManyToManyField(Permission,
        verbose_name=_('user permissions'), blank=True,
        help_text=_('Specific permissions for this user.'),
        related_name="user_set", related_query_name="user")
    class Meta:
        abstract = True
    def get_group_permissions(self, obj=None):
        """
        Returns a list of permission strings that this user has through their
        groups. This method queries all available auth backends. If an object
        is passed in, only permissions matching this object are returned.
        """
        permissions = set()
        for backend in auth.get_backends():
            if hasattr(backend, "get_group_permissions"):
                permissions.update(backend.get_group_permissions(self, obj))
        return permissions
    def get_all_permissions(self, obj=None):
        # Delegates to the module-level helper shared with AnonymousUser.
        return _user_get_all_permissions(self, obj)
    def has_perm(self, perm, obj=None):
        """
        Returns True if the user has the specified permission. This method
        queries all available auth backends, but returns immediately if any
        backend returns True. Thus, a user who has permission from a single
        auth backend is assumed to have permission in general. If an object is
        provided, permissions for this specific object are checked.
        """
        # Active superusers have all permissions.
        if self.is_active and self.is_superuser:
            return True
        # Otherwise we need to check the backends.
        return _user_has_perm(self, perm, obj)
    def has_perms(self, perm_list, obj=None):
        """
        Returns True if the user has each of the specified permissions. If
        object is passed, it checks if the user has all required perms for this
        object.
        """
        for perm in perm_list:
            if not self.has_perm(perm, obj):
                return False
        return True
    def has_module_perms(self, app_label):
        """
        Returns True if the user has any permissions in the given app label.
        Uses pretty much the same logic as has_perm, above.
        """
        # Active superusers have all permissions.
        if self.is_active and self.is_superuser:
            return True
        return _user_has_module_perms(self, app_label)
class AbstractUser(AbstractBaseUser, PermissionsMixin):
    """
    An abstract base class implementing a fully featured User model with
    admin-compliant permissions.
    Username, password and email are required. Other fields are optional.
    """
    username = models.CharField(_('username'), max_length=30, unique=True,
        help_text=_('Required. 30 characters or fewer. Letters, digits and '
                    '@/./+/-/_ only.'),
        validators=[
            validators.RegexValidator(r'^[\w.@+-]+$',
                                      _('Enter a valid username. '
                                        'This value may contain only letters, numbers '
                                        'and @/./+/-/_ characters.'), 'invalid'),
        ],
        error_messages={
            'unique': _("A user with that username already exists."),
        })
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=30, blank=True)
    email = models.EmailField(_('email address'), blank=True)
    is_staff = models.BooleanField(_('staff status'), default=False,
        help_text=_('Designates whether the user can log into this admin '
                    'site.'))
    is_active = models.BooleanField(_('active'), default=True,
        help_text=_('Designates whether this user should be treated as '
                    'active. Unselect this instead of deleting accounts.'))
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
    # Default manager providing user-creation helpers.
    objects = UserManager()
    # Users are identified by 'username'; 'email' is additionally required.
    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['email']
    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')
        abstract = True
    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()
    def get_short_name(self):
        "Returns the short name for the user."
        return self.first_name
    def email_user(self, subject, message, from_email=None, **kwargs):
        """
        Sends an email to this User.
        """
        send_mail(subject, message, from_email, [self.email], **kwargs)
class User(AbstractUser):
    """
    Users within the Django authentication system are represented by this
    model.
    Username, password and email are required. Other fields are optional.
    """
    class Meta(AbstractUser.Meta):
        # Lets projects substitute their own model via the AUTH_USER_MODEL
        # setting instead of this concrete class.
        swappable = 'AUTH_USER_MODEL'
@python_2_unicode_compatible
class AnonymousUser(object):
    """
    Stand-in object mirroring the User API for unauthenticated access.
    It has no database representation (save/delete and the password methods
    raise NotImplementedError); permission checks are delegated to the auth
    backends via the module-level helpers.
    """
    id = None
    pk = None
    username = ''
    is_staff = False
    is_active = False
    is_superuser = False
    # Managers that always return empty querysets.
    _groups = EmptyManager(Group)
    _user_permissions = EmptyManager(Permission)
    def __init__(self):
        pass
    def __str__(self):
        return 'AnonymousUser'
    def __eq__(self, other):
        # All AnonymousUser instances compare equal to one another.
        return isinstance(other, self.__class__)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        return 1  # instances always return the same hash value
    def save(self):
        raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
    def delete(self):
        raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
    def set_password(self, raw_password):
        raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
    def check_password(self, raw_password):
        raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
    def _get_groups(self):
        return self._groups
    groups = property(_get_groups)
    def _get_user_permissions(self):
        return self._user_permissions
    user_permissions = property(_get_user_permissions)
    def get_group_permissions(self, obj=None):
        # Anonymous users belong to no groups.
        return set()
    def get_all_permissions(self, obj=None):
        return _user_get_all_permissions(self, obj=obj)
    def has_perm(self, perm, obj=None):
        # Backends may still grant permissions to anonymous users.
        return _user_has_perm(self, perm, obj=obj)
    def has_perms(self, perm_list, obj=None):
        for perm in perm_list:
            if not self.has_perm(perm, obj):
                return False
        return True
    def has_module_perms(self, module):
        return _user_has_module_perms(self, module)
    def is_anonymous(self):
        return True
    def is_authenticated(self):
        return False
    def get_username(self):
        return self.username
|
bsd-3-clause
|
markoshorro/gem5
|
src/mem/MemChecker.py
|
42
|
2752
|
# Copyright (c) 2014 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Marco Elver
from MemObject import MemObject
from m5.SimObject import SimObject
from m5.params import *
from m5.proxy import *
class MemChecker(SimObject):
    # Standalone checker state object; one instance can be shared by several
    # MemCheckerMonitor objects (see the 'memchecker' Param below).
    type = 'MemChecker'
    cxx_header = "mem/mem_checker.hh"
class MemCheckerMonitor(MemObject):
    # Pass-through monitor inserted between a CPU-side slave port and a
    # memory-side master port, reporting violations to a shared MemChecker.
    type = 'MemCheckerMonitor'
    cxx_header = "mem/mem_checker_monitor.hh"
    # one port in each direction
    master = MasterPort("Master port")
    slave = SlavePort("Slave port")
    # Convenience aliases using CPU/memory-oriented naming.
    cpu_side = SlavePort("Alias for slave")
    mem_side = MasterPort("Alias for master")
    warn_only = Param.Bool(False, "Warn about violations only")
    memchecker = Param.MemChecker("Instance shared with other monitors")
|
bsd-3-clause
|
biolab/orange
|
Orange/testing/regression/tests_20/reference_domain2.py
|
6
|
1700
|
# Description: Shows how to use orange.Domain for example conversion. Also shows how to add meta-attributes to domain descriptors and use them.
# Category: basic classes, meta-attributes
# Classes: Domain
# Uses: monk1
# Referenced: Domain.htm
import orange
data = orange.ExampleTable("monk1")
# Reduced domain with attributes a, b, e and class y, reusing the variable
# descriptors registered in the source domain.
d2 = orange.Domain(["a", "b", "e", "y"], data.domain)
example = data[55]
print example
# Three equivalent ways to convert an example into the new domain.
example2 = d2(example)
print example2
example2 = orange.Example(d2, example)
print example2
data2 = orange.ExampleTable(d2, data)
print data2[55]
# Attach a meta-attribute 'w' to the reduced domain and reconvert.
d2.addmeta(orange.newmetaid(), orange.FloatVariable("w"))
data2 = orange.ExampleTable(d2, data)
print data2[55]
misses = orange.FloatVariable("misses")
id = orange.newmetaid()
data.domain.addmeta(id, misses)
print data[55]
print data.domain.hasmeta(id)
print data.domain.hasmeta(id-1)
# Count, per example, how often a naive Bayes classifier misclassifies it.
for example in data:
    example[misses] = 0
classifier = orange.BayesLearner(data)
for example in data:
    if example.getclass() != classifier(example):
        example[misses] += 1
for example in data:
    print example
data = orange.ExampleTable("monk1")
domain = data.domain
d2 = orange.Domain(["a", "b", "e", "y"], domain)
# Keep the removed attributes c, d, f around as meta-attributes instead.
for attr in ["c", "d", "f"]:
    d2.addmeta(orange.newmetaid(), domain[attr])
d2.addmeta(orange.newmetaid(), orange.EnumVariable("X"))
data2 = orange.ExampleTable(d2, data)
print data[55]
print data2[55]
# Demonstrate optional vs. required meta-attributes.
ido = -99
idr = -100
data.domain.addmeta(idr, orange.FloatVariable("required"), False)
data.domain.addmeta(ido, orange.FloatVariable("optional"), True)
print data.domain.isOptionalMeta(ido)
print data.domain.isOptionalMeta(idr)
print data.domain.getmetas()
print data.domain.getmetas(True)
print data.domain.getmetas(False)
|
gpl-3.0
|
RobotGarden/foscam-driver
|
camscheduler.py
|
1
|
4301
|
#!/usr/bin/env python
"""
Camera action scheduler.
"""
__author__ = "Daniel Casner <www.danielcasner.org>"
import time
import scheduler
import control
# Seconds allowed for the camera to slew to a preset position.
SEEK_TIME = 20.0
class CameraAction:
    """Abstract base for actions placed on the camera scheduler queue.
    Instances record the camera driver handle and an optional expiry
    timestamp; concrete subclasses must override run().
    """
    def __init__(self, foscam, expire=None):
        """Record the driver object and optional expiry.
        @param foscam driver object
        @param expire If time passes expire, delete the task
        """
        self.expire = expire
        self.cam = foscam
    def run(self):
        """Execute the action; subclasses must provide this."""
        raise ValueError("CameraAction subclasses must implement run method")
class SnapshotAction:
    """A class to store the necessary information to execute 1 or more
    snapshots immediately or at a time in the future."""
    def __init__(self, foscam, preset, callback, number=1, interval=0.0, expire=None, userdata=None):
        """Setup the action closure.
        @param foscam driver object
        @param preset Preset to take snapshots at
        @param callback Function to call with image data;
               arguments will be (image data, preset, final image flag, userdata)
        @param number Count of snapshots to take
        @param interval If number > 1, interval between snapshots
        @param expire If time passes expire, delete the task
        @param userdata Opaque value handed back to the callback
        """
        self.cam = foscam
        self.preset = preset
        # BUGFIX: was 'self.callback = callbakc' (typo), which raised
        # NameError on every construction.
        self.callback = callback
        self.number = number
        self.interval = interval
        self.expire = expire
        self.userdata = userdata
        # Earliest wall-clock time the next snapshot is due (0.0 = run now).
        self.nextTime = 0.0
    def run(self):
        """Run the snapshot action.
        @return True if this action is done and can be removed from the queue.
        False if the action has more to do."""
        # NOTE(review): the return values below look inverted with respect to
        # the docstring (expired -> False, not-yet-due -> True); confirm the
        # scheduler's contract before changing either side.
        if self.expire is not None and time.time() > self.expire:
            return False
        elif time.time() < self.nextTime - SEEK_TIME:  # Not ready to run yet, snooze to the scheduler
            return True
        else:
            self.cam.goto_preset(self.preset)
            # Give the camera time to reach the preset before shooting.
            time.sleep(SEEK_TIME)
            while self.number > 0:
                self.callback(self.cam.snapshot(), self.preset, self.number == 1, self.userdata)
                self.number -= 1
                if self.interval <= SEEK_TIME:
                    time.sleep(self.interval)
                else:
                    # Long interval: reschedule instead of sleeping here.
                    self.nextTime = time.time() + self.interval - SEEK_TIME
                    return True
            return False
class FoscamScheduler(scheduler.Scheduler):
    "A scheduler specific for a given foscam"
    def __init__(self, foscam):
        scheduler.Scheduler.__init__(self)
        # Camera driver shared by every action this scheduler enqueues.
        self.cam = foscam
    def snapshot(self, priority, preset, callback, expire=None, userdata=None):
        """Request a snapshot at a given preset.
        snapshots will go into the priority queue and execute when it is their
        turn. A snapshot will not be cancelled if another request with higher
        priority arrives while it is being executed.
        @param priority honor system priority number for this request, larger numbers = higher priority.
        @param preset The preset to take the picture at.
        @param callback Function to call with the photo data
        @param expire Latest time that the caller wants the photo. None for no expiration.
        @param userdata Opaque value passed through to the callback.
        """
        self.append(priority, SnapshotAction(self.cam, preset, callback, expire=expire, userdata=userdata))
    def interval(self, priority, preset, callback, number, period, expire=None, userdata=None):
        """Requests a series of snapshots at a given present.
        @param priority honor system priority number for this request, larger numbers = higher priority.
        @param preset The preset to take the picture at.
        @param callback Function to call each successive frame.
        @param number How many pictures to take.
        @param period How many seconds between pictures.
        @param expire Latest time that the caller wants the photo. None for no expiration.
        @param userdata Opaque value passed through to the callback.
        """
        self.append(priority, SnapshotAction(self.cam, preset, callback, number, period, expire, userdata))
    def queueDone(self):
        "Action when there are no more requests on the camera"
        # Park the camera at its default preset while idle.
        self.cam.goto_preset(self.cam.defaultPreset)
|
bsd-2-clause
|
3dfxsoftware/cbss-addons
|
crm_claim_rma/stock.py
|
1
|
3021
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright 2013 Camptocamp
# Copyright 2009-2013 Akretion,
# Author: Emmanuel Samyn, Raphaël Valyi, Sébastien Beau,
# Benoît Guillot, Joel Grand-Guillaume
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
class stock_picking(orm.Model):
    _inherit = "stock.picking"
    _columns = {
        # Link back to the claim this picking was generated for.
        'claim_id': fields.many2one('crm.claim', 'Claim'),
    }
    def create(self, cr, uid, vals, context=None):
        # Assign a sequence-generated name when none was provided ('/'):
        # internal pickings use this model's own sequence, other types use a
        # per-type sequence such as 'stock.picking.in' / 'stock.picking.out'.
        if ('name' not in vals) or (vals.get('name') == '/'):
            sequence_obj = self.pool.get('ir.sequence')
            # NOTE(review): assumes 'type' is always present in vals here; a
            # missing key would raise KeyError — confirm against callers.
            if vals['type'] == 'internal':
                seq_obj_name = self._name
            else:
                seq_obj_name = 'stock.picking.' + vals['type']
            vals['name'] = sequence_obj.get(cr, uid, seq_obj_name,
                                            context=context)
        new_id = super(stock_picking, self).create(cr, uid, vals,
                                                   context=context)
        return new_id
class stock_picking_out(orm.Model):
    # Outgoing pickings carry the same claim link as stock.picking.
    _inherit = "stock.picking.out"
    _columns = {
        'claim_id': fields.many2one('crm.claim', 'Claim'),
    }
class stock_picking_in(orm.Model):
    """Incoming pickings carry the same claim link as stock.picking.
    BUGFIX: this class was previously also named ``stock_picking_out``,
    silently shadowing the outgoing-picking class defined just above in this
    module's namespace. ORM registration is driven by ``_inherit``, so
    behavior was unaffected, but the duplicate name was a latent hazard for
    any code referencing the Python class.
    """
    _inherit = "stock.picking.in"
    _columns = {
        'claim_id': fields.many2one('crm.claim', 'Claim'),
    }
# This part concern the case of a wrong picking out. We need to create a new
# stock_move in a picking already open.
# In order to don't have to confirm the stock_move we override the create and
# confirm it at the creation only for this case
class stock_move(orm.Model):
    _inherit = "stock.move"
    def create(self, cr, uid, vals, context=None):
        move_id = super(stock_move, self).create(cr, uid, vals, context=context)
        if vals.get('picking_id'):
            picking_obj = self.pool.get('stock.picking')
            picking = picking_obj.browse(cr, uid, vals['picking_id'],
                                         context=context)
            # Moves added to an already-open incoming claim picking are
            # confirmed immediately so the user need not confirm them by hand
            # (see the module comment above this class).
            if picking.claim_id and picking.type == u'in':
                self.write(cr, uid, move_id, {'state': 'confirmed'},
                           context=context)
        return move_id
|
gpl-2.0
|
spatial-ucsb/ConceptsOfSpatialInformation
|
CoreConceptsPy/Astronomic_Spaces/tests/networktest.py
|
2
|
2411
|
"""
Tests for network implementation
:author: Fenja Kollasch, 06/2017
"""
import sys
sys.path.append('../')
import networks as n
import objects as o
# Model the big dipper as unordered graph... because you can observe a constellation only from left to right... or so
big_dipper = n.AstroNetwork("big dipper")
# Stars of the Big Dipper with ICRS coordinates (degrees).
alkaid = o.AstroObject("alkaid", lon=206.27, lat=49.18, bounding='ccs', reference='icrs')
mizar = o.AstroObject("mizar", lon=200.75, lat=54.55, bounding='ccs', reference='icrs')
alioth = o.AstroObject("alioth", lon=193.5, lat=55.57, bounding='ccs', reference='icrs')
megrez = o.AstroObject("megrez", lon=183.75, lat=57.01, bounding='ccs', reference='icrs')
phecda = o.AstroObject("phecda", lon=178.25, lat=53.41, bounding='ccs', reference='icrs')
merak = o.AstroObject("merak", lon=165.25, lat=56.22, bounding='ccs', reference='icrs')
dubhe = o.AstroObject("dubhe", lon=165.75, lat=61.41, bounding='ccs', reference='icrs')
big_dipper.addNode(alkaid)
big_dipper.addNode(mizar)
big_dipper.addNode(alioth)
big_dipper.addNode(megrez)
big_dipper.addNode(phecda)
big_dipper.addNode(merak)
big_dipper.addNode(dubhe)
# Totally absurd color object...
# Edges weighted by angular distance; 'color' is an arbitrary extra attribute
# used below to demonstrate attribute-filtered shortest paths.
big_dipper.addEdge(alkaid, mizar, distance=alkaid.relation(mizar, 'distance'), color="blue")
big_dipper.addEdge(mizar, alioth, distance=mizar.relation(alioth, 'distance'), color="blue")
big_dipper.addEdge(alioth, megrez, distance=alioth.relation(megrez, 'distance'), color="blue")
big_dipper.addEdge(megrez, dubhe, distance=megrez.relation(dubhe, 'distance'), color="red")
big_dipper.addEdge(megrez, phecda, distance=megrez.relation(phecda, 'distance'), color="blue")
big_dipper.addEdge(phecda, merak, distance=phecda.relation(merak, 'distance'), color="blue")
big_dipper.addEdge(merak, dubhe, distance=merak.relation(dubhe, 'distance'), color="blue")
# Manually summed path length for comparison with shortestPath() below.
print("Path from Alkaid to Dubhe: {0}".format(alkaid.relation(mizar, 'distance') +
                                              mizar.relation(alioth, 'distance') +
                                              alioth.relation(megrez, 'distance') +
                                              megrez.relation(dubhe, 'distance')))
print(big_dipper.shortestPath(alkaid, dubhe, weight=('distance', 0)))
print(big_dipper.shortestPath(alkaid, dubhe, weight=('distance', 0), color=('color', 'blue')))
print("first breadth from megrez (2):")
for s in big_dipper.breadthFirst(megrez, 2):
    print(str(s))
|
apache-2.0
|
xenserver/xsconsole
|
plugins-base/XSFeatureLogInOut.py
|
4
|
2299
|
# Copyright (c) 2008-2009 Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
if __name__ == "__main__":
raise Exception("This script is a plugin for xsconsole and cannot run independently")
from XSConsoleStandard import *
class XSFeatureLogInOut:
    """xsconsole plugin supplying the 'Log In/Out' entry of the auth menu."""
    @classmethod
    def StatusUpdateHandler(cls, inPane):
        # Render the status pane according to the current authentication state.
        if Auth.Inst().IsAuthenticated():
            inPane.AddTitleField(Lang("Log Out"))
            inPane.AddWrappedTextField(Lang("Press <Enter> to log out."))
            inPane.AddKeyHelpField( {Lang("<Enter>") : Lang("Log out") })
        else:
            inPane.AddTitleField(Lang("Log In"))
            inPane.AddWrappedTextField(Lang("Press <Enter> to log in."))
            inPane.AddKeyHelpField( { Lang("<Enter>") : Lang("Log in") })
    @classmethod
    def ActivateHandler(cls):
        # Toggle: log out when authenticated, otherwise open the login dialog.
        if Auth.Inst().IsAuthenticated():
            name = Auth.Inst().LoggedInUsername()
            Auth.Inst().LogOut()
            # Refresh cached data now that credentials are gone.
            Data.Inst().Update()
            Layout.Inst().PushDialogue(InfoDialogue( Lang("User '")+name+Lang("' logged out")))
        else:
            Layout.Inst().PushDialogue(LoginDialogue())
    def Register(self):
        Importer.RegisterNamedPlugIn(
            self,
            'LOGINOUT', # Key of this plugin for replacement, etc.
            {
                'menuname' : 'MENU_AUTH',
                'menupriority' : 100,
                'menutext' : Lang('Log In/Out'),
                'statusupdatehandler' : XSFeatureLogInOut.StatusUpdateHandler,
                'activatehandler' : XSFeatureLogInOut.ActivateHandler
            }
        )
# Register this plugin when module is imported
XSFeatureLogInOut().Register()
|
gpl-2.0
|
dcroc16/skunk_works
|
google_appengine/lib/django-1.2/django/contrib/flatpages/tests/csrf.py
|
47
|
3425
|
import os
from django.conf import settings
from django.test import TestCase, Client
class FlatpageCSRFTests(TestCase):
    """Exercise flatpage serving (view and fallback middleware) with CSRF
    checks enforced on the test client."""
    fixtures = ['sample_flatpages']
    urls = 'django.contrib.flatpages.tests.urls'
    def setUp(self):
        # Client with CSRF checks enforced (they are normally skipped in tests).
        self.client = Client(enforce_csrf_checks=True)
        # Mutates global settings; every change is restored in tearDown.
        self.old_MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES
        flatpage_middleware_class = 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
        csrf_middleware_class = 'django.middleware.csrf.CsrfViewMiddleware'
        if csrf_middleware_class not in settings.MIDDLEWARE_CLASSES:
            settings.MIDDLEWARE_CLASSES += (csrf_middleware_class,)
        if flatpage_middleware_class not in settings.MIDDLEWARE_CLASSES:
            settings.MIDDLEWARE_CLASSES += (flatpage_middleware_class,)
        self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
        settings.TEMPLATE_DIRS = (
            os.path.join(
                os.path.dirname(__file__),
                'templates'
            ),
        )
        self.old_LOGIN_URL = settings.LOGIN_URL
        settings.LOGIN_URL = '/accounts/login/'
    def tearDown(self):
        # Restore the settings mutated in setUp.
        settings.MIDDLEWARE_CLASSES = self.old_MIDDLEWARE_CLASSES
        settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
        settings.LOGIN_URL = self.old_LOGIN_URL
    # NOTE(review): assertEquals is the deprecated alias of assertEqual; kept
    # as-is for this vendored Django 1.2 code.
    def test_view_flatpage(self):
        "A flatpage can be served through a view, even when the middleware is in use"
        response = self.client.get('/flatpage_root/flatpage/')
        self.assertEquals(response.status_code, 200)
        self.assertContains(response, "<p>Isn't it flat!</p>")
    def test_view_non_existent_flatpage(self):
        "A non-existent flatpage raises 404 when served through a view, even when the middleware is in use"
        response = self.client.get('/flatpage_root/no_such_flatpage/')
        self.assertEquals(response.status_code, 404)
    def test_view_authenticated_flatpage(self):
        "A flatpage served through a view can require authentication"
        response = self.client.get('/flatpage_root/sekrit/')
        self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')
    def test_fallback_flatpage(self):
        "A flatpage can be served by the fallback middlware"
        response = self.client.get('/flatpage/')
        self.assertEquals(response.status_code, 200)
        self.assertContains(response, "<p>Isn't it flat!</p>")
    def test_fallback_non_existent_flatpage(self):
        "A non-existent flatpage raises a 404 when served by the fallback middlware"
        response = self.client.get('/no_such_flatpage/')
        self.assertEquals(response.status_code, 404)
    def test_post_view_flatpage(self):
        "POSTing to a flatpage served through a view will raise a CSRF error if no token is provided (Refs #14156)"
        response = self.client.post('/flatpage_root/flatpage/')
        self.assertEquals(response.status_code, 403)
    def test_post_fallback_flatpage(self):
        "POSTing to a flatpage served by the middleware will raise a CSRF error if no token is provided (Refs #14156)"
        response = self.client.post('/flatpage/')
        self.assertEquals(response.status_code, 403)
    def test_post_unknown_page(self):
        "POSTing to an unknown page isn't caught as a 403 CSRF error"
        response = self.client.post('/no_such_page/')
        self.assertEquals(response.status_code, 404)
|
mit
|
rdo-management/heat
|
heat/tests/test_api_aws.py
|
7
|
10180
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.api.aws import exception as aws_exception
from heat.api.aws import utils as api_utils
from heat.common import exception as common_exception
from heat.tests import common
class AWSCommonTest(common.HeatTestCase):
    '''
    Tests the api/aws common components
    '''
    # The tests
    def test_format_response(self):
        # format_response wraps a result in the AWS-style envelope dict.
        response = api_utils.format_response("Foo", "Bar")
        expected = {'FooResponse': {'FooResult': 'Bar'}}
        self.assertEqual(expected, response)
    def test_params_extract(self):
        # Happy path: indexed key/value pairs become a plain dict.
        p = {'Parameters.member.1.ParameterKey': 'foo',
             'Parameters.member.1.ParameterValue': 'bar',
             'Parameters.member.2.ParameterKey': 'blarg',
             'Parameters.member.2.ParameterValue': 'wibble'}
        params = api_utils.extract_param_pairs(p, prefix='Parameters',
                                               keyname='ParameterKey',
                                               valuename='ParameterValue')
        self.assertEqual(2, len(params))
        self.assertIn('foo', params)
        self.assertEqual('bar', params['foo'])
        self.assertIn('blarg', params)
        self.assertEqual('wibble', params['blarg'])
    def test_params_extract_dots(self):
        # Extra dotted index segments are rejected entirely.
        p = {'Parameters.member.1.1.ParameterKey': 'foo',
             'Parameters.member.1.1.ParameterValue': 'bar',
             'Parameters.member.2.1.ParameterKey': 'blarg',
             'Parameters.member.2.1.ParameterValue': 'wibble'}
        params = api_utils.extract_param_pairs(p, prefix='Parameters',
                                               keyname='ParameterKey',
                                               valuename='ParameterValue')
        self.assertFalse(params)
    def test_params_extract_garbage(self):
        # Entries under an unrelated prefix are ignored, valid ones kept.
        p = {'Parameters.member.1.ParameterKey': 'foo',
             'Parameters.member.1.ParameterValue': 'bar',
             'Foo.1.ParameterKey': 'blarg',
             'Foo.1.ParameterValue': 'wibble'}
        params = api_utils.extract_param_pairs(p, prefix='Parameters',
                                               keyname='ParameterKey',
                                               valuename='ParameterValue')
        self.assertEqual(1, len(params))
        self.assertIn('foo', params)
        self.assertEqual('bar', params['foo'])
    def test_params_extract_garbage_prefix(self):
        p = {'prefixParameters.member.Foo.Bar.ParameterKey': 'foo',
             'Parameters.member.Foo.Bar.ParameterValue': 'bar'}
        params = api_utils.extract_param_pairs(p, prefix='Parameters',
                                               keyname='ParameterKey',
                                               valuename='ParameterValue')
        self.assertFalse(params)
    def test_params_extract_garbage_suffix(self):
        p = {'Parameters.member.1.ParameterKeysuffix': 'foo',
             'Parameters.member.1.ParameterValue': 'bar'}
        params = api_utils.extract_param_pairs(p, prefix='Parameters',
                                               keyname='ParameterKey',
                                               valuename='ParameterValue')
        self.assertFalse(params)
    def test_extract_param_list(self):
        # extract_param_list groups indexed sub-keys into per-index dicts.
        p = {'MetricData.member.1.MetricName': 'foo',
             'MetricData.member.1.Unit': 'Bytes',
             'MetricData.member.1.Value': 234333}
        params = api_utils.extract_param_list(p, prefix='MetricData')
        self.assertEqual(1, len(params))
        self.assertIn('MetricName', params[0])
        self.assertIn('Unit', params[0])
        self.assertIn('Value', params[0])
        self.assertEqual('foo', params[0]['MetricName'])
        self.assertEqual('Bytes', params[0]['Unit'])
        self.assertEqual(234333, params[0]['Value'])
    def test_extract_param_list_garbage_prefix(self):
        # A mismatched prefix drops only that entry, not the whole index.
        p = {'AMetricData.member.1.MetricName': 'foo',
             'MetricData.member.1.Unit': 'Bytes',
             'MetricData.member.1.Value': 234333}
        params = api_utils.extract_param_list(p, prefix='MetricData')
        self.assertEqual(1, len(params))
        self.assertNotIn('MetricName', params[0])
        self.assertIn('Unit', params[0])
        self.assertIn('Value', params[0])
        self.assertEqual('Bytes', params[0]['Unit'])
        self.assertEqual(234333, params[0]['Value'])
    def test_extract_param_list_garbage_prefix2(self):
        p = {'AMetricData.member.1.MetricName': 'foo',
             'BMetricData.member.1.Unit': 'Bytes',
             'CMetricData.member.1.Value': 234333}
        params = api_utils.extract_param_list(p, prefix='MetricData')
        self.assertEqual(0, len(params))
    def test_extract_param_list_garbage_suffix(self):
        p = {'MetricData.member.1.AMetricName': 'foo',
             'MetricData.member.1.Unit': 'Bytes',
             'MetricData.member.1.Value': 234333}
        params = api_utils.extract_param_list(p, prefix='MetricData')
        self.assertEqual(1, len(params))
        self.assertNotIn('MetricName', params[0])
        self.assertIn('Unit', params[0])
        self.assertIn('Value', params[0])
        self.assertEqual('Bytes', params[0]['Unit'])
        self.assertEqual(234333, params[0]['Value'])
    def test_extract_param_list_multiple(self):
        p = {'MetricData.member.1.MetricName': 'foo',
             'MetricData.member.1.Unit': 'Bytes',
             'MetricData.member.1.Value': 234333,
             'MetricData.member.2.MetricName': 'foo2',
             'MetricData.member.2.Unit': 'Bytes',
             'MetricData.member.2.Value': 12345}
        params = api_utils.extract_param_list(p, prefix='MetricData')
        self.assertEqual(2, len(params))
        self.assertIn('MetricName', params[0])
        self.assertIn('MetricName', params[1])
        self.assertEqual('foo', params[0]['MetricName'])
        self.assertEqual('Bytes', params[0]['Unit'])
        self.assertEqual(234333, params[0]['Value'])
        self.assertEqual('foo2', params[1]['MetricName'])
        self.assertEqual('Bytes', params[1]['Unit'])
        self.assertEqual(12345, params[1]['Value'])
    def test_extract_param_list_multiple_missing(self):
        # Handle case where there is an empty list item
        p = {'MetricData.member.1.MetricName': 'foo',
             'MetricData.member.1.Unit': 'Bytes',
             'MetricData.member.1.Value': 234333,
             'MetricData.member.3.MetricName': 'foo2',
             'MetricData.member.3.Unit': 'Bytes',
             'MetricData.member.3.Value': 12345}
        params = api_utils.extract_param_list(p, prefix='MetricData')
        self.assertEqual(2, len(params))
        self.assertIn('MetricName', params[0])
        self.assertIn('MetricName', params[1])
        self.assertEqual('foo', params[0]['MetricName'])
        self.assertEqual('Bytes', params[0]['Unit'])
        self.assertEqual(234333, params[0]['Value'])
        self.assertEqual('foo2', params[1]['MetricName'])
        self.assertEqual('Bytes', params[1]['Unit'])
        self.assertEqual(12345, params[1]['Value'])
    def test_extract_param_list_badindex(self):
        # Non-numeric indices are discarded wholesale.
        p = {'MetricData.member.xyz.MetricName': 'foo',
             'MetricData.member.$!&^.Unit': 'Bytes',
             'MetricData.member.+.Value': 234333,
             'MetricData.member.--.MetricName': 'foo2',
             'MetricData.member._3.Unit': 'Bytes',
             'MetricData.member.-1000.Value': 12345}
        params = api_utils.extract_param_list(p, prefix='MetricData')
        self.assertEqual(0, len(params))
    def test_reformat_dict_keys(self):
        keymap = {"foo": "bar"}
        data = {"foo": 123}
        expected = {"bar": 123}
        result = api_utils.reformat_dict_keys(keymap, data)
        self.assertEqual(expected, result)
    def test_reformat_dict_keys_missing(self):
        # Keys present in the map but absent from the data are skipped.
        keymap = {"foo": "bar", "foo2": "bar2"}
        data = {"foo": 123}
        expected = {"bar": 123}
        result = api_utils.reformat_dict_keys(keymap, data)
        self.assertEqual(expected, result)
    def test_get_param_value(self):
        params = {"foo": 123}
        self.assertEqual(123, api_utils.get_param_value(params, "foo"))
    def test_get_param_value_missing(self):
        params = {"foo": 123}
        self.assertRaises(
            aws_exception.HeatMissingParameterError,
            api_utils.get_param_value, params, "bar")
    def test_map_remote_error(self):
        # Unknown exceptions map to the generic internal-failure error.
        ex = Exception()
        expected = aws_exception.HeatInternalFailureError
        self.assertIsInstance(aws_exception.map_remote_error(ex), expected)
    def test_map_remote_error_inval_param_error(self):
        ex = AttributeError()
        expected = aws_exception.HeatInvalidParameterValueError
        self.assertIsInstance(aws_exception.map_remote_error(ex), expected)
    def test_map_remote_error_denied_error(self):
        ex = common_exception.Forbidden()
        expected = aws_exception.HeatAccessDeniedError
        self.assertIsInstance(aws_exception.map_remote_error(ex), expected)
    def test_map_remote_error_already_exists_error(self):
        ex = common_exception.StackExists(stack_name="teststack")
        expected = aws_exception.AlreadyExistsError
        self.assertIsInstance(aws_exception.map_remote_error(ex), expected)
    def test_map_remote_error_invalid_action_error(self):
        ex = common_exception.ActionInProgress(stack_name="teststack",
                                               action="testing")
        expected = aws_exception.HeatActionInProgressError
        self.assertIsInstance(aws_exception.map_remote_error(ex), expected)
|
apache-2.0
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-servermanager/azure/mgmt/servermanager/models/node_resource.py
|
4
|
2535
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class NodeResource(Resource):
    """A Node Resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Manager Resource ID.
    :vartype id: str
    :ivar type: Resource Manager Resource Type.
    :vartype type: str
    :ivar name: Resource Manager Resource Name.
    :vartype name: str
    :ivar location: Resource Manager Resource Location.
    :vartype location: str
    :param tags: Resource Manager Resource Tags.
    :type tags: dict
    :param etag:
    :type etag: str
    :param gateway_id: ID of the gateway.
    :type gateway_id: str
    :param connection_name: myhost.domain.com
    :type connection_name: str
    :param created: UTC date and time when node was first added to management
     service.
    :type created: datetime
    :param updated: UTC date and time when node was last updated.
    :type updated: datetime
    """

    # Server-populated fields; marked read-only so the serializer never
    # sends them in a request body.
    _validation = {
        'id': {'readonly': True},
        'type': {'readonly': True},
        'name': {'readonly': True},
        'location': {'readonly': True},
    }

    # Maps Python attribute names to wire-format JSON paths; a key like
    # 'properties.gatewayId' nests the value under the resource's
    # "properties" object when (de)serializing.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'gateway_id': {'key': 'properties.gatewayId', 'type': 'str'},
        'connection_name': {'key': 'properties.connectionName', 'type': 'str'},
        'created': {'key': 'properties.created', 'type': 'iso-8601'},
        'updated': {'key': 'properties.updated', 'type': 'iso-8601'},
    }

    def __init__(self, tags=None, etag=None, gateway_id=None, connection_name=None, created=None, updated=None):
        # tags/etag are handled by the Resource base class; the remaining
        # arguments are node-specific properties.
        super(NodeResource, self).__init__(tags=tags, etag=etag)
        self.gateway_id = gateway_id
        self.connection_name = connection_name
        self.created = created
        self.updated = updated
|
mit
|
utecuy/edx-platform
|
cms/djangoapps/contentstore/management/commands/import.py
|
64
|
2065
|
"""
Script for importing courseware from XML format
"""
from django.core.management.base import BaseCommand, CommandError, make_option
from django_comment_common.utils import (seed_permissions_roles,
are_permissions_roles_seeded)
from xmodule.modulestore.xml_importer import import_course_from_xml
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.contentstore.django import contentstore
class Command(BaseCommand):
    """Management command that imports XML courseware into the default ModuleStore."""
    help = 'Import the specified data directory into the default ModuleStore'

    option_list = BaseCommand.option_list + (
        make_option('--nostatic',
                    action='store_true',
                    help='Skip import of static content'),
    )

    def handle(self, *args, **options):
        "Execute the command"
        # First positional argument is mandatory: the data directory.
        if not args:
            raise CommandError("import requires at least one argument: <data directory> [--nostatic] [<course dir>...]")

        data_directory = args[0]
        # Remaining positional arguments, if any, select specific course dirs.
        course_dirs = args[1:] if len(args) > 1 else None
        import_static = not options.get('nostatic', False)

        self.stdout.write("Importing. Data_dir={data}, source_dirs={courses}\n".format(
            data=data_directory,
            courses=course_dirs,
        ))

        store = modulestore()
        imported_courses = import_course_from_xml(
            store, ModuleStoreEnum.UserID.mgmt_command, data_directory, course_dirs,
            load_error_modules=False,
            static_content_store=contentstore(), verbose=True,
            do_import_static=import_static,
            create_if_not_present=True,
        )

        # Make sure every imported course has its forum permission roles.
        for imported_course in imported_courses:
            key = imported_course.id
            if are_permissions_roles_seeded(key):
                continue
            self.stdout.write('Seeding forum roles for course {0}\n'.format(key))
            seed_permissions_roles(key)
|
agpl-3.0
|
alexholcombe/twoWords
|
oldData/Alex_16Mar2015_12-04.py
|
1
|
48426
|
#Alex Holcombe alex.holcombe@sydney.edu.au
#See the github repository for more information: https://github.com/alexholcombe/twoWords
from __future__ import print_function
from psychopy import monitors, visual, event, data, logging, core, sound, gui
import psychopy.info
import numpy as np
from math import atan, log, ceil
import copy
import time, sys, os, pylab
try:
from noiseStaircaseHelpers import printStaircase, toStaircase, outOfStaircase, createNoise, plotDataAndPsychometricCurve
except ImportError:
print('Could not import from noiseStaircaseHelpers.py (you need that file to be in the same directory)')
try:
import stringResponse
except ImportError:
print('Could not import stringResponse.py (you need that file to be in the same directory)')
# --- Experiment configuration: edit these constants, not the logic below ---
wordEccentricity=3  # horizontal offset (deg) of each RSVP word stream from fixation
tasks=['T1']; task = tasks[0]
#THINGS THAT COULD PREVENT SUCCESS ON A STRANGE MACHINE
#same screen or external screen? Set scrn=0 if one screen. scrn=1 means display stimulus on second screen.
#widthPix, heightPix
quitFinder = False #if checkRefreshEtc, quitFinder becomes True
autopilot=False  # if True, responses are generated automatically (for testing)
demo=False #False
exportImages= False #quits after one trial
subject='Hubert' #user is prompted to enter true subject name
if autopilot: subject='auto'
# Save data in ./data when that directory exists, else the working directory.
if os.path.isdir('.'+os.sep+'data'):
    dataDir='data'
else:
    print('"data" directory does not exist, so saving data in present working directory')
    dataDir='.'
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())  # used in output filenames
showRefreshMisses=True #flicker fixation at refresh rate, to visualize if frames missed
feedback=True
autoLogging=False
refreshRate = 60.; #100
if demo:
    refreshRate = 60.; #100
# Staircase (noise-threshold) parameters
staircaseTrials = 25
prefaceStaircaseTrialsN = 20 #22
prefaceStaircaseNoise = np.array([5,20,20,20, 50,50,50,5,80,80,80,5,95,95,95]) #will be recycled / not all used, as needed
descendingPsycho = True #psychometric function- more noise means worse performance
threshCriterion = 0.58
# Stimulus word pool
numWordsInStream = 24
wordsUnparsed="the, and, for, you, say, but, his, not, she, can, who, get, her, all, one, out, see, him, now, how, its, our, two, way" #24 most common words
lettersUnparsed = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".upper()
wordList = wordsUnparsed.split(",") #split into list
for i in range(len(wordList)):
    wordList[i] = wordList[i].replace(" ", "") #delete spaces
# Display geometry and colors (colors are psychopy rgb, range -1..1)
bgColor = [-.7,-.7,-.7] # [-1,-1,-1]
cueColor = [1.,1.,1.]
letterColor = [1.,1.,1.]
cueRadius = 7 #6 deg in Goodbourn & Holcombe
widthPix= 1280 #monitor width in pixels of Agosta
heightPix= 800 #800 #monitor height in pixels
monitorwidth = 38.7 #monitor width in cm
scrn=0 #0 to use main screen, 1 to use external screen connected to computer
fullscr=True #True to use fullscreen, False to not. Timing probably won't be quite right if fullscreen = False
allowGUI = False
if demo: monitorwidth = 23#18.0
if exportImages:
    widthPix = 600; heightPix = 600
    monitorwidth = 13.0
    fullscr=False; scrn=0
    framesSaved=0
if demo:
    scrn=0; fullscr=False
    widthPix = 800; heightPix = 600
    monitorname='testMonitor'
    allowGUI = True
viewdist = 57. #cm
pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)  # approximate pixels per degree of visual angle
print('pixelperdegree=',pixelperdegree)
# Preliminary dialog: lets the experimenter choose staircase vs main experiment,
# whether to verify the refresh rate, and fullscreen mode.
infoFirst = { 'Do staircase (only)': False, 'Check refresh etc':True, 'Fullscreen (timing errors if not)': False, 'Screen refresh rate':refreshRate }
OK = gui.DlgFromDict(dictionary=infoFirst,
    title='Dual-RSVP experiment OR staircase to find thresh noise level for performance criterion',
    order=['Do staircase (only)', 'Check refresh etc', 'Fullscreen (timing errors if not)'],
    tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating'},
    #fixed=['Check refresh etc'])#this attribute can't be changed by the user
    )
if not OK.OK:
    print('User cancelled from dialog box'); core.quit()
# Unpack the user's choices
doStaircase = infoFirst['Do staircase (only)']
checkRefreshEtc = infoFirst['Check refresh etc']
fullscr = infoFirst['Fullscreen (timing errors if not)']
refreshRate = infoFirst['Screen refresh rate']
if checkRefreshEtc:
    quitFinder = True
if quitFinder:
    # Quit the macOS Finder to reduce the chance of dropped frames during
    # timing-critical stimulus presentation.
    import os
    applescript="\'tell application \"Finder\" to quit\'"
    shellCmd = 'osascript -e '+applescript
    os.system(shellCmd)
#letter size 2.5 deg
SOAms = 133 #Battelli, Agosta, Goodbourn, Holcombe mostly using 133
#Minimum SOAms should be 84 because any shorter, I can't always notice the second ring when lag1. 71 in Martini E2 and E1b (actually he used 66.6 but that's because he had a crazy refresh rate of 90 Hz)
letterDurMs = 80 #23.6 in Martini E2 and E1b (actually he used 22.2 but that's because he had a crazy refresh rate of 90 Hz)
ISIms = SOAms - letterDurMs
# Convert ms durations into whole numbers of frames at the assumed refresh rate
letterDurFrames = int( np.floor(letterDurMs / (1000./refreshRate)) )
cueDurFrames = letterDurFrames
ISIframes = int( np.floor(ISIms / (1000./refreshRate)) )
#have set ISIframes and letterDurFrames to integer that corresponds as close as possible to originally intended ms
rateInfo = 'total SOA=' + str(round( (ISIframes + letterDurFrames)*1000./refreshRate, 2)) + ' or ' + str(ISIframes + letterDurFrames) + ' frames, comprising\n'
rateInfo+= 'ISIframes ='+str(ISIframes)+' or '+str(ISIframes*(1000./refreshRate))+' ms and letterDurFrames ='+str(letterDurFrames)+' or '+str(round( letterDurFrames*(1000./refreshRate), 2))+'ms'
logging.info(rateInfo); print(rateInfo)
trialDurFrames = int( numWordsInStream*(ISIframes+letterDurFrames) ) #trial duration in frames
# Monitor calibration object, used by psychopy for deg<->pix conversions
monitorname = 'testmonitor'
waitBlank = False
mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#relying on monitorwidth cm (39 for Mitsubishi to do deg calculations) and gamma info in calibratn
mon.setSizePix( (widthPix,heightPix) )
units='deg' #'cm'
def openMyStimWindow():
    """Create and return the stimulus window with the standard lab settings.

    Wrapped in a function because the window is opened several times during
    the session and must be configured identically each time. Relies on the
    module-level globals: mon, widthPix, heightPix, allowGUI, units, bgColor,
    fullscr, scrn, waitBlank.
    """
    return visual.Window(monitor=mon, size=(widthPix, heightPix), allowGUI=allowGUI,
                         units=units, color=bgColor, colorSpace='rgb', fullscr=fullscr,
                         screen=scrn, waitBlanking=waitBlank)  # Holcombe lab monitor
myWin = openMyStimWindow()
refreshMsg2 = ''
if not checkRefreshEtc:
    refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
    refreshRateWrong = False
else: #checkRefreshEtc
    # Gather machine/timing info, then empirically measure the frame rate and
    # compare it to the refreshRate the program assumes.
    runInfo = psychopy.info.RunTimeInfo(
        # if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
        #author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
        #version="<your experiment version info>",
        win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
        refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
        verbose=True, ## True means report on everything
        userProcsDetailed=True ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
        )
    #print(runInfo)
    logging.info(runInfo)
    print('Finished runInfo- which assesses the refresh and processes of this computer')
    #check screen refresh is what assuming it is ##############################################
    Hzs=list()
    myWin.flip(); myWin.flip();myWin.flip();myWin.flip();
    myWin.setRecordFrameIntervals(True) #otherwise myWin.fps won't work
    print('About to measure frame flips')
    for i in range(50):
        myWin.flip()
        Hzs.append( myWin.fps() ) #varies wildly on successive runs!
    myWin.setRecordFrameIntervals(False)
    # end testing of screen refresh########################################################
    Hzs = np.array( Hzs ); Hz= np.median(Hzs)  # median is robust to the wild outliers
    msPerFrame= 1000./Hz
    refreshMsg1= 'Frames per second ~='+ str( np.round(Hz,1) )
    refreshRateTolerancePct = 3
    pctOff = abs( (np.median(Hzs)-refreshRate) / refreshRate)
    refreshRateWrong = pctOff > (refreshRateTolerancePct/100.)
    if refreshRateWrong:
        refreshMsg1 += ' BUT'
        refreshMsg1 += ' program assumes ' + str(refreshRate)
        refreshMsg2 = 'which is off by more than' + str(round(refreshRateTolerancePct,0)) + '%!!'
    else:
        refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
myWinRes = myWin.size
myWin.allowGUI =True
myWin.close() #have to close window to show dialog box
defaultNoiseLevel = 0.0 #to use if no staircase, can be set by user
trialsPerCondition = 1 #default value
dlgLabelsOrdered = list()  # records the order fields were added, so dialog results can be unpacked by name
if doStaircase:
    myDlg = gui.Dlg(title="Staircase to find appropriate noisePercent", pos=(200,400))
else:
    myDlg = gui.Dlg(title="RSVP experiment", pos=(200,400))
if not autopilot:
    myDlg.addField('Subject name (default="Hubert"):', 'Hubert', tip='or subject code')
    dlgLabelsOrdered.append('subject')
if doStaircase:
    easyTrialsCondText = 'Num preassigned noise trials to preface staircase with (default=' + str(prefaceStaircaseTrialsN) + '):'
    myDlg.addField(easyTrialsCondText, tip=str(prefaceStaircaseTrialsN))
    dlgLabelsOrdered.append('easyTrials')
    myDlg.addField('Staircase trials (default=' + str(staircaseTrials) + '):', tip="Staircase will run until this number is reached or it thinks it has precise estimate of threshold")
    dlgLabelsOrdered.append('staircaseTrials')
    pctCompletedBreak = 101
else:
    myDlg.addField('\tPercent noise dots=', defaultNoiseLevel, tip=str(defaultNoiseLevel))
    dlgLabelsOrdered.append('defaultNoiseLevel')
    myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))
    dlgLabelsOrdered.append('trialsPerCondition')
    pctCompletedBreak = 50
# Surface the refresh-rate check results in the dialog so the experimenter sees them
myDlg.addText(refreshMsg1, color='Black')
if refreshRateWrong:
    myDlg.addText(refreshMsg2, color='Red')
if refreshRateWrong:
    logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
    msgWrongResolution = 'Screen apparently NOT the desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels!!'
    myDlg.addText(msgWrongResolution, color='Red')
    logging.error(msgWrongResolution)
    print(msgWrongResolution)
myDlg.addText('Note: to abort press ESC at a trials response screen', color=[-1.,1.,-1.]) # color='DimGrey') color names stopped working along the way, for unknown reason
myDlg.show()
if myDlg.OK: #unpack information from dialogue box
    thisInfo = myDlg.data #this will be a list of data returned from each field added in order
    if not autopilot:
        name=thisInfo[dlgLabelsOrdered.index('subject')]
        if len(name) > 0: #if entered something
            subject = name #change subject default name to what user entered
    if doStaircase:
        if len(thisInfo[dlgLabelsOrdered.index('staircaseTrials')]) >0:
            staircaseTrials = int( thisInfo[ dlgLabelsOrdered.index('staircaseTrials') ] ) #convert string to integer
            print('staircaseTrials entered by user=',staircaseTrials)
            logging.info('staircaseTrials entered by user=',staircaseTrials)
        if len(thisInfo[dlgLabelsOrdered.index('easyTrials')]) >0:
            prefaceStaircaseTrialsN = int( thisInfo[ dlgLabelsOrdered.index('easyTrials') ] ) #convert string to integer
            print('prefaceStaircaseTrialsN entered by user=',thisInfo[dlgLabelsOrdered.index('easyTrials')])
            logging.info('prefaceStaircaseTrialsN entered by user=',prefaceStaircaseTrialsN)
    else: #not doing staircase
        trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer
        print('trialsPerCondition=',trialsPerCondition)
        logging.info('trialsPerCondition =',trialsPerCondition)
        defaultNoiseLevel = int (thisInfo[ dlgLabelsOrdered.index('defaultNoiseLevel') ])
else:
    print('User cancelled from dialog box.')
    logging.flush()
    core.quit()
if not demo:
    allowGUI = False
myWin = openMyStimWindow()  # reopen the stimulus window now that the dialog is done
#set up output data file, log file, copy of program code, and logging
infix = ''
if doStaircase:
    infix = 'staircase_'
fileName = os.path.join(dataDir, subject + '_' + infix+ timeAndDateStr)
if not demo and not exportImages:
    dataFile = open(fileName+'.txt', 'w')
    # Archive the exact program code used for this subject alongside the data
    saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileName + '.py'
    os.system(saveCodeCmd) #save a copy of the code as it was when that subject was run
    logFname = fileName+'.log'
    ppLogF = logging.LogFile(logFname,
        filemode='w',#if you set this to 'a' it will append instead of overwriting
        level=logging.INFO)#errors, data and warnings will be sent to this logfile
if demo or exportImages:
    dataFile = sys.stdout; logF = sys.stdout
    logging.console.setLevel(logging.ERROR) #only show this level messages and higher
logging.console.setLevel(logging.ERROR) #DEBUG means set console to receive nearly all messges, INFO next level, EXP, DATA, WARNING and ERROR
if fullscr and not demo and not exportImages:
    runInfo = psychopy.info.RunTimeInfo(
        # if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
        #author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
        #version="<your experiment version info>",
        win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
        refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
        verbose=False, ## True means report on everything
        userProcsDetailed=True, ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
        #randomSeed='set:42', ## a way to record, and optionally set, a random seed of type str for making reproducible random sequences
        ## None -> default
        ## 'time' will use experimentRuntime.epoch as the value for the seed, different value each time the script is run
        ##'set:time' --> seed value is set to experimentRuntime.epoch, and initialized: random.seed(info['randomSeed'])
        ##'set:42' --> set & initialize to str('42'), and will give the same sequence of random.random() for all runs of the script
        )
    logging.info(runInfo)
logging.flush()
# Pre-drawn TextStims for each RSVP stream, filled by calcAndPredrawStimuli()
textStimuliStream1 = list()
textStimuliStream2 = list() #used for second, simultaneous RSVP stream
def calcAndPredrawStimuli(wordList):
    """Pre-create the TextStims for both RSVP streams and return the index
    sequences determining word order in each stream.

    Appends one TextStim per word to the module-level lists
    textStimuliStream1 (left stream) and textStimuliStream2 (right stream).
    Relies on globals: myWin, numWordsInStream, ltrHeight, letterColor,
    wordEccentricity, autoLogging, textStimuliStream1, textStimuliStream2.
    """
    if len(wordList) < numWordsInStream:
        print('Error! Your word list must have at least ',numWordsInStream,'strings')
    idxsIntoWordList = np.arange( len(wordList) ) #create a list of indexes of the entire word list
    print('wordList=',wordList)
    for i in range(0,numWordsInStream): #draw the words that will be used on this trial, the first 26 of the shuffled list
        word = wordList[ i ] # #[ idxsIntoWordList[i] ]
        textStimulusStream1 = visual.TextStim(myWin,text=word,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)
        textStimulusStream2 = visual.TextStim(myWin,text=word,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)
        textStimulusStream1.setPos([-wordEccentricity,0]) #left
        textStimuliStream1.append(textStimulusStream1) #add to list of text stimuli that comprise stream 1
        textStimulusStream2.setPos([wordEccentricity,0]) #right
        textStimuliStream2.append(textStimulusStream2) #add to list of text stimuli
    # NOTE(review): idxsStream1 is an ALIAS of idxsIntoWordList (no copy), so the
    # shuffle on the next line also shuffles idxsStream1. If stream 1 was meant to
    # stay in word-list order, a copy should be taken here -- confirm intent.
    idxsStream1 = idxsIntoWordList #first RSVP stream
    np.random.shuffle(idxsIntoWordList)
    idxsStream2 = copy.deepcopy(idxsIntoWordList)
    np.random.shuffle(idxsStream2)
    return idxsStream1, idxsStream2
#create click sound for keyboard
try:
    click=sound.Sound('406__tictacshutup__click-1-d.wav')
except: #in case file missing, create inferior click manually
    logging.warn('Could not load the desired click sound file, instead using manually created inferior click')
    click=sound.Sound('D',octave=4, sampleRate=22050, secs=0.015, bits=8)
if showRefreshMisses:
    fixSizePix = 32 #2.6 #make fixation bigger so flicker more conspicuous
else: fixSizePix = 32
fixColor = [1,1,1]
if exportImages: fixColor= [0,0,0]
# Noise texture for the fixation patch; counterphase-flickering it makes breaks in fixation salient
fixatnNoiseTexture = np.round( np.random.rand(fixSizePix/4,fixSizePix/4) ,0 ) *2.0-1 #Can counterphase flicker noise texture to create salient flicker if you break fixation
fixation= visual.PatchStim(myWin, tex=fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False)
fixationBlank= visual.PatchStim(myWin, tex= -1*fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False) #reverse contrast
fixationPoint= visual.PatchStim(myWin,tex='none',colorSpace='rgb',color=(1,1,1),size=4,units='pix',autoLog=autoLogging)
# Response-screen stimuli
respPromptStim = visual.TextStim(myWin,pos=(0, -.9),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim = visual.TextStim(myWin,pos=(0, -.8),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim.setText('Hit ENTER to accept. Backspace to edit')
respStim = visual.TextStim(myWin,pos=(0,0),colorSpace='rgb',color=(1,1,0),alignHoriz='center', alignVert='center',height=3,units='deg',autoLog=autoLogging)
clickSound, badKeySound = stringResponse.setupSoundsForResponse()
requireAcceptance = False
nextText = visual.TextStim(myWin,pos=(0, .1),colorSpace='rgb',color = (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
NextRemindCountText = visual.TextStim(myWin,pos=(0,.2),colorSpace='rgb',color= (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
screenshot= False; screenshotDone = False
stimList = []
#SETTING THE CONDITIONS
# Fully crossed design: cue position x which side is reported first x whether
# both words are mirror-reversed.
cuePositions = np.array([6,7,8,9,10]) # [4,10,16,22] used in Martini E2, group 2
for cuePos in cuePositions:
    for rightResponseFirst in [False,True]:
        for bothWordsFlipped in [False,True]:
            stimList.append( {'cuePos':cuePos, 'rightResponseFirst':rightResponseFirst,
                'leftStreamFlip':bothWordsFlipped, 'rightStreamFlip':bothWordsFlipped} )
trials = data.TrialHandler(stimList,trialsPerCondition) #constant stimuli method
trialsForPossibleStaircase = data.TrialHandler(stimList,trialsPerCondition) #independent randomization, just to create random trials for staircase phase
numRightWrongEachCuepos = np.zeros([ len(cuePositions), 1 ]); #summary results to print out at end
logging.info( 'numtrials=' + str(trials.nTotal) + ' and each trialDurFrames='+str(trialDurFrames)+' or '+str(trialDurFrames*(1000./refreshRate))+ \
    ' ms' + ' task=' + task)
def numberToLetter(number):
    """Map 0..25 to 'A'..'Z'; any out-of-range or non-integer input yields '@'.

    0 = A, 25 = Z. '@' is the sentinel for "not a valid letter index".
    """
    if number < 0 or number > 25:
        return '@'
    try:
        return chr(ord('A') + number)
    except (TypeError, ValueError):
        # e.g. a float or other non-int slipped through; keep the '@' sentinel
        # rather than crashing mid-experiment (was a bare except before).
        return '@'
def letterToNumber(letter):
    """Map 'A'..'Z' to 0..25; anything else yields -999.

    A = 0, Z = 25. -999 is the sentinel for "not an uppercase letter": wrong
    case, multi-character strings, and non-string inputs all map to it.
    """
    try:
        if letter < 'A' or letter > 'Z':
            return -999
        # ord() raises TypeError for multi-character strings like 'AB'
        return ord(letter) - ord('A')
    except (TypeError, ValueError):
        # Comparison or ord() failed (non-string or multi-char input); keep the
        # -999 sentinel rather than crashing (was a bare except before).
        return -999
def wordToIdx(word, wordList):
    """Return the index of the first wordList entry whose uppercase form equals
    word (which the caller supplies already uppercased), or None if absent."""
    try:
        for position, candidate in enumerate(wordList):
            if candidate.upper() == word:
                return position
        return None  # no entry matched
    except:
        print('Unexpected error in wordToIdx with word=',word)
        return (None)
#print header for data file: one tab-separated column name per field written on each trial
print('experimentPhase\ttrialnum\tsubject\ttask\t',file=dataFile,end='')
print('noisePercent\tleftStreamFlip\trightStreamFlip\t',end='',file=dataFile)
if task=='T1':
    numRespsWanted = 2  # one response per stream; also used later when tallying results
    dataFile.write('rightResponseFirst\t')
    for i in range(numRespsWanted):
        dataFile.write('cuePos'+str(i)+'\t') #have to use write to avoid ' ' between successive text, at least until Python 3
        dataFile.write('answer'+str(i)+'\t')
        dataFile.write('response'+str(i)+'\t')
        dataFile.write('correct'+str(i)+'\t')
        dataFile.write('responsePosRelative'+str(i)+'\t')
    print('timingBlips',file=dataFile)
#end of header
#end of header
def oneFrameOfStim( n,cue,seq1,seq2,cueDurFrames,letterDurFrames,ISIframes,thisTrial,textStimuliStream1,textStimuliStream2,
        noise,proportnNoise,allFieldCoords,numNoiseDots ):
    """Draw frame n of the trial: the current word of each stream, the cue ring
    (visible only during its frames), and optionally the noise dots.

    seq1/seq2 are arrays of indices into the pre-drawn textStimuliStream1/2.
    Always draws everything each frame -- invisible items are drawn in bgColor --
    so any timing problems occur equally often on every frame.
    Returns True.
    """
    SOAframes = letterDurFrames+ISIframes
    cueFrames = thisTrial['cuePos']*SOAframes #cuesPos is global variable
    stimN = int( np.floor(n/SOAframes) )
    frameOfThisLetter = n % SOAframes #every SOAframes, new letter
    showLetter = frameOfThisLetter < letterDurFrames #if true, it's not time for the blank ISI. it's still time to draw the letter
    #print 'n=',n,' SOAframes=',SOAframes, ' letterDurFrames=', letterDurFrames, ' (n % SOAframes) =', (n % SOAframes) #DEBUGOFF
    thisStimIdx = seq1[stimN] #which word of the stream should be shown on this frame?
    if seq2 is not None:
        thisStim2Idx = seq2[stimN]
    #so that any timing problems occur just as often for every frame, always draw the letter and the cue, but simply draw it in the bgColor when it's not meant to be on
    cue.setLineColor( bgColor )
    if type(cueFrames) not in [tuple,list,np.ndarray]: #scalar. But need collection to do loop based on it
        cueFrames = list([cueFrames])
    for cueFrame in cueFrames: #check whether it's time for any cue
        if n>=cueFrame and n<cueFrame+cueDurFrames:
            cue.setLineColor( cueColor )
    if showLetter:
        textStimuliStream1[thisStimIdx].setColor( letterColor )
        textStimuliStream2[thisStim2Idx].setColor( letterColor )
    else:
        # ISI: draw in background color so the stimulus is invisible
        textStimuliStream1[thisStimIdx].setColor( bgColor )
        textStimuliStream2[thisStim2Idx].setColor( bgColor )
    textStimuliStream1[thisStimIdx].flipHoriz = thisTrial['leftStreamFlip']
    textStimuliStream2[thisStim2Idx].flipHoriz = thisTrial['rightStreamFlip']
    textStimuliStream1[thisStimIdx].draw()
    textStimuliStream2[thisStim2Idx].draw()
    cue.draw()
    refreshNoise = False #Not recommended because takes longer than a frame, even to shuffle apparently. Or may be setXYs step
    if proportnNoise>0 and refreshNoise:
        if frameOfThisLetter ==0:
            np.random.shuffle(allFieldCoords)
            dotCoords = allFieldCoords[0:numNoiseDots]
            noise.setXYs(dotCoords)
    if proportnNoise>0:
        noise.draw()
    return True
# #######End of function definition that displays the stimuli!!!! #####################################
#############################################################################################################################
# The cue is a circle drawn around the cued word; it stays on every frame but is
# drawn in bgColor (invisible) except during its cue frames.
cue = visual.Circle(myWin,
    radius=cueRadius,#Martini used circles with diameter of 12 deg
    lineColorSpace = 'rgb',
    lineColor=bgColor,
    lineWidth=4.0, #in pixels. Was thinner (2 pixels) in letter AB experiments
    units = 'deg',
    fillColorSpace = 'rgb',
    fillColor=None, #beware, with convex shapes fill colors don't work
    pos= [0,0], #the anchor (rotation and vertices are position with respect to this)
    interpolate=True,
    autoLog=False)#this stim changes too much for autologging to be useful
ltrHeight = 2.5 #Martini letters were 2.5deg high
#All noise dot coordinates ultimately in pixels, so can specify each dot is one pixel
noiseFieldWidthDeg=ltrHeight *1.0
noiseFieldWidthPix = int( round( noiseFieldWidthDeg*pixelperdegree ) )
def timingCheckAndLog(ts,trialN):
    """Detect and log frames of the trial that took much longer than one refresh.

    ts: list of clock readings taken after each frame flip.
    trialN: trial number, included in the log messages.
    Returns the number of overlong (presumably skipped) frames.
    Relies on globals: refreshRate, demo, logging.
    """
    interframeIntervs = np.diff(ts)*1000  # intervals between successive frames, in ms
    #print ' interframe intervs were ',around(interframeIntervs,1) #DEBUGOFF
    frameTimeTolerance=.3 #proportion longer than refreshRate that will not count as a miss
    longFrameLimit = np.round(1000/refreshRate*(1.0+frameTimeTolerance),2)
    idxsInterframeLong = np.where( interframeIntervs > longFrameLimit ) [0] #frames that exceeded tolerance
    numCasesInterframeLong = len( idxsInterframeLong )
    if numCasesInterframeLong > 0 and (not demo):
        # (removed an unreachable "if demo:" branch here -- this code only runs
        # when demo is False)
        longFramesStr = ('ERROR,'+str(numCasesInterframeLong)+' frames were longer than '+str(longFrameLimit)+' ms'
                         + ' apparently screen refreshes skipped, interframe durs were:'
                         + str( np.around( interframeIntervs[idxsInterframeLong] ,1 ) )
                         + ' and was these frames: ' + str(idxsInterframeLong))
        logging.error( 'trialnum='+str(trialN)+' '+longFramesStr )
        # Also log the durations of the frames flanking each long one, as INFO so
        # it won't fill the console when the console level is WARNING or higher.
        flankingAlso=list()
        for idx in idxsInterframeLong:
            if idx-1>=0:
                flankingAlso.append(idx-1)
            else: flankingAlso.append(np.nan)
            flankingAlso.append(idx)
            if idx+1<len(interframeIntervs): flankingAlso.append(idx+1)
            else: flankingAlso.append(np.nan)
        flankingAlso = np.array(flankingAlso)
        # BUGFIX: this used np.negative(np.isnan(...)), i.e. arithmetic negation,
        # which modern numpy rejects on boolean arrays; logical_not was intended.
        flankingAlso = flankingAlso[np.logical_not(np.isnan(flankingAlso))] #remove nan values
        flankingAlso = flankingAlso.astype(int) #cast as integers, so can use as subscripts
        logging.info( 'flankers also='+str( np.around( interframeIntervs[flankingAlso], 1) ) )
    return numCasesInterframeLong
#end timing check
# Running tallies across trials, reported/saved at the end of the session
trialClock = core.Clock()
numTrialsCorrect = 0;
numTrialsApproxCorrect = 0;
numTrialsEachCorrect= np.zeros( numRespsWanted )
numTrialsEachApproxCorrect= np.zeros( numRespsWanted )
def do_RSVP_stim(thisTrial, seq1, seq2, proportnNoise,trialN):
    """Present one RSVP trial: fixation interval, then the two word streams with
    cue (and optional noise), frame by frame.

    Returns (cuesPos, correctAnswerIdxsStream1, correctAnswerIdxsStream2, ts)
    where ts is the list of per-frame clock times for timing checks.
    Relies on globals: myWin, cue, bgColor, trialClock, refreshRate,
    trialDurFrames, textStimuliStream1/2, fixationPoint, exportImages, task,
    respPromptStim, noiseFieldWidthPix, logging.
    thisTrial must have a 'cuePos' key.
    """
    global framesSaved #because change this variable. Can only change a global variable if you declare it
    cuesPos = [] #will contain the positions in the stream of all the cues (targets)
    cuesPos.append(thisTrial['cuePos'])
    cuesPos = np.array(cuesPos)
    noise = None; allFieldCoords=None; numNoiseDots=0
    if proportnNoise > 0: #generating noise is time-consuming, so only do it once per trial. Then shuffle noise coordinates for each letter
        (noise,allFieldCoords,numNoiseDots) = createNoise(proportnNoise,myWin,noiseFieldWidthPix, bgColor)
    preDrawStimToGreasePipeline = list() #I don't know why this works, but without drawing it I have consistent timing blip first time that draw ringInnerR for phantom contours
    cue.setLineColor(bgColor)
    preDrawStimToGreasePipeline.extend([cue])
    for stim in preDrawStimToGreasePipeline:
        stim.draw()
    myWin.flip(); myWin.flip()
    #end preparation of stimuli
    core.wait(.1);
    trialClock.reset()
    fixatnPeriodMin = 0.3
    fixatnPeriodFrames = int( (np.random.rand(1)/2.+fixatnPeriodMin) *refreshRate) #random interval between 800ms and 1.3s
    ts = list(); #to store time of each drawing, to check whether skipped frames
    for i in range(fixatnPeriodFrames+20): #prestim fixation interval
        #if i%4>=2 or demo or exportImages: #flicker fixation on and off at framerate to see when skip frame
        # fixation.draw()
        #else: fixationBlank.draw()
        fixationPoint.draw()
        myWin.flip() #end fixation interval
    #myWin.setRecordFrameIntervals(True); #can't get it to stop detecting superlong frames
    t0 = trialClock.getTime()
    for n in range(trialDurFrames): #this is the loop for this trial's stimulus!
        worked = oneFrameOfStim( n,cue,seq1,seq2,cueDurFrames,letterDurFrames,ISIframes,thisTrial,textStimuliStream1,textStimuliStream2,
            noise,proportnNoise,allFieldCoords,numNoiseDots ) #draw letter and possibly cue and noise on top
        fixationPoint.draw()
        if exportImages:
            myWin.getMovieFrame(buffer='back') #for later saving
            framesSaved +=1
        myWin.flip()
        t=trialClock.getTime()-t0; ts.append(t);
    #end of big stimulus loop
    myWin.setRecordFrameIntervals(False);
    if task=='T1':
        respPromptStim.setText('What was circled?',log=False)
    else: respPromptStim.setText('Error: unexpected task',log=False)
    postCueNumBlobsAway=-999 #doesn't apply to non-tracking and click tracking task
    correctAnswerIdxsStream1 = np.array( seq1[cuesPos] )
    correctAnswerIdxsStream2 = np.array( seq2[cuesPos] )
    #print('correctAnswerIdxsStream1=',correctAnswerIdxsStream1, 'wordList[correctAnswerIdxsStream1[0]]=',wordList[correctAnswerIdxsStream1[0]])
    return cuesPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2,ts
def handleAndScoreResponse(passThisTrial,response,responseAutopilot,task,stimSequence,cuePos,correctAnswerIdx):
    """Score one collected response against the stimulus stream and append
    the per-response fields (answerPos, answer, response, correct,
    responsePosRelative) to dataFile.

    Returns (correct, approxCorrect, responsePosRelative); approxCorrect is 1
    when the reported item occurred within +/-3 positions of the cue.
    NOTE(review): indentation reconstructed from a whitespace-stripped copy.
    """
    #Handle response, calculate whether correct, ########################################
    #responses are actual characters
    #correctAnswer is index into stimSequence
    #autopilot is global variable
    if autopilot or passThisTrial:
        response = responseAutopilot
    #print('handleAndScoreResponse correctAnswerIdxs=',correctAnswerIdxs,'\nstimSequence=',stimSequence, '\nwords=',wordList)
    correct = 0
    approxCorrect = 0
    posOfResponse = -999
    responsePosRelative = -999
    idx = correctAnswerIdx
    correctAnswer = wordList[idx].upper()
    responseString= ''.join(['%s' % char for char in response])
    responseString= responseString.upper()
    #print('correctAnswer=',correctAnswer ,' responseString=',responseString)
    if correctAnswer == responseString:
        correct = 1
    #print('correct=',correct)
    responseWordIdx = wordToIdx(responseString,wordList)
    if responseWordIdx is None: #response is not in the wordList
        posOfResponse = -999
        # NOTE(review): logging.warn is a deprecated alias of logging.warning
        logging.warn('Response was not present in the stimulus stream')
    else:
        # locate where in the stream the reported word actually appeared
        posOfResponse= np.where( responseWordIdx==stimSequence )
        posOfResponse= posOfResponse[0] #list with two entries, want first which will be array of places where the response was found in the sequence
        if len(posOfResponse) > 1:
            logging.error('Expected response to have occurred in only one position in stream')
        posOfResponse = posOfResponse[0] #first element of list (should be only one element long
        responsePosRelative = posOfResponse - cuePos
        approxCorrect = abs(responsePosRelative)<= 3 #Vul efficacy measure of getting it right to within plus/minus
    #print('wordToIdx(',responseString,',',wordList,')=',responseWordIdx,' stimSequence=',stimSequence,'\nposOfResponse = ',posOfResponse) #debugON
    #print response stuff to dataFile
    #header was answerPos0, answer0, response0, correct0, responsePosRelative0
    print(cuePos,'\t', end='', file=dataFile)
    print(correctAnswer, '\t', end='', file=dataFile) #answer0
    print(responseString, '\t', end='', file=dataFile) #response0
    print(correct, '\t', end='',file=dataFile) #correct0
    print(responsePosRelative, '\t', end='',file=dataFile) #responsePosRelative0
    return correct,approxCorrect,responsePosRelative
    #end handleAndScoreResponses
def play_high_tone_correct_low_incorrect(correct, passThisTrial=False):
    """Auditory feedback: a high tone for a correct response, a low tone for
    an incorrect one, and an alternating mid/low pattern when the trial was
    passed (skipped)."""
    tone_correct = sound.Sound('G',octave=5, sampleRate=6000, secs=.3, bits=8)
    tone_incorrect = sound.Sound('F',octave=3, sampleRate=6000, secs=.3, bits=8)
    tone_correct.setVolume(0.9)
    tone_incorrect.setVolume(1.0)
    if correct:
        tone_correct.play()
        return
    if passThisTrial:
        tone_pass = sound.Sound('G',octave=4, sampleRate=2000, secs=.08, bits=8)
        for _ in range(2):
            tone_pass.play(); tone_incorrect.play();
        return
    # incorrect response
    tone_incorrect.play()
# Top-level experiment driver: runs either the QUEST/conventional staircase
# phase or the main trials loop, then prints a summary.
# NOTE(review): leading indentation in this copy was stripped; nesting below
# was reconstructed from the code's structure -- verify against the original
# file before committing.
expStop=False
nDoneMain = -1 #change to zero once start main part of experiment
if doStaircase:
    #create the staircase handler
    useQuest = True
    if useQuest:
        staircase = data.QuestHandler(startVal = 95,
                      startValSd = 80,
                      stopInterval= 1, #sd of posterior has to be this small or smaller for staircase to stop, unless nTrials reached
                      nTrials = staircaseTrials,
                      #extraInfo = thisInfo,
                      pThreshold = threshCriterion, #0.25,
                      gamma = 1./26,
                      delta=0.02, #lapse rate, I suppose for Weibull function fit
                      method = 'quantile', #uses the median of the posterior as the final answer
                      stepType = 'log', #will home in on the 80% threshold. But stepType = 'log' doesn't usually work
                      minVal=1, maxVal = 100
                      )
        print('created QUEST staircase')
    else:
        stepSizesLinear = [.2,.2,.1,.1,.05,.05]
        stepSizesLog = [log(1.4,10),log(1.4,10),log(1.3,10),log(1.3,10),log(1.2,10)]
        staircase = data.StairHandler(startVal = 0.1,
                      stepType = 'log', #if log, what do I want to multiply it by
                      stepSizes = stepSizesLog, #step size to use after each reversal
                      minVal=0, maxVal=1,
                      nUp=1, nDown=3, #will home in on the 80% threshold
                      nReversals = 2, #The staircase terminates when nTrials have been exceeded, or when both nReversals and nTrials have been exceeded
                      nTrials=1)
        print('created conventional staircase')
    if prefaceStaircaseTrialsN > len(prefaceStaircaseNoise): #repeat array to accommodate desired number of easyStarterTrials
        prefaceStaircaseNoise = np.tile( prefaceStaircaseNoise, ceil( prefaceStaircaseTrialsN/len(prefaceStaircaseNoise) ) )
        prefaceStaircaseNoise = prefaceStaircaseNoise[0:prefaceStaircaseTrialsN]
    phasesMsg = ('Doing '+str(prefaceStaircaseTrialsN)+'trials with noisePercent= '+str(prefaceStaircaseNoise)+' then doing a max '+str(staircaseTrials)+'-trial staircase')
    print(phasesMsg); logging.info(phasesMsg)
    #staircaseStarterNoise PHASE OF EXPERIMENT
    corrEachTrial = list() #only needed for easyStaircaseStarterNoise
    staircaseTrialN = -1; mainStaircaseGoing = False
    while (not staircase.finished) and expStop==False: #staircase.thisTrialN < staircase.nTrials
        if staircaseTrialN+1 < len(prefaceStaircaseNoise): #still doing easyStaircaseStarterNoise
            staircaseTrialN += 1
            noisePercent = prefaceStaircaseNoise[staircaseTrialN]
        else:
            if staircaseTrialN+1 == len(prefaceStaircaseNoise): #add these non-staircase trials so QUEST knows about them
                mainStaircaseGoing = True
                print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise)
                staircase.importData(100-prefaceStaircaseNoise, np.array(corrEachTrial))
                printStaircase(staircase, descendingPsycho, briefTrialUpdate=False, printInternalVal=True, alsoLog=False)
            try: #advance the staircase
                printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)
                noisePercent = 100. - staircase.next() #will step through the staircase, based on whether told it (addResponse) got it right or wrong
                staircaseTrialN += 1
            except StopIteration: #Need this here, even though test for finished above. I can't understand why finished test doesn't accomplish this.
                print('stopping because staircase.next() returned a StopIteration, which it does when it is finished')
                break #break out of the trials loop
        #print('staircaseTrialN=',staircaseTrialN)
        idxsStream1, idxsStream2 = calcAndPredrawStimuli(wordList)
        # NOTE(review): 'cuePos' is passed where do_RSVP_stim expects a trial
        # dict ('thisTrial'), and 'cuePos' does not appear to be defined in
        # this branch -- this staircase path looks broken; confirm.
        cuesPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2, ts = \
            do_RSVP_stim(cuePos, idxsStream1, idxsStream2, noisePercent/100.,staircaseTrialN)
        numCasesInterframeLong = timingCheckAndLog(ts,staircaseTrialN)
        expStop,passThisTrial,responses,responsesAutopilot = \
            stringResponse.collectStringResponse(numRespsWanted,respPromptStim,respStim,acceptTextStim,myWin,clickSound,badKeySound,
                requireAcceptance,autopilot,responseDebug=True)
        if not expStop:
            if mainStaircaseGoing:
                print('staircase\t', end='', file=dataFile)
            else:
                print('staircase_preface\t', end='', file=dataFile)
            #header start 'trialnum\tsubject\ttask\t'
            print(staircaseTrialN,'\t', end='', file=dataFile) #first thing printed on each line of dataFile
            print(subject,'\t',task,'\t', round(noisePercent,2),'\t', end='', file=dataFile)
            # NOTE(review): 'responseAutopilot' (singular), 'sequenceLeft' and
            # 'correctAnswerIdx' are not defined in this branch (the collected
            # values are 'responsesAutopilot', 'idxsStream1/2' and
            # 'correctAnswerIdxsStream1/2') -- confirm intended variables.
            correct,approxCorrect,responsePosRelative= handleAndScoreResponse(
                passThisTrial,responses,responseAutopilot,task,sequenceLeft,cuesPos[0],correctAnswerIdx )
            print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile
            core.wait(.06)
            if feedback:
                play_high_tone_correct_low_incorrect(correct, passThisTrial=False)
            # NOTE(review): 'T1approxCorrect' is never assigned here; the
            # value computed above is 'approxCorrect' -- confirm.
            print('staircaseTrialN=', staircaseTrialN,' noisePercent=',round(noisePercent,3),' T1approxCorrect=',T1approxCorrect) #debugON
            corrEachTrial.append(T1approxCorrect)
            if mainStaircaseGoing:
                staircase.addResponse(T1approxCorrect, intensity = 100-noisePercent) #Add a 1 or 0 to signify a correct/detected or incorrect/missed trial
                #print('Have added an intensity of','{:.3f}'.format(100-noisePercent), 'T1approxCorrect =', T1approxCorrect, ' to staircase') #debugON
    #ENDING STAIRCASE PHASE
    if staircaseTrialN+1 < len(prefaceStaircaseNoise) and (staircaseTrialN>=0): #exp stopped before got through staircase preface trials, so haven't imported yet
        print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise[0:staircaseTrialN+1])
        # NOTE(review): the slice here ends at staircaseTrialN but the print
        # above uses staircaseTrialN+1 -- possible off-by-one.
        staircase.importData(100-prefaceStaircaseNoise[0:staircaseTrialN], np.array(corrEachTrial))
    print('framesSaved after staircase=',framesSaved) #debugON
    timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
    msg = ('prefaceStaircase phase' if expStop else '')
    msg += ('ABORTED' if expStop else 'Finished') + ' staircase part of experiment at ' + timeAndDateStr
    logging.info(msg); print(msg)
    printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)
    #print('staircase.quantile=',round(staircase.quantile(),2),' sd=',round(staircase.sd(),2))
    threshNoise = round(staircase.quantile(),3)
    if descendingPsycho:
        threshNoise = 100- threshNoise
    threshNoise = max( 0, threshNoise ) #e.g. ff get all trials wrong, posterior peaks at a very negative number
    msg= 'Staircase estimate of threshold = ' + str(threshNoise) + ' with sd=' + str(round(staircase.sd(),2))
    logging.info(msg); print(msg)
    myWin.close()
    #Fit and plot data
    fit = None
    try:
        intensityForCurveFitting = staircase.intensities
        if descendingPsycho:
            intensityForCurveFitting = 100-staircase.intensities #because fitWeibull assumes curve is ascending
        fit = data.FitWeibull(intensityForCurveFitting, staircase.data, expectedMin=1/26., sems = 1.0/len(staircase.intensities))
    except:
        # NOTE(review): bare except deliberately keeps the program running if
        # the Weibull fit fails; the plot below then shows raw data only.
        print("Fit failed.")
    plotDataAndPsychometricCurve(staircase,fit,descendingPsycho,threshCriterion)
    #save figure to file
    pylab.savefig(fileName+'.pdf')
    print('The plot has been saved, as '+fileName+'.pdf')
    pylab.show() #must call this to actually show plot
else: #not staircase
    noisePercent = defaultNoiseLevel
    phasesMsg = 'Experiment will have '+str(trials.nTotal)+' trials. Letters will be drawn with superposed noise of ' + "{:.2%}".format(defaultNoiseLevel)
    print(phasesMsg); logging.info(phasesMsg)
    nDoneMain =0
    while nDoneMain < trials.nTotal and expStop==False: #MAIN EXPERIMENT LOOP
        if nDoneMain==0:
            msg='Starting main (non-staircase) part of experiment'
            logging.info(msg); print(msg)
        thisTrial = trials.next() #get a proper (non-staircase) trial
        sequenceStream1, sequenceStream2 = calcAndPredrawStimuli(wordList)
        cuesPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2, ts = \
            do_RSVP_stim(thisTrial, sequenceStream1, sequenceStream2, noisePercent/100.,nDoneMain)
        numCasesInterframeLong = timingCheckAndLog(ts,nDoneMain)
        #call for each response
        expStop = list(); passThisTrial = list(); responses=list(); responsesAutopilot=list()
        numCharsInResponse = len(wordList[0])
        dL = [None]*numRespsWanted #dummy list for null values
        expStop = copy.deepcopy(dL); responses = copy.deepcopy(dL); responsesAutopilot = copy.deepcopy(dL); passThisTrial=copy.deepcopy(dL)
        # NOTE(review): range(...).reverse() would fail on Python 3 (range
        # objects have no reverse method); this script appears to target
        # Python 2 -- confirm runtime version.
        responseOrder = range(numRespsWanted)
        if thisTrial['rightResponseFirst']: #change order of indices depending on rightResponseFirst. response0, answer0 etc refer to which one had to be reported first
            responseOrder.reverse()
        for i in responseOrder:
            x = 3* wordEccentricity*(i*2-1) #put it 3 times farther out than stimulus, so participant is sure which is left and which right
            # NOTE(review): this call passes numCharsInResponse, x and
            # fixationPoint, unlike the staircase branch above -- confirm
            # which collectStringResponse signature is current.
            expStop[i],passThisTrial[i],responses[i],responsesAutopilot[i] = stringResponse.collectStringResponse(
                numCharsInResponse,x,respPromptStim,respStim,acceptTextStim,fixationPoint,myWin,clickSound,badKeySound,
                requireAcceptance,autopilot,responseDebug=True)
        expStop = np.array(expStop).any(); passThisTrial = np.array(passThisTrial).any()
        if not expStop:
            print('main\t', end='', file=dataFile) #first thing printed on each line of dataFile to indicate main part of experiment, not staircase
            print(nDoneMain,'\t', end='', file=dataFile)
            print(subject,'\t',task,'\t', round(noisePercent,3),'\t', end='', file=dataFile)
            print(thisTrial['leftStreamFlip'],'\t', end='', file=dataFile)
            print(thisTrial['rightStreamFlip'],'\t', end='', file=dataFile)
            print(thisTrial['rightResponseFirst'],'\t', end='', file=dataFile)
            i = 0
            eachCorrect = np.ones(numRespsWanted)*-999; eachApproxCorrect = np.ones(numRespsWanted)*-999
            for i in range(numRespsWanted): #scored and printed to dataFile in left first, right second order even if collected in different order
                if i==0:
                    sequenceStream = sequenceStream1; correctAnswerIdxs = correctAnswerIdxsStream1;
                else: sequenceStream = sequenceStream2; correctAnswerIdxs = correctAnswerIdxsStream2;
                correct,approxCorrect,responsePosRelative = (
                    handleAndScoreResponse(passThisTrial,responses[i],responsesAutopilot[i],task,sequenceStream,thisTrial['cuePos'],correctAnswerIdxs ) )
                eachCorrect[i] = correct
                eachApproxCorrect[i] = approxCorrect
            print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile
            print('correct=',correct,' approxCorrect=',approxCorrect,' eachCorrect=',eachCorrect, ' responsePosRelative=', responsePosRelative)
            numTrialsCorrect += eachCorrect.all() #so count -1 as 0
            numTrialsApproxCorrect += eachApproxCorrect.all()
            numTrialsEachCorrect += eachCorrect #list numRespsWanted long
            numTrialsEachApproxCorrect += eachApproxCorrect #list numRespsWanted long
            if exportImages: #catches one frame of response
                myWin.getMovieFrame() #I cant explain why another getMovieFrame, and core.wait is needed
                framesSaved +=1; core.wait(.1)
                myWin.saveMovieFrames('images_sounds_movies/frames.png') #mov not currently supported
                expStop=True
            core.wait(.1)
            if feedback: play_high_tone_correct_low_incorrect(correct, passThisTrial=False)
            nDoneMain+=1
            dataFile.flush(); logging.flush()
            print('nDoneMain=', nDoneMain,' trials.nTotal=',trials.nTotal) #' trials.thisN=',trials.thisN
            if (trials.nTotal > 6 and nDoneMain > 2 and nDoneMain %
                ( trials.nTotal*pctCompletedBreak/100. ) ==1): #dont modulus 0 because then will do it for last trial
                nextText.setText('Press "SPACE" to continue!')
                nextText.draw()
                progressMsg = 'Completed ' + str(nDoneMain) + ' of ' + str(trials.nTotal) + ' trials'
                NextRemindCountText.setText(progressMsg)
                NextRemindCountText.draw()
                myWin.flip() # myWin.flip(clearBuffer=True)
                waiting=True
                while waiting:
                    if autopilot: break
                    elif expStop == True:break
                    for key in event.getKeys(): #check if pressed abort-type key
                        if key in ['space','ESCAPE']:
                            waiting=False
                        if key in ['ESCAPE']:
                            expStop = True
                myWin.clearBuffer()
            core.wait(.2); time.sleep(.2)
    #end main trials loop
timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
msg = 'Finishing at '+timeAndDateStr
print(msg); logging.info(msg)
if expStop:
    msg = 'user aborted experiment on keypress with trials done=' + str(nDoneMain) + ' of ' + str(trials.nTotal+1)
    print(msg); logging.error(msg)
if not doStaircase and (nDoneMain >0):
    msg = 'Of ' + str(nDoneMain)+' trials, on '+str(numTrialsCorrect*1.0/nDoneMain*100.)+'% of all trials all targets reported exactly correct'
    print(msg); logging.info(msg)
    msg= 'All targets approximately correct in '+ str( round(numTrialsApproxCorrect*1.0/nDoneMain*100,1)) + '% of trials'
    print(msg); logging.info(msg)
    for i in range(numRespsWanted):
        msg = 'stream'+str(i)+': '+str( round(numTrialsEachCorrect[i]*1.0/nDoneMain*100.,2) ) + '% correct'
        print(msg); logging.info(msg)
        msg = 'stream' + str(i) + ': '+ str( round(numTrialsEachApproxCorrect[i]*1.0/nDoneMain*100,2) ) +'% approximately correct'
        print(msg); logging.info(msg)
logging.flush(); dataFile.close()
myWin.close() #have to close window if want to show a plot
|
mit
|
cyberden/CouchPotatoServer
|
couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/restudy.py
|
146
|
1155
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class RestudyIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?restudy\.dk/video/play/id/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'https://www.restudy.dk/video/play/id/1637',
        'info_dict': {
            'id': '1637',
            'ext': 'flv',
            'title': 'Leiden-frosteffekt',
            'description': 'Denne video er et eksperiment med flydende kvælstof.',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        """Extract metadata and SMIL-described formats for a Restudy video."""
        video_id = self._match_id(url)
        page = self._download_webpage(url, video_id)

        # title/description come from OpenGraph tags on the watch page
        title = self._og_search_title(page).strip()
        description = self._og_search_description(page).strip()

        smil_url = 'https://www.restudy.dk/awsmedia/SmilDirectory/video_%s.xml' % video_id
        formats = self._extract_smil_formats(smil_url, video_id)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'formats': formats,
        }
|
gpl-3.0
|
frankiecjunle/yunblog
|
venv/lib/python2.7/site-packages/werkzeug/exceptions.py
|
176
|
18733
|
# -*- coding: utf-8 -*-
"""
werkzeug.exceptions
~~~~~~~~~~~~~~~~~~~
This module implements a number of Python exceptions you can raise from
within your views to trigger a standard non-200 response.
Usage Example
-------------
::
from werkzeug.wrappers import BaseRequest
from werkzeug.wsgi import responder
from werkzeug.exceptions import HTTPException, NotFound
def view(request):
raise NotFound()
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except HTTPException as e:
return e
As you can see from this example those exceptions are callable WSGI
applications. Because of Python 2.4 compatibility those do not extend
from the response objects but only from the python exception class.
As a matter of fact they are not Werkzeug response objects. However you
can get a response object by calling ``get_response()`` on a HTTP
exception.
Keep in mind that you have to pass an environment to ``get_response()``
because some errors fetch additional information from the WSGI
environment.
If you want to hook in a different exception page to say, a 404 status
code, you can add a second except for a specific subclass of an error::
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except NotFound, e:
return not_found(request)
except HTTPException, e:
return e
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
# Because of bootstrapping reasons we need to manually patch ourselves
# onto our parent module.
import werkzeug
werkzeug.exceptions = sys.modules[__name__]
from werkzeug._internal import _get_environ
from werkzeug._compat import iteritems, integer_types, text_type, \
implements_to_string
from werkzeug.wrappers import Response
@implements_to_string
class HTTPException(Exception):
    """
    Baseclass for all HTTP exceptions. This exception can be called as WSGI
    application to render a default error page or you can catch the subclasses
    of it independently and render nicer error messages.
    """
    #: HTTP status code; ``None`` on the base class, set by each subclass.
    code = None
    #: default human-readable description; may be overridden per instance
    #: via the ``description`` constructor argument.
    description = None

    def __init__(self, description=None, response=None):
        Exception.__init__(self)
        if description is not None:
            self.description = description
        self.response = response

    @classmethod
    def wrap(cls, exception, name=None):
        """This method returns a new subclass of the exception provided that
        also is a subclass of `BadRequest`.
        """
        class newcls(cls, exception):
            def __init__(self, arg=None, *args, **kwargs):
                cls.__init__(self, *args, **kwargs)
                exception.__init__(self, arg)
        # make the generated class appear to belong to the caller's module
        newcls.__module__ = sys._getframe(1).f_globals.get('__name__')
        newcls.__name__ = name or cls.__name__ + exception.__name__
        return newcls

    @property
    def name(self):
        """The status name."""
        return HTTP_STATUS_CODES.get(self.code, 'Unknown Error')

    def get_description(self, environ=None):
        """Get the description."""
        return u'<p>%s</p>' % escape(self.description)

    def get_body(self, environ=None):
        """Get the HTML body."""
        return text_type((
            u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
            u'<title>%(code)s %(name)s</title>\n'
            u'<h1>%(name)s</h1>\n'
            u'%(description)s\n'
        ) % {
            'code': self.code,
            'name': escape(self.name),
            'description': self.get_description(environ)
        })

    def get_headers(self, environ=None):
        """Get a list of headers."""
        return [('Content-Type', 'text/html')]

    def get_response(self, environ=None):
        """Get a response object. If one was passed to the exception
        it's returned directly.
        :param environ: the optional environ for the request. This
                        can be used to modify the response depending
                        on how the request looked like.
        :return: a :class:`Response` object or a subclass thereof.
        """
        if self.response is not None:
            return self.response
        if environ is not None:
            environ = _get_environ(environ)
        headers = self.get_headers(environ)
        return Response(self.get_body(environ), self.code, headers)

    def __call__(self, environ, start_response):
        """Call the exception as WSGI application.
        :param environ: the WSGI environment.
        :param start_response: the response callable provided by the WSGI
                               server.
        """
        response = self.get_response(environ)
        return response(environ, start_response)

    def __str__(self):
        return '%d: %s' % (self.code, self.name)

    def __repr__(self):
        return '<%s \'%s\'>' % (self.__class__.__name__, self)
# 4xx client errors.  Each subclass only overrides ``code`` and
# ``description``; all rendering behavior lives on HTTPException.
class BadRequest(HTTPException):
    """*400* `Bad Request`
    Raise if the browser sends something to the application the application
    or server cannot handle.
    """
    code = 400
    description = (
        'The browser (or proxy) sent a request that this server could '
        'not understand.'
    )

# The next three subclass BadRequest (not HTTPException) so they share its
# 400 status code and are caught by handlers for BadRequest.
class ClientDisconnected(BadRequest):
    """Internal exception that is raised if Werkzeug detects a disconnected
    client. Since the client is already gone at that point attempting to
    send the error message to the client might not work and might ultimately
    result in another exception in the server. Mainly this is here so that
    it is silenced by default as far as Werkzeug is concerned.
    Since disconnections cannot be reliably detected and are unspecified
    by WSGI to a large extent this might or might not be raised if a client
    is gone.
    .. versionadded:: 0.8
    """

class SecurityError(BadRequest):
    """Raised if something triggers a security error. This is otherwise
    exactly like a bad request error.
    .. versionadded:: 0.9
    """

class BadHost(BadRequest):
    """Raised if the submitted host is badly formatted.
    .. versionadded:: 0.11.2
    """

class Unauthorized(HTTPException):
    """*401* `Unauthorized`
    Raise if the user is not authorized. Also used if you want to use HTTP
    basic auth.
    """
    code = 401
    description = (
        'The server could not verify that you are authorized to access '
        'the URL requested. You either supplied the wrong credentials (e.g. '
        'a bad password), or your browser doesn\'t understand how to supply '
        'the credentials required.'
    )

class Forbidden(HTTPException):
    """*403* `Forbidden`
    Raise if the user doesn't have the permission for the requested resource
    but was authenticated.
    """
    code = 403
    description = (
        'You don\'t have the permission to access the requested resource. '
        'It is either read-protected or not readable by the server.'
    )

class NotFound(HTTPException):
    """*404* `Not Found`
    Raise if a resource does not exist and never existed.
    """
    code = 404
    description = (
        'The requested URL was not found on the server. '
        'If you entered the URL manually please check your spelling and '
        'try again.'
    )
class MethodNotAllowed(HTTPException):
    """*405* `Method Not Allowed`
    Raise if the server used a method the resource does not handle. For
    example `POST` if the resource is view only. Especially useful for REST.
    The first argument for this exception should be a list of allowed methods.
    Strictly speaking the response would be invalid if you don't provide valid
    methods in the header which you can do with that list.
    """
    code = 405
    description = 'The method is not allowed for the requested URL.'

    def __init__(self, valid_methods=None, description=None):
        """Takes an optional list of valid http methods
        starting with werkzeug 0.3 the list will be mandatory."""
        HTTPException.__init__(self, description)
        self.valid_methods = valid_methods

    def get_headers(self, environ=None):
        """Return the base headers plus an ``Allow`` header listing the
        valid methods (when any were supplied).

        ``environ`` now defaults to ``None``, matching the optional
        parameter on ``HTTPException.get_headers`` so the override is
        signature-compatible with the base class and with callers that
        omit the argument.
        """
        headers = HTTPException.get_headers(self, environ)
        if self.valid_methods:
            headers.append(('Allow', ', '.join(self.valid_methods)))
        return headers
# Remaining 4xx client errors: data-only subclasses overriding
# ``code``/``description``.
class NotAcceptable(HTTPException):
    """*406* `Not Acceptable`
    Raise if the server can't return any content conforming to the
    `Accept` headers of the client.
    """
    code = 406
    description = (
        'The resource identified by the request is only capable of '
        'generating response entities which have content characteristics '
        'not acceptable according to the accept headers sent in the '
        'request.'
    )

class RequestTimeout(HTTPException):
    """*408* `Request Timeout`
    Raise to signalize a timeout.
    """
    code = 408
    description = (
        'The server closed the network connection because the browser '
        'didn\'t finish the request within the specified time.'
    )

class Conflict(HTTPException):
    """*409* `Conflict`
    Raise to signal that a request cannot be completed because it conflicts
    with the current state on the server.
    .. versionadded:: 0.7
    """
    code = 409
    description = (
        'A conflict happened while processing the request. The resource '
        'might have been modified while the request was being processed.'
    )

class Gone(HTTPException):
    """*410* `Gone`
    Raise if a resource existed previously and went away without new location.
    """
    code = 410
    description = (
        'The requested URL is no longer available on this server and there '
        'is no forwarding address. If you followed a link from a foreign '
        'page, please contact the author of this page.'
    )

class LengthRequired(HTTPException):
    """*411* `Length Required`
    Raise if the browser submitted data but no ``Content-Length`` header which
    is required for the kind of processing the server does.
    """
    code = 411
    description = (
        'A request with this method requires a valid <code>Content-'
        'Length</code> header.'
    )

class PreconditionFailed(HTTPException):
    """*412* `Precondition Failed`
    Status code used in combination with ``If-Match``, ``If-None-Match``, or
    ``If-Unmodified-Since``.
    """
    code = 412
    description = (
        'The precondition on the request for the URL failed positive '
        'evaluation.'
    )

class RequestEntityTooLarge(HTTPException):
    """*413* `Request Entity Too Large`
    The status code one should return if the data submitted exceeded a given
    limit.
    """
    code = 413
    description = (
        'The data value transmitted exceeds the capacity limit.'
    )

class RequestURITooLarge(HTTPException):
    """*414* `Request URI Too Large`
    Like *413* but for too long URLs.
    """
    code = 414
    description = (
        'The length of the requested URL exceeds the capacity limit '
        'for this server. The request cannot be processed.'
    )

class UnsupportedMediaType(HTTPException):
    """*415* `Unsupported Media Type`
    The status code returned if the server is unable to handle the media type
    the client transmitted.
    """
    code = 415
    description = (
        'The server does not support the media type transmitted in '
        'the request.'
    )

class RequestedRangeNotSatisfiable(HTTPException):
    """*416* `Requested Range Not Satisfiable`
    The client asked for a part of the file that lies beyond the end
    of the file.
    .. versionadded:: 0.7
    """
    code = 416
    description = (
        'The server cannot provide the requested range.'
    )

class ExpectationFailed(HTTPException):
    """*417* `Expectation Failed`
    The server cannot meet the requirements of the Expect request-header.
    .. versionadded:: 0.7
    """
    code = 417
    description = (
        'The server could not meet the requirements of the Expect header'
    )

class ImATeapot(HTTPException):
    """*418* `I'm a teapot`
    The server should return this if it is a teapot and someone attempted
    to brew coffee with it.
    .. versionadded:: 0.7
    """
    code = 418
    description = (
        'This server is a teapot, not a coffee machine'
    )

class UnprocessableEntity(HTTPException):
    """*422* `Unprocessable Entity`
    Used if the request is well formed, but the instructions are otherwise
    incorrect.
    """
    code = 422
    description = (
        'The request was well-formed but was unable to be followed '
        'due to semantic errors.'
    )

class PreconditionRequired(HTTPException):
    """*428* `Precondition Required`
    The server requires this request to be conditional, typically to prevent
    the lost update problem, which is a race condition between two or more
    clients attempting to update a resource through PUT or DELETE. By requiring
    each client to include a conditional header ("If-Match" or "If-Unmodified-
    Since") with the proper value retained from a recent GET request, the
    server ensures that each client has at least seen the previous revision of
    the resource.
    """
    code = 428
    description = (
        'This request is required to be conditional; try using "If-Match" '
        'or "If-Unmodified-Since".'
    )

class TooManyRequests(HTTPException):
    """*429* `Too Many Requests`
    The server is limiting the rate at which this user receives responses, and
    this request exceeds that rate. (The server may use any convenient method
    to identify users and their request rates). The server may include a
    "Retry-After" header to indicate how long the user should wait before
    retrying.
    """
    code = 429
    description = (
        'This user has exceeded an allotted request count. Try again later.'
    )

class RequestHeaderFieldsTooLarge(HTTPException):
    """*431* `Request Header Fields Too Large`
    The server refuses to process the request because the header fields are too
    large. One or more individual fields may be too large, or the set of all
    headers is too large.
    """
    code = 431
    description = (
        'One or more header fields exceeds the maximum size.'
    )

# 5xx server errors.
class InternalServerError(HTTPException):
    """*500* `Internal Server Error`
    Raise if an internal server error occurred. This is a good fallback if an
    unknown error occurred in the dispatcher.
    """
    code = 500
    description = (
        'The server encountered an internal error and was unable to '
        'complete your request. Either the server is overloaded or there '
        'is an error in the application.'
    )

class NotImplemented(HTTPException):
    """*501* `Not Implemented`
    Raise if the application does not support the action requested by the
    browser.
    """
    # NOTE: intentionally shadows the builtin ``NotImplemented`` within this
    # module; importers should reference it as werkzeug.exceptions.NotImplemented.
    code = 501
    description = (
        'The server does not support the action requested by the '
        'browser.'
    )

class BadGateway(HTTPException):
    """*502* `Bad Gateway`
    If you do proxying in your application you should return this status code
    if you received an invalid response from the upstream server it accessed
    in attempting to fulfill the request.
    """
    code = 502
    description = (
        'The proxy server received an invalid response from an upstream '
        'server.'
    )

class ServiceUnavailable(HTTPException):
    """*503* `Service Unavailable`
    Status code you should return if a service is temporarily unavailable.
    """
    code = 503
    description = (
        'The server is temporarily unable to service your request due to '
        'maintenance downtime or capacity problems. Please try again '
        'later.'
    )

class GatewayTimeout(HTTPException):
    """*504* `Gateway Timeout`
    Status code you should return if a connection to an upstream server
    times out.
    """
    code = 504
    description = (
        'The connection to an upstream server timed out.'
    )

class HTTPVersionNotSupported(HTTPException):
    """*505* `HTTP Version Not Supported`
    The server does not support the HTTP protocol version used in the request.
    """
    code = 505
    description = (
        'The server does not support the HTTP protocol version used in the '
        'request.'
    )
default_exceptions = {}
__all__ = ['HTTPException']

def _find_exceptions():
    """Scan the module namespace and register every HTTPException subclass
    that carries a status code, keeping the most specific class per code."""
    for _name, candidate in iteritems(globals()):
        try:
            if not issubclass(candidate, HTTPException):
                continue
        except TypeError:
            # non-class entries in globals() make issubclass raise
            continue
        if candidate.code is None:
            continue
        __all__.append(candidate.__name__)
        registered = default_exceptions.get(candidate.code, None)
        if registered is not None and issubclass(candidate, registered):
            # an equally- or more-specific class already owns this code
            continue
        default_exceptions[candidate.code] = candidate
_find_exceptions()
del _find_exceptions
class Aborter(object):
    """Callable that raises HTTP exceptions by status code.

    Given a dict of ``code -> exception`` items it can be used as a
    callable that raises exceptions.  If the first argument to the
    callable is an integer it is looked up in the mapping; if it is a
    WSGI application it is raised wrapped in a proxy exception.

    The rest of the arguments are forwarded to the exception constructor.
    """

    def __init__(self, mapping=None, extra=None):
        # Fall back to the module-wide registry when no mapping is given;
        # always copy so later updates do not mutate the caller's dict.
        base = default_exceptions if mapping is None else mapping
        self.mapping = dict(base)
        if extra is not None:
            self.mapping.update(extra)

    def __call__(self, code, *args, **kwargs):
        # A non-integer "code" is treated as a response object to proxy.
        if not args and not kwargs and not isinstance(code, integer_types):
            raise HTTPException(response=code)
        if code not in self.mapping:
            raise LookupError('no exception for %r' % code)
        exc_class = self.mapping[code]
        raise exc_class(*args, **kwargs)
# Module-level default aborter used by ``werkzeug.exceptions.abort``.
abort = Aborter()

#: an exception that is used internally to signal both a key error and a
#: bad request. Used by a lot of the datastructures.
BadRequestKeyError = BadRequest.wrap(KeyError)

# imported here because of circular dependencies of werkzeug.utils
from werkzeug.utils import escape
from werkzeug.http import HTTP_STATUS_CODES
|
mit
|
wolfmanstout/dragonfly
|
dragonfly/test/test_engine_sapi5.py
|
2
|
6568
|
#
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
import unittest
from six import text_type, string_types
from dragonfly.engines import get_engine, EngineBase
from dragonfly.engines.base.dictation import DictationContainerBase
#---------------------------------------------------------------------------
class TestEngineSapi5(unittest.TestCase):
    """Tests for the SAPI 5 (Windows Speech Recognition) engine back-end.

    These tests drive the real engine and therefore require a Windows
    environment with SAPI 5 available.
    """

    def test_get_engine_sapi5_is_usable(self):
        """ Verify that the sapi5 engine is usable. """
        engine = get_engine()
        self.assertTrue(isinstance(engine, EngineBase))
        self.assertTrue(engine.name.startswith("sapi5"))

        engine.speak("testing WSR")
        from dragonfly import Literal, Sequence
        from dragonfly.test import ElementTester
        seq = Sequence([Literal("hello"), Literal("world")])
        tester = ElementTester(seq, engine=engine)
        results = tester.recognize("hello world")
        self.assertEqual([u"hello", u"world"], results)

    def test_dictation(self):
        """Verify dictation results for a literal + dictation sequence."""
        # Test dictation separately for SAPI5 because test_dictation.py
        # won't work with it.
        from dragonfly import Dictation, Literal, Sequence
        from dragonfly.test import ElementTester, RecognitionFailure
        seq = Sequence([Literal("hello"), Dictation("text")])
        tester = ElementTester(seq)

        # Test one word.
        results = tester.recognize("hello world")
        assert results[0] == "hello"

        # Verify recognition returned dictation result.
        dictation = results[1]
        if not isinstance(dictation, DictationContainerBase):
            # NOTE(review): repr(...).decode(...) only exists on Python 2
            # byte strings; under Python 3 this branch would raise
            # AttributeError -- confirm the supported Python version.
            message = (u"Expected recognition result to be a dictation"
                       u" container, but received %r"
                       % (repr(dictation).decode("windows-1252"),))
            self.fail(message.encode("windows-1252"))

        # Verifying dictation converts/encode successfully.
        self.assertEqual(str(dictation), "world")
        self.assertEqual(text_type(dictation), "world")
        self.assertTrue(isinstance(repr(dictation), string_types))

        # Test incomplete.
        results = tester.recognize("hello")
        assert results is RecognitionFailure

    def test_recognition_observers(self):
        """Verify observer callbacks and RecognitionHistory behaviour."""
        # RecognitionObservers are a bit quirky for the sapi5 engines,
        # so the tests for them are repeated here to handle that.
        from dragonfly import (Integer, Literal, RecognitionHistory,
                               RecognitionObserver)
        from dragonfly.test import ElementTester, RecognitionFailure

        class RecognitionObserverTester(RecognitionObserver):
            """ RecognitionObserver class from the recobs doctests. """

            def __init__(self):
                RecognitionObserver.__init__(self)
                self.waiting = False
                self.words = None

            def on_begin(self):
                self.waiting = True

            def on_recognition(self, words):
                self.waiting = False
                self.words = words

            def on_failure(self):
                self.waiting = False
                self.words = False

        test_recobs = RecognitionObserverTester()
        test_recobs.register()
        results = test_recobs.waiting, test_recobs.words
        assert results == (False, None)

        # Test simple literal element recognitions.
        test_lit = ElementTester(Literal("hello world"))
        assert test_lit.recognize("hello world") == "hello world"
        results = test_recobs.waiting, test_recobs.words
        assert results == (False, (u'hello', u'world'))
        assert test_lit.recognize("hello universe") is RecognitionFailure
        results = test_recobs.waiting, test_recobs.words
        assert results == (False, False)

        # Test Integer element recognitions
        test_int = ElementTester(Integer(min=1, max=100))
        assert test_int.recognize("seven") == 7
        results = test_recobs.waiting, test_recobs.words
        assert results == (False, (u'seven',))
        assert test_int.recognize("forty seven") == 47
        results = test_recobs.waiting, test_recobs.words
        assert results == (False, (u'forty', u'seven'))
        assert test_int.recognize("one hundred") is RecognitionFailure
        results = test_recobs.waiting, test_recobs.words
        assert results == (False, False)
        assert test_lit.recognize("hello world") == u'hello world'

        # Now test RecognitionHistory.
        history = RecognitionHistory()
        assert test_lit.recognize("hello world") == u'hello world'
        # Not yet registered, so didn't receive previous recognition.
        assert history == []
        history.register()
        assert test_lit.recognize("hello world") == u'hello world'
        # Now registered, so should have received previous recognition.
        assert history == [(u'hello', u'world')]
        assert test_lit.recognize("hello universe") is RecognitionFailure
        # Failed recognitions are ignored, so history is unchanged.
        assert history == [(u'hello', u'world')]
        assert test_int.recognize("eighty six") == 86
        assert history == [(u'hello', u'world'), (u'eighty', u'six')]

        # The RecognitionHistory class allows its maximum length to be set.
        history = RecognitionHistory(3)
        history.register()
        assert history == []
        for i, word in enumerate(["one", "two", "three", "four", "five"]):
            assert test_int.recognize(word) == i + 1
        assert history == [(u'three',), (u'four',), (u'five',)]

        history = RecognitionHistory(1)
        history.register()
        assert history == []
        for i, word in enumerate(["one", "two", "three", "four", "five"]):
            assert test_int.recognize(word) == i + 1
        assert history == [(u'five',)]
|
lgpl-3.0
|
ench0/hlte-kernel
|
tools/perf/scripts/python/netdev-times.py
|
11271
|
15048
|
# Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Module state shared between the perf callbacks and the report printers.
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
              # which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
                 # and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
                  # skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
                       # tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed

# options (set in trace_begin from sys.argv)
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;

# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
    """Return the interval from *src* to *dst*, converted from ns to msec."""
    delta_ns = dst - src
    return delta_ns / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
    """Print one tx record: device, length, queue time and the
    queue->xmit and xmit->free latencies."""
    # Honour the "dev=" filter option when one was given.
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
         nsecs_secs(hunk['queue_t']),
         nsecs_nsecs(hunk['queue_t'])/1000,
         diff_msec(hunk['queue_t'], hunk['xmit_t']),
         diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing.  The PF_*JOINT strings draw
# the tree connectors of the rx timeline.
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
    """Print one rx hunk: its hard irqs, the NET_RX softirq entry and the
    per-packet events, as a timeline relative to the first irq entry."""
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    # All offsets below are relative to the first irq entry time.
    base_t = irq_list[0]['irq_ent_t']
    # check if this hunk should be showed
    if dev != 0:
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return

    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    # Hard-irq section of the timeline.
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
             irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                     irq_event['skbaddr'])
                print PF_JOINT
    # Softirq section of the timeline.
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            # Close the tree after the last event.
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                 event['len'])
            # 'comm' is set when the skb was copied to user space;
            # 'handle' when it was freed (kfree_skb/consume_skb).
            if 'comm' in event.keys():
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                     event['pid'], event['comm'])
            elif 'handle' in event.keys():
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t,
                                   event['comm_t']),
                         event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t,
                                  event['comm_t'])
            print PF_JOINT
def trace_begin():
    """Parse the script's command-line options into the module globals.

    Recognised options: "tx", "rx", "dev=<name>", "debug".  When neither
    tx nor rx is requested, both charts are shown.
    """
    global show_tx
    global show_rx
    global dev
    global debug
    for i in range(len(sys.argv)):
        if i == 0:
            continue
        arg = sys.argv[i]
        if arg == 'tx':
            show_tx = 1
        elif arg == 'rx':
            show_rx = 1
        elif arg.find('dev=', 0, 4) >= 0:
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    # Default: show both charts when none was selected explicitly.
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1
def trace_end():
    """Dispatch all recorded events in time order, then print the rx/tx
    reports (and buffer statistics in debug mode)."""
    # order all events in time (Python 2 cmp-style sort)
    all_event_list.sort(lambda a, b: cmp(a[EINFO_IDX_TIME],
                                         b[EINFO_IDX_TIME]))
    # process all events
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)
    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])
    # display transmit hunks
    if show_tx:
        print " dev len Qdisc " \
            " netdevice free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])
    if debug:
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event.
# Each callback records a (name, context, cpu, time_ns, pid, comm, ...)
# tuple in all_event_list; real processing happens later in trace_end().
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
    # Only NET_RX softirqs are of interest to this script.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)


def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
    # NOTE: the "irq__softirq_entry" symbol table is reused for all
    # softirq events (exit/raise share the same vec encoding).
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)


def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)


def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                           irq, irq_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  irq, irq_name)
    all_event_list.append(event_info)


def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
    all_event_list.append(event_info)


def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  napi, dev_name)
    all_event_list.append(event_info)


def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                           skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)


def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                  skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)


def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
                       skbaddr, skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)


def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
                      skbaddr, skblen, rc, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, rc, dev_name)
    all_event_list.append(event_info)


def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, protocol, location):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, protocol, location)
    all_event_list.append(event_info)


def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr)
    all_event_list.append(event_info)


def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
                                 skbaddr, skblen):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen)
    all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
    """Push a new irq record onto this cpu's irq stack."""
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    if cpu not in irq_dic.keys():
        irq_dic[cpu] = []
    irq_record = {'irq': irq, 'name': irq_name, 'cpu': cpu, 'irq_ent_t': time}
    irq_dic[cpu].append(irq_record)


def handle_irq_handler_exit(event_info):
    """Close the top irq record; keep it only if it raised NET_RX."""
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic.keys():
        return
    irq_record = irq_dic[cpu].pop()
    if irq != irq_record['irq']:
        return
    irq_record.update({'irq_ext_t': time})
    # if an irq doesn't include NET_RX softirq, drop.
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)


def handle_irq_softirq_raise(event_info):
    """Mark the current irq record as having raised the NET_RX softirq."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    if cpu not in irq_dic.keys() \
            or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time': time, 'event': 'sirq_raise'})
    irq_record.update({'event_list': irq_event_list})
    irq_dic[cpu].append(irq_record)


def handle_irq_softirq_entry(event_info):
    """Start collecting NET_RX softirq events for this cpu."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t': time, 'event_list': []}


def handle_irq_softirq_exit(event_info):
    """Finish a NET_RX softirq: merge irq + softirq data into one hunk."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    # Only keep hunks that have both an irq side and a softirq side.
    if irq_list == [] or event_list == 0:
        return
    rec_data = {'sirq_ent_t': sirq_ent_t, 'sirq_ext_t': time,
                'irq_list': irq_list, 'event_list': event_list}
    # merge information related to a NET_RX softirq
    receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
    """Record a napi poll event inside the current NET_RX softirq."""
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        event_list = net_rx_dic[cpu]['event_list']
        rec_data = {'event_name': 'napi_poll',
                    'dev': dev_name, 'event_t': time}
        event_list.append(rec_data)


def handle_netif_rx(event_info):
    """Attach a netif_rx event to the irq record currently on the stack."""
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic.keys() \
            or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time': time, 'event': 'netif_rx',
                           'skbaddr': skbaddr, 'skblen': skblen,
                           'dev_name': dev_name})
    irq_record.update({'event_list': irq_event_list})
    irq_dic[cpu].append(irq_record)


def handle_netif_receive_skb(event_info):
    """Record a received skb; also queue it for later matching against
    skb_copy_datagram_iovec / kfree_skb (bounded by buffer_budget)."""
    global of_count_rx_skb_list

    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        rec_data = {'event_name': 'netif_receive_skb',
                    'event_t': time, 'skbaddr': skbaddr, 'len': skblen}
        event_list = net_rx_dic[cpu]['event_list']
        event_list.append(rec_data)
        rx_skb_list.insert(0, rec_data)
        if len(rx_skb_list) > buffer_budget:
            rx_skb_list.pop()
            of_count_rx_skb_list += 1


def handle_net_dev_queue(event_info):
    """Record an skb entering the qdisc (bounded by buffer_budget)."""
    global of_count_tx_queue_list

    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    skb = {'dev': dev_name, 'skbaddr': skbaddr, 'len': skblen,
           'queue_t': time}
    tx_queue_list.insert(0, skb)
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1


def handle_net_dev_xmit(event_info):
    """On successful xmit, move the matching skb from the queue list to
    the xmit list (bounded by buffer_budget)."""
    global of_count_tx_xmit_list

    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, rc, dev_name) = event_info
    if rc == 0:  # NETDEV_TX_OK
        for i in range(len(tx_queue_list)):
            skb = tx_queue_list[i]
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = time
                tx_xmit_list.insert(0, skb)
                del tx_queue_list[i]
                if len(tx_xmit_list) > buffer_budget:
                    tx_xmit_list.pop()
                    of_count_tx_xmit_list += 1
                return
def handle_kfree_skb(event_info):
    """Match a freed skb against the tx-queue, tx-xmit and rx lists
    (in that order) and finalize whichever record matches first."""
    (name, context, cpu, time, pid, comm,
     skbaddr, protocol, location) = event_info
    # Dropped before xmit: forget the queued record.
    for i in range(len(tx_queue_list)):
        skb = tx_queue_list[i]
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    # Freed after xmit: tx lifecycle complete.
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    # Received skb freed without reaching user space.
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle': "kfree_skb",
                             'comm': comm, 'pid': pid, 'comm_t': time})
            del rx_skb_list[i]
            return


def handle_consume_skb(event_info):
    """consume_skb completes the tx lifecycle of a transmitted skb."""
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return


def handle_skb_copy_datagram_iovec(event_info):
    """A received skb was copied to user space; record who consumed it."""
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if skbaddr == rec_data['skbaddr']:
            rec_data.update({'handle': "skb_copy_datagram_iovec",
                             'comm': comm, 'pid': pid, 'comm_t': time})
            del rx_skb_list[i]
            return
|
gpl-2.0
|
agrista/odoo-saas
|
addons/crm_claim/report/__init__.py
|
446
|
1080
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_claim_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
Kilhog/odoo
|
addons/email_template/ir_actions.py
|
281
|
3520
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013 OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class actions_server(osv.Model):
    """ Add email option in server actions. """
    _name = 'ir.actions.server'
    _inherit = ['ir.actions.server']

    def _get_states(self, cr, uid, context=None):
        """Prepend the 'email' state so it appears first in the selection."""
        res = super(actions_server, self)._get_states(cr, uid, context=context)
        res.insert(0, ('email', 'Send Email'))
        return res

    _columns = {
        # Related (read-only) mirrors of the selected template's fields.
        'email_from': fields.related(
            'template_id', 'email_from', type='char',
            readonly=True, string='From'
        ),
        'email_to': fields.related(
            'template_id', 'email_to', type='char',
            readonly=True, string='To (Emails)'
        ),
        'partner_to': fields.related(
            'template_id', 'partner_to', type='char',
            readonly=True, string='To (Partners)'
        ),
        'subject': fields.related(
            'template_id', 'subject', type='char',
            readonly=True, string='Subject'
        ),
        'body_html': fields.related(
            'template_id', 'body_html', type='text',
            readonly=True, string='Body'
        ),
        'template_id': fields.many2one(
            'email.template', 'Email Template', ondelete='set null',
            domain="[('model_id', '=', model_id)]",
        ),
    }

    def on_change_template_id(self, cr, uid, ids, template_id, context=None):
        """ Render the raw template in the server action fields.

        Returns an onchange dict; includes a warning when the selected
        template has no email_from defined.
        """
        # Renamed from `fields` to avoid shadowing the imported
        # openerp.osv.fields module.
        template_fields = ['subject', 'body_html', 'email_from',
                           'email_to', 'partner_to']
        if template_id:
            template_values = self.pool.get('email.template').read(
                cr, uid, [template_id], template_fields, context)[0]
            values = dict((field, template_values[field])
                          for field in template_fields
                          if template_values.get(field))
            if not values.get('email_from'):
                return {'warning': {'title': 'Incomplete template', 'message': 'Your template should define email_from'}, 'value': values}
        else:
            values = dict.fromkeys(template_fields, False)
        return {'value': values}

    def run_action_email(self, cr, uid, action, eval_context=None, context=None):
        """Queue the action's email template for the active record.

        Always returns False (server actions do not chain a result here).
        """
        # Guard against a missing context (it defaults to None) before
        # dereferencing it with .get().
        if not action.template_id or not context or not context.get('active_id'):
            return False
        self.pool['email.template'].send_mail(
            cr, uid, action.template_id.id, context.get('active_id'),
            force_send=False, raise_exception=False, context=context)
        return False
|
agpl-3.0
|
h2oai/h2o-2
|
scripts/dontrun_r_examples.py
|
9
|
5399
|
#!/usr/bin/python
#
# This tool goes through every file in the 'man' directory and automatically makes the example \dontrun.
#
import sys
import os
import re
import shutil
# Parser states for walking a .Rd manual file.
STATE_NONE = 1              # outside any \examples block
STATE_IN_EXAMPLES = 2       # inside \examples{...}
STATE_IN_CRAN_EXAMPLES = 3  # between the "-- CRAN examples --" markers
STATE_IN_DONTRUN = 4        # inside an explicit \dontrun{...} block


class Example:
    """Rewrite a single Rd manual file so that bare example code gets
    wrapped in a \\dontrun{} block.

    Code between the CRAN example markers and code already inside an
    explicit \\dontrun{} block is copied through unchanged.
    """

    def __init__(self, dir_name, file_name, new_dir_name):
        self.dir_name = dir_name          # directory containing the input file
        self.file_name = file_name        # name used for both input and output
        self.new_dir_name = new_dir_name  # directory receiving the rewrite
        self.lineno = 0
        self.state = STATE_NONE
        self.of = None                    # output handle, opened in process()

    def parse_error(self, message):
        """Report a fatal parse problem with file/line context and exit."""
        print("ERROR " + message + " " + self.file_name + " line " + str(self.lineno))
        sys.exit(1)

    def set_state(self, new_state):
        self.state = new_state

    def emit_line(self, s):
        """Write *s* (already newline-terminated) to the output file."""
        self.of.write(s)

    def inject_line(self, s):
        """Write a synthesized line, adding the trailing newline."""
        self.emit_line(s + "\n")

    def process(self):
        """Stream the input file to the output directory, injecting
        \\dontrun{ ... } around example code that is not already guarded."""
        self.set_state(STATE_NONE)
        found_examples = False
        injected_dontrun = False           # True while inside a block we opened
        found_dontrun = False              # the file had its own \dontrun
        found_dontrun_closebrace = False   # that explicit block was closed
        f = open(os.path.join(self.dir_name, self.file_name), "r")
        self.of = open(os.path.join(self.new_dir_name, self.file_name), "w")
        s = f.readline()
        while s:
            self.lineno = self.lineno + 1

            # \examples{ opens the examples section.
            if re.search(r"^\\examples{", s) is not None:
                if self.state == STATE_IN_EXAMPLES:
                    self.parse_error("examples may not be in examples")
                self.set_state(STATE_IN_EXAMPLES)
                found_examples = True
                self.emit_line(s)
                s = f.readline()
                continue

            # Code between the CRAN markers is intentionally runnable.
            if re.search(r"-- CRAN examples begin --", s) is not None:
                if self.state != STATE_IN_EXAMPLES:
                    self.parse_error("CRAN examples must be in examples")
                self.set_state(STATE_IN_CRAN_EXAMPLES)
                self.emit_line(s)
                s = f.readline()
                continue

            if re.search(r"-- CRAN examples end --", s) is not None:
                if self.state != STATE_IN_CRAN_EXAMPLES:
                    self.parse_error("CRAN examples end must be in CRAN examples")
                self.set_state(STATE_IN_EXAMPLES)
                self.emit_line(s)
                s = f.readline()
                continue

            if self.state == STATE_IN_CRAN_EXAMPLES:
                self.emit_line(s)
                s = f.readline()
                continue

            # An explicit \dontrun block is passed through as-is.
            if re.search(r"^\\dontrun{", s) is not None:
                if self.state != STATE_IN_EXAMPLES:
                    self.parse_error("dontrun must be in examples")
                if found_dontrun:
                    self.parse_error("only one dontrun section is supported")
                if injected_dontrun:
                    # Close our injected block before the explicit one.
                    self.inject_line("}")
                    injected_dontrun = False
                self.set_state(STATE_IN_DONTRUN)
                found_dontrun = True
                self.emit_line(s)
                s = f.readline()
                continue

            # A close brace ends either the dontrun block or the examples
            # section itself.
            if found_examples and re.search(r"^}", s) is not None:
                if self.state == STATE_IN_EXAMPLES:
                    if injected_dontrun:
                        self.inject_line("}")
                        injected_dontrun = False
                    self.set_state(STATE_NONE)
                elif self.state == STATE_IN_DONTRUN:
                    self.set_state(STATE_IN_EXAMPLES)
                    found_dontrun_closebrace = True
                else:
                    # parse_error() exits; the redundant sys.exit(1) that
                    # followed it was dead code and has been removed.
                    self.parse_error("unaccounted for close brace")
                self.emit_line(s)
                s = f.readline()
                continue

            if found_dontrun_closebrace:
                self.parse_error("extra stuff after dontrun close brace")

            if self.state == STATE_IN_EXAMPLES and not injected_dontrun and not found_dontrun:
                # Skip blank lines, but insert a dontrun block once there
                # is real content.
                if re.match(r"^\s*$", s) is None:
                    # Fixed: "\d" is an invalid string escape; escape the
                    # backslash explicitly (same runtime value).
                    self.inject_line("\\dontrun{")
                    injected_dontrun = True

            self.emit_line(s)
            s = f.readline()

        f.close()
        self.of.close()
def main(argv):
    """Rewrite every file under man/ via Example, then swap the rewritten
    tree into place (man/ is replaced by newman/)."""
    # Must be run from the generated R package source dir (has DESCRIPTION).
    if (not os.path.exists("DESCRIPTION")):
        print("ERROR: You must run this script inside the generated R package source directory.")
        sys.exit(1)
    os.mkdir("newman")
    for root, dirs, files in os.walk("man"):
        for f in files:
            # NOTE(review): `root` is ignored, so files in subdirectories
            # of man/ would be opened from "man" directly -- confirm the
            # man/ tree is flat.
            ex = Example("man", f, "newman")
            ex.process()
    # os.rename("man", "oldman")
    shutil.rmtree("man")
    os.rename("newman", "man")


if __name__ == "__main__":
    main(sys.argv)
|
apache-2.0
|
francisco-dlp/hyperspy
|
hyperspy/samfire_utils/segmenters/histogram.py
|
6
|
3203
|
# -*- coding: utf-8 -*-
# Copyright 2007-2011 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from scipy.signal import argrelextrema
from hyperspy.external.astroML.histtools import histogram
class HistogramSegmenter(object):
    """Histogram segmenter strategy of the SAMFire. Uses histograms to
    estimate parameter distributions, and then passes the most frequent
    values as the starting parameter estimates.
    """

    def __init__(self, bins='freedman'):
        # {component_name: {parameter_name: (hist, bin_edges)}}, built by update()
        self.database = None
        # Bin-estimation rule forwarded to the astroML histogram helper.
        self.bins = bins
        # At or below this many samples, fall back to plain np.histogram.
        self._min_points = 4

    def most_frequent(self):
        """Calculates the most frequent values in the currently stored
        histograms of the database. Does so by looking for local maxima
        in the frequencies.
        """
        freq = {}
        for c_n, comp in self.database.items():
            comp_dict = {}
            for p_n, (hist, bin_edges) in comp.items():
                # calculate frequent values: local maxima of the counts.
                # A zero is prepended so a maximum in the first bin is
                # detectable; indices therefore point one past the bin.
                maxima_hist_ind = argrelextrema(
                    np.append(
                        0,
                        hist),
                    np.greater,
                    mode='wrap')
                # Midpoint of each maximal bin: average of its two edges.
                middles_of_maxima = 0.5 * \
                    (bin_edges[maxima_hist_ind] +
                     bin_edges[([i - 1 for i in maxima_hist_ind[0]],)])
                comp_dict[p_n] = middles_of_maxima.tolist()
            freq[c_n] = comp_dict
        return freq

    # MUCH LATER: return boundaries of the n-dimensional domains, projected to
    # the parameter axes, to be used as fitting boundaries.

    def update(self, value_dict):
        """Recalculates the database, given value dictionary (with all values!)

        Parameters
        ----------
        value_dict : dict
            dictionary of all already calculated values in the form of
            {component_name: {parameter_name: values, ...}, ...}
        """
        # recalculate with values. All values are passed, not just new
        self.database = {}
        for component_name, component in value_dict.items():
            comp_dict = {}
            for par_name, par in component.items():
                if par.size <= self._min_points:
                    # Too few samples for the adaptive binning rule.
                    comp_dict[par_name] = np.histogram(par,
                                                       max(10,
                                                           self._min_points))
                else:
                    comp_dict[par_name] = histogram(par, bins=self.bins)
            self.database[component_name] = comp_dict
|
gpl-3.0
|
t794104/ansible
|
lib/ansible/modules/network/f5/bigip_profile_udp.py
|
38
|
14130
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_profile_udp
short_description: Manage UDP profiles on a BIG-IP
description:
  - Manage UDP profiles on a BIG-IP. Many UDP profiles exist, each with its
    own adjustments to the standard C(udp) profile. Users of this module should be aware
that many of the adjustable knobs have no module default. Instead, the default is
assigned by the BIG-IP system itself which, in most cases, is acceptable.
version_added: 2.6
options:
name:
description:
- Specifies the name of the profile.
type: str
required: True
parent:
description:
- Specifies the profile from which this profile inherits settings.
- When creating a new profile, if this parameter is not specified, the default
is the system-supplied C(udp) profile.
type: str
idle_timeout:
description:
- Specifies the length of time that a connection is idle (has no traffic) before
the connection is eligible for deletion.
- When creating a new profile, if this parameter is not specified, the remote
device will choose a default value appropriate for the profile, based on its
C(parent) profile.
- When a number is specified, indicates the number of seconds that the UDP
connection can remain idle before the system deletes it.
- When C(0), or C(indefinite), specifies that UDP connections can remain idle
indefinitely.
- When C(immediate), specifies that you do not want the UDP connection to
remain idle, and that it is therefore immediately eligible for deletion.
type: str
datagram_load_balancing:
description:
- Specifies, when C(yes), that the system load balances UDP traffic
packet-by-packet.
type: bool
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(present), ensures that the profile exists.
- When C(absent), ensures the profile is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
# Example playbook usage. The original example was copy-pasted from the TCP
# profile module; it referenced ``bigip_profile_tcp`` and "a TCP profile",
# which is wrong for this (UDP) module and is corrected here.
EXAMPLES = r'''
- name: Create a UDP profile
  bigip_profile_udp:
    name: foo
    parent: udp
    idle_timeout: 300
    datagram_load_balancing: no
    state: present
    provider:
      user: admin
      password: secret
      server: lb.mydomain.com
  delegate_to: localhost
'''
RETURN = r'''
parent:
description: The new parent of the resource.
returned: changed
type: str
sample: udp
idle_timeout:
description: The new idle timeout of the resource.
returned: changed
type: int
sample: 100
datagram_load_balancing:
description: The new datagram load balancing setting of the resource.
returned: changed
type: bool
sample: True
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
    """Maps between Ansible-facing parameter names and BIG-IP API attributes."""

    api_map = {
        'datagramLoadBalancing': 'datagram_load_balancing',
        'idleTimeout': 'idle_timeout',
        'defaultsFrom': 'parent',
    }
    api_attributes = ['datagramLoadBalancing', 'idleTimeout', 'defaultsFrom']
    returnables = ['datagram_load_balancing', 'idle_timeout', 'parent']
    updatables = ['datagram_load_balancing', 'idle_timeout', 'parent']

    @property
    def idle_timeout(self):
        # 'indefinite'/'immediate' pass through verbatim; anything else is
        # treated as a number of seconds.
        raw = self._values['idle_timeout']
        if raw is None:
            return None
        if raw in ('indefinite', 'immediate'):
            return raw
        return int(raw)
class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IP REST API."""

    @property
    def datagram_load_balancing(self):
        # The API reports 'enabled'/'disabled'; surface a boolean instead.
        raw = self._values['datagram_load_balancing']
        if raw is None:
            return None
        return raw == 'enabled'
class ModuleParameters(Parameters):
    """Parameters as supplied by the user to the Ansible module."""

    @property
    def parent(self):
        # Qualify the parent profile name with the partition, if given.
        parent = self._values['parent']
        if parent is None:
            return None
        return fq_name(self.partition, parent)
class Changes(Parameters):
    """Base class for the set of values that changed during a module run."""

    def to_return(self):
        """Collect returnable values, tolerating any lookup failure.

        Mirrors the other F5 modules: on error, whatever was gathered so
        far is returned as-is.
        """
        collected = {}
        try:
            for name in self.returnables:
                collected[name] = getattr(self, name)
            collected = self._filter_params(collected)
        except Exception:
            pass
        return collected
class UsableChanges(Changes):
    """Changes in the representation the BIG-IP API expects."""

    @property
    def datagram_load_balancing(self):
        # The API wants 'enabled'/'disabled' rather than a boolean.
        flag = self._values['datagram_load_balancing']
        if flag is None:
            return None
        return 'enabled' if flag else 'disabled'
class ReportableChanges(Changes):
    """Changes as reported back to the user (API strings mapped to bool)."""

    @property
    def datagram_load_balancing(self):
        raw = self._values['datagram_load_balancing']
        if raw is None:
            return None
        return raw == 'enabled'
class Difference(object):
    """Computes per-parameter differences between desired and current state."""

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value of *param*, or None if unchanged.

        A property defined on this class takes precedence over the
        generic attribute comparison.
        """
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        # Generic comparison: report the wanted value when it differs from
        # (or is missing on) the current state; implicitly None otherwise.
        wanted = getattr(self.want, param)
        try:
            if wanted != getattr(self.have, param):
                return wanted
        except AttributeError:
            return wanted
class ModuleManager(object):
    """Orchestrates the module run.

    Diffs the desired state (``want``) against the device state (``have``)
    and issues the corresponding iControl REST calls against the BIG-IP
    ``/mgmt/tm/ltm/profile/udp`` endpoint.
    """
    def __init__(self, *args, **kwargs):
        # 'module' is the AnsibleModule driving this run.
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        # want: user-supplied parameters; have: state read from the device.
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()
    def _set_changed_options(self):
        # On create: every explicitly supplied returnable becomes a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)
    def _update_changed_options(self):
        # On update: keep only parameters that differ from the device state.
        # Returns True when there is something to change.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                # A dict result carries one-or-more renamed keys; merge it.
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False
    def should_update(self):
        # Thin wrapper kept for symmetry with the other F5 modules.
        result = self._update_changed_options()
        if result:
            return True
        return False
    def exec_module(self):
        """Apply the requested state and return the Ansible result dict."""
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        # Map the applied (API-formatted) changes back to user-facing values.
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result
    def _announce_deprecations(self, result):
        # Surface any queued deprecation warnings through Ansible.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            # NOTE(review): other F5 modules call self.module.deprecate()
            # directly; confirm that self.client.module is always populated.
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )
    def present(self):
        # Create the profile, or update it when it already exists.
        if self.exists():
            return self.update()
        else:
            return self.create()
    def exists(self):
        """Return True when the profile already exists on the device."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/udp/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            # Non-JSON body: treat as "does not exist".
            return False
        # 'and' binds tighter than 'or', so this reads:
        # status==404 or (JSON payload reports code 404).
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True
    def update(self):
        # Read current state first so the diff sees real device values.
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True
    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        # Verify the delete actually took effect.
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True
    def create(self):
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True
    def create_on_device(self):
        """POST the new profile to the device."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/udp/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403, 404]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return response['selfLink']
    def update_on_device(self):
        """PATCH only the changed attributes onto the existing profile."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/udp/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 404]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def absent(self):
        if self.exists():
            return self.remove()
        return False
    def remove_from_device(self):
        """DELETE the profile from the device."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/udp/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)
    def read_current_from_device(self):
        """GET the profile's current attributes as ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/udp/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """Builds the module's argument spec, merged with the common F5 args."""

    def __init__(self):
        self.supports_check_mode = True
        own_args = dict(
            name=dict(required=True),
            parent=dict(),
            idle_timeout=dict(),
            datagram_load_balancing=dict(type='bool'),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        # Module-specific arguments take precedence over the shared F5 ones.
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(own_args)
def main():
    """Module entry point: build the AnsibleModule and run the manager."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
    )
    try:
        results = ModuleManager(module=module).exec_module()
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))
    else:
        module.exit_json(**results)
if __name__ == '__main__':
main()
|
gpl-3.0
|
glogiotatidis/bedrock
|
tests/redirects/base.py
|
9
|
6982
|
import re
from urlparse import urlparse, parse_qs
from braceexpand import braceexpand
import requests
def get_abs_url(url, base_url):
    """Return *url* anchored to *base_url* when it is site-relative.

    Accepts either a plain string or a compiled regex pattern object;
    absolute inputs are returned unchanged.
    """
    pattern = getattr(url, 'pattern', None)
    if pattern is not None:
        # url is a compiled regular expression pattern
        if pattern.startswith('/'):
            return re.compile(re.escape(base_url) + pattern)
    elif url.startswith('/'):
        # plain concatenation: urljoin messes with query strings too much
        return base_url + url
    return url
def url_test(url, location=None, status_code=requests.codes.moved_permanently,
             req_headers=None, req_kwargs=None, resp_headers=None, query=None,
             follow_redirects=False, final_status_code=requests.codes.ok):
    """
    Function for producing a config dict for the redirect test.
    You can use simple bash style brace expansion in the `url` and `location`
    values. If you need the `location` to change with the `url` changes you must
    use the same number of expansions or the `location` will be treated as non-expandable.
    If you use brace expansion this function will return a list of dicts instead of a dict.
    You must use the `flatten` function provided to prepare your test fixture if you do this.
    If you combine brace expansion with a compiled regular expression pattern you must
    escape any backslashes as this is the escape character for brace expansion.
    example:
        url_test('/about/drivers{/,.html}', 'https://wiki.mozilla.org/Firefox/Drivers'),
        url_test('/projects/index.{de,fr,hr,sq}.html', '/{de,fr,hr,sq}/firefox/products/'),
        url_test('/firefox/notes/', re.compile(r'\/firefox\/[\d\.]+\/releasenotes\/'),
        url_test('/firefox/android/{,beta/}notes/', re.compile(r'\\/firefox\\/android\\/[\\d\\.]+{,beta}\\/releasenotes\\/'
    :param url: The URL in question (absolute or relative).
    :param location: If a redirect, either the expected value or a compiled regular expression to match the "Location" header.
    :param status_code: Expected status code from the request.
    :param req_headers: Extra headers to send with the request.
    :param req_kwargs: Extra arguments to pass to requests.get()
    :param resp_headers: Dict of headers expected in the response.
    :param query: Dict of expected query params in `location` URL.
    :param follow_redirects: Boolean indicating whether redirects should be followed.
    :param final_status_code: Expected status code after following any redirects.
    :return: dict or list of dicts
    """
    test_data = {
        'url': url,
        'location': location,
        'status_code': status_code,
        'req_headers': req_headers,
        'req_kwargs': req_kwargs,
        'resp_headers': resp_headers,
        'query': query,
        'follow_redirects': follow_redirects,
        'final_status_code': final_status_code,
    }
    # No brace expansion in the URL: a single config dict is enough.
    expanded_urls = list(braceexpand(url))
    num_urls = len(expanded_urls)
    if num_urls == 1:
        return test_data
    try:
        # location is a compiled regular expression pattern; store its raw
        # pattern string so it can itself be brace-expanded below.
        location_pattern = location.pattern
        test_data['location'] = location_pattern
    except AttributeError:
        location_pattern = None
    new_urls = []
    if location:
        expanded_locations = list(braceexpand(test_data['location']))
        num_locations = len(expanded_locations)
    # Build one config dict per expanded URL. The expanded locations are
    # only paired up when their count matches the URL count; otherwise the
    # original location is kept for every URL. (num_locations is unbound
    # when location is falsy, but the 'and' below short-circuits first.)
    for i, url in enumerate(expanded_urls):
        data = test_data.copy()
        data['url'] = url
        if location and num_urls == num_locations:
            if location_pattern is not None:
                # recompile the pattern after expansion
                data['location'] = re.compile(expanded_locations[i])
            else:
                data['location'] = expanded_locations[i]
        new_urls.append(data)
    return new_urls
def assert_valid_url(url, location=None, status_code=requests.codes.moved_permanently,
                     req_headers=None, req_kwargs=None, resp_headers=None,
                     query=None, base_url=None, follow_redirects=False,
                     final_status_code=requests.codes.ok):
    """
    Define a test of a URL's response. Performs a live HTTP request.
    :param url: The URL in question (absolute or relative).
    :param location: If a redirect, either the expected value or a compiled regular expression to match the "Location" header.
    :param status_code: Expected status code from the request.
    :param req_headers: Extra headers to send with the request.
    :param req_kwargs: Extra arguments to pass to requests.get()
    :param resp_headers: Dict of headers expected in the response.
    :param base_url: Base URL for the site to test.
    :param query: Dict of expected query params in `location` URL.
    :param follow_redirects: Boolean indicating whether redirects should be followed.
    :param final_status_code: Expected status code after following any redirects.
    """
    kwargs = {'allow_redirects': follow_redirects}
    if req_headers:
        kwargs['headers'] = req_headers
    if req_kwargs:
        kwargs.update(req_kwargs)
    abs_url = get_abs_url(url, base_url)
    resp = requests.get(abs_url, **kwargs)
    # so that the value will appear in locals in test output
    resp_location = resp.headers.get('location')
    if follow_redirects:
        assert resp.status_code == final_status_code
    else:
        assert resp.status_code == status_code
    if location and not follow_redirects:
        if query:
            # all query values must be lists
            # NOTE: this mutates the caller's query dict in place.
            for k, v in query.items():
                if isinstance(v, basestring):
                    query[k] = [v]
            # parse the QS from resp location header and compare to query arg
            # since order doesn't matter.
            resp_parsed = urlparse(resp_location)
            assert query == parse_qs(resp_parsed.query)
            # strip off query for further comparison
            resp_location = resp_location.split('?')[0]
        abs_location = get_abs_url(location, base_url)
        try:
            # location is a compiled regular expression pattern
            assert abs_location.match(resp_location) is not None
        except AttributeError:
            assert abs_location == resp_location
    if resp_headers and not follow_redirects:
        for name, value in resp_headers.items():
            # Python 2 print statement (this module targets Python 2).
            print name, value
            assert name in resp.headers
            assert resp.headers[name].lower() == value.lower()
def flatten(urls_list):
    """Take a list of dicts which may itself contain some lists of dicts, and
    return a generator that will return just the dicts in sequence.

    Example:
    list(flatten([{'dude': 'jeff'}, [{'walter': 'walter'}, {'donny': 'dead'}]]))
    > [{'dude': 'jeff'}, {'walter': 'walter'}, {'donny': 'dead'}]
    """
    for entry in urls_list:
        if isinstance(entry, dict):
            yield entry
            continue
        # otherwise it is a nested list of dicts: yield each one in order
        for nested in entry:
            yield nested
|
mpl-2.0
|
feist/pcs
|
pcs/lib/cib/resource/primitive.py
|
1
|
8042
|
from lxml import etree
from pcs.common import report_codes
from pcs.lib import reports
from pcs.lib.cib.nvpair import (
append_new_instance_attributes,
append_new_meta_attributes,
get_value,
get_nvset_as_dict,
)
from pcs.lib.cib.resource.operations import(
prepare as prepare_operations,
create_operations,
)
from pcs.lib.cib.tools import does_id_exist, find_element_by_tag_and_id
from pcs.lib.errors import LibraryError
from pcs.lib.pacemaker.values import validate_id
TAG = "primitive"
def is_primitive(resource_el):
    """Return True when the CIB element *resource_el* is a <primitive/>."""
    return resource_el.tag == TAG
def find_primitives_by_agent(resources_section, resource_agent_obj):
    """
    Returns list of primitive resource elements which are using same resource
    agent as specified by resource_agent_obj.

    resources_section etree.Element -- element <resources/> from CIB
    resource_agent_obj pcs.lib.resource_agent.CrmAgent -- agent of which
        resources should be returned
    """
    predicates = [
        "@class='{0}'".format(resource_agent_obj.get_standard()),
        "@type='{0}'".format(resource_agent_obj.get_type()),
    ]
    provider = resource_agent_obj.get_provider()
    if provider:
        predicates.append("@provider='{0}'".format(provider))
    return resources_section.xpath(
        ".//primitive[{0}]".format(" and ".join(predicates))
    )
def create(
    report_processor, resources_section, id_provider, resource_id,
    resource_agent,
    raw_operation_list=None, meta_attributes=None, instance_attributes=None,
    allow_invalid_operation=False,
    allow_invalid_instance_attributes=False,
    use_default_operations=True,
    resource_type="resource"
):
    # pylint: disable=too-many-arguments
    """
    Prepare all parts of primitive resource and append it into cib.

    report_processor is a tool for warning/info/error reporting
    etree.Element resources_section is place where new element will be appended
    IdProvider id_provider -- elements' ids generator
    string resource_id is id of new resource
    lib.resource_agent.CrmAgent resource_agent
    list of dict raw_operation_list specifies operations of resource
    dict meta_attributes specifies meta attributes of resource
    dict instance_attributes specifies instance attributes of resource
    bool allow_invalid_operation is flag for skipping validation of operations
    bool allow_invalid_instance_attributes is flag for skipping validation of
        instance_attributes
    bool use_default_operations is flag for completion operations with default
        actions specified in resource agent
    string resource_type -- describes the resource for reports
    """
    if raw_operation_list is None:
        raw_operation_list = []
    if meta_attributes is None:
        meta_attributes = {}
    if instance_attributes is None:
        instance_attributes = {}
    # Fail before touching the CIB when the id is taken or malformed.
    if does_id_exist(resources_section, resource_id):
        raise LibraryError(reports.id_already_exists(resource_id))
    validate_id(resource_id, "{0} name".format(resource_type))
    # Merge user-specified operations with the agent's default actions;
    # raises (unless forced) when an operation is invalid.
    operation_list = prepare_operations(
        report_processor,
        raw_operation_list,
        resource_agent.get_cib_default_actions(
            necessary_only=not use_default_operations
        ),
        [operation["name"] for operation in resource_agent.get_actions()],
        allow_invalid=allow_invalid_operation,
    )
    # Validate instance attributes before mutating the resources section.
    report_processor.process_list(
        validate_resource_instance_attributes_create(
            resource_agent,
            instance_attributes,
            resources_section,
            force=allow_invalid_instance_attributes,
        )
    )
    return append_new(
        resources_section,
        id_provider,
        resource_id,
        resource_agent.get_standard(),
        resource_agent.get_provider(),
        resource_agent.get_type(),
        instance_attributes=instance_attributes,
        meta_attributes=meta_attributes,
        operation_list=operation_list
    )
def append_new(
    resources_section, id_provider, resource_id, standard, provider, agent_type,
    instance_attributes=None,
    meta_attributes=None,
    operation_list=None
):
    # pylint:disable=too-many-arguments
    """
    Append a new primitive element to the resources_section.

    etree.Element resources_section is place where new element will be appended
    IdProvider id_provider -- elements' ids generator
    string resource_id is id of new resource
    string standard is a standard of resource agent (e.g. ocf)
    string agent_type is a type of resource agent (e.g. IPaddr2)
    string provider is a provider of resource agent (e.g. heartbeat)
    dict instance_attributes will be nvpairs inside instance_attributes element
    dict meta_attributes will be nvpairs inside meta_attributes element
    list operation_list contains dicts representing operations
        (e.g. [{"name": "monitor"}, {"name": "start"}])
    """
    element_attrs = {
        "id": resource_id,
        "class": standard,
        "type": agent_type,
    }
    # The provider attribute is only meaningful for some standards (e.g. ocf).
    if provider:
        element_attrs["provider"] = provider
    primitive_el = etree.SubElement(resources_section, TAG, element_attrs)
    if instance_attributes:
        append_new_instance_attributes(
            primitive_el, instance_attributes, id_provider
        )
    if meta_attributes:
        append_new_meta_attributes(primitive_el, meta_attributes, id_provider)
    create_operations(primitive_el, id_provider, operation_list or [])
    return primitive_el
def validate_unique_instance_attributes(
    resource_agent, instance_attributes, resources_section,
    resource_id=None, force=False
):
    """Report resources of the same agent reusing a 'unique' attribute value.

    Attributes an agent marks as unique must not repeat across resources
    using that agent; resource_id (if given) is excluded from the check.
    """
    report_list = []
    report_creator = reports.get_problem_creator(
        report_codes.FORCE_OPTIONS, force
    )
    unique_attr_names = [
        param["name"]
        for param in resource_agent.get_parameters()
        if param["unique"]
    ]
    same_agent_resources = find_primitives_by_agent(
        resources_section, resource_agent
    )
    for attr_name in unique_attr_names:
        if attr_name not in instance_attributes:
            continue
        wanted_value = instance_attributes[attr_name]
        conflicting_resources = set()
        for primitive in same_agent_resources:
            if primitive.get("id") == resource_id:
                continue
            existing_value = get_value(
                "instance_attributes", primitive, attr_name
            )
            if wanted_value == existing_value:
                conflicting_resources.add(primitive.get("id"))
        if conflicting_resources:
            report_list.append(
                report_creator(
                    reports.resource_instance_attr_value_not_unique,
                    attr_name,
                    wanted_value,
                    resource_agent.get_name(),
                    conflicting_resources,
                )
            )
    return report_list
def validate_resource_instance_attributes_create(
    resource_agent, instance_attributes, resources_section, force=False
):
    """Combine agent parameter validation with uniqueness validation for a
    resource being created."""
    agent_reports = resource_agent.validate_parameters_create(
        instance_attributes, force=force,
    )
    uniqueness_reports = validate_unique_instance_attributes(
        resource_agent, instance_attributes, resources_section, force=force
    )
    return agent_reports + uniqueness_reports
def validate_resource_instance_attributes_update(
    resource_agent, instance_attributes, resource_id, resources_section,
    force=False
):
    """Combine agent parameter validation with uniqueness validation for a
    resource being updated (the resource itself is excluded from the
    uniqueness check)."""
    current_attributes = get_nvset_as_dict(
        "instance_attributes",
        find_element_by_tag_and_id(
            "primitive", resources_section, resource_id
        )
    )
    agent_reports = resource_agent.validate_parameters_update(
        current_attributes,
        instance_attributes,
        force=force,
    )
    uniqueness_reports = validate_unique_instance_attributes(
        resource_agent, instance_attributes, resources_section,
        resource_id=resource_id, force=force,
    )
    return agent_reports + uniqueness_reports
|
gpl-2.0
|
cesargtz/YecoraOdoo
|
addons/l10n_pa/__openerp__.py
|
260
|
1737
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Panama Localization Chart Account",
"version": "1.0",
"description": """
Panamenian accounting chart and tax localization.
Plan contable panameño e impuestos de acuerdo a disposiciones vigentes
Con la Colaboración de
- AHMNET CORP http://www.ahmnet.com
""",
"author": "Cubic ERP",
"website": "http://cubicERP.com",
"category": "Localization/Account Charts",
"depends": [
"account_chart",
],
"data":[
"account_tax_code.xml",
"l10n_pa_chart.xml",
"account_tax.xml",
"l10n_pa_wizard.xml",
],
"demo_xml": [
],
"active": False,
"installable": True,
"certificate" : "",
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
shams169/pythonProject
|
ContactsDir/env/lib/python3.6/site-packages/pip/_vendor/requests/packages/chardet/gb2312prober.py
|
2994
|
1681
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import GB2312DistributionAnalysis
from .mbcssm import GB2312SMModel
class GB2312Prober(MultiByteCharSetProber):
    """Probes input for the GB2312 simplified-Chinese encoding.

    Combines the GB2312 coding state machine (byte-sequence validity) with a
    GB2312 character distribution analyser (frequency statistics); the
    scoring logic lives in the MultiByteCharSetProber base class.
    """
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(GB2312SMModel)
        self._mDistributionAnalyzer = GB2312DistributionAnalysis()
        self.reset()
    def get_charset_name(self):
        # Canonical charset name reported when this prober wins.
        return "GB2312"
|
mit
|
teije01/Doris-StaMPS-int
|
Sentinel/BperpDate.py
|
1
|
7306
|
#!/usr/bin/env python
import numpy as np
import sys
import numpy.polynomial.polynomial as poly
from scipy import interpolate
from scipy.optimize import fmin
# Raw command-line arguments: [script, filename, method, degree].
arg = sys.argv;
#print arg
def readorb(filename):
    """Read precise orbit state vectors from a Doris ``.res`` file.

    The file is scanned for the ``*_Start_precise_orbits:`` marker; the
    three header lines following it are skipped, and every subsequent
    whitespace-separated numeric row is collected until the closing
    asterisk line (or end of file) is reached.

    Parameters
    ----------
    filename : str
        Path to the Doris result file (e.g. 'master.res').

    Returns
    -------
    numpy.ndarray of shape (n, 4)
        One row per state vector: time and x, y, z coordinates.
    """
    rows = []
    with open(filename, "r") as orbit_file:
        mode = "search"
        skip = 0
        for raw_line in orbit_file:
            line = raw_line.rstrip('\n')
            if mode == "search":
                if line == '*_Start_precise_orbits:':
                    mode = "read"
                    skip = 3  # three header lines follow the marker
            elif mode == "read":
                if skip > 0:
                    skip = skip - 1
                elif line == '******************************************************************* ':
                    break  # end-of-table marker: stop reading
                else:
                    rows.append(line)
    orbits = np.zeros((0, 4))
    for line in rows:
        # np.fromstring(..., sep=' ') is deprecated for text parsing;
        # split-and-convert is the supported equivalent.
        values = np.array(line.split(), dtype=float)
        orbits = np.vstack((orbits, values[0:5]))
    return orbits
def orbit_int(orbits, method = 'poly', degree = 4):
t = orbits[:,0]
x = orbits[:,1]
y = orbits[:,2]
z = orbits[:,3]
if method == 'poly':
print("Using: poly -", degree)
x_coef = poly.polyfit(t, x, degree )
y_coef = poly.polyfit(t, y, degree )
z_coef = poly.polyfit(t, z, degree )
coef = np.vstack((x_coef, y_coef, z_coef))
return coef
elif method == 'spline':
print "Using: Spline -", degree
if (float(degree)/2.0) == int(degree/2.0):
print "It is not recommended to have an even degree for splines"
smoothing = 3.0
x_tck = interpolate.splrep(t, x, k=degree, s = smoothing)
y_tck = interpolate.splrep(t, y, k=degree, s = smoothing)
z_tck = interpolate.splrep(t, z, k=degree, s = smoothing)
coef = {'x': x_tck, 'y': y_tck, 'z': z_tck } #dictionary
return coef
else:
print "method unkown, change method"
def xyz_t(coef, t, method = 'poly'):
    """Evaluate the interpolated orbit position at time(s) *t*.

    Parameters
    ----------
    coef : (3, d) ndarray or dict
        Coefficients as produced by orbit_int() for the matching method.
    t : float or array-like
        Azimuth time(s) at which to evaluate.
    method : str
        'poly' or 'spline'; must match how *coef* was produced.

    Returns
    -------
    (3, m) ndarray of x, y, z positions (m = number of times).
    """
    if method == 'poly':
        x_fit = poly.polyval(t, coef[0,:])
        y_fit = poly.polyval(t, coef[1,:])
        z_fit = poly.polyval(t, coef[2,:])
    elif method == 'spline':
        x_fit = interpolate.splev(t, coef['x'])
        y_fit = interpolate.splev(t, coef['y'])
        z_fit = interpolate.splev(t, coef['z'])
    else:
        # Previously this printed a warning and then crashed with a
        # NameError on the undefined fits; fail fast instead.
        raise ValueError("unknown method '%s', use 'poly' or 'spline'" % method)
    fit = np.vstack((np.float64(x_fit), np.float64(y_fit), np.float64(z_fit)))
    return fit
def SR_dist(t, coef, xyz, method = 'poly'):
    """Distance between the interpolated orbit position at time(s) *t* and
    the single (1, 3) target point *xyz*.

    Used as the objective function for the fmin search in findtmin().
    """
    position = np.double(xyz_t(coef, t, method))
    target = np.double(xyz)
    # Broadcast the (3,) target against the (3, m) positions.
    delta = position - target[0][:, None]
    return np.sqrt(np.sum(delta ** 2, axis=0))
def findtmin(xyz, tstart, coef, method = 'poly'):
    """For each point in *xyz*, find the azimuth time of closest approach
    of the orbit described by *coef*, starting the search at *tstart*.

    Returns an (L, 1) array of times.
    """
    n_points = len(xyz)
    tmin = np.zeros((n_points, 1))
    for row in range(n_points):
        tmin[row] = fmin(
            SR_dist, tstart, args=(coef, xyz[row:row + 1, :], method),
            xtol=1e-10, ftol=1e-10, maxiter=250, maxfun=500, disp=0)
    return tmin
def Baseline(xyz_m, xyz_s):
    """Euclidean baseline |M - S| per orbit sample.

    Parameters
    ----------
    xyz_m, xyz_s : (L, 3) array-like
        Master and slave orbit positions.

    Returns
    -------
    (L, 1) ndarray of distances.
    """
    # Vectorized replacement of the original per-row Python loop;
    # identical values, O(L) NumPy work.
    diff = np.asarray(xyz_m, dtype=float) - np.asarray(xyz_s, dtype=float)
    return np.sqrt(np.sum(diff ** 2, axis=1)).reshape(-1, 1)
def ParBaseline(xyz, xyz_m, xyz_s, step = 100):
    """Parallel baseline: master-minus-slave range difference per point.

    Orbit positions are sampled once per *step* ground points (row
    floor(i / step) of xyz_m / xyz_s serves points i .. i+step-1).
    Returns an (L, 1) ndarray.
    """
    n_points = len(xyz)
    Bpar = np.zeros((n_points, 1))
    for row in range(n_points):
        orbit_row = int(np.floor(row / step))
        to_master = np.sqrt(np.sum((xyz_m[orbit_row] - xyz[row]) ** 2))
        to_slave = np.sqrt(np.sum((xyz_s[orbit_row] - xyz[row]) ** 2))
        Bpar[row, 0] = to_master - to_slave
    return Bpar
def PerBaseline(Bpar, B, sign, step = 100):
    """Perpendicular baseline from the total and parallel baselines.

    Bper = sign * sqrt(|B^2 - Bpar^2|); B is sampled once per *step*
    points, matching ParBaseline().  Returns an (L, 1) ndarray.
    """
    n_points = len(Bpar)
    Bper = np.zeros((n_points, 1))
    for row in range(n_points):
        orbit_row = int(np.floor(row / step))
        Bper[row, 0] = sign * np.sqrt(
            np.abs(B[orbit_row, 0] ** 2 - Bpar[row, 0] ** 2))
    return Bper
def detsign(xyz, xyz_m, xyz_s):
    """Determine the sign of the perpendicular baseline.

    Compares the angle (at the Earth's centre, seen from a representative
    ground point) subtended by the first master and slave orbit positions:
    returns +1 when the master angle is smaller, -1 otherwise.
    """
    point = xyz[50, :]
    master = xyz_m[0, :]
    slave = xyz_s[0, :]

    def angle_deg(sat):
        # Law of cosines on the triangle (origin, point, satellite).
        to_sat = sat - point
        a2 = np.sum(point ** 2)
        b2 = np.sum(to_sat ** 2)
        c2 = np.sum(sat ** 2)
        cos_gamma = (a2 + b2 - c2) / (2 * np.sqrt(a2) * np.sqrt(b2))
        return np.arccos(cos_gamma) * 180 / np.pi

    gamma_m = angle_deg(master)
    gamma_s = angle_deg(slave)
    if gamma_m < gamma_s:
        sign = 1
    elif gamma_m >= gamma_s:
        sign = -1
    else:
        # Only reachable when an angle is NaN.
        print("error")
    return sign
def Bhva(Bper, Bpar, theta):
    """Horizontal/vertical baseline decomposition and orientation angle.

    Rotates the (perpendicular, parallel) baseline components through the
    look angle theta (degrees) into horizontal and vertical components and
    derives the baseline orientation angle.

    Returns (Bh, Bv, alpha_deg), all 1-D arrays; alpha_deg is in degrees.
    """
    theta_rad = np.radians(theta)
    cos_t = np.cos(theta_rad)
    sin_t = np.sin(theta_rad)
    Bh = Bper[:, 0] * cos_t + Bpar[:, 0] * sin_t
    Bv = Bper[:, 0] * sin_t - Bpar[:, 0] * cos_t
    alpha_deg = np.degrees(np.arctan2(Bv, Bh))
    return Bh, Bv, alpha_deg
# --- Command-line handling -------------------------------------------------
# NOTE(review): `arg` is presumably sys.argv, bound earlier in the file —
# confirm.  Python 2 print statements throughout; kept as-is.
if len(arg) < 2:
    print "Needs at least one argument"
    print "Usage:"
    print "python BperpDate.py 'Filename' method degree"
    print "If method choose: 'poly' or 'spline' with an uneven degree for splines"
    sys.exit("Wrong number of input arguments")
FN = arg[1]  # output file for the per-point perpendicular baseline
# Optional arguments: interpolation method and polynomial/spline degree.
if len(arg) == 2:
    met = 'poly'
    deg = 4
elif len(arg) == 3:
    met = arg[2]
    deg = 4
elif len(arg) == 4:
    met = arg[2]
    deg = float(arg[3])
else:
    print "Not functional yet: more than 3 input arguments"
    sys.exit("Create new functionality")
print ""
print "-- Starting python script: BperpDate.py --"
print "Reading master and slave orbits"
morbits = readorb('master.res')
sorbits = readorb('slave.res' )
print "Importing master azimuth time and data xyz points"
theta = np.loadtxt('../look_angle.1.in')  # look angles (degrees)
taz_m = np.loadtxt('../tmin.txt')  # master azimuth times
xyz = np.loadtxt('../xyz.txt')  # data point coordinates
print "Interpollation of master and slave orbits"
# Fit both orbits, evaluate the master orbit at the master azimuth times.
mcoef = orbit_int(morbits, method = met, degree = deg)
scoef = orbit_int(sorbits, method = met, degree = deg)
xyz_m = xyz_t(mcoef, taz_m, method = met)
xyz_m = np.transpose(xyz_m)
print "Calculating slave azimuth time"
# Closest-approach times of the slave orbit to each master position.
taz_s = findtmin(xyz_m, np.mean(taz_m), scoef, method = met)
taz_s = np.transpose(taz_s)
xyz_s = xyz_t(scoef, taz_s, method = met)
xyz_s = np.transpose(xyz_s)
print "Calculating Baseline parameters:"
# Total, parallel, perpendicular, horizontal/vertical baselines and angle.
B = Baseline(xyz_m, xyz_s)
Bpar = ParBaseline(xyz, xyz_m, xyz_s)
sign = detsign(xyz, xyz_m, xyz_s)
Bper = PerBaseline(Bpar, B, sign)
Bh, Bv, a = Bhva(Bper, Bpar, theta)
# Scene-mean summary of all six baseline parameters.
Bpars = np.array([np.mean(B), np.mean(Bpar), np.mean(Bper),
                  np.mean(Bh), np.mean(Bv), np.mean(a)])
print "B : ",Bpars[0]
print "Bpar : ",Bpars[1]
print "Bper : ",Bpars[2]
print "Bh : ",Bpars[3]
print "Bv : ",Bpars[4]
print "alpha: ",Bpars[5]
print "Saving Bperp parameter file in", FN
np.savetxt( FN, Bper, fmt='%6.4f' )
np.savetxt( "Baseline.pars", Bpars, fmt='%6.6f')
print "-- Python script finished --"
print ""
|
mit
|
ARM-software/bob-build
|
config_system/tests/run_tests_formatter.py
|
1
|
2381
|
#!/usr/bin/env python
# Copyright 2019 Arm Limited.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import sys
import tempfile
# Get file directory path
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
CFG_DIR = os.path.dirname(TEST_DIR)
sys.path.append(CFG_DIR)
import mconfigfmt # nopep8: E402 module level import not at top of file
def run_test(name, expected_output):
    """Run the formatter on *name* and diff the result against *expected_output*.

    Formats the input file into a temporary file via
    mconfigfmt.perform_formatting(), then compares it line by line with
    the reference output file.

    Returns True when the formatted output matches exactly, False otherwise.
    """
    passed = True
    print("Running %s" % name)
    tmp_file = tempfile.NamedTemporaryFile(mode="w+", delete=False)
    try:
        mconfigfmt.perform_formatting(name, tmp_file.file)
        tmp_file.close()
        with open(tmp_file.name) as test_out, open(expected_output) as exp_out:
            out_lines, exp_lines = test_out.readlines(), exp_out.readlines()
        # Compare the common prefix, then flag any length mismatch.  The
        # original indexed out_lines[i] unguarded — it raised IndexError
        # when the formatter emitted fewer lines than expected, and it
        # silently ignored surplus output lines.
        for i, (out_line, exp_line) in enumerate(zip(out_lines, exp_lines)):
            if out_line != exp_line:
                print("Error: Line {} differs! Expected:".format(i + 1))
                print("  ", repr(exp_line))
                print("...but got:")
                print("  ", repr(out_line))
                passed = False
        if len(out_lines) != len(exp_lines):
            print("Error: Expected {} lines but got {}".format(
                len(exp_lines), len(out_lines)))
            passed = False
    finally:
        # Remove the temp file even if formatting raised (the original
        # leaked it on any exception).
        os.remove(tmp_file.name)
    return passed
def main():
    """Discover and run every formatter test, exiting non-zero on failure."""
    formatter_tests = os.path.join(TEST_DIR, "formatter")
    tests_passed = 0
    tests_failed = 0
    # Each "<name>.test" input is expected to have a sibling
    # "<name>.expected" file holding the formatter's reference output.
    for fname in os.listdir(formatter_tests):
        base, ext = os.path.splitext(fname)
        if ext == ".test":
            test = os.path.join(formatter_tests, base)
            passed = run_test(test + ".test", test + ".expected")
            if passed:
                tests_passed += 1
            else:
                tests_failed += 1
    print("")
    print("{} tests run, {} failed".format(tests_passed + tests_failed, tests_failed))
    if tests_failed > 0:
        # Non-zero exit status so CI marks the run as failed.
        sys.exit(1)
# Allow use both as a script and as an importable module.
if __name__ == '__main__':
    main()
|
apache-2.0
|
PinguinoIDE/pinguino-ide
|
pinguino/qtgui/ide/methods/timed_methods.py
|
1
|
10604
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import os
import codecs
from PySide2 import QtGui
from .core_threads import UpdateAutocompleter
from ..methods.decorators import Decorator
# from ..tools.code_navigator import self
from ..methods.dialogs import Dialogs
########################################################################
class TimedMethods(object):
    """Periodic background tasks for the Pinguino IDE.

    Every method is scheduled with ``@Decorator.timer(ms)`` and gated by
    further decorators (open files, active browser/tools tab, main-window
    focus) so work only happens while its result is visible.
    NOTE(review): the methods use attributes supplied elsewhere
    (``self.main``, ``self.get_functions`` …), so this class appears to be
    mixed into the IDE main-window class — confirm against its users.
    """
    #----------------------------------------------------------------------
    @Decorator.timer(1500)
    @Decorator.requiere_open_files()
    @Decorator.requiere_browser_tab("Functions")
    @Decorator.requiere_tools_tab("SourceBrowser")
    @Decorator.requiere_main_focus()
    def timer_update_functions(self):
        """Refresh the source-browser "Functions" table.

        Rebuilds the rows (file, return type, arguments) from the parsed
        function list and refreshes ``completer_funtions`` (name kept
        as-is, including the typo, since other code may reference it).
        """
        functions_parse = self.get_functions()
        index = 0
        self.main.tableWidget_functions.setRowCount(len(functions_parse))
        self.completer_funtions = []
        for funtion in functions_parse:
            item = QtGui.QTableWidgetItem()
            self.main.tableWidget_functions.setVerticalHeaderItem(index, item)
            item.setText(funtion["name"])
            self.completer_funtions.append(funtion["name"])
            self.main.tableWidget_functions.setItem(index, 0, QtGui.QTableWidgetItem())
            self.main.tableWidget_functions.setItem(index, 1, QtGui.QTableWidgetItem())
            self.main.tableWidget_functions.setItem(index, 2, QtGui.QTableWidgetItem())
            # Entries parsed from the current buffer carry no file name yet.
            if funtion["filename"]:
                self.main.tableWidget_functions.item(index, 0).setText(os.path.split(funtion["filename"])[1])
            else:
                self.main.tableWidget_functions.item(index, 0).setText(self.get_current_filename())
            self.main.tableWidget_functions.item(index, 1).setText(funtion["return"])
            self.main.tableWidget_functions.item(index, 2).setText(funtion["args"])
            # Stash the source location on the header item for navigation.
            setattr(item, "filename", funtion["filename"])
            setattr(item, "line", funtion["line"])
            index += 1
    #----------------------------------------------------------------------
    @Decorator.timer(1500)
    @Decorator.requiere_open_files()
    @Decorator.requiere_browser_tab("Directives")
    @Decorator.requiere_tools_tab("SourceBrowser")
    @Decorator.requiere_main_focus()
    def timer_update_directives(self):
        """Refresh the source-browser "Directives" table (file, type, value)."""
        directives_parse = self.get_directives()
        index = 0
        self.main.tableWidget_directives.setRowCount(len(directives_parse))
        self.completer_directives = []
        for directive in directives_parse:
            item = QtGui.QTableWidgetItem()
            self.main.tableWidget_directives.setVerticalHeaderItem(index, item)
            item.setText(directive["name"])
            self.completer_directives.append(directive["name"])
            self.main.tableWidget_directives.setItem(index, 0, QtGui.QTableWidgetItem())
            self.main.tableWidget_directives.setItem(index, 1, QtGui.QTableWidgetItem())
            self.main.tableWidget_directives.setItem(index, 2, QtGui.QTableWidgetItem())
            # Entries parsed from the current buffer carry no file name yet.
            if directive["filename"]:
                self.main.tableWidget_directives.item(index, 0).setText(os.path.split(directive["filename"])[1])
            else:
                self.main.tableWidget_directives.item(index, 0).setText(self.get_current_filename())
            self.main.tableWidget_directives.item(index, 1).setText(directive["type"])
            self.main.tableWidget_directives.item(index, 2).setText(directive["value"])
            # Stash the source location on the header item for navigation.
            setattr(item, "filename", directive["filename"])
            setattr(item, "line", directive["line"])
            index += 1
    #----------------------------------------------------------------------
    @Decorator.timer(1500)
    @Decorator.requiere_open_files()
    @Decorator.requiere_browser_tab("Variables")
    @Decorator.requiere_tools_tab("SourceBrowser")
    @Decorator.requiere_main_focus()
    def timer_update_variables(self):
        """Refresh the source-browser "Variables" table (file, type).

        Also rebuilds ``completer_variables`` as [name, type] pairs.
        """
        variables_parse = self.get_variables()
        index = 0
        self.main.tableWidget_variables.setRowCount(len(variables_parse))
        self.completer_variables = []
        for variable in variables_parse:
            item = QtGui.QTableWidgetItem()
            self.main.tableWidget_variables.setVerticalHeaderItem(index, item)
            item.setText(variable["name"])
            self.completer_variables.append([variable["name"], variable["type"]])
            self.main.tableWidget_variables.setItem(index, 0, QtGui.QTableWidgetItem())
            self.main.tableWidget_variables.setItem(index, 1, QtGui.QTableWidgetItem())
            # Entries parsed from the current buffer carry no file name yet.
            if variable["filename"]:
                self.main.tableWidget_variables.item(index, 0).setText(os.path.split(variable["filename"])[1])
            else:
                self.main.tableWidget_variables.item(index, 0).setText(self.get_current_filename())
            self.main.tableWidget_variables.item(index, 1).setText(variable["type"])
            # Stash the source location on the header item for navigation.
            setattr(item, "filename", variable["filename"])
            setattr(item, "line", variable["line"])
            index += 1
    #----------------------------------------------------------------------
    @Decorator.timer(3000)
    @Decorator.requiere_open_files()
    @Decorator.requiere_text_mode()
    @Decorator.requiere_main_focus()
    def timer_check_changes(self):
        """Detect external modifications of the file behind the current tab.

        Compares on-disk content with the ``editor.last_saved`` snapshot:
        read-only views and library sources are refreshed silently; for
        regular files the user chooses between keeping the editor content
        (save) or reloading from disk.
        """
        editor = self.get_current_editor()
        filename = getattr(editor, "path", None)
        if not filename:
            return
        if os.path.exists(filename):
            file_ = codecs.open(filename, "r", encoding="utf-8")
            content_file = "".join(file_.readlines())
            file_.close()
            exist = True
        else:
            # File vanished on disk; treat as empty so the comparisons
            # below report a difference.
            content_file = ""
            exist = False
        last_saved = getattr(editor, "last_saved")
        # if content_file != last_saved:
            # self.thread_variables()
        if self.get_current_filename().endswith("(r/o)"):
            # Read-only tab: silently follow the on-disk version.
            if content_file != last_saved:
                self.editor_reload_file()
                self.ide_save_file()
            return
        if self.is_library():
            name = self.get_project_name()
            if self.get_current_filename().startswith(name) and (self.get_current_filename().endswith(".c")\
                or self.get_current_filename().endswith(".h")\
                or self.get_current_filename().endswith(".pdl")\
                or self.get_current_filename().endswith(".pdl32")):
                # Library source files are also refreshed without asking.
                if content_file != last_saved:
                    self.editor_reload_file()
                    self.ide_save_file()
                return
        if content_file != last_saved:
            # Regular file: ask the user which version wins.
            reload_ = Dialogs.overwrite_file(self, filename, exist)
            if reload_:
                self.ide_save_file()
            else:
                self.editor_reload_file()
    #----------------------------------------------------------------------
    @Decorator.timer(3000)
    @Decorator.requiere_open_files()
    # @Decorator.requiere_text_mode()
    @Decorator.requiere_main_focus()
    def timer_backup_file(self):
        """Maintain a ``<name>~`` backup copy of the current file.

        While the editor holds unsaved changes the backup mirrors the
        editor content; once editor and disk agree, the backup is removed.
        NOTE(review): if is_graphical() returns None, ``content`` is never
        bound and the comparison below would raise — confirm whether that
        state can occur under these decorators.
        """
        editor = self.get_current_editor()
        # index = self.main.tabWidget_files.indexOf(editor)
        # filename_tab = self.main.tabWidget_files.tabText(index)
        filename = getattr(editor, "path", None)
        if not filename:
            return
        content_saved = getattr(editor, "last_saved", None)
        if self.is_graphical() is True:
            # Graphical (block) editor: serialise the diagram itself.
            content = self.PinguinoKIT.get_gpde()
            self.PinguinoKIT.save_raw_parser(content, editor.path+"~")
        elif self.is_graphical() is False:
            content = editor.text_edit.toPlainText()
        filename_backup = filename + "~"
        #if os.path.exists(filename) and filename_tab.endswith("*"):
        if os.path.exists(filename) and (content_saved != content):
            file_ = codecs.open(filename_backup, "w", encoding="utf-8")
            file_.write(content)
            file_.close()
        elif content_saved == content and os.path.exists(filename_backup):
            os.remove(filename_backup)
    #----------------------------------------------------------------------
    @Decorator.timer(1000)
    @Decorator.requiere_open_files()
    @Decorator.requiere_tools_tab("SourceBrowser")
    @Decorator.requiere_main_focus()
    def timer_update_assiatant(self):
        """Feed the word under the text cursor to the assistant panel.

        (Method name typo kept as-is; it may be referenced elsewhere.)
        """
        # Runs for both text and graphical modes (anything but None).
        if not self.is_graphical() is None:
            editor = self.get_current_editor()
            if not hasattr(editor, "text_edit"): return
            tc = editor.text_edit.textCursor()
            tc.movePosition(tc.EndOfWord, tc.MoveAnchor)
            editor.text_edit.smart_under_selection(tc)
            selected = tc.selectedText()
            self.update_assistant(selected)
    #----------------------------------------------------------------------
    @Decorator.timer(3000)
    @Decorator.requiere_open_files()
    @Decorator.requiere_text_mode()
    @Decorator.if_autocomplete_is_enable()
    @Decorator.requiere_main_focus()
    def timer_update_autocompleter(self):
        """Re-scan open files and refresh the autocompletion data.

        Lazily creates the background ``UpdateAutocompleter`` thread on
        first use, wires its signals into the editor's completer, then
        (re)starts it whenever it is idle.
        """
        # Only for plain text editors (is_graphical() must be False).
        if not self.is_graphical() is False: return
        editor = self.get_current_editor()
        if (not hasattr(self, "thread_autocompleter")) and hasattr(editor.text_edit, "completer"):
            self.thread_autocompleter = UpdateAutocompleter()
            self.thread_autocompleter.signal_set_variables.connect(self.set_variables)
            self.thread_autocompleter.signal_set_directives.connect(self.set_directives)
            self.thread_autocompleter.signal_set_functions.connect(self.set_functions)
            self.thread_autocompleter.signal_add_autocompleter.connect(editor.text_edit.completer.addTemporalItem)
            self.thread_autocompleter.signal_rm_autocompleter.connect(editor.text_edit.completer.removeTemporalItems)
        if not hasattr(self, "thread_autocompleter"):
            return
        if not self.thread_autocompleter.isRunning():
            self.thread_autocompleter.set_files(self.get_files_to_explore())
            self.thread_autocompleter.setTerminationEnabled(True)
            self.thread_autocompleter.start()
        ##else:
            ##logging.debug("Working...")
            ##self.thread_autocompleter.terminate()
            ##self.thread_autocompleter.set_files(self.get_files_to_explore())
            ##self.thread_autocompleter.setTerminationEnabled(True)
            ##self.thread_autocompleter.start()
|
gpl-2.0
|
adw0rd/lettuce
|
tests/integration/lib/Django-1.3/django/contrib/sessions/backends/base.py
|
245
|
10286
|
import base64
import os
import random
import sys
import time
from datetime import datetime, timedelta
try:
import cPickle as pickle
except ImportError:
import pickle
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.utils.hashcompat import md5_constructor
from django.utils.crypto import constant_time_compare, salted_hmac
# Use the system (hardware-based) random number generator if it exists.
# SystemRandom draws from the OS entropy source; the plain Mersenne-Twister
# randrange is only a fallback for platforms without it.
if hasattr(random, 'SystemRandom'):
    randrange = random.SystemRandom().randrange
else:
    randrange = random.randrange
# Exclusive upper bound for random session-key material (Python 2 long
# literal, hence the trailing L).
MAX_SESSION_KEY = 18446744073709551616L     # 2 << 63
class CreateError(Exception):
    """Consistent exception type raised by save(must_create=True) when the
    session key is already taken, so callers can catch it and retry with a
    fresh key (see the SessionBase.save() docstring).
    """
class SessionBase(object):
    """
    Base class for all Session classes.

    Provides a dict-like interface over a lazily loaded ``_session``
    cache, signed pickle (de)serialisation, expiry policy handling, and
    the ``accessed``/``modified`` flags the session middleware inspects.
    Storage-specific backends implement exists/create/save/delete/load.
    """
    TEST_COOKIE_NAME = 'testcookie'
    TEST_COOKIE_VALUE = 'worked'
    def __init__(self, session_key=None):
        # accessed/modified start False; the dict-mutating methods below
        # flip `modified` so the middleware knows to re-save the session.
        self._session_key = session_key
        self.accessed = False
        self.modified = False
    def __contains__(self, key):
        return key in self._session
    def __getitem__(self, key):
        return self._session[key]
    def __setitem__(self, key, value):
        self._session[key] = value
        self.modified = True
    def __delitem__(self, key):
        del self._session[key]
        self.modified = True
    def keys(self):
        return self._session.keys()
    def items(self):
        return self._session.items()
    def get(self, key, default=None):
        return self._session.get(key, default)
    def pop(self, key, *args):
        # Only mark modified when the key actually existed (or was already
        # modified); a pop with a default on a missing key changes nothing.
        self.modified = self.modified or key in self._session
        return self._session.pop(key, *args)
    def setdefault(self, key, value):
        """Return session[key], inserting *value* first if key is absent."""
        if key in self._session:
            return self._session[key]
        else:
            self.modified = True
            self._session[key] = value
            return value
    def set_test_cookie(self):
        self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE
    def test_cookie_worked(self):
        return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE
    def delete_test_cookie(self):
        del self[self.TEST_COOKIE_NAME]
    def _hash(self, value):
        # HMAC keyed with a salt that includes the concrete backend class
        # name, so hashes are not portable between backends.
        key_salt = "django.contrib.sessions" + self.__class__.__name__
        return salted_hmac(key_salt, value).hexdigest()
    def encode(self, session_dict):
        "Returns the given session dictionary pickled and encoded as a string."
        # Format: base64("<hexdigest>:<pickle>"); decode() verifies the digest.
        pickled = pickle.dumps(session_dict, pickle.HIGHEST_PROTOCOL)
        hash = self._hash(pickled)
        return base64.encodestring(hash + ":" + pickled)
    def decode(self, session_data):
        """Inverse of encode(): verify the HMAC, then unpickle the payload."""
        encoded_data = base64.decodestring(session_data)
        try:
            # could produce ValueError if there is no ':'
            hash, pickled = encoded_data.split(':', 1)
            expected_hash = self._hash(pickled)
            if not constant_time_compare(hash, expected_hash):
                raise SuspiciousOperation("Session data corrupted")
            else:
                return pickle.loads(pickled)
        except Exception:
            # ValueError, SuspiciousOperation, unpickling exceptions
            # Fall back to Django 1.2 method
            # PendingDeprecationWarning <- here to remind us to
            # remove this fallback in Django 1.5
            try:
                return self._decode_old(session_data)
            except Exception:
                # Unpickling can cause a variety of exceptions. If something happens,
                # just return an empty dictionary (an empty session).
                return {}
    def _decode_old(self, session_data):
        # Legacy (pre-1.3) layout: pickle followed by a 32-char md5 tamper
        # check keyed on SECRET_KEY.
        encoded_data = base64.decodestring(session_data)
        pickled, tamper_check = encoded_data[:-32], encoded_data[-32:]
        if not constant_time_compare(md5_constructor(pickled + settings.SECRET_KEY).hexdigest(),
                                     tamper_check):
            raise SuspiciousOperation("User tampered with session cookie.")
        return pickle.loads(pickled)
    def update(self, dict_):
        self._session.update(dict_)
        self.modified = True
    def has_key(self, key):
        # Python 2 dict protocol; kept for backwards compatibility.
        return self._session.has_key(key)
    def values(self):
        return self._session.values()
    def iterkeys(self):
        return self._session.iterkeys()
    def itervalues(self):
        return self._session.itervalues()
    def iteritems(self):
        return self._session.iteritems()
    def clear(self):
        # To avoid unnecessary persistent storage accesses, we set up the
        # internals directly (loading data wastes time, since we are going to
        # set it to an empty dict anyway).
        self._session_cache = {}
        self.accessed = True
        self.modified = True
    def _get_new_session_key(self):
        "Returns session key that isn't being used."
        # The random module is seeded when this Apache child is created.
        # Use settings.SECRET_KEY as added salt.
        try:
            pid = os.getpid()
        except AttributeError:
            # No getpid() in Jython, for example
            pid = 1
        # Loop until the md5 of (random, pid, time, SECRET_KEY) yields a key
        # the backend does not already know.
        while 1:
            session_key = md5_constructor("%s%s%s%s"
                    % (randrange(0, MAX_SESSION_KEY), pid, time.time(),
                      settings.SECRET_KEY)).hexdigest()
            if not self.exists(session_key):
                break
        return session_key
    def _get_session_key(self):
        # Lazily allocate a fresh unused key on first access.
        if self._session_key:
            return self._session_key
        else:
            self._session_key = self._get_new_session_key()
            return self._session_key
    def _set_session_key(self, session_key):
        self._session_key = session_key
    session_key = property(_get_session_key, _set_session_key)
    def _get_session(self, no_load=False):
        """
        Lazily loads session from storage (unless "no_load" is True, when only
        an empty dict is stored) and stores it in the current instance.
        """
        self.accessed = True
        try:
            return self._session_cache
        except AttributeError:
            if self._session_key is None or no_load:
                self._session_cache = {}
            else:
                self._session_cache = self.load()
        return self._session_cache
    _session = property(_get_session)
    def get_expiry_age(self):
        """Get the number of seconds until the session expires."""
        expiry = self.get('_session_expiry')
        if not expiry:   # Checks both None and 0 cases
            return settings.SESSION_COOKIE_AGE
        if not isinstance(expiry, datetime):
            # Stored as a plain number of seconds.
            return expiry
        delta = expiry - datetime.now()
        return delta.days * 86400 + delta.seconds
    def get_expiry_date(self):
        """Get session the expiry date (as a datetime object)."""
        expiry = self.get('_session_expiry')
        if isinstance(expiry, datetime):
            return expiry
        if not expiry:   # Checks both None and 0 cases
            expiry = settings.SESSION_COOKIE_AGE
        return datetime.now() + timedelta(seconds=expiry)
    def set_expiry(self, value):
        """
        Sets a custom expiration for the session. ``value`` can be an integer,
        a Python ``datetime`` or ``timedelta`` object or ``None``.
        If ``value`` is an integer, the session will expire after that many
        seconds of inactivity. If set to ``0`` then the session will expire on
        browser close.
        If ``value`` is a ``datetime`` or ``timedelta`` object, the session
        will expire at that specific future time.
        If ``value`` is ``None``, the session uses the global session expiry
        policy.
        """
        if value is None:
            # Remove any custom expiration for this session.
            try:
                del self['_session_expiry']
            except KeyError:
                pass
            return
        if isinstance(value, timedelta):
            value = datetime.now() + value
        self['_session_expiry'] = value
    def get_expire_at_browser_close(self):
        """
        Returns ``True`` if the session is set to expire when the browser
        closes, and ``False`` if there's an expiry date. Use
        ``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry
        date/age, if there is one.
        """
        if self.get('_session_expiry') is None:
            return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
        return self.get('_session_expiry') == 0
    def flush(self):
        """
        Removes the current session data from the database and regenerates the
        key.
        """
        self.clear()
        self.delete()
        self.create()
    def cycle_key(self):
        """
        Creates a new session key, whilst retaining the current session data.
        """
        # create() replaces the cache, so snapshot the data first and
        # restore it after; the old record is then deleted by its old key.
        data = self._session_cache
        key = self.session_key
        self.create()
        self._session_cache = data
        self.delete(key)
    # Methods that child classes must implement.
    def exists(self, session_key):
        """
        Returns True if the given session_key already exists.
        """
        raise NotImplementedError
    def create(self):
        """
        Creates a new session instance. Guaranteed to create a new object with
        a unique key and will have saved the result once (with empty data)
        before the method returns.
        """
        raise NotImplementedError
    def save(self, must_create=False):
        """
        Saves the session data. If 'must_create' is True, a new session object
        is created (otherwise a CreateError exception is raised). Otherwise,
        save() can update an existing object with the same key.
        """
        raise NotImplementedError
    def delete(self, session_key=None):
        """
        Deletes the session data under this key. If the key is None, the
        current session key value is used.
        """
        raise NotImplementedError
    def load(self):
        """
        Loads the session data and returns a dictionary.
        """
        raise NotImplementedError
|
gpl-3.0
|
hickey/amforth
|
core/devices/at90can64/device.py
|
5
|
9972
|
# Partname: AT90CAN64
# generated automatically, do not edit
MCUREGS = {
'PORTA': '&34',
'DDRA': '&33',
'PINA': '&32',
'PORTB': '&37',
'DDRB': '&36',
'PINB': '&35',
'PORTC': '&40',
'DDRC': '&39',
'PINC': '&38',
'PORTD': '&43',
'DDRD': '&42',
'PIND': '&41',
'PORTE': '&46',
'DDRE': '&45',
'PINE': '&44',
'PORTF': '&49',
'DDRF': '&48',
'PINF': '&47',
'OCDR': '&81',
'MCUCR': '&85',
'MCUCR_JTD': '$80',
'MCUSR': '&84',
'MCUSR_JTRF': '$10',
'SPCR': '&76',
'SPCR_SPIE': '$80',
'SPCR_SPE': '$40',
'SPCR_DORD': '$20',
'SPCR_MSTR': '$10',
'SPCR_CPOL': '$08',
'SPCR_CPHA': '$04',
'SPCR_SPR': '$03',
'SPSR': '&77',
'SPSR_SPIF': '$80',
'SPSR_WCOL': '$40',
'SPSR_SPI2X': '$01',
'SPDR': '&78',
'TWBR': '&184',
'TWCR': '&188',
'TWCR_TWINT': '$80',
'TWCR_TWEA': '$40',
'TWCR_TWSTA': '$20',
'TWCR_TWSTO': '$10',
'TWCR_TWWC': '$08',
'TWCR_TWEN': '$04',
'TWCR_TWIE': '$01',
'TWSR': '&185',
'TWSR_TWS': '$F8',
'TWSR_TWPS': '$03',
'TWDR': '&187',
'TWAR': '&186',
'TWAR_TWA': '$FE',
'TWAR_TWGCE': '$01',
'UDR0': '&198',
'UCSR0A': '&192',
'UCSR0A_RXC0': '$80',
'UCSR0A_TXC0': '$40',
'UCSR0A_UDRE0': '$20',
'UCSR0A_FE0': '$10',
'UCSR0A_DOR0': '$08',
'UCSR0A_UPE0': '$04',
'UCSR0A_U2X0': '$02',
'UCSR0A_MPCM0': '$01',
'UCSR0B': '&193',
'UCSR0B_RXCIE0': '$80',
'UCSR0B_TXCIE0': '$40',
'UCSR0B_UDRIE0': '$20',
'UCSR0B_RXEN0': '$10',
'UCSR0B_TXEN0': '$08',
'UCSR0B_UCSZ02': '$04',
'UCSR0B_RXB80': '$02',
'UCSR0B_TXB80': '$01',
'UCSR0C': '&194',
'UCSR0C_UMSEL0': '$40',
'UCSR0C_UPM0': '$30',
'UCSR0C_USBS0': '$08',
'UCSR0C_UCSZ0': '$06',
'UCSR0C_UCPOL0': '$01',
'UBRR0': '&196',
'UDR1': '&206',
'UCSR1A': '&200',
'UCSR1A_RXC1': '$80',
'UCSR1A_TXC1': '$40',
'UCSR1A_UDRE1': '$20',
'UCSR1A_FE1': '$10',
'UCSR1A_DOR1': '$08',
'UCSR1A_UPE1': '$04',
'UCSR1A_U2X1': '$02',
'UCSR1A_MPCM1': '$01',
'UCSR1B': '&201',
'UCSR1B_RXCIE1': '$80',
'UCSR1B_TXCIE1': '$40',
'UCSR1B_UDRIE1': '$20',
'UCSR1B_RXEN1': '$10',
'UCSR1B_TXEN1': '$08',
'UCSR1B_UCSZ12': '$04',
'UCSR1B_RXB81': '$02',
'UCSR1B_TXB81': '$01',
'UCSR1C': '&202',
'UCSR1C_UMSEL1': '$40',
'UCSR1C_UPM1': '$30',
'UCSR1C_USBS1': '$08',
'UCSR1C_UCSZ1': '$06',
'UCSR1C_UCPOL1': '$01',
'UBRR1': '&204',
'SREG': '&95',
'SREG_I': '$80',
'SREG_T': '$40',
'SREG_H': '$20',
'SREG_S': '$10',
'SREG_V': '$08',
'SREG_N': '$04',
'SREG_Z': '$02',
'SREG_C': '$01',
'SP': '&93',
'XMCRA': '&116',
'XMCRA_SRE': '$80',
'XMCRA_SRL': '$70',
'XMCRA_SRW1': '$0C',
'XMCRA_SRW0': '$03',
'XMCRB': '&117',
'XMCRB_XMBK': '$80',
'XMCRB_XMM': '$07',
'OSCCAL': '&102',
'CLKPR': '&97',
'CLKPR_CLKPCE': '$80',
'CLKPR_CLKPS': '$0F',
'SMCR': '&83',
'SMCR_SM': '$0E',
'SMCR_SE': '$01',
'RAMPZ': '&91',
'RAMPZ_RAMPZ0': '$01',
'GPIOR2': '&75',
'GPIOR2_GPIOR': '$FF',
'GPIOR1': '&74',
'GPIOR1_GPIOR': '$FF',
'GPIOR0': '&62',
'GPIOR0_GPIOR07': '$80',
'GPIOR0_GPIOR06': '$40',
'GPIOR0_GPIOR05': '$20',
'GPIOR0_GPIOR04': '$10',
'GPIOR0_GPIOR03': '$08',
'GPIOR0_GPIOR02': '$04',
'GPIOR0_GPIOR01': '$02',
'GPIOR0_GPIOR00': '$01',
'SPMCSR': '&87',
'SPMCSR_SPMIE': '$80',
'SPMCSR_RWWSB': '$40',
'SPMCSR_RWWSRE': '$10',
'SPMCSR_BLBSET': '$08',
'SPMCSR_PGWRT': '$04',
'SPMCSR_PGERS': '$02',
'SPMCSR_SPMEN': '$01',
'EICRA': '&105',
'EICRA_ISC3': '$C0',
'EICRA_ISC2': '$30',
'EICRA_ISC1': '$0C',
'EICRA_ISC0': '$03',
'EICRB': '&106',
'EICRB_ISC7': '$C0',
'EICRB_ISC6': '$30',
'EICRB_ISC5': '$0C',
'EICRB_ISC4': '$03',
'EIMSK': '&61',
'EIMSK_INT': '$FF',
'EIFR': '&60',
'EIFR_INTF': '$FF',
'EEAR': '&65',
'EEDR': '&64',
'EECR': '&63',
'EECR_EERIE': '$08',
'EECR_EEMWE': '$04',
'EECR_EEWE': '$02',
'EECR_EERE': '$01',
'PORTG': '&52',
'DDRG': '&51',
'PING': '&50',
'TCCR0A': '&68',
'TCCR0A_FOC0A': '$80',
'TCCR0A_WGM00': '$40',
'TCCR0A_COM0A': '$30',
'TCCR0A_WGM01': '$08',
'TCCR0A_CS0': '$07',
'TCNT0': '&70',
'OCR0A': '&71',
'TIMSK0': '&110',
'TIMSK0_OCIE0A': '$02',
'TIMSK0_TOIE0': '$01',
'TIFR0': '&53',
'TIFR0_OCF0A': '$02',
'TIFR0_TOV0': '$01',
'GTCCR': '&67',
'GTCCR_TSM': '$80',
'GTCCR_PSR310': '$01',
'TCCR1A': '&128',
'TCCR1A_COM1A': '$C0',
'TCCR1A_COM1B': '$30',
'TCCR1A_COM1C': '$0C',
'TCCR1A_WGM1': '$03',
'TCCR1B': '&129',
'TCCR1B_ICNC1': '$80',
'TCCR1B_ICES1': '$40',
'TCCR1B_WGM1': '$18',
'TCCR1B_CS1': '$07',
'TCCR1C': '&130',
'TCCR1C_FOC1A': '$80',
'TCCR1C_FOC1B': '$40',
'TCCR1C_FOC1C': '$20',
'TCNT1': '&132',
'OCR1A': '&136',
'OCR1B': '&138',
'OCR1C': '&140',
'ICR1': '&134',
'TIMSK1': '&111',
'TIMSK1_ICIE1': '$20',
'TIMSK1_OCIE1C': '$08',
'TIMSK1_OCIE1B': '$04',
'TIMSK1_OCIE1A': '$02',
'TIMSK1_TOIE1': '$01',
'TIFR1': '&54',
'TIFR1_ICF1': '$20',
'TIFR1_OCF1C': '$08',
'TIFR1_OCF1B': '$04',
'TIFR1_OCF1A': '$02',
'TIFR1_TOV1': '$01',
'TCCR3A': '&144',
'TCCR3A_COM3A': '$C0',
'TCCR3A_COM3B': '$30',
'TCCR3A_COM3C': '$0C',
'TCCR3A_WGM3': '$03',
'TCCR3B': '&145',
'TCCR3B_ICNC3': '$80',
'TCCR3B_ICES3': '$40',
'TCCR3B_WGM3': '$18',
'TCCR3B_CS3': '$07',
'TCCR3C': '&146',
'TCCR3C_FOC3A': '$80',
'TCCR3C_FOC3B': '$40',
'TCCR3C_FOC3C': '$20',
'TCNT3': '&148',
'OCR3A': '&152',
'OCR3B': '&154',
'OCR3C': '&156',
'ICR3': '&150',
'TIMSK3': '&113',
'TIMSK3_ICIE3': '$20',
'TIMSK3_OCIE3C': '$08',
'TIMSK3_OCIE3B': '$04',
'TIMSK3_OCIE3A': '$02',
'TIMSK3_TOIE3': '$01',
'TIFR3': '&56',
'TIFR3_ICF3': '$20',
'TIFR3_OCF3C': '$08',
'TIFR3_OCF3B': '$04',
'TIFR3_OCF3A': '$02',
'TIFR3_TOV3': '$01',
'TCCR2': '&176',
'TCCR2_FOC2A': '$80',
'TCCR2_WGM20': '$40',
'TCCR2_COM2A': '$30',
'TCCR2_WGM21': '$08',
'TCCR2_CS2': '$07',
'TCNT2': '&178',
'OCR2A': '&179',
'TIMSK2': '&112',
'TIMSK2_OCIE2A': '$02',
'TIMSK2_TOIE2': '$01',
'TIFR2': '&55',
'TIFR2_OCF2A': '$02',
'TIFR2_TOV2': '$01',
'ASSR': '&182',
'ASSR_EXCLK': '$10',
'ASSR_AS2': '$08',
'ASSR_TCN2UB': '$04',
'ASSR_OCR2UB': '$02',
'ASSR_TCR2UB': '$01',
'WDTCR': '&96',
'WDTCR_WDCE': '$10',
'WDTCR_WDE': '$08',
'WDTCR_WDP': '$07',
'ADMUX': '&124',
'ADMUX_REFS': '$C0',
'ADMUX_ADLAR': '$20',
'ADMUX_MUX': '$1F',
'ADCSRA': '&122',
'ADCSRA_ADEN': '$80',
'ADCSRA_ADSC': '$40',
'ADCSRA_ADATE': '$20',
'ADCSRA_ADIF': '$10',
'ADCSRA_ADIE': '$08',
'ADCSRA_ADPS': '$07',
'ADC': '&120',
'ADCSRB': '&123',
'ADCSRB_ADHSM': '$80',
'ADCSRB_ADTS': '$07',
'DIDR0': '&126',
'DIDR0_ADC7D': '$80',
'DIDR0_ADC6D': '$40',
'DIDR0_ADC5D': '$20',
'DIDR0_ADC4D': '$10',
'DIDR0_ADC3D': '$08',
'DIDR0_ADC2D': '$04',
'DIDR0_ADC1D': '$02',
'DIDR0_ADC0D': '$01',
'ACSR': '&80',
'ACSR_ACD': '$80',
'ACSR_ACBG': '$40',
'ACSR_ACO': '$20',
'ACSR_ACI': '$10',
'ACSR_ACIE': '$08',
'ACSR_ACIC': '$04',
'ACSR_ACIS': '$03',
'DIDR1': '&127',
'DIDR1_AIN1D': '$02',
'DIDR1_AIN0D': '$01',
'CANGCON': '&216',
'CANGCON_ABRQ': '$80',
'CANGCON_OVRQ': '$40',
'CANGCON_TTC': '$20',
'CANGCON_SYNTTC': '$10',
'CANGCON_LISTEN': '$08',
'CANGCON_TEST': '$04',
'CANGCON_ENASTB': '$02',
'CANGCON_SWRES': '$01',
'CANGSTA': '&217',
'CANGSTA_OVRG': '$40',
'CANGSTA_TXBSY': '$10',
'CANGSTA_RXBSY': '$08',
'CANGSTA_ENFG': '$04',
'CANGSTA_BOFF': '$02',
'CANGSTA_ERRP': '$01',
'CANGIT': '&218',
'CANGIT_CANIT': '$80',
'CANGIT_BOFFIT': '$40',
'CANGIT_OVRTIM': '$20',
'CANGIT_BXOK': '$10',
'CANGIT_SERG': '$08',
'CANGIT_CERG': '$04',
'CANGIT_FERG': '$02',
'CANGIT_AERG': '$01',
'CANGIE': '&219',
'CANGIE_ENIT': '$80',
'CANGIE_ENBOFF': '$40',
'CANGIE_ENRX': '$20',
'CANGIE_ENTX': '$10',
'CANGIE_ENERR': '$08',
'CANGIE_ENBX': '$04',
'CANGIE_ENERG': '$02',
'CANGIE_ENOVRT': '$01',
'CANEN2': '&220',
'CANEN1': '&221',
'CANIE2': '&222',
'CANIE1': '&223',
'CANSIT2': '&224',
'CANSIT1': '&225',
'CANBT1': '&226',
'CANBT1_BRP': '$7E',
'CANBT2': '&227',
'CANBT2_SJW': '$60',
'CANBT2_PRS': '$0E',
'CANBT3': '&228',
'CANBT3_PHS2': '$70',
'CANBT3_PHS1': '$0E',
'CANBT3_SMP': '$01',
'CANTCON': '&229',
'CANTIML': '&230',
'CANTIMH': '&231',
'CANTTCL': '&232',
'CANTTCH': '&233',
'CANTEC': '&234',
'CANREC': '&235',
'CANHPMOB': '&236',
'CANPAGE': '&237',
'CANPAGE_MOBNB': '$F0',
'CANPAGE_AINC': '$08',
'CANPAGE_INDX': '$07',
'CANSTMOB': '&238',
'CANSTMOB_DLCW': '$80',
'CANSTMOB_TXOK': '$40',
'CANSTMOB_RXOK': '$20',
'CANSTMOB_BERR': '$10',
'CANSTMOB_SERR': '$08',
'CANSTMOB_CERR': '$04',
'CANSTMOB_FERR': '$02',
'CANSTMOB_AERR': '$01',
'CANCDMOB': '&239',
'CANCDMOB_CONMOB': '$C0',
'CANCDMOB_RPLV': '$20',
'CANCDMOB_IDE': '$10',
'CANCDMOB_DLC': '$0F',
'CANIDT4': '&240',
'CANIDT3': '&241',
'CANIDT2': '&242',
'CANIDT1': '&243',
'CANIDM4': '&244',
'CANIDM3': '&245',
'CANIDM2': '&246',
'CANIDM1': '&247',
'CANSTML': '&248',
'CANSTMH': '&249',
'CANMSG': '&250',
'INT0Addr': '2',
'INT1Addr': '4',
'INT2Addr': '6',
'INT3Addr': '8',
'INT4Addr': '10',
'INT5Addr': '12',
'INT6Addr': '14',
'INT7Addr': '16',
'TIMER2_COMPAddr': '18',
'TIMER2_OVFAddr': '20',
'TIMER1_CAPTAddr': '22',
'TIMER1_COMPAAddr': '24',
'TIMER1_COMPBAddr': '26',
'TIMER1_COMPCAddr': '28',
'TIMER1_OVFAddr': '30',
'TIMER0_COMPAddr': '32',
'TIMER0_OVFAddr': '34',
'CANITAddr': '36',
'OVRITAddr': '38',
'SPI__STCAddr': '40',
'USART0__RXAddr': '42',
'USART0__UDREAddr': '44',
'USART0__TXAddr': '46',
'ANALOG_COMPAddr': '48',
'ADCAddr': '50',
'EE_READYAddr': '52',
'TIMER3_CAPTAddr': '54',
'TIMER3_COMPAAddr': '56',
'TIMER3_COMPBAddr': '58',
'TIMER3_COMPCAddr': '60',
'TIMER3_OVFAddr': '62',
'USART1__RXAddr': '64',
'USART1__UDREAddr': '66',
'USART1__TXAddr': '68',
'TWIAddr': '70',
'SPM_READYAddr': '72'
}
|
gpl-2.0
|
ct-23/home-assistant
|
homeassistant/components/sensor/google_travel_time.py
|
3
|
9936
|
"""
Support for Google travel time sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.google_travel_time/
"""
from datetime import datetime
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
from homeassistant.const import (
CONF_API_KEY, CONF_NAME, EVENT_HOMEASSISTANT_START, ATTR_LATITUDE,
ATTR_LONGITUDE)
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
import homeassistant.helpers.location as location
import homeassistant.util.dt as dt_util
# PyPI requirement installed on demand by Home Assistant; the googlemaps
# client itself is imported lazily inside GoogleTravelTimeSensor.__init__.
REQUIREMENTS = ['googlemaps==2.4.6']
_LOGGER = logging.getLogger(__name__)
# Configuration keys used in the platform schema below.
CONF_DESTINATION = 'destination'
CONF_MODE = 'mode'
CONF_OPTIONS = 'options'
CONF_ORIGIN = 'origin'
CONF_TRAVEL_MODE = 'travel_mode'
DEFAULT_NAME = 'Google Travel Time'
# Minimum interval between API calls, enforced by @Throttle on update().
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)
# Accepted values for the option keys passed through to the googlemaps
# distance_matrix() call (see update()).
ALL_LANGUAGES = ['ar', 'bg', 'bn', 'ca', 'cs', 'da', 'de', 'el', 'en', 'es',
                 'eu', 'fa', 'fi', 'fr', 'gl', 'gu', 'hi', 'hr', 'hu', 'id',
                 'it', 'iw', 'ja', 'kn', 'ko', 'lt', 'lv', 'ml', 'mr', 'nl',
                 'no', 'pl', 'pt', 'pt-BR', 'pt-PT', 'ro', 'ru', 'sk', 'sl',
                 'sr', 'sv', 'ta', 'te', 'th', 'tl', 'tr', 'uk', 'vi',
                 'zh-CN', 'zh-TW']
AVOID = ['tolls', 'highways', 'ferries', 'indoor']
TRANSIT_PREFS = ['less_walking', 'fewer_transfers']
TRANSPORT_TYPE = ['bus', 'subway', 'train', 'tram', 'rail']
TRAVEL_MODE = ['driving', 'walking', 'bicycling', 'transit']
TRAVEL_MODEL = ['best_guess', 'pessimistic', 'optimistic']
UNITS = ['metric', 'imperial']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Required(CONF_DESTINATION): cv.string,
    vol.Required(CONF_ORIGIN): cv.string,
    vol.Optional(CONF_NAME): cv.string,
    # Deprecated; superseded by options[CONF_MODE] (see setup_platform).
    vol.Optional(CONF_TRAVEL_MODE): vol.In(TRAVEL_MODE),
    vol.Optional(CONF_OPTIONS, default={CONF_MODE: 'driving'}): vol.All(
        dict, vol.Schema({
            vol.Optional(CONF_MODE, default='driving'): vol.In(TRAVEL_MODE),
            vol.Optional('language'): vol.In(ALL_LANGUAGES),
            vol.Optional('avoid'): vol.In(AVOID),
            vol.Optional('units'): vol.In(UNITS),
            # arrival_time and departure_time are mutually exclusive.
            vol.Exclusive('arrival_time', 'time'): cv.string,
            vol.Exclusive('departure_time', 'time'): cv.string,
            vol.Optional('traffic_model'): vol.In(TRAVEL_MODEL),
            vol.Optional('transit_mode'): vol.In(TRANSPORT_TYPE),
            vol.Optional('transit_routing_preference'): vol.In(TRANSIT_PREFS)
        }))
})
# Entity domains whose state/attributes may be resolved to a location.
TRACKABLE_DOMAINS = ['device_tracker', 'sensor', 'zone']
def convert_time_to_utc(timestr):
    """Take a string like 08:00:00 and convert it to a unix timestamp."""
    day_start = dt_util.start_of_local_day()
    wall_time = dt_util.parse_time(timestr)
    moment = datetime.combine(day_start, wall_time)
    # A time-of-day already in the past is interpreted as tomorrow.
    if moment < datetime.now():
        moment += timedelta(days=1)
    return dt_util.as_timestamp(moment)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
    """Set up the Google travel time platform."""
    def run_setup(event):
        """Delay the setup until Home Assistant is fully initialized.
        This allows any entities to be created already
        """
        options = config.get(CONF_OPTIONS)
        # Default the unit system to the instance-wide setting.
        if options.get('units') is None:
            options['units'] = hass.config.units.name
        legacy_mode = config.get(CONF_TRAVEL_MODE)
        if legacy_mode is not None:
            _LOGGER.warning(
                "Google Travel Time: travel_mode is deprecated, please "
                "add mode to the options dictionary instead!")
            # Only honor the legacy key when no explicit mode was given.
            if options.get(CONF_MODE) is None:
                options[CONF_MODE] = legacy_mode
        fallback_name = "{} - {}".format(
            DEFAULT_NAME, options.get(CONF_MODE).title())
        sensor = GoogleTravelTimeSensor(
            hass,
            config.get(CONF_NAME, fallback_name),
            config.get(CONF_API_KEY),
            config.get(CONF_ORIGIN),
            config.get(CONF_DESTINATION),
            options)
        # Skip entities whose first API call failed (see __init__).
        if sensor.valid_api_connection:
            add_devices_callback([sensor])
    # Wait until start event is sent to load this component.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, run_setup)
class GoogleTravelTimeSensor(Entity):
    """Representation of a Google travel time sensor."""
    def __init__(self, hass, name, api_key, origin, destination, options):
        """Initialize the sensor.

        origin/destination may be either a trackable entity id or a raw
        location string passed straight to the Google API.
        """
        self._hass = hass
        self._name = name
        self._options = options
        self._unit_of_measurement = 'min'
        # Last distance_matrix() response; None until the first update.
        self._matrix = None
        # Cleared when the first API call raises, so setup can skip us.
        self.valid_api_connection = True
        # Check if location is a trackable entity
        if origin.split('.', 1)[0] in TRACKABLE_DOMAINS:
            self._origin_entity_id = origin
        else:
            self._origin = origin
        if destination.split('.', 1)[0] in TRACKABLE_DOMAINS:
            self._destination_entity_id = destination
        else:
            self._destination = destination
        # Imported here so the dependency is only needed once configured.
        import googlemaps
        self._client = googlemaps.Client(api_key, timeout=10)
        try:
            self.update()
        except googlemaps.exceptions.ApiError as exp:
            _LOGGER .error(exp)
            self.valid_api_connection = False
            return
    @property
    def state(self):
        """Return the state of the sensor: travel time in whole minutes."""
        if self._matrix is None:
            return None
        _data = self._matrix['rows'][0]['elements'][0]
        # Prefer traffic-aware duration when the API provided one.
        if 'duration_in_traffic' in _data:
            return round(_data['duration_in_traffic']['value']/60)
        if 'duration' in _data:
            return round(_data['duration']['value']/60)
        return None
    @property
    def name(self):
        """Get the name of the sensor."""
        return self._name
    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        if self._matrix is None:
            return None
        res = self._matrix.copy()
        res.update(self._options)
        # 'rows' is replaced by the flattened duration/distance texts below.
        del res['rows']
        _data = self._matrix['rows'][0]['elements'][0]
        if 'duration_in_traffic' in _data:
            res['duration_in_traffic'] = _data['duration_in_traffic']['text']
        if 'duration' in _data:
            res['duration'] = _data['duration']['text']
        if 'distance' in _data:
            res['distance'] = _data['distance']['text']
        return res
    @property
    def unit_of_measurement(self):
        """Return the unit this state is expressed in."""
        return self._unit_of_measurement
    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data from Google."""
        options_copy = self._options.copy()
        dtime = options_copy.get('departure_time')
        atime = options_copy.get('arrival_time')
        # A value containing ':' is treated as a wall-clock time and
        # converted; anything else is passed through verbatim.
        if dtime is not None and ':' in dtime:
            options_copy['departure_time'] = convert_time_to_utc(dtime)
        elif dtime is not None:
            options_copy['departure_time'] = dtime
        elif atime is None:
            options_copy['departure_time'] = 'now'
        if atime is not None and ':' in atime:
            options_copy['arrival_time'] = convert_time_to_utc(atime)
        elif atime is not None:
            options_copy['arrival_time'] = atime
        # Convert device_trackers to google friendly location
        if hasattr(self, '_origin_entity_id'):
            self._origin = self._get_location_from_entity(
                self._origin_entity_id
            )
        if hasattr(self, '_destination_entity_id'):
            self._destination = self._get_location_from_entity(
                self._destination_entity_id
            )
        self._destination = self._resolve_zone(self._destination)
        self._origin = self._resolve_zone(self._origin)
        if self._destination is not None and self._origin is not None:
            self._matrix = self._client.distance_matrix(
                self._origin, self._destination, **options_copy)
    def _get_location_from_entity(self, entity_id):
        """Get the location from the entity state or attributes."""
        entity = self._hass.states.get(entity_id)
        if entity is None:
            _LOGGER.error("Unable to find entity %s", entity_id)
            self.valid_api_connection = False
            return None
        # Check if the entity has location attributes
        if location.has_location(entity):
            return self._get_location_from_attributes(entity)
        # Check if device is in a zone
        zone_entity = self._hass.states.get("zone.%s" % entity.state)
        if location.has_location(zone_entity):
            _LOGGER.debug(
                "%s is in %s, getting zone location",
                entity_id, zone_entity.entity_id
            )
            return self._get_location_from_attributes(zone_entity)
        # If zone was not found in state then use the state as the location
        if entity_id.startswith("sensor."):
            return entity.state
        # When everything fails just return nothing
        return None
    @staticmethod
    def _get_location_from_attributes(entity):
        """Get the lat/long string from an entities attributes."""
        attr = entity.attributes
        return "%s,%s" % (attr.get(ATTR_LATITUDE), attr.get(ATTR_LONGITUDE))
    def _resolve_zone(self, friendly_name):
        """Map a zone's friendly name to its lat/long, else pass through."""
        entities = self._hass.states.all()
        for entity in entities:
            if entity.domain == 'zone' and entity.name == friendly_name:
                return self._get_location_from_attributes(entity)
        return friendly_name
|
apache-2.0
|
fbradyirl/home-assistant
|
script/hassfest/__main__.py
|
1
|
2256
|
"""Validate manifests."""
import pathlib
import sys
from .model import Integration, Config
from . import codeowners, config_flow, dependencies, manifest, services, ssdp, zeroconf
PLUGINS = [codeowners, config_flow, dependencies, manifest, services, ssdp, zeroconf]
def get_config() -> Config:
    """Return config."""
    marker = pathlib.Path("requirements_all.txt")
    # The marker file only exists at the repository root.
    if not marker.is_file():
        raise RuntimeError("Run from project root")
    if sys.argv[-1] == "validate":
        action = "validate"
    else:
        action = "generate"
    return Config(root=pathlib.Path(".").absolute(), action=action)
def _print_errors(config, general_errors, invalid_itg):
    """Report every collected error on stdout."""
    print()
    if config.action == "generate":
        print("Found errors. Generating files canceled.")
        print()
    if general_errors:
        print("General errors:")
        for error in general_errors:
            print("*", error)
        print()
    for integration in sorted(invalid_itg, key=lambda itg: itg.domain):
        print("Integration {}:".format(integration.domain))
        for error in integration.errors:
            print("*", error)
        print()
def main():
    """Validate manifests."""
    try:
        config = get_config()
    except RuntimeError as err:
        print(err)
        return 1
    integrations = Integration.load_dir(pathlib.Path("homeassistant/components"))
    for plugin in PLUGINS:
        plugin.validate(integrations, config)
    if config.action == "generate":
        # When generating, fixable errors will be resolved by the generation
        # step itself, so only the unfixable ones count against us.
        general_errors = [err for err in config.errors if not err.fixable]
        invalid_itg = [
            itg
            for itg in integrations.values()
            if any(not error.fixable for error in itg.errors)
        ]
    else:
        # action == validate: every error counts.
        general_errors = list(config.errors)
        invalid_itg = [itg for itg in integrations.values() if itg.errors]
    print("Integrations:", len(integrations))
    print("Invalid integrations:", len(invalid_itg))
    if not invalid_itg and not general_errors:
        for plugin in PLUGINS:
            if hasattr(plugin, "generate"):
                plugin.generate(integrations, config)
        return 0
    _print_errors(config, general_errors, invalid_itg)
    return 1
# Script entry point: exit status 0 on success, 1 when validation failed.
if __name__ == "__main__":
    sys.exit(main())
|
apache-2.0
|
nikhilprathapani/python-for-android
|
python3-alpha/python3-src/Lib/test/test_tempfile.py
|
46
|
35200
|
# tempfile.py unit tests.
import tempfile
import os
import sys
import re
import warnings
import unittest
from test import support
# Feature detection: some platforms lack os.stat / os.spawnl, and text
# mode is only distinct from binary mode where the open flags differ.
if hasattr(os, 'stat'):
    import stat
    has_stat = 1
else:
    has_stat = 0
has_textmode = (tempfile._text_openflags != tempfile._bin_openflags)
has_spawnl = hasattr(os, 'spawnl')
# TEST_FILES may need to be tweaked for systems depending on the maximum
# number of files that can be opened at one time (see ulimit -n)
if sys.platform in ('openbsd3', 'openbsd4'):
    TEST_FILES = 48
else:
    TEST_FILES = 100
# This is organized as one test for each chunk of code in tempfile.py,
# in order of their appearance in the file. Testing which requires
# threads is not done here.
# Common functionality.
# Common functionality.
class TC(unittest.TestCase):
    """Base class for all tests here: name-format checks plus suppression
    of the RuntimeWarning that tempfile.mktemp() emits."""
    # Random middle part of a generated name: six chars from [a-zA-Z0-9_-].
    str_check = re.compile(r"[a-zA-Z0-9_-]{6}$")
    def setUp(self):
        self._warnings_manager = support.check_warnings()
        self._warnings_manager.__enter__()
        warnings.filterwarnings("ignore", category=RuntimeWarning,
                                message="mktemp", module=__name__)
    def tearDown(self):
        self._warnings_manager.__exit__(None, None, None)
    def failOnException(self, what, ei=None):
        # Fail the current test, reporting the given (or current) exception.
        if ei is None:
            ei = sys.exc_info()
        self.fail("%s raised %s: %s" % (what, ei[0], ei[1]))
    def nameCheck(self, name, dir, pre, suf):
        # Assert that *name* lives in *dir*, begins with *pre*, ends with
        # *suf*, and has a valid six-character random middle part.
        (ndir, nbase) = os.path.split(name)
        npre = nbase[:len(pre)]
        nsuf = nbase[len(nbase)-len(suf):]
        # check for equality of the absolute paths!
        self.assertEqual(os.path.abspath(ndir), os.path.abspath(dir),
                         "file '%s' not in directory '%s'" % (name, dir))
        self.assertEqual(npre, pre,
                         "file '%s' does not begin with '%s'" % (nbase, pre))
        self.assertEqual(nsuf, suf,
                         "file '%s' does not end with '%s'" % (nbase, suf))
        nbase = nbase[len(pre):len(nbase)-len(suf)]
        self.assertTrue(self.str_check.match(nbase),
                        "random string '%s' does not match /^[a-zA-Z0-9_-]{6}$/"
                        % nbase)
# Registry of test classes; presumably consumed by a test_main()-style
# runner outside this chunk — TODO confirm.
test_classes = []
class test_exports(TC):
    """Check that the tempfile module exposes exactly the expected names."""
    def test_exports(self):
        # There are no surprising symbols in the tempfile module
        expected = {
            "NamedTemporaryFile", "TemporaryFile", "mkstemp", "mkdtemp",
            "mktemp", "TMP_MAX", "gettempprefix", "gettempdir", "tempdir",
            "template", "SpooledTemporaryFile", "TemporaryDirectory",
        }
        # Any public (non-underscore) name not in the expected set is a
        # surprise worth failing over.
        unexp = [key for key in tempfile.__dict__
                 if not key.startswith('_') and key not in expected]
        self.assertTrue(len(unexp) == 0,
                        "unexpected keys: %s" % unexp)
test_classes.append(test_exports)
class test__RandomNameSequence(TC):
    """Test the internal iterator object _RandomNameSequence."""
    def setUp(self):
        self.r = tempfile._RandomNameSequence()
        super().setUp()
    def test_get_six_char_str(self):
        # _RandomNameSequence returns a six-character string
        s = next(self.r)
        self.nameCheck(s, '', '', '')
    def test_many(self):
        # _RandomNameSequence returns no duplicate strings (stochastic)
        seen = {}  # renamed from ``dict`` which shadowed the builtin
        r = self.r
        for i in range(TEST_FILES):
            s = next(r)
            self.nameCheck(s, '', '', '')
            self.assertNotIn(s, seen)
            seen[s] = 1
    def test_supports_iter(self):
        # Renamed from ``supports_iter``: without the ``test_`` prefix,
        # unittest discovery never executed this check at all.
        # _RandomNameSequence supports the iterator protocol
        i = 0
        r = self.r
        try:
            for s in r:
                i += 1
                if i == 20:
                    break
        except:
            self.failOnException("iteration")
test_classes.append(test__RandomNameSequence)
class test__candidate_tempdir_list(TC):
    """Test the internal function _candidate_tempdir_list."""
    def test_nonempty_list(self):
        # _candidate_tempdir_list returns a nonempty list of strings
        cand = tempfile._candidate_tempdir_list()
        self.assertFalse(len(cand) == 0)
        for c in cand:
            self.assertIsInstance(c, str)
    def test_wanted_dirs(self):
        # _candidate_tempdir_list contains the expected directories
        # Make sure the interesting environment variables are all set.
        with support.EnvironmentVarGuard() as env:
            for envname in 'TMPDIR', 'TEMP', 'TMP':
                dirname = os.getenv(envname)
                if not dirname:
                    env[envname] = os.path.abspath(envname)
            cand = tempfile._candidate_tempdir_list()
            for envname in 'TMPDIR', 'TEMP', 'TMP':
                dirname = os.getenv(envname)
                # Sanity guard: the variable was set above, so it must exist.
                if not dirname: raise ValueError
                self.assertIn(dirname, cand)
            try:
                dirname = os.getcwd()
            except (AttributeError, os.error):
                dirname = os.curdir
            self.assertIn(dirname, cand)
            # Not practical to try to verify the presence of OS-specific
            # paths in this list.
test_classes.append(test__candidate_tempdir_list)
# We test _get_default_tempdir by testing gettempdir.
class test__get_candidate_names(TC):
    """Test the internal function _get_candidate_names."""
    def test_retval(self):
        # _get_candidate_names returns a _RandomNameSequence object
        self.assertIsInstance(tempfile._get_candidate_names(),
                              tempfile._RandomNameSequence)
    def test_same_thing(self):
        # _get_candidate_names always returns the same object
        first = tempfile._get_candidate_names()
        second = tempfile._get_candidate_names()
        self.assertTrue(first is second)
test_classes.append(test__get_candidate_names)
class test__mkstemp_inner(TC):
    """Test the internal function _mkstemp_inner."""
    class mkstemped:
        # Wrapper that creates the temp file on construction and closes /
        # unlinks it in __del__ when garbage collected.
        _bflags = tempfile._bin_openflags
        _tflags = tempfile._text_openflags
        # Presumably bound at class-definition time so __del__ still works
        # during interpreter shutdown — TODO confirm.
        _close = os.close
        _unlink = os.unlink
        def __init__(self, dir, pre, suf, bin):
            if bin: flags = self._bflags
            else: flags = self._tflags
            (self.fd, self.name) = tempfile._mkstemp_inner(dir, pre, suf, flags)
        def write(self, str):
            os.write(self.fd, str)
        def __del__(self):
            self._close(self.fd)
            self._unlink(self.name)
    def do_create(self, dir=None, pre="", suf="", bin=1):
        # Create a file via _mkstemp_inner and validate its name format.
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            file = self.mkstemped(dir, pre, suf, bin)
        except:
            self.failOnException("_mkstemp_inner")
        self.nameCheck(file.name, dir, pre, suf)
        return file
    def test_basic(self):
        # _mkstemp_inner can create files
        self.do_create().write(b"blat")
        self.do_create(pre="a").write(b"blat")
        self.do_create(suf="b").write(b"blat")
        self.do_create(pre="a", suf="b").write(b"blat")
        self.do_create(pre="aa", suf=".txt").write(b"blat")
    def test_basic_many(self):
        # _mkstemp_inner can create many files (stochastic)
        extant = list(range(TEST_FILES))
        for i in extant:
            extant[i] = self.do_create(pre="aa")
    def test_choose_directory(self):
        # _mkstemp_inner can create files in a user-selected directory
        dir = tempfile.mkdtemp()
        try:
            self.do_create(dir=dir).write(b"blat")
        finally:
            os.rmdir(dir)
    def test_file_mode(self):
        # _mkstemp_inner creates files with the proper mode
        if not has_stat:
            return # ugh, can't use SkipTest.
        file = self.do_create()
        mode = stat.S_IMODE(os.stat(file.name).st_mode)
        expected = 0o600
        if sys.platform in ('win32', 'os2emx'):
            # There's no distinction among 'user', 'group' and 'world';
            # replicate the 'user' bits.
            user = expected >> 6
            expected = user * (1 + 8 + 64)
        self.assertEqual(mode, expected)
    def test_noinherit(self):
        # _mkstemp_inner file handles are not inherited by child processes
        if not has_spawnl:
            return # ugh, can't use SkipTest.
        if support.verbose:
            v="v"
        else:
            v="q"
        file = self.do_create()
        fd = "%d" % file.fd
        try:
            me = __file__
        except NameError:
            me = sys.argv[0]
        # We have to exec something, so that FD_CLOEXEC will take
        # effect. The core of this test is therefore in
        # tf_inherit_check.py, which see.
        tester = os.path.join(os.path.dirname(os.path.abspath(me)),
                              "tf_inherit_check.py")
        # On Windows a spawn* /path/ with embedded spaces shouldn't be quoted,
        # but an arg with embedded spaces should be decorated with double
        # quotes on each end
        if sys.platform in ('win32',):
            decorated = '"%s"' % sys.executable
            tester = '"%s"' % tester
        else:
            decorated = sys.executable
        retval = os.spawnl(os.P_WAIT, sys.executable, decorated, tester, v, fd)
        self.assertFalse(retval < 0,
                         "child process caught fatal signal %d" % -retval)
        self.assertFalse(retval > 0, "child process reports failure %d"%retval)
    def test_textmode(self):
        # _mkstemp_inner can create files in text mode
        if not has_textmode:
            return # ugh, can't use SkipTest.
        # A text file is truncated at the first Ctrl+Z byte
        f = self.do_create(bin=0)
        f.write(b"blat\x1a")
        f.write(b"extra\n")
        os.lseek(f.fd, 0, os.SEEK_SET)
        self.assertEqual(os.read(f.fd, 20), b"blat")
test_classes.append(test__mkstemp_inner)
class test_gettempprefix(TC):
    """Test gettempprefix()."""
    def test_sane_template(self):
        # gettempprefix returns a nonempty prefix string
        p = tempfile.gettempprefix()
        self.assertIsInstance(p, str)
        self.assertTrue(len(p) > 0)
    def test_usable_template(self):
        # gettempprefix returns a usable prefix string
        # Create a temp directory, avoiding use of the prefix.
        # Then attempt to create a file whose name is
        # prefix + 'xxxxxx.xxx' in that directory.
        p = tempfile.gettempprefix() + "xxxxxx.xxx"
        d = tempfile.mkdtemp(prefix="")
        try:
            p = os.path.join(d, p)
            try:
                fd = os.open(p, os.O_RDWR | os.O_CREAT)
            except:
                self.failOnException("os.open")
            os.close(fd)
            os.unlink(p)
        finally:
            os.rmdir(d)
test_classes.append(test_gettempprefix)
class test_gettempdir(TC):
    """Test gettempdir()."""
    def test_directory_exists(self):
        # gettempdir returns a directory which exists
        dir = tempfile.gettempdir()
        self.assertTrue(os.path.isabs(dir) or dir == os.curdir,
                        "%s is not an absolute path" % dir)
        self.assertTrue(os.path.isdir(dir),
                        "%s is not a directory" % dir)
    def test_directory_writable(self):
        # gettempdir returns a directory writable by the user
        # sneaky: just instantiate a NamedTemporaryFile, which
        # defaults to writing into the directory returned by
        # gettempdir.
        try:
            file = tempfile.NamedTemporaryFile()
            file.write(b"blat")
            file.close()
        except:
            self.failOnException("create file in %s" % tempfile.gettempdir())
    def test_same_thing(self):
        # gettempdir always returns the same object
        a = tempfile.gettempdir()
        b = tempfile.gettempdir()
        self.assertTrue(a is b)
test_classes.append(test_gettempdir)
class test_mkstemp(TC):
    """Test mkstemp()."""
    def do_create(self, dir=None, pre="", suf=""):
        # Create via mkstemp, validate the name, then clean up the file.
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            (fd, name) = tempfile.mkstemp(dir=dir, prefix=pre, suffix=suf)
            (ndir, nbase) = os.path.split(name)
            adir = os.path.abspath(dir)
            self.assertEqual(adir, ndir,
                "Directory '%s' incorrectly returned as '%s'" % (adir, ndir))
        except:
            self.failOnException("mkstemp")
        try:
            self.nameCheck(name, dir, pre, suf)
        finally:
            os.close(fd)
            os.unlink(name)
    def test_basic(self):
        # mkstemp can create files
        self.do_create()
        self.do_create(pre="a")
        self.do_create(suf="b")
        self.do_create(pre="a", suf="b")
        self.do_create(pre="aa", suf=".txt")
        self.do_create(dir=".")
    def test_choose_directory(self):
        # mkstemp can create directories in a user-selected directory
        dir = tempfile.mkdtemp()
        try:
            self.do_create(dir=dir)
        finally:
            os.rmdir(dir)
test_classes.append(test_mkstemp)
class test_mkdtemp(TC):
    """Test mkdtemp()."""
    def do_create(self, dir=None, pre="", suf=""):
        # Create a directory via mkdtemp and validate the name format;
        # on validation failure the directory is removed before re-raising.
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            name = tempfile.mkdtemp(dir=dir, prefix=pre, suffix=suf)
        except:
            self.failOnException("mkdtemp")
        try:
            self.nameCheck(name, dir, pre, suf)
            return name
        except:
            os.rmdir(name)
            raise
    def test_basic(self):
        # mkdtemp can create directories
        os.rmdir(self.do_create())
        os.rmdir(self.do_create(pre="a"))
        os.rmdir(self.do_create(suf="b"))
        os.rmdir(self.do_create(pre="a", suf="b"))
        os.rmdir(self.do_create(pre="aa", suf=".txt"))
    def test_basic_many(self):
        # mkdtemp can create many directories (stochastic)
        extant = list(range(TEST_FILES))
        try:
            for i in extant:
                extant[i] = self.do_create(pre="aa")
        finally:
            for i in extant:
                # Entries still holding their int index were never created
                # (an earlier failure aborted the loop); skip them.
                if(isinstance(i, str)):
                    os.rmdir(i)
    def test_choose_directory(self):
        # mkdtemp can create directories in a user-selected directory
        dir = tempfile.mkdtemp()
        try:
            os.rmdir(self.do_create(dir=dir))
        finally:
            os.rmdir(dir)
    def test_mode(self):
        # mkdtemp creates directories with the proper mode
        if not has_stat:
            return # ugh, can't use SkipTest.
        dir = self.do_create()
        try:
            mode = stat.S_IMODE(os.stat(dir).st_mode)
            mode &= 0o777 # Mask off sticky bits inherited from /tmp
            expected = 0o700
            if sys.platform in ('win32', 'os2emx'):
                # There's no distinction among 'user', 'group' and 'world';
                # replicate the 'user' bits.
                user = expected >> 6
                expected = user * (1 + 8 + 64)
            self.assertEqual(mode, expected)
        finally:
            os.rmdir(dir)
test_classes.append(test_mkdtemp)
class test_mktemp(TC):
    """Test mktemp()."""
    # For safety, all use of mktemp must occur in a private directory.
    # We must also suppress the RuntimeWarning it generates.
    def setUp(self):
        self.dir = tempfile.mkdtemp()
        super().setUp()
    def tearDown(self):
        if self.dir:
            os.rmdir(self.dir)
            self.dir = None
        super().tearDown()
    class mktemped:
        # Wrapper that creates a mktemp()-named file and unlinks it when
        # garbage collected.
        _unlink = os.unlink
        _bflags = tempfile._bin_openflags
        def __init__(self, dir, pre, suf):
            self.name = tempfile.mktemp(dir=dir, prefix=pre, suffix=suf)
            # Create the file. This will raise an exception if it's
            # mysteriously appeared in the meanwhile.
            os.close(os.open(self.name, self._bflags, 0o600))
        def __del__(self):
            self._unlink(self.name)
    def do_create(self, pre="", suf=""):
        try:
            file = self.mktemped(self.dir, pre, suf)
        except:
            self.failOnException("mktemp")
        self.nameCheck(file.name, self.dir, pre, suf)
        return file
    def test_basic(self):
        # mktemp can choose usable file names
        self.do_create()
        self.do_create(pre="a")
        self.do_create(suf="b")
        self.do_create(pre="a", suf="b")
        self.do_create(pre="aa", suf=".txt")
    def test_many(self):
        # mktemp can choose many usable file names (stochastic)
        extant = list(range(TEST_FILES))
        for i in extant:
            extant[i] = self.do_create(pre="aa")
##     def test_warning(self):
##         # mktemp issues a warning when used
##         warnings.filterwarnings("error",
##                                 category=RuntimeWarning,
##                                 message="mktemp")
##         self.assertRaises(RuntimeWarning,
##                           tempfile.mktemp, dir=self.dir)
test_classes.append(test_mktemp)
# We test _TemporaryFileWrapper by testing NamedTemporaryFile.
# We test _TemporaryFileWrapper by testing NamedTemporaryFile.
class test_NamedTemporaryFile(TC):
    """Test NamedTemporaryFile()."""
    def do_create(self, dir=None, pre="", suf="", delete=True):
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            file = tempfile.NamedTemporaryFile(dir=dir, prefix=pre, suffix=suf,
                                               delete=delete)
        except:
            self.failOnException("NamedTemporaryFile")
        self.nameCheck(file.name, dir, pre, suf)
        return file
    def test_basic(self):
        # NamedTemporaryFile can create files
        self.do_create()
        self.do_create(pre="a")
        self.do_create(suf="b")
        self.do_create(pre="a", suf="b")
        self.do_create(pre="aa", suf=".txt")
    def test_creates_named(self):
        # NamedTemporaryFile creates files with names
        f = tempfile.NamedTemporaryFile()
        self.assertTrue(os.path.exists(f.name),
                        "NamedTemporaryFile %s does not exist" % f.name)
    def test_del_on_close(self):
        # A NamedTemporaryFile is deleted when closed
        dir = tempfile.mkdtemp()
        try:
            f = tempfile.NamedTemporaryFile(dir=dir)
            f.write(b'blat')
            f.close()
            self.assertFalse(os.path.exists(f.name),
                             "NamedTemporaryFile %s exists after close" % f.name)
        finally:
            os.rmdir(dir)
    def test_dis_del_on_close(self):
        # Tests that delete-on-close can be disabled
        dir = tempfile.mkdtemp()
        tmp = None
        try:
            f = tempfile.NamedTemporaryFile(dir=dir, delete=False)
            tmp = f.name
            f.write(b'blat')
            f.close()
            self.assertTrue(os.path.exists(f.name),
                            "NamedTemporaryFile %s missing after close" % f.name)
        finally:
            if tmp is not None:
                os.unlink(tmp)
            os.rmdir(dir)
    def test_multiple_close(self):
        # A NamedTemporaryFile can be closed many times without error
        f = tempfile.NamedTemporaryFile()
        f.write(b'abc\n')
        f.close()
        try:
            f.close()
            f.close()
        except:
            self.failOnException("close")
    def test_context_manager(self):
        # A NamedTemporaryFile can be used as a context manager
        with tempfile.NamedTemporaryFile() as f:
            self.assertTrue(os.path.exists(f.name))
        self.assertFalse(os.path.exists(f.name))
        # Re-entering the context of a closed file must raise ValueError.
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)
    # How to test the mode and bufsize parameters?
test_classes.append(test_NamedTemporaryFile)
class test_SpooledTemporaryFile(TC):
    """Test SpooledTemporaryFile().

    ``f._rolled`` is SpooledTemporaryFile's internal flag recording whether
    the in-memory buffer has rolled over to a real on-disk file.
    """
    def do_create(self, max_size=0, dir=None, pre="", suf=""):
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            file = tempfile.SpooledTemporaryFile(max_size=max_size, dir=dir, prefix=pre, suffix=suf)
        except:
            self.failOnException("SpooledTemporaryFile")
        return file
    def test_basic(self):
        # SpooledTemporaryFile can create files
        f = self.do_create()
        self.assertFalse(f._rolled)
        f = self.do_create(max_size=100, pre="a", suf=".txt")
        self.assertFalse(f._rolled)
    def test_del_on_close(self):
        # A SpooledTemporaryFile is deleted when closed
        dir = tempfile.mkdtemp()
        try:
            f = tempfile.SpooledTemporaryFile(max_size=10, dir=dir)
            self.assertFalse(f._rolled)
            f.write(b'blat ' * 5)
            self.assertTrue(f._rolled)
            filename = f.name
            f.close()
            self.assertFalse(isinstance(filename, str) and os.path.exists(filename),
                             "SpooledTemporaryFile %s exists after close" % filename)
        finally:
            os.rmdir(dir)
    def test_rewrite_small(self):
        # A SpooledTemporaryFile can be written to multiple within the max_size
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        for i in range(5):
            f.seek(0, 0)
            f.write(b'x' * 20)
        self.assertFalse(f._rolled)
    def test_write_sequential(self):
        # A SpooledTemporaryFile should hold exactly max_size bytes, and roll
        # over afterward
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        f.write(b'x' * 20)
        self.assertFalse(f._rolled)
        f.write(b'x' * 10)
        self.assertFalse(f._rolled)
        f.write(b'x')
        self.assertTrue(f._rolled)
    def test_writelines(self):
        # Verify writelines with a SpooledTemporaryFile
        f = self.do_create()
        f.writelines((b'x', b'y', b'z'))
        f.seek(0)
        buf = f.read()
        self.assertEqual(buf, b'xyz')
    def test_writelines_sequential(self):
        # A SpooledTemporaryFile should hold exactly max_size bytes, and roll
        # over afterward
        f = self.do_create(max_size=35)
        f.writelines((b'x' * 20, b'x' * 10, b'x' * 5))
        self.assertFalse(f._rolled)
        f.write(b'x')
        self.assertTrue(f._rolled)
    def test_sparse(self):
        # A SpooledTemporaryFile that is written late in the file will extend
        # when that occurs
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        f.seek(100, 0)
        self.assertFalse(f._rolled)
        f.write(b'x')
        self.assertTrue(f._rolled)
    def test_fileno(self):
        # A SpooledTemporaryFile should roll over to a real file on fileno()
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        self.assertTrue(f.fileno() > 0)
        self.assertTrue(f._rolled)
    def test_multiple_close_before_rollover(self):
        # A SpooledTemporaryFile can be closed many times without error
        f = tempfile.SpooledTemporaryFile()
        f.write(b'abc\n')
        self.assertFalse(f._rolled)
        f.close()
        try:
            f.close()
            f.close()
        except:
            self.failOnException("close")
    def test_multiple_close_after_rollover(self):
        # A SpooledTemporaryFile can be closed many times without error
        f = tempfile.SpooledTemporaryFile(max_size=1)
        f.write(b'abc\n')
        self.assertTrue(f._rolled)
        f.close()
        try:
            f.close()
            f.close()
        except:
            self.failOnException("close")
    def test_bound_methods(self):
        # It should be OK to steal a bound method from a SpooledTemporaryFile
        # and use it independently; when the file rolls over, those bound
        # methods should continue to function
        f = self.do_create(max_size=30)
        read = f.read
        write = f.write
        seek = f.seek
        write(b"a" * 35)
        write(b"b" * 35)
        seek(0, 0)
        self.assertEqual(read(70), b'a'*35 + b'b'*35)
    def test_text_mode(self):
        # Creating a SpooledTemporaryFile with a text mode should produce
        # a file object reading and writing (Unicode) text strings.
        f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10)
        f.write("abc\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\n")
        f.write("def\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\ndef\n")
        f.write("xyzzy\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\ndef\nxyzzy\n")
        # Check that Ctrl+Z doesn't truncate the file
        f.write("foo\x1abar\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\ndef\nxyzzy\nfoo\x1abar\n")
    def test_text_newline_and_encoding(self):
        # newline/encoding arguments are honored both before and after
        # the spool rolls over to a real file.
        f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10,
                                          newline='', encoding='utf-8')
        f.write("\u039B\r\n")
        f.seek(0)
        self.assertEqual(f.read(), "\u039B\r\n")
        self.assertFalse(f._rolled)
        f.write("\u039B" * 20 + "\r\n")
        f.seek(0)
        self.assertEqual(f.read(), "\u039B\r\n" + ("\u039B" * 20) + "\r\n")
        self.assertTrue(f._rolled)
    def test_context_manager_before_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        with tempfile.SpooledTemporaryFile(max_size=1) as f:
            self.assertFalse(f._rolled)
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)
    def test_context_manager_during_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        with tempfile.SpooledTemporaryFile(max_size=1) as f:
            self.assertFalse(f._rolled)
            f.write(b'abc\n')
            f.flush()
            self.assertTrue(f._rolled)
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)
    def test_context_manager_after_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        f = tempfile.SpooledTemporaryFile(max_size=1)
        f.write(b'abc\n')
        f.flush()
        self.assertTrue(f._rolled)
        with f:
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)
test_classes.append(test_SpooledTemporaryFile)
class test_TemporaryFile(TC):
    """Test TemporaryFile()."""
    def test_basic(self):
        # TemporaryFile can create files
        # No point in testing the name params - the file has no name.
        try:
            tempfile.TemporaryFile()
        except:
            self.failOnException("TemporaryFile")
    def test_has_no_name(self):
        # TemporaryFile creates files with no names (on this system)
        dir = tempfile.mkdtemp()
        f = tempfile.TemporaryFile(dir=dir)
        f.write(b'blat')
        # Sneaky: because this file has no name, it should not prevent
        # us from removing the directory it was created in.
        try:
            os.rmdir(dir)
        except:
            ei = sys.exc_info()
            # cleanup
            f.close()
            os.rmdir(dir)
            self.failOnException("rmdir", ei)
    def test_multiple_close(self):
        # A TemporaryFile can be closed many times without error
        f = tempfile.TemporaryFile()
        f.write(b'abc\n')
        f.close()
        try:
            f.close()
            f.close()
        except:
            self.failOnException("close")
    # How to test the mode and bufsize parameters?
    def test_mode_and_encoding(self):
        # Verify mode/encoding/newline arguments round-trip data intact.
        def roundtrip(input, *args, **kwargs):
            with tempfile.TemporaryFile(*args, **kwargs) as fileobj:
                fileobj.write(input)
                fileobj.seek(0)
                self.assertEqual(input, fileobj.read())
        roundtrip(b"1234", "w+b")
        roundtrip("abdc\n", "w+")
        roundtrip("\u039B", "w+", encoding="utf-16")
        roundtrip("foo\r\n", "w+", newline="")
# Skip registration when TemporaryFile is merely an alias of
# NamedTemporaryFile (which is already tested above).
if tempfile.NamedTemporaryFile is not tempfile.TemporaryFile:
    test_classes.append(test_TemporaryFile)
# Helper for test_del_on_shutdown
class NulledModules:
    """Temporarily replace every attribute of the given modules with None,
    restoring the original module dictionaries on exit."""
    def __init__(self, *modules):
        self.refs = [mod.__dict__ for mod in modules]
        self.contents = [dict(ref) for ref in self.refs]
    def __enter__(self):
        for ref in self.refs:
            # dict.fromkeys maps every existing key to None.
            ref.update(dict.fromkeys(ref))
    def __exit__(self, *exc_info):
        for ref, saved in zip(self.refs, self.contents):
            ref.clear()
            ref.update(saved)
class test_TemporaryDirectory(TC):
    """Test TemporaryDirectory()."""

    def do_create(self, dir=None, pre="", suf="", recurse=1):
        # Helper: create a TemporaryDirectory (optionally containing a
        # nested one) with a small file inside, sanity-check its name,
        # and return the object so the caller controls its lifetime.
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            tmp = tempfile.TemporaryDirectory(dir=dir, prefix=pre, suffix=suf)
        except:
            self.failOnException("TemporaryDirectory")
        self.nameCheck(tmp.name, dir, pre, suf)
        # Create a subdirectory and some files
        if recurse:
            self.do_create(tmp.name, pre, suf, recurse-1)
        with open(os.path.join(tmp.name, "test.txt"), "wb") as f:
            f.write(b"Hello world!")
        return tmp

    def test_mkdtemp_failure(self):
        # Check no additional exception if mkdtemp fails
        # Previously would raise AttributeError instead
        # (noted as part of Issue #10188)
        with tempfile.TemporaryDirectory() as nonexistent:
            pass
        # The directory named by `nonexistent` is gone now, so creating
        # inside it must fail with os.error, not AttributeError.
        with self.assertRaises(os.error):
            tempfile.TemporaryDirectory(dir=nonexistent)

    def test_explicit_cleanup(self):
        # A TemporaryDirectory is deleted when cleaned up
        dir = tempfile.mkdtemp()
        try:
            d = self.do_create(dir=dir)
            self.assertTrue(os.path.exists(d.name),
                            "TemporaryDirectory %s does not exist" % d.name)
            d.cleanup()
            self.assertFalse(os.path.exists(d.name),
                             "TemporaryDirectory %s exists after cleanup" % d.name)
        finally:
            os.rmdir(dir)

    @support.skip_unless_symlink
    def test_cleanup_with_symlink_to_a_directory(self):
        # cleanup() should not follow symlinks to directories (issue #12464)
        d1 = self.do_create()
        d2 = self.do_create()

        # Symlink d1/foo -> d2
        os.symlink(d2.name, os.path.join(d1.name, "foo"))

        # This call to cleanup() should not follow the "foo" symlink
        d1.cleanup()

        self.assertFalse(os.path.exists(d1.name),
                         "TemporaryDirectory %s exists after cleanup" % d1.name)
        self.assertTrue(os.path.exists(d2.name),
                        "Directory pointed to by a symlink was deleted")
        self.assertEqual(os.listdir(d2.name), ['test.txt'],
                         "Contents of the directory pointed to by a symlink "
                         "were deleted")
        d2.cleanup()

    @support.cpython_only
    def test_del_on_collection(self):
        # A TemporaryDirectory is deleted when garbage collected
        # (CPython-only: relies on immediate refcount-driven __del__).
        dir = tempfile.mkdtemp()
        try:
            d = self.do_create(dir=dir)
            name = d.name
            del d  # Rely on refcounting to invoke __del__
            self.assertFalse(os.path.exists(name),
                             "TemporaryDirectory %s exists after __del__" % name)
        finally:
            os.rmdir(dir)

    @unittest.expectedFailure  # See issue #10188
    def test_del_on_shutdown(self):
        # A TemporaryDirectory may be cleaned up during shutdown
        # Make sure it works with the relevant modules nulled out
        with self.do_create() as dir:
            d = self.do_create(dir=dir)
            # Mimic the nulling out of modules that
            # occurs during system shutdown
            modules = [os, os.path]
            if has_stat:
                modules.append(stat)
            # Currently broken, so suppress the warning
            # that is otherwise emitted on stdout
            with support.captured_stderr() as err:
                with NulledModules(*modules):
                    d.cleanup()
            # Currently broken, so stop spurious exception by
            # indicating the object has already been closed
            d._closed = True
            # And this assert will fail, as expected by the
            # unittest decorator...
            self.assertFalse(os.path.exists(d.name),
                             "TemporaryDirectory %s exists after cleanup" % d.name)

    def test_warnings_on_cleanup(self):
        # Two kinds of warning on shutdown
        # Issue 10888: may write to stderr if modules are nulled out
        # ResourceWarning will be triggered by __del__
        with self.do_create() as dir:
            if os.sep != '\\':
                # Embed a backslash in order to make sure string escaping
                # in the displayed error message is dealt with correctly
                suffix = '\\check_backslash_handling'
            else:
                suffix = ''
            d = self.do_create(dir=dir, suf=suffix)

            # Check for the Issue 10888 message
            modules = [os, os.path]
            if has_stat:
                modules.append(stat)
            with support.captured_stderr() as err:
                with NulledModules(*modules):
                    d.cleanup()
            # Undo the doubling of backslashes done when the message was
            # formatted for display before comparing against d.name.
            message = err.getvalue().replace('\\\\', '\\')
            self.assertIn("while cleaning up", message)
            self.assertIn(d.name, message)

            # Check for the resource warning
            with support.check_warnings(('Implicitly', ResourceWarning), quiet=False):
                warnings.filterwarnings("always", category=ResourceWarning)
                d.__del__()
            self.assertFalse(os.path.exists(d.name),
                             "TemporaryDirectory %s exists after __del__" % d.name)

    def test_multiple_close(self):
        # Can be cleaned-up many times without error
        d = self.do_create()
        d.cleanup()
        try:
            d.cleanup()
            d.cleanup()
        except:
            self.failOnException("cleanup")

    def test_context_manager(self):
        # Can be used as a context manager; __enter__ yields the path and
        # __exit__ removes the directory.
        d = self.do_create()
        with d as name:
            self.assertTrue(os.path.exists(name))
            self.assertEqual(name, d.name)
        self.assertFalse(os.path.exists(name))
test_classes.append(test_TemporaryDirectory)
def test_main():
    # Run every test class registered in the module-level test_classes list.
    support.run_unittest(*test_classes)
if __name__ == "__main__":
test_main()
|
apache-2.0
|
fredyangliu/linux-2.6-imx
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
|
12980
|
5411
|
# SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
    """Top-level window drawing scheduler traces as horizontal bands.

    Each rectangle row (one per task, as reported by the sched tracer) is
    RECT_HEIGHT pixels high and separated by RECT_SPACE; time runs along
    the x axis and is mapped to pixels through the current zoom factor.

    NOTE(review): this file uses Python 2 syntax (string-form raise at
    module level), so `/` between ints below is integer division.
    """

    Y_OFFSET = 100            # top margin, in pixels, above the first band
    RECT_HEIGHT = 100         # height of one task band, in pixels
    RECT_SPACE = 50           # vertical gap between bands, in pixels
    EVENT_MARKING_WIDTH = 5   # height of the thin event strip atop a band

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        wx.Frame.__init__(self, parent, id, title)

        # Leave a small margin around the usable display area.
        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5
        self.scroll_scale = 20
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        # Handlers are bound on both the drawing panel and its container so
        # events are caught wherever focus happens to land.
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        # Lazily created one-line summary widget; see update_summary().
        self.txt = None

        self.Show(True)

    def us_to_px(self, val):
        # Microseconds -> pixels at the current zoom (1000 us per px at zoom 1).
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        # Pixels -> microseconds; inverse of us_to_px().
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        # Current scroll origin in pixels (GetViewStart is in scroll units).
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        # Timestamp (us, relative to ts_start) at the left edge of the view.
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        """Draw band *nr* covering [start, end] (absolute microseconds).

        When *top_color* is given, a thin EVENT_MARKING_WIDTH strip of
        that color is drawn along the top edge of the band first.
        """
        offset_px = self.us_to_px(start - self.ts_start)
        # NOTE(review): width is derived from ts_start rather than from
        # *start*, so it equals the x coordinate of *end*, not end-start.
        # Looks suspicious but is kept as-is — confirm against callers.
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            # Shrink the remaining band so the strip is not painted over.
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r, g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        # Ask the tracer to redraw everything overlapping the absolute
        # window [start + ts_start, end + ts_start].
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        # Repaint only the time window currently visible in the viewport.
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        # Map a y pixel to a band index; return -1 when the pixel falls
        # outside every band (top margin, inter-band gap, past the last).
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1

        return rect

    def update_summary(self, txt):
        # Replace the one-line summary text shown under the drawing area.
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        # Forward clicks inside a band to the tracer as (band, timestamp).
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        # Total drawable width, in pixels, of the whole trace interval.
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        # Recompute the virtual width for the new zoom factor and scroll
        # so that timestamp *x* stays at the left edge of the view.
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        # Keyboard UI: +/- zoom in and out, arrow keys scroll one unit.
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
|
gpl-2.0
|
dwfreed/mitmproxy
|
test/pathod/tservers.py
|
4
|
3497
|
import tempfile
import re
import shutil
import requests
import io
import urllib
from mitmproxy.net import tcp
from mitmproxy.test import tutils
from pathod import language
from pathod import pathoc
from pathod import pathod
from pathod import test
def treader(bytes):
    """
    Construct a tcp.Reader object from bytes.
    """
    return tcp.Reader(io.BytesIO(bytes))
class DaemonTests:
    """Mixin that runs a pathod daemon for the duration of a test class.

    Subclasses tune behaviour via the class attributes below; the daemon
    is started once in setup_class and shut down in teardown_class.
    """

    nohang = False       # daemon option: never stall responses
    ssl = False          # serve (and connect) over TLS
    timeout = None       # daemon-side timeout, seconds or None
    hexdump = False      # hexdump logged request/response data
    ssloptions = None    # optional dict of pathod.SSLOptions kwargs
    nocraft = False      # disable response crafting on the daemon
    explain = True       # log an explanation of each crafted response

    @classmethod
    def setup_class(cls):
        # Build SSL options in a throwaway confdir, then start the daemon
        # with logging enabled and a small anchor for /anchor/* requests.
        opts = cls.ssloptions or {}
        cls.confdir = tempfile.mkdtemp()
        opts["confdir"] = cls.confdir
        so = pathod.SSLOptions(**opts)
        cls.d = test.Daemon(
            staticdir=tutils.test_data.path("pathod/data"),
            anchors=[
                (re.compile("/anchor/.*"), "202:da")
            ],
            ssl=cls.ssl,
            ssloptions=so,
            sizelimit=1 * 1024 * 1024,
            nohang=cls.nohang,
            timeout=cls.timeout,
            hexdump=cls.hexdump,
            nocraft=cls.nocraft,
            logreq=True,
            logresp=True,
            explain=cls.explain
        )

    @classmethod
    def teardown_class(cls):
        cls.d.shutdown()
        shutil.rmtree(cls.confdir)

    def teardown(self):
        # Per-test cleanup: wait for in-flight requests, then drop the log.
        self.d.wait_for_silence()
        self.d.clear_log()

    def _getpath(self, path, params=None):
        # Plain requests-based GET against the daemon (no pathoc client).
        scheme = "https" if self.ssl else "http"
        resp = requests.get(
            "%s://localhost:%s/%s" % (
                scheme,
                self.d.port,
                path
            ),
            verify=False,
            params=params
        )
        return resp

    def getpath(self, path, params=None):
        # GET *path* through a pathoc client; returns the parsed response.
        logfp = io.StringIO()
        c = pathoc.Pathoc(
            ("localhost", self.d.port),
            ssl=self.ssl,
            fp=logfp,
        )
        with c.connect():
            if params:
                path = path + "?" + urllib.parse.urlencode(params)
            resp = c.request("get:%s" % path)
            return resp

    def get(self, spec):
        # Fetch the crafted response for a pathod *spec* (served at /p/).
        logfp = io.StringIO()
        c = pathoc.Pathoc(
            ("localhost", self.d.port),
            ssl=self.ssl,
            fp=logfp,
        )
        with c.connect():
            resp = c.request(
                "get:/p/%s" % urllib.parse.quote(spec)
            )
            return resp

    def pathoc(
        self,
        specs,
        timeout=None,
        connect_to=None,
        ssl=None,
        ws_read_limit=None,
        use_http2=False,
    ):
        """
        Returns a (messages, text log) tuple.
        """
        # ssl=None means "inherit the class-level setting".
        if ssl is None:
            ssl = self.ssl
        logfp = io.StringIO()
        c = pathoc.Pathoc(
            ("localhost", self.d.port),
            ssl=ssl,
            ws_read_limit=ws_read_limit,
            timeout=timeout,
            fp=logfp,
            use_http2=use_http2,
        )
        with c.connect(connect_to):
            ret = []
            for i in specs:
                resp = c.request(i)
                if resp:
                    ret.append(resp)
            # Collect any remaining frames (e.g. websocket traffic).
            for frm in c.wait():
                ret.append(frm)
            c.stop()
        return ret, logfp.getvalue()
def render(r, settings=language.Settings()):
    """Resolve *r* against *settings*, serve it into memory, return bytes."""
    # NOTE(review): the default Settings() instance is created once at
    # definition time and shared across calls — pre-existing behaviour, kept.
    resolved = r.resolve(settings)
    buf = io.BytesIO()
    assert language.serve(resolved, buf, settings)
    return buf.getvalue()
|
mit
|
Yuriy-Leonov/nova
|
nova/api/openstack/compute/contrib/extended_floating_ips.py
|
15
|
1055
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
# Pure descriptor: this class only advertises the extension; the actual
# fixed_address handling lives in the floating IPs API code.
class Extended_floating_ips(extensions.ExtensionDescriptor):
    """Adds optional fixed_address to the add floating IP command."""

    # Human-readable extension name exposed in the API extension list.
    name = "ExtendedFloatingIps"
    # Alias used in URLs and extension queries.
    alias = "os-extended-floating-ips"
    # XML namespace advertised for this extension.
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "extended_floating_ips/api/v2")
    # Last-updated timestamp advertised to clients.
    updated = "2013-04-19T00:00:00+00:00"
|
apache-2.0
|
ROMFactory/android_external_chromium_org
|
build/protoc_java.py
|
90
|
1317
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate java source files from protobufs
Usage:
protoc_java.py {protoc} {proto_path} {java_out} {stamp_file} {proto_files}
This is a helper file for the genproto_java action in protoc_java.gypi.
It performs the following steps:
1. Deletes all old sources (ensures deleted classes are not part of new jars).
2. Creates source directory.
3. Generates Java files using protoc.
4. Creates a new stamp file.
"""
import os
import shutil
import subprocess
import sys
def main(argv):
    """Regenerate Java sources from protobufs and touch the stamp file.

    argv layout: [script, protoc_path, proto_path, java_out, stamp_file,
    proto_file, ...].  Returns protoc's exit status (0 on success), or 1
    when too few arguments are given.
    """
    if len(argv) < 5:
        usage()
        return 1
    protoc_path, proto_path, java_out, stamp_file = argv[1:5]
    proto_files = argv[5:]

    # Delete all old sources so classes removed from the .proto files do
    # not linger in freshly built jars.
    if os.path.exists(java_out):
        shutil.rmtree(java_out)

    # Create the (now empty) source directory.
    os.makedirs(java_out)

    # Generate Java files using protoc.
    ret = subprocess.call(
        [protoc_path, '--proto_path', proto_path, '--java_out', java_out]
        + proto_files)

    if ret == 0:
        # Touch the stamp file.  Use open(): the file() builtin used here
        # before is Python 2 only and was removed in Python 3.
        with open(stamp_file, 'a'):
            os.utime(stamp_file, None)
    return ret

def usage():
    """Print the module docstring as a usage message."""
    print(__doc__)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
bsd-3-clause
|
kambysese/mne-python
|
examples/preprocessing/plot_virtual_evoked.py
|
10
|
1912
|
"""
=======================
Remap MEG channel types
=======================
In this example, MEG data are remapped from one channel type to another.
This is useful to:
- visualize combined magnetometers and gradiometers as magnetometers
or gradiometers.
- run statistics from both magnetometers and gradiometers while
working with a single type of channels.
"""
# Author: Mainak Jas <mainak.jas@telecom-paristech.fr>
# License: BSD (3-clause)
import mne
from mne.datasets import sample

print(__doc__)

# Read the evoked response for the "Left Auditory" condition,
# baseline-corrected over the pre-stimulus interval.
data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
evoked = mne.read_evokeds(fname, condition='Left Auditory', baseline=(None, 0))

###############################################################################
# First, let's remap gradiometers to magnetometers, and plot
# the original and remapped topomaps of the magnetometers.

# go from grad + mag to mag and plot original mag
virt_evoked = evoked.as_type('mag')
evoked.plot_topomap(ch_type='mag', title='mag (original)', time_unit='s')

###############################################################################
# plot interpolated grad + mag
virt_evoked.plot_topomap(ch_type='mag', time_unit='s',
                         title='mag (interpolated from mag + grad)')

###############################################################################
# Now, we remap magnetometers to gradiometers, and plot
# the original and remapped topomaps of the gradiometers

# go from grad + mag to grad and plot original grad
virt_evoked = evoked.as_type('grad')
evoked.plot_topomap(ch_type='grad', title='grad (original)', time_unit='s')

###############################################################################
# plot interpolated grad + mag
virt_evoked.plot_topomap(ch_type='grad', time_unit='s',
                         title='grad (interpolated from mag + grad)')
|
bsd-3-clause
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.