code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot import model, utils
from .message import Message
class Keyword(model.Keyword):
    """Result of executing a single keyword."""
    __slots__ = ['kwname', 'libname', 'status', 'starttime', 'endtime', 'message']
    message_class = Message

    def __init__(self, kwname='', libname='', doc='', args=(), assign=(),
                 tags=(), timeout='', type='kw', status='FAIL', starttime=None,
                 endtime=None):
        model.Keyword.__init__(self, '', doc, args, assign, tags, timeout, type)
        # Keyword name without the library/resource prefix, and the name of
        # the library or resource file that contains it.
        self.kwname = kwname
        self.libname = libname
        # Execution status as a string, 'PASS' or 'FAIL'.
        self.status = status
        # Start/end timestamps in ``%Y%m%d %H:%M:%S.%f`` format.
        self.starttime = starttime
        self.endtime = endtime
        # Status message; only populated for suite teardown keywords.
        self.message = ''

    @property
    def elapsedtime(self):
        """Elapsed execution time of the keyword in milliseconds."""
        return utils.get_elapsed_time(self.starttime, self.endtime)

    @property
    def name(self):
        """Full name, ``libname.kwname`` when a library name is known."""
        if self.libname:
            return '%s.%s' % (self.libname, self.kwname)
        return self.kwname

    @property
    def passed(self):
        """``True`` if the keyword did pass, ``False`` otherwise."""
        return self.status == 'PASS'
|
caio2k/RIDE
|
src/robotide/lib/robot/result/keyword.py
|
Python
|
apache-2.0
| 2,149
|
# -*- coding: utf-8 -*-
# * Copyright (c) 2009-2018. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pytest
from cytomine.models.social import *
from cytomine.tests.conftest import random_string
__author__ = "Rubens Ulysse <urubens@uliege.be>"
class TestPosition:
    def test_positions(self, connect, dataset):
        """Fetch positions of an image instance and, if any exist, one position."""
        image_id = dataset["image_instance"].id
        positions = PositionCollection().fetch_with_filter("imageinstance", image_id)
        assert isinstance(positions, PositionCollection)
        if len(positions) > 0:
            first = Position().fetch(positions[0].id)
            assert isinstance(first, Position)
class TestAnnotationAction:
    @pytest.mark.skip(reason="Not yet implemented in core (see https://github.com/Cytomine-ULiege/Cytomine-core/commit/ef3ab08d02a9daa43192fcaa7f7f045ca51e999a)")
    def test_annotationactions(self, connect, dataset):
        """Fetch annotation actions of an image instance and, if any, one action."""
        image_id = dataset["image_instance"].id
        actions = AnnotationActionCollection().fetch_with_filter("imageinstance", image_id)
        assert isinstance(actions, AnnotationActionCollection)
        if len(actions) > 0:
            first = AnnotationAction().fetch(actions[0].id)
            assert isinstance(first, AnnotationAction)
|
cytomine/Cytomine-python-client
|
cytomine/tests/test_social.py
|
Python
|
apache-2.0
| 1,941
|
#!/usr/bin/python3
"""Clean, rebuild and run every scons-based MPI test in the current directory.

Each subdirectory is treated as one test case: old artifacts are removed,
the test is recompiled, then executed with 4 MPI processes.  Build and run
output is captured in log files inside each test directory.
"""
import os

listTests = os.listdir('.')
print(listTests)
for test in listTests:
    # Only directories are test cases; plain files are skipped.
    # (Was `== True`, which is a redundant comparison to a boolean.)
    if os.path.isdir(test):
        os.chdir(test)
        print('cleaning environment for test: ', test)
        # Remove artifacts left over from previous runs.
        os.system('rm -rf logs mpiCode data logClean logCompilation logExecution')
        os.system('scons -c > logClean')
        print('compiling test: ', test)
        os.system('scons > logCompilation')
        print('executing test: ', test)
        os.system('mpirun -np 4 ./' + test + ' > logExecution')
        print('done: ', test)
        # Return to the parent directory before the next test.
        os.chdir('../')
|
montanier/pandora
|
tests/functionalTests/cppFunctionalTests/parallelTests/executeTestsLocal.py
|
Python
|
lgpl-3.0
| 575
|
"""
tests for dns_parser.py
The latest version of this package is available at:
<https://github.com/jantman/pydnstest>
##################################################################################
Copyright 2013-2017 Jason Antman <jason@jasonantman.com>
This file is part of pydnstest.
pydnstest is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pydnstest is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with pydnstest. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
##################################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/pydnstest> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
##################################################################################
AUTHORS:
Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
"""
import pytest
import sys
import os
from pydnstest.parser import DnstestParser
from pyparsing import ParseException
class TestLanguageParsing:
    """
    Class to test the natural language parsing features of dnstest.py
    Grammars supported:
    'add (record|name|entry)? <hostname_or_fqdn> (with ?)(value|address|target)? <hostname_fqdn_or_ip>'
    'remove (record|name|entry)? <hostname_or_fqdn>'
    'rename (record|name|entry)? <hostname_or_fqdn> (with ?)(value ?) <value> to <hostname_or_fqdn>'
    'change (record|name|entry)? <hostname_or_fqdn> to <hostname_fqdn_or_ip>'
    'confirm <hostname_or_fqdn>'
    """

    # Each pair is (input sentence, exact dict parse_line() must return);
    # a parsed_dict of None means parsing is expected to fail.
    @pytest.mark.parametrize(("line", "parsed_dict"), [
        ("add fooHostOne value fooHostTwo", {'operation': 'add', 'hostname': 'fooHostOne', 'value': 'fooHostTwo'}),
        ("add foobar value 10.104.92.243", {'operation': 'add', 'hostname': 'foobar', 'value': '10.104.92.243'}),
        ("add entry foobar with value baz", {'operation': 'add', 'hostname': 'foobar', 'value': 'baz'}),
        ("add record foobar.example.com target blam", {'operation': 'add', 'hostname': 'foobar.example.com', 'value': 'blam'}),
        ("add name foobar address 192.168.0.139", {'operation': 'add', 'hostname': 'foobar', 'value': '192.168.0.139'}),
        ("add foobar.example.com with target 172.16.132.10", {'operation': 'add', 'hostname': 'foobar.example.com', 'value': '172.16.132.10'}),
        ("add foobar.hosts.example.com value 172.16.132.10", {'operation': 'add', 'hostname': 'foobar.hosts.example.com', 'value': '172.16.132.10'}),
        ("remove fooHostOne", {'operation': 'remove', 'hostname': 'fooHostOne'}),
        ("remove record fooHostOne", {'operation': 'remove', 'hostname': 'fooHostOne'}),
        ("remove name fooHostOne", {'operation': 'remove', 'hostname': 'fooHostOne'}),
        ("remove entry fooHostOne", {'operation': 'remove', 'hostname': 'fooHostOne'}),
        ("remove foo.example.com", {'operation': 'remove', 'hostname': 'foo.example.com'}),
        ("remove record foo.example.com", {'operation': 'remove', 'hostname': 'foo.example.com'}),
        ("remove name foo.example.com", {'operation': 'remove', 'hostname': 'foo.example.com'}),
        ("remove entry foo.example.com", {'operation': 'remove', 'hostname': 'foo.example.com'}),
        ("remove entry foo.bar.baz.example.com", {'operation': 'remove', 'hostname': 'foo.bar.baz.example.com'}),
        ("rename fooHostOne with target targ to fooHostTwo", {'operation': 'rename', 'hostname': 'fooHostOne', 'newname': 'fooHostTwo', 'value': 'targ'}),
        ("rename entry foobar foo.bar.net to baz", {'operation': 'rename', 'hostname': 'foobar', 'newname': 'baz', 'value': 'foo.bar.net'}),
        ("rename record foobar.example.com with address 1.2.3.4 to blam", {'operation': 'rename', 'hostname': 'foobar.example.com', 'newname': 'blam', 'value': '1.2.3.4'}),
        ("rename name foobar 1.2.3.5 to baz.example.com", {'operation': 'rename', 'hostname': 'foobar', 'newname': 'baz.example.com', 'value': '1.2.3.5'}),
        ("rename foobar.example.com value 1.2.3.4 to baz.blam.hosts.example.com", {'operation': 'rename', 'hostname': 'foobar.example.com', 'newname': 'baz.blam.hosts.example.com', 'value': '1.2.3.4'}),
        ("rename foobar.hosts.example.com with value baz to blam", {'operation': 'rename', 'hostname': 'foobar.hosts.example.com', 'newname': 'blam', 'value': 'baz'}),
        ("rename foo.subdomain.example.com with value 10.188.8.76 to bar.subdomain.example.com", {'operation': 'rename', 'hostname': 'foo.subdomain.example.com', 'newname': 'bar.subdomain.example.com', 'value': '10.188.8.76'}),
        ("change fooHostOne to fooHostTwo", {'operation': 'change', 'hostname': 'fooHostOne', 'value': 'fooHostTwo'}),
        ("change foobar to 10.104.92.243", {'operation': 'change', 'hostname': 'foobar', 'value': '10.104.92.243'}),
        ("change entry foobar to baz", {'operation': 'change', 'hostname': 'foobar', 'value': 'baz'}),
        ("change record foobar.example.com to blam", {'operation': 'change', 'hostname': 'foobar.example.com', 'value': 'blam'}),
        ("change name foobar to 192.168.0.139", {'operation': 'change', 'hostname': 'foobar', 'value': '192.168.0.139'}),
        ("change foobar.example.com to 172.16.132.10", {'operation': 'change', 'hostname': 'foobar.example.com', 'value': '172.16.132.10'}),
        ("change foobar.hosts.example.com to 172.16.132.10", {'operation': 'change', 'hostname': 'foobar.hosts.example.com', 'value': '172.16.132.10'}),
        ("change entry foobar.hosts.example.com to 172.16.132.10", {'operation': 'change', 'hostname': 'foobar.hosts.example.com', 'value': '172.16.132.10'}),
        ("change name foobar to foobar.hosts.example.com", {'operation': 'change', 'hostname': 'foobar', 'value': 'foobar.hosts.example.com'}),
        ("change name foobar to foobar.example.com", {'operation': 'change', 'hostname': 'foobar', 'value': 'foobar.example.com'}),
        ("confirm foo.example.com", {'operation': 'confirm', 'hostname': 'foo.example.com'}),
        ("confirm record foo.example.com", {'operation': 'confirm', 'hostname': 'foo.example.com'}),
        ("confirm entry foo.example.com", {'operation': 'confirm', 'hostname': 'foo.example.com'}),
        ("confirm name foo.example.com", {'operation': 'confirm', 'hostname': 'foo.example.com'}),
        ("confirm 1.2.3.4", None),
        ("confirm record 1.2.3.4", None),
        ("confirm entry 1.2.3.4", None),
        ("confirm name 1.2.3.4", None),
        ("confirm foo", {'operation': 'confirm', 'hostname': 'foo'}),
        ("confirm record foo", {'operation': 'confirm', 'hostname': 'foo'}),
        ("confirm entry foo", {'operation': 'confirm', 'hostname': 'foo'}),
        ("confirm name foo", {'operation': 'confirm', 'hostname': 'foo'}),
        ("confirm m.example.com", {'operation': 'confirm', 'hostname': 'm.example.com'}),
        ("confirm foo.m.example.com", {'operation': 'confirm', 'hostname': 'foo.m.example.com'}),
        ("confirm m", {'operation': 'confirm', 'hostname': 'm'}),
        ("confirm m._foo.example.com", {'operation': 'confirm', 'hostname': 'm._foo.example.com'}),
        ("confirm _bar.example.com", {'operation': 'confirm', 'hostname': '_bar.example.com'}),
        ("add record _foobar.example.com address 1.2.3.4", {'operation': 'add', 'hostname': '_foobar.example.com', 'value': '1.2.3.4'}),
        ("add record foobar._discover.example.com target blam", {'operation': 'add', 'hostname': 'foobar._discover.example.com', 'value': 'blam'})
    ])
    def test_parse_should_succeed(self, line, parsed_dict):
        """parse_line(line) should produce exactly parsed_dict."""
        foo = None
        try:
            p = DnstestParser()
            foo = p.parse_line(line)
        except ParseException:
            # assert will fail, no need to do anything here
            pass
        assert foo == parsed_dict

    # BUG FIX: the "rename ... EXTRAWORD" string was missing its trailing
    # comma, so Python's implicit string concatenation silently merged it
    # with "change foobar" into one bogus test case, and neither intended
    # case was actually exercised.
    @pytest.mark.parametrize("line", [
        "add extraword record foobar.example.com target blam",
        "add foobar value blam extraword",
        "remove foo blam",
        "rename foobar.example.com to baz.blam.hosts.example.com EXTRAWORD",
        "change foobar",
        "change foobar to",
        "change foobar.hosts.example.com to",
        "add m.foo.example.com with target foo.example.com.edgesuite.net.",
    ])
    def test_parse_should_raise_exception(self, line):
        """Each malformed line must raise ParseException."""
        with pytest.raises(ParseException):
            p = DnstestParser()
            p.parse_line(line)

    def test_get_grammar(self):
        """get_grammar() must return the documented grammar strings, in order."""
        p = DnstestParser()
        expected = ['add (record|name|entry)? <hostname_or_fqdn> (with ?)(value|address|target)? <hostname_fqdn_or_ip>',
                    'remove (record|name|entry)? <hostname_or_fqdn>',
                    'rename (record|name|entry)? <hostname_or_fqdn> (with ?)(value ?) <value> to <hostname_or_fqdn>',
                    'change (record|name|entry)? <hostname_or_fqdn> to <hostname_fqdn_or_ip>',
                    'confirm (record|name|entry)? <hostname_or_fqdn>',
                    ]
        result = p.get_grammar()
        assert result == expected
|
jantman/pydnstest
|
pydnstest/tests/dnstest_parser_test.py
|
Python
|
agpl-3.0
| 9,750
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from .core import (
ANNOTATION_KEY,
FilterValidationError,
OPERATORS,
FilterRegistry,
Filter,
Or,
And,
Not,
ValueFilter,
AgeFilter,
EventFilter,
ReduceFilter,
)
from .config import ConfigCompliance
from .health import HealthEventFilter
from .iamaccess import CrossAccountAccessFilter, PolicyChecker
from .iamanalyzer import AccessAnalyzer
from .metrics import MetricsFilter, ShieldMetrics
from .vpc import DefaultVpcBase
|
thisisshi/cloud-custodian
|
c7n/filters/__init__.py
|
Python
|
apache-2.0
| 548
|
# Odoo/OpenERP addon manifest: FairMarket customisations of website_sale.
{
    'name': 'eCommerce FairMarket',
    'category': 'Website',
    'summary': 'Modification in website_sale for FairMarket',
    'website': 'https://www.odoo.com/page/e-commerce',
    'version': '1.0',
    'description': """
OpenERP E-Commerce modifications for FairMarket
==================
""",
    'author': 'Punto0 - FairCoop',
    # Requires the stock e-commerce and sales addons.
    'depends': ['website_sale','sale'],
    'data': [
        'views/templates.xml',
    ],
    'demo': [
        #'data/demo.xml',
    ],
    #'qweb': ['static/src/xml/*.xml'],
    'installable': True,
    # Extends other modules rather than acting as a standalone application.
    'application': False,
}
|
Punto0/addons-fm
|
website_sale_fm/__openerp__.py
|
Python
|
agpl-3.0
| 570
|
# Copyright 2012 Patrick Varilly, Stefano Angioletti-Uberti
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/env python
# Python script to compare explicit-tether bond probabilities between two
# plates with competing linkages against Monte Carlo results
#
# See examples/competingLinkages for the mean-field analogues
#
# Note: The Monte Carlo results were calculated using the same grafting
# points by Bortolo's Fortran program, given in CompetingData/4p. The
# results are in CompetingData/nX_G, where X = a/b for strong/weak bonds
# (alpha-alpha' vs [alpha-beta' and beta-alpha']) and G is the difference in
# binding strength between strong and weak bonds. Each file has two
# columns: the value of beta_DeltaG0 for strong bonds and the corresponding
# number of X-type bonds formed relative to the maximum possible number of
# weak bonds.
import numpy as np
from math import pi, sqrt
import subprocess
import scipy.interpolate
import dnacc
from dnacc.units import nm
# Basic plate properties
L = 20 * nm                  # tether length parameter passed to dnacc.Plates
S = 0.75 * sqrt(2.0)         # grafting spacing in units of L
sigma = 1 / (S * L) ** 2     # grafting density (tethers per unit area)
NAlphas = 500                # number of alpha tethers (one quarter of all tethers)
num_tethers = 4 * NAlphas    # alpha + beta on lower plate, alpha' + beta' on upper
boxL = sqrt(NAlphas / sigma) # side length of the periodic simulation box
def setup():
    """Build the two periodic plates with explicit tether positions and
    pre-compute the configurational-entropy factors.

    Populates the module-level globals ``plates`` and the tether-id sets
    ``ALPHAS``, ``ALPHA_PS``, ``BETAS``, ``BETA_PS``.
    """
    global plates, ALPHAS, ALPHA_PS, BETAS, BETA_PS

    # Assign grafting points
    #grafting_pts_ALPHA = boxL * np.random.random_sample( (NAlphas,2) )
    #grafting_pts_ALPHA_P = boxL * np.random.random_sample( (NAlphas,2) )
    #grafting_pts_BETA = boxL * np.random.random_sample( (NAlphas,2) )
    #grafting_pts_BETA_P = boxL * np.random.random_sample( (NAlphas,2) )
    # Reuse the same grafting points as the Fortran Monte Carlo run
    # (see file header); columns are paired (x, y) coordinates.
    all_grafting_pts = L * np.loadtxt('CompetingData/4p', skiprows=1)
    grafting_pts_ALPHA = all_grafting_pts[:NAlphas, 0:2]
    grafting_pts_ALPHA_P = all_grafting_pts[:NAlphas, 2:4]
    grafting_pts_BETA = all_grafting_pts[NAlphas:2 * NAlphas, 0:2]
    grafting_pts_BETA_P = all_grafting_pts[NAlphas:2 * NAlphas, 2:4]

    # Set up system
    plates = dnacc.Plates(boxL, boxL, periodic=True)
    ALPHAS, ALPHA_PS, BETAS, BETA_PS = set(), set(), set(), set()

    # Lower plate carries alpha and beta tethers...
    plates.set_tether_prototype(plate='lower', L=L, sigma=sigma)
    for pt in grafting_pts_ALPHA:
        ALPHAS.add(plates.add_tether(sticky_end='alpha', pos=pt))
    for pt in grafting_pts_BETA:
        BETAS.add(plates.add_tether(sticky_end='beta', pos=pt))

    # ...upper plate carries the complementary alpha' and beta' tethers.
    plates.set_tether_prototype(plate='upper', L=L, sigma=sigma)
    for pt in grafting_pts_ALPHA_P:
        ALPHA_PS.add(plates.add_tether(sticky_end='alpha_p', pos=pt))
    for pt in grafting_pts_BETA_P:
        BETA_PS.add(plates.add_tether(sticky_end='beta_p', pos=pt))

    # Set up its initial separation and energy scales (from here on,
    # the configurational binding entropies of all pairs of tethers
    # are fixed, so its much quicker to change binding energies)
    #
    # It doesn't matter what beta_DeltaG0 is set to at this stage, other
    # than it's something different from infinity for all the pairs
    # of strands that may bind during this run
    #
    print "Initializing configurational entropy factors..."
    plates.beta_DeltaG0['alpha', 'alpha_p'] = -10
    plates.beta_DeltaG0['alpha', 'beta_p'] = -5
    plates.beta_DeltaG0['beta', 'alpha_p'] = -5
    plates.separation = L
    plates.update()
    print "Done"
# For a given difference between strong and weak bond binding strength,
# map out bonding probabilities
def do_it(beta_DeltaDeltaG):
    """Sweep the strong-bond strength from 0 to -50 kT and write bond counts.

    beta_DeltaDeltaG -- gap (in kT) between the strong alpha-alpha' bonds
    and the weak alpha-beta' / beta-alpha' bonds.  Results go to
    'competing_deltaDelta<G>.txt', one line per strong-bond strength.
    Requires setup() to have been called first (uses module globals).
    """
    print "Looking at beta_DeltaDeltaG = %g" % beta_DeltaDeltaG
    with open('competing_deltaDelta%g.txt' % beta_DeltaDeltaG, 'w') as f:
        for beta_DeltaG0Strong in xrange(0, -51, -1):
            print " beta_DeltaG0Strong = %g" % beta_DeltaG0Strong
            beta_DeltaG0Weak = beta_DeltaG0Strong + beta_DeltaDeltaG
            plates.beta_DeltaG0['alpha', 'alpha_p'] = beta_DeltaG0Strong
            plates.beta_DeltaG0['alpha', 'beta_p'] = beta_DeltaG0Weak
            plates.beta_DeltaG0['beta', 'alpha_p'] = beta_DeltaG0Weak
            # Only the binding energies changed, so skip the expensive
            # configurational-entropy recalculation.
            plates.update(DeltaG0_only=True)

            # Count strong and weak bonds
            num_strong_bonds = plates.count_bonds(ALPHAS, ALPHA_PS)
            num_weak_bonds = (plates.count_bonds(ALPHAS, BETA_PS) +
                              plates.count_bonds(BETAS, ALPHA_PS))

            # Output
            f.write('%.2f\t%.3f\t%.3f\n' %
                    (beta_DeltaG0Strong, num_strong_bonds, num_weak_bonds))
# Main module
setup()
# Map out bond probabilities for several strong/weak binding-strength gaps
# (beta_DeltaDeltaG in units of kT), matching the Monte Carlo data sets.
do_it(beta_DeltaDeltaG=3)
do_it(beta_DeltaDeltaG=5)
do_it(beta_DeltaDeltaG=8)
do_it(beta_DeltaDeltaG=11)
do_it(beta_DeltaDeltaG=14)
|
patvarilly/DNACC
|
examples/explicit_competing_linkages/explicit_competing_linkages.py
|
Python
|
gpl-3.0
| 5,092
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration altering the `questionnaires` M2M field on
    # `campaign` — presumably adding blank=True; confirm against 0002.

    dependencies = [
        ('email_campaigns', '0002_auto_20160722_1657'),
    ]

    operations = [
        migrations.AlterField(
            model_name='campaign',
            name='questionnaires',
            field=models.ManyToManyField(related_name='campaigns', to='questionnaire.Questionnaire', blank=True),
        ),
    ]
|
mpetyx/psymbiosys-questionnaire
|
email_campaigns/migrations/0003_auto_20160907_1146.py
|
Python
|
mit
| 483
|
# -*- coding: utf-8 -*-
"""Tests of phy spike sorting commands."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
from phy.scripts import main
#------------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------------
def test_version():
    # Smoke test: `phy -v` should print the version and exit cleanly.
    main('-v')


def test_cluster_auto_prm(chdir_tempdir):
    # Download the sample dataset, then run spike detection and automatic
    # clustering on channel group 0 (in a temporary working directory).
    main('download hybrid_10sec.dat')
    main('download hybrid_10sec.prm')
    main('detect hybrid_10sec.prm')
    main('cluster-auto hybrid_10sec.prm --channel-group=0')


def test_quick_start(chdir_tempdir):
    # End-to-end `spikesort` command on the sample dataset.
    main('download hybrid_10sec.dat')
    main('download hybrid_10sec.prm')
    main('spikesort hybrid_10sec.prm')
# TODO: implement auto-close
# main('cluster-manual hybrid_10sec.kwik')
# def test_traces(chdir_tempdir):
# TODO: implement auto-close
# main('download hybrid_10sec.dat')
# main('traces --n-channels=32 --dtype=int16 '
# '--sample-rate=20000 --interval=0,3 hybrid_10sec.dat')
|
nippoo/phy
|
tests/scripts/test_phy_spikesort.py
|
Python
|
bsd-3-clause
| 1,152
|
class Solution:
    def exist(self, board, word):
        """Return True if `word` can be traced in `board` by moving between
        horizontally/vertically adjacent cells, using each cell at most once.

        :type board: List[List[str]]
        :type word: str
        :rtype: bool
        """
        # Guard degenerate grids up front.  (The original checked
        # `if not board` inside the row loop, where it could never fire:
        # the loop body only runs when board is non-empty.)
        if not board or not board[0]:
            return False
        for i in range(len(board)):
            for j in range(len(board[0])):
                if self.dfs(i, j, board, word, 0):
                    return True
        return False

    def dfs(self, i, j, board, word, index):
        """Depth-first search for word[index:] starting at cell (i, j).

        Visited cells are temporarily overwritten with '.' and restored on
        backtrack, so the board is unchanged when the call returns.
        Assumes '.' never occurs in `word` — same assumption as the
        original implementation.
        """
        if index == len(word):
            return True
        # Out of bounds or character mismatch: dead end.
        if i < 0 or i >= len(board) or j < 0 or j >= len(board[0]):
            return False
        if board[i][j] != word[index]:
            return False
        saved = board[i][j]
        board[i][j] = '.'  # mark as visited
        found = (self.dfs(i - 1, j, board, word, index + 1) or
                 self.dfs(i + 1, j, board, word, index + 1) or
                 self.dfs(i, j - 1, board, word, index + 1) or
                 self.dfs(i, j + 1, board, word, index + 1))
        board[i][j] = saved  # restore on backtrack
        return found
|
MingfeiPan/leetcode
|
backtracking/79.py
|
Python
|
apache-2.0
| 1,039
|
import sqlalchemy
from www import server
from www import login
from common.config import config
import common.rpc
import datetime
import pytz
import flask
import common.storm
from common import googlecalendar
@server.app.route("/api/stormcount")
def stormcount():
return flask.jsonify({
'twitch-subscription': common.storm.get(server.db.engine, server.db.metadata, 'twitch-subscription'),
'twitch-resubscription': common.storm.get(server.db.engine, server.db.metadata, 'twitch-resubscription'),
'twitch-follow': common.storm.get(server.db.engine, server.db.metadata, 'twitch-follow'),
'twitch-cheer': common.storm.get(server.db.engine, server.db.metadata, 'twitch-cheer'),
'patreon-pledge': common.storm.get(server.db.engine, server.db.metadata, 'patreon-pledge'),
})
@server.app.route("/api/next")
async def nextstream():
return await googlecalendar.get_next_event_text(googlecalendar.CALENDAR_LRL, verbose=False)
@server.app.route("/api/show/<show>")
@login.with_minimal_session
async def set_show(session, show):
if not session['user']['is_mod']:
return "%s is not a mod" % (session['user']['display_name'])
if show == "off":
show = ""
await common.rpc.bot.set_show(show)
return ""
@server.app.route("/api/game")
async def get_game():
game_id = await common.rpc.bot.get_game_id()
if game_id is None:
return "-"
show_id = await common.rpc.bot.get_show_id()
games = server.db.metadata.tables["games"]
with server.db.engine.begin() as conn:
return conn.execute(sqlalchemy.select([games.c.name]).where(games.c.id == game_id)).first()[0]
@server.app.route("/api/show")
async def get_show():
show_id = await common.rpc.bot.get_show_id()
shows = server.db.metadata.tables["shows"]
with server.db.engine.begin() as conn:
show, = conn.execute(sqlalchemy.select([shows.c.string_id]).where(shows.c.id == show_id)).first()
return show or "-"
@server.app.route("/api/tweet")
@login.with_minimal_session
async def get_tweet(session):
tweet = None
if session['user']['is_mod']:
tweet = await common.rpc.bot.get_tweet()
return tweet or "-"
@server.app.route("/api/disconnect")
@login.with_minimal_session
async def disconnect(session):
if session['user']['is_mod']:
await common.rpc.bot.disconnect_from_chat()
return flask.jsonify(status="OK")
else:
return flask.jsonify(status="ERR")
# Template for building a public clip URL from its slug.
CLIP_URL = "https://clips.twitch.tv/{}"


@server.app.route("/api/clips")
@login.with_minimal_session
async def get_clips(session):
    # Mod-only clip listing.  Query parameters:
    #   days -- how far back to look (default 14)
    #   full -- truthy: JSON records for all non-deleted clips;
    #           falsy: plain-text URL list of clips with rating == True.
    if not session['user']['is_mod']:
        return flask.jsonify(status="ERR")
    days = float(flask.request.values.get('days', 14))
    startdt = datetime.datetime.now(pytz.UTC) - datetime.timedelta(days=days)
    full = int(flask.request.values.get('full', 0))
    clips = server.db.metadata.tables["clips"]
    with server.db.engine.begin() as conn:
        if full:
            clipdata = conn.execute(sqlalchemy.select(
                [clips.c.slug, clips.c.title, clips.c.vodid, clips.c.rating])
                .where(clips.c.time >= startdt)
                .where(clips.c.deleted == False)
                .order_by(clips.c.time.asc())).fetchall()
            clipdata = [
                {
                    'slug': slug, 'title': title, 'vodid': vodid, 'rating': rating,
                    'url': CLIP_URL.format(slug),
                }
                for slug, title, vodid, rating in clipdata
            ]
            return flask.jsonify(clipdata)
        else:
            clipdata = conn.execute(sqlalchemy.select([clips.c.slug])
                .where(clips.c.rating == True)
                .where(clips.c.time >= startdt)
                .where(clips.c.deleted == False)
                .order_by(clips.c.time.asc())).fetchall()
            clipdata = "\n".join(CLIP_URL.format(slug) for slug, in clipdata)
            return flask.wrappers.Response(clipdata, mimetype="text/plain")
@server.app.route("/api/polls")
async def get_polls():
data = await common.rpc.bot.get_polls()
return flask.jsonify(data)
|
andreasots/lrrbot
|
www/api.py
|
Python
|
apache-2.0
| 3,738
|
# Copyright (c) 2005 The Regents of The University of Michigan
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Gabe Black
# metric prefixes (decimal, powers of 1000, as floats)
exa = 1.0e18
peta = 1.0e15
tera = 1.0e12
giga = 1.0e9
mega = 1.0e6
kilo = 1.0e3
milli = 1.0e-3
micro = 1.0e-6
nano = 1.0e-9
pico = 1.0e-12
femto = 1.0e-15
atto = 1.0e-18

# power of 2 prefixes (binary, powers of 1024, as ints)
kibi = 1024
mebi = kibi * 1024
gibi = mebi * 1024
tebi = gibi * 1024
pebi = tebi * 1024
exbi = pebi * 1024
# memory size configuration stuff
def toFloat(value):
    """Convert a string with an optional suffix to a float.

    Binary suffixes (Ei, Pi, Ti, Gi, Mi, ki) scale by powers of 1024;
    metric suffixes (E, P, T, G, M, k, m, u, n, p, f) by powers of 1000.
    A plain number string is converted directly.  Raises TypeError for
    non-string input.
    """
    if not isinstance(value, str):
        raise TypeError, "wrong type '%s' should be str" % type(value)
    # Binary suffixes first so 'ki' etc. are not mistaken for metric 'k'.
    if value.endswith('Ei'):
        return float(value[:-2]) * exbi
    elif value.endswith('Pi'):
        return float(value[:-2]) * pebi
    elif value.endswith('Ti'):
        return float(value[:-2]) * tebi
    elif value.endswith('Gi'):
        return float(value[:-2]) * gibi
    elif value.endswith('Mi'):
        return float(value[:-2]) * mebi
    elif value.endswith('ki'):
        return float(value[:-2]) * kibi
    elif value.endswith('E'):
        return float(value[:-1]) * exa
    elif value.endswith('P'):
        return float(value[:-1]) * peta
    elif value.endswith('T'):
        return float(value[:-1]) * tera
    elif value.endswith('G'):
        return float(value[:-1]) * giga
    elif value.endswith('M'):
        return float(value[:-1]) * mega
    elif value.endswith('k'):
        return float(value[:-1]) * kilo
    elif value.endswith('m'):
        return float(value[:-1]) * milli
    elif value.endswith('u'):
        return float(value[:-1]) * micro
    elif value.endswith('n'):
        return float(value[:-1]) * nano
    elif value.endswith('p'):
        return float(value[:-1]) * pico
    elif value.endswith('f'):
        return float(value[:-1]) * femto
    else:
        return float(value)
def toInteger(value):
value = toFloat(value)
result = long(value)
if value != result:
raise ValueError, "cannot convert '%s' to integer" % value
return result
_bool_dict = {
'true' : True, 't' : True, 'yes' : True, 'y' : True, '1' : True,
'false' : False, 'f' : False, 'no' : False, 'n' : False, '0' : False
}
def toBool(value):
if not isinstance(value, str):
raise TypeError, "wrong type '%s' should be str" % type(value)
value = value.lower()
result = _bool_dict.get(value, None)
if result == None:
raise ValueError, "cannot convert '%s' to bool" % value
return result
def toFrequency(value):
    """Convert a 'THz'/'GHz'/'MHz'/'kHz'/'Hz'-suffixed string to Hz.

    Raises ValueError when no recognized Hz suffix is present.
    """
    if not isinstance(value, str):
        raise TypeError, "wrong type '%s' should be str" % type(value)
    if value.endswith('THz'):
        return float(value[:-3]) * tera
    elif value.endswith('GHz'):
        return float(value[:-3]) * giga
    elif value.endswith('MHz'):
        return float(value[:-3]) * mega
    elif value.endswith('kHz'):
        return float(value[:-3]) * kilo
    elif value.endswith('Hz'):
        return float(value[:-2])
    raise ValueError, "cannot convert '%s' to frequency" % value

def toLatency(value):
    """Convert a 'ps'/'ns'/'us'/'ms'/'s'-suffixed string to seconds.

    Raises ValueError when no recognized seconds suffix is present.
    """
    if not isinstance(value, str):
        raise TypeError, "wrong type '%s' should be str" % type(value)
    if value.endswith('ps'):
        return float(value[:-2]) * pico
    elif value.endswith('ns'):
        return float(value[:-2]) * nano
    elif value.endswith('us'):
        return float(value[:-2]) * micro
    elif value.endswith('ms'):
        return float(value[:-2]) * milli
    elif value.endswith('s'):
        return float(value[:-1])
    raise ValueError, "cannot convert '%s' to latency" % value
def anyToLatency(value):
    """result is a clock period, in seconds

    Accepts either a frequency string ('2GHz' -> 0.5ns period) or a
    latency string ('5ns') directly.
    """
    if not isinstance(value, str):
        raise TypeError, "wrong type '%s' should be str" % type(value)
    try:
        # Try as a frequency first and invert to a period.
        val = toFrequency(value)
        if val != 0:
            val = 1 / val
        return val
    except ValueError:
        pass
    try:
        val = toLatency(value)
        return val
    except ValueError:
        pass
    raise ValueError, "cannot convert '%s' to clock period" % value

def anyToFrequency(value):
    """result is a frequency, in Hz

    Accepts either a frequency string ('2GHz') directly or a latency
    string ('5ns' -> 200MHz).  (Docstring previously mis-copied from
    anyToLatency and claimed a clock period.)
    """
    if not isinstance(value, str):
        raise TypeError, "wrong type '%s' should be str" % type(value)
    try:
        val = toFrequency(value)
        return val
    except ValueError:
        pass
    try:
        # Try as a latency and invert to a frequency.
        val = toLatency(value)
        if val != 0:
            val = 1 / val
        return val
    except ValueError:
        pass
    raise ValueError, "cannot convert '%s' to clock period" % value
def toNetworkBandwidth(value):
if not isinstance(value, str):
raise TypeError, "wrong type '%s' should be str" % type(value)
if value.endswith('Tbps'):
return float(value[:-4]) * tera
elif value.endswith('Gbps'):
return float(value[:-4]) * giga
elif value.endswith('Mbps'):
return float(value[:-4]) * mega
elif value.endswith('kbps'):
return float(value[:-4]) * kilo
elif value.endswith('bps'):
return float(value[:-3])
else:
return float(value)
raise ValueError, "cannot convert '%s' to network bandwidth" % value
def toMemoryBandwidth(value):
if not isinstance(value, str):
raise TypeError, "wrong type '%s' should be str" % type(value)
if value.endswith('PB/s'):
return float(value[:-4]) * pebi
elif value.endswith('TB/s'):
return float(value[:-4]) * tebi
elif value.endswith('GB/s'):
return float(value[:-4]) * gibi
elif value.endswith('MB/s'):
return float(value[:-4]) * mebi
elif value.endswith('kB/s'):
return float(value[:-4]) * kibi
elif value.endswith('B/s'):
return float(value[:-3])
raise ValueError, "cannot convert '%s' to memory bandwidth" % value
def toMemorySize(value):
    """Convert a memory size string (e.g. '512MB') to a byte count.

    Suffix multipliers are binary (kB == 2**10 B, etc.) and the numeric
    prefix must be an integer (parsed with Python 2 `long`).  Raises
    TypeError for non-string input and ValueError for an unknown suffix.
    """
    if not isinstance(value, str):
        # Parenthesized raise form works in both Python 2 and Python 3.
        raise TypeError("wrong type '%s' should be str" % type(value))
    if value.endswith('PB'):
        return long(value[:-2]) * pebi
    elif value.endswith('TB'):
        return long(value[:-2]) * tebi
    elif value.endswith('GB'):
        return long(value[:-2]) * gibi
    elif value.endswith('MB'):
        return long(value[:-2]) * mebi
    elif value.endswith('kB'):
        return long(value[:-2]) * kibi
    elif value.endswith('B'):
        return long(value[:-1])
    raise ValueError("cannot convert '%s' to memory size" % value)
def toIpAddress(value):
    """Convert a dotted-quad string (e.g. '10.0.0.1') to a 32-bit integer.

    Raises TypeError for non-string input and ValueError if the string is
    not exactly four dot-separated integers in the range 0-255.
    """
    if not isinstance(value, str):
        # Parenthesized raise form works in both Python 2 and Python 3.
        raise TypeError("wrong type '%s' should be str" % type(value))
    # Renamed from 'bytes' to avoid shadowing the builtin.
    octets = value.split('.')
    if len(octets) != 4:
        raise ValueError('invalid ip address %s' % value)
    for octet in octets:
        if not 0 <= int(octet) <= 0xff:
            raise ValueError('invalid ip address %s' % value)
    return (int(octets[0]) << 24) | (int(octets[1]) << 16) | \
           (int(octets[2]) << 8) | (int(octets[3]) << 0)
def toIpNetmask(value):
    """Convert 'ip/netmask' to an (ip, prefix_len) tuple of integers.

    The netmask may be a prefix length ('10.0.0.0/8') or a dotted quad
    ('10.0.0.0/255.0.0.0'); the dotted form must describe a contiguous run
    of leading one bits.  Raises TypeError for non-string input and
    ValueError for a malformed address or netmask.
    """
    if not isinstance(value, str):
        # Parenthesized raise form works in both Python 2 and Python 3.
        raise TypeError("wrong type '%s' should be str" % type(value))
    (ip, netmask) = value.split('/')
    ip = toIpAddress(ip)
    netmaskParts = netmask.split('.')
    if len(netmaskParts) == 1:
        # Prefix-length form, e.g. '/24'.
        if not 0 <= int(netmask) <= 32:
            raise ValueError('invalid netmask %s' % netmask)
        return (ip, int(netmask))
    elif len(netmaskParts) == 4:
        # Dotted-quad form: find the prefix length whose mask matches.
        netmaskNum = toIpAddress(netmask)
        if netmaskNum == 0:
            return (ip, 0)
        testVal = 0
        for i in range(32):
            testVal |= (1 << (31 - i))
            if testVal == netmaskNum:
                return (ip, i + 1)
        # The mask bits were not a contiguous run of leading ones.
        raise ValueError('invalid netmask %s' % netmask)
    else:
        raise ValueError('invalid netmask %s' % netmask)
def toIpWithPort(value):
    """Convert 'ip:port' to an (ip, port) tuple of integers.

    Raises TypeError for non-string input and ValueError for a malformed
    address or an out-of-range port.
    """
    if not isinstance(value, str):
        # Parenthesized raise form works in both Python 2 and Python 3.
        raise TypeError("wrong type '%s' should be str" % type(value))
    (ip, port) = value.split(':')
    ip = toIpAddress(ip)
    if not 0 <= int(port) <= 0xffff:
        raise ValueError('invalid port %s' % port)
    return (ip, int(port))
def toVoltage(value):
    """Convert a voltage string ('1.5V' or '900mV') to volts.

    Raises TypeError for non-string input and ValueError for an unknown
    suffix.
    """
    if not isinstance(value, str):
        # Parenthesized raise form works in both Python 2 and Python 3.
        raise TypeError("wrong type '%s' should be str" % type(value))
    if value.endswith('mV'):
        return float(value[:-2]) * milli
    elif value.endswith('V'):
        return float(value[:-1])
    raise ValueError("cannot convert '%s' to voltage" % value)
|
bxshi/gem5
|
src/python/m5/util/convert.py
|
Python
|
bsd-3-clause
| 9,855
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core data model classes."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import collections
import copy
import logging
import os
import sys
import time
from config import ConfigProperty
import counters
from counters import PerfCounter
from entities import BaseEntity
import services
import transforms
import appengine_config
from common import caching
from common import utils as common_utils
from google.appengine.api import memcache
from google.appengine.api import namespace_manager
from google.appengine.api import users
from google.appengine.ext import db
# We want to use memcache for both objects that exist and do not exist in the
# datastore. If object exists we cache its instance, if object does not exist
# we cache this object below.
# NOTE: NO_OBJECT is compared with == (not is) throughout this module, so it
# must remain an empty dict.
NO_OBJECT = {}
# The default amount of time to cache the items for in memcache.
DEFAULT_CACHE_TTL_SECS = 60 * 5
# https://developers.google.com/appengine/docs/python/memcache/#Python_Limits
# 1 MB minus an allowance (presumably for key and metadata overhead --
# see the limits page above).
MEMCACHE_MAX = (1024 * 1024 - 96 - 250)
# Upper bound on the combined size of a set_multi() batch.
MEMCACHE_MULTI_MAX = 32 * 1024 * 1024
# Global memcache controls.
CAN_USE_MEMCACHE = ConfigProperty(
    'gcb_can_use_memcache', bool, (
        'Whether or not to cache various objects in memcache. For production '
        'this value should be on to enable maximum performance. For '
        'development this value should be off so you can see your changes to '
        'course content instantaneously.'),
    appengine_config.PRODUCTION_MODE)
# performance counters
CACHE_PUT = PerfCounter(
    'gcb-models-cache-put',
    'A number of times an object was put into memcache.')
CACHE_PUT_TOO_BIG = PerfCounter(
    'gcb-models-cache-put-too-big',
    'Number of times an object was too big to put in memcache.')
CACHE_HIT = PerfCounter(
    'gcb-models-cache-hit',
    'A number of times an object was found in memcache.')
CACHE_MISS = PerfCounter(
    'gcb-models-cache-miss',
    'A number of times an object was not found in memcache.')
CACHE_DELETE = PerfCounter(
    'gcb-models-cache-delete',
    'A number of times an object was deleted from memcache.')
# performance counters for in-process cache
CACHE_PUT_LOCAL = PerfCounter(
    'gcb-models-cache-put-local',
    'A number of times an object was put into local memcache.')
CACHE_HIT_LOCAL = PerfCounter(
    'gcb-models-cache-hit-local',
    'A number of times an object was found in local memcache.')
CACHE_MISS_LOCAL = PerfCounter(
    'gcb-models-cache-miss-local',
    'A number of times an object was not found in local memcache.')
# Intent for sending welcome notifications.
WELCOME_NOTIFICATION_INTENT = 'welcome'
class MemcacheManager(object):
    """Class that consolidates all memcache operations."""
    # In-process cache, active only while in readonly mode; maps
    # namespace -> {key: value}.
    _LOCAL_CACHE = None
    # True while a begin_readonly()/end_readonly() pair is in effect.
    _IS_READONLY = False
    # Counts nested begin_readonly() calls so readonly mode is re-entrant.
    _READONLY_REENTRY_COUNT = 0
    # The app_context captured at begin_readonly(); readonly mode must not
    # span a change of app_context.
    _READONLY_APP_CONTEXT = None
    @classmethod
    def _is_same_app_context_if_set(cls):
        # True when no readonly app_context was captured, or when the current
        # request's course matches the captured one.
        if cls._READONLY_APP_CONTEXT is None:
            return True
        # pylint: disable-msg=g-import-not-at-top
        from controllers import sites
        app_context = sites.get_course_for_current_request()
        return cls._READONLY_APP_CONTEXT == app_context
    @classmethod
    def _assert_true_clear_cache_and_raise_if_not(cls, value_to_assert, msg):
        # Clears all readonly state before raising so a failed assertion
        # cannot leave the manager stuck in readonly mode.
        if not value_to_assert:
            cls.clear_readonly_cache()
            raise AssertionError(msg)
    @classmethod
    def _fs_begin_readonly(cls):
        # Propagates readonly mode to the current course's filesystem, if any.
        # pylint: disable-msg=g-import-not-at-top
        from controllers import sites
        cls._READONLY_APP_CONTEXT = sites.get_course_for_current_request()
        if cls._READONLY_APP_CONTEXT:
            cls._READONLY_APP_CONTEXT.fs.begin_readonly()
    @classmethod
    def _fs_end_readonly(cls):
        # Ends filesystem readonly mode and drops the captured app_context.
        if cls._READONLY_APP_CONTEXT:
            cls._READONLY_APP_CONTEXT.fs.end_readonly()
        cls._READONLY_APP_CONTEXT = None
    @classmethod
    def begin_readonly(cls):
        """Enters (possibly nested) readonly mode with an in-process cache."""
        cls._assert_true_clear_cache_and_raise_if_not(
            cls._READONLY_REENTRY_COUNT >= 0, 'Re-entry counter is < 0.')
        cls._assert_true_clear_cache_and_raise_if_not(
            cls._is_same_app_context_if_set(), 'Unable to switch app_context.')
        # Only the outermost call sets up the cache and filesystem state.
        if cls._READONLY_REENTRY_COUNT == 0:
            appengine_config.log_appstats_event(
                'MemcacheManager.begin_readonly')
            cls._IS_READONLY = True
            cls._LOCAL_CACHE = {}
            cls._fs_begin_readonly()
        cls._READONLY_REENTRY_COUNT += 1
    @classmethod
    def end_readonly(cls):
        """Leaves one level of readonly mode; outermost call tears down."""
        cls._assert_true_clear_cache_and_raise_if_not(
            cls._READONLY_REENTRY_COUNT > 0, 'Re-entry counter <= 0.')
        cls._assert_true_clear_cache_and_raise_if_not(
            cls._is_same_app_context_if_set(), 'Unable to switch app_context.')
        cls._READONLY_REENTRY_COUNT -= 1
        if cls._READONLY_REENTRY_COUNT == 0:
            cls._fs_end_readonly()
            cls._IS_READONLY = False
            cls._LOCAL_CACHE = None
            cls._READONLY_APP_CONTEXT = None
            appengine_config.log_appstats_event('MemcacheManager.end_readonly')
    @classmethod
    def clear_readonly_cache(cls):
        """Unconditionally resets all readonly state (used on errors)."""
        cls._LOCAL_CACHE = None
        cls._IS_READONLY = False
        cls._READONLY_REENTRY_COUNT = 0
        if cls._READONLY_APP_CONTEXT and (
                cls._READONLY_APP_CONTEXT.fs.is_in_readonly):
            cls._READONLY_APP_CONTEXT.fs.end_readonly()
        cls._READONLY_APP_CONTEXT = None
    @classmethod
    def _local_cache_get(cls, key, namespace):
        # Returns (is_cached, value); only consults the local cache while in
        # readonly mode.
        if cls._IS_READONLY:
            assert cls._is_same_app_context_if_set()
            _dict = cls._LOCAL_CACHE.get(namespace)
            if not _dict:
                _dict = {}
                cls._LOCAL_CACHE[namespace] = _dict
            if key in _dict:
                CACHE_HIT_LOCAL.inc()
                value = _dict[key]
                return True, value
            else:
                CACHE_MISS_LOCAL.inc()
        return False, None
    @classmethod
    def _local_cache_put(cls, key, namespace, value):
        # Stores into the local cache; a no-op outside readonly mode.
        if cls._IS_READONLY:
            assert cls._is_same_app_context_if_set()
            _dict = cls._LOCAL_CACHE.get(namespace)
            if not _dict:
                _dict = {}
                cls._LOCAL_CACHE[namespace] = _dict
            _dict[key] = value
            CACHE_PUT_LOCAL.inc()
    @classmethod
    def _local_cache_get_multi(cls, keys, namespace):
        # Returns (True, values) only if ALL keys are locally cached;
        # otherwise (False, []).
        if cls._IS_READONLY:
            assert cls._is_same_app_context_if_set()
            values = []
            for key in keys:
                is_cached, value = cls._local_cache_get(key, namespace)
                if not is_cached:
                    return False, []
                else:
                    values.append(value)
            return True, values
        return False, []
    @classmethod
    def _local_cache_put_multi(cls, values, namespace):
        # Stores a dict of key -> value; a no-op outside readonly mode.
        if cls._IS_READONLY:
            assert cls._is_same_app_context_if_set()
            for key, value in values.items():
                cls._local_cache_put(key, namespace, value)
    @classmethod
    def get_namespace(cls):
        """Look up namespace from namespace_manager or use default."""
        namespace = namespace_manager.get_namespace()
        if namespace:
            return namespace
        return appengine_config.DEFAULT_NAMESPACE_NAME
    @classmethod
    def _get_namespace(cls, namespace):
        # Resolves an explicit namespace argument, falling back to current.
        if namespace is not None:
            return namespace
        return cls.get_namespace()
    @classmethod
    def get(cls, key, namespace=None):
        """Gets an item from memcache if memcache is enabled."""
        if not CAN_USE_MEMCACHE.value:
            return None
        _namespace = cls._get_namespace(namespace)
        is_cached, value = cls._local_cache_get(key, _namespace)
        if is_cached:
            return value
        value = memcache.get(key, namespace=_namespace)
        # We store some objects in memcache that don't evaluate to True, but are
        # real objects, '{}' for example. Count a cache miss only in a case when
        # an object is None.
        if value is not None:
            CACHE_HIT.inc()
        else:
            CACHE_MISS.inc(context=key)
        cls._local_cache_put(key, _namespace, value)
        return value
    @classmethod
    def get_multi(cls, keys, namespace=None):
        """Gets a set of items from memcache if memcache is enabled."""
        if not CAN_USE_MEMCACHE.value:
            return {}
        _namespace = cls._get_namespace(namespace)
        is_cached, values = cls._local_cache_get_multi(keys, _namespace)
        if is_cached:
            return values
        values = memcache.get_multi(keys, namespace=_namespace)
        for key, value in values.items():
            if value is not None:
                CACHE_HIT.inc()
            else:
                # Exception() is logged only to capture a stack trace for the
                # miss; it is never raised.
                logging.info('Cache miss, key: %s. %s', key, Exception())
                CACHE_MISS.inc(context=key)
        cls._local_cache_put_multi(values, _namespace)
        return values
    @classmethod
    def set(cls, key, value, ttl=DEFAULT_CACHE_TTL_SECS, namespace=None):
        """Sets an item in memcache if memcache is enabled."""
        try:
            if CAN_USE_MEMCACHE.value:
                # Skip objects too big for a single memcache entry.
                size = sys.getsizeof(value)
                if size > MEMCACHE_MAX:
                    CACHE_PUT_TOO_BIG.inc()
                else:
                    CACHE_PUT.inc()
                    _namespace = cls._get_namespace(namespace)
                    memcache.set(key, value, ttl, namespace=_namespace)
                    cls._local_cache_put(key, _namespace, value)
        except:  # pylint: disable-msg=bare-except
            # Cache writes are best-effort: never let a cache failure break
            # the request.
            logging.exception(
                'Failed to set: %s, %s', key, cls._get_namespace(namespace))
        return None
    @classmethod
    def set_multi(cls, mapping, ttl=DEFAULT_CACHE_TTL_SECS, namespace=None):
        """Sets a dict of items in memcache if memcache is enabled."""
        try:
            if CAN_USE_MEMCACHE.value:
                if not mapping:
                    return
                # Note: sys.getsizeof is shallow, so this is only a rough
                # estimate of the batch size.
                size = sum([
                    sys.getsizeof(key) + sys.getsizeof(value)
                    for key, value in mapping.items()])
                if size > MEMCACHE_MULTI_MAX:
                    CACHE_PUT_TOO_BIG.inc()
                else:
                    CACHE_PUT.inc()
                    _namespace = cls._get_namespace(namespace)
                    memcache.set_multi(mapping, time=ttl, namespace=_namespace)
                    cls._local_cache_put_multi(mapping, _namespace)
        except:  # pylint: disable-msg=bare-except
            # Best-effort, as in set() above.
            logging.exception(
                'Failed to set_multi: %s, %s',
                mapping, cls._get_namespace(namespace))
        return None
    @classmethod
    def delete(cls, key, namespace=None):
        """Deletes an item from memcache if memcache is enabled."""
        # Deletes are mutations and are forbidden while in readonly mode.
        assert not cls._IS_READONLY
        if CAN_USE_MEMCACHE.value:
            CACHE_DELETE.inc()
            memcache.delete(key, namespace=cls._get_namespace(namespace))
    @classmethod
    def delete_multi(cls, key_list, namespace=None):
        """Deletes a list of items from memcache if memcache is enabled."""
        assert not cls._IS_READONLY
        if CAN_USE_MEMCACHE.value:
            CACHE_DELETE.inc(increment=len(key_list))
            memcache.delete_multi(
                key_list, namespace=cls._get_namespace(namespace))
    @classmethod
    def incr(cls, key, delta, namespace=None):
        """Incr an item in memcache if memcache is enabled."""
        if CAN_USE_MEMCACHE.value:
            # initial_value=0 creates the counter on first increment.
            memcache.incr(
                key, delta,
                namespace=cls._get_namespace(namespace), initial_value=0)
# Global toggle: when on, per-process counter values are also accumulated in
# memcache so readings reflect all frontend instances.
CAN_AGGREGATE_COUNTERS = ConfigProperty(
    'gcb_can_aggregate_counters', bool,
    'Whether or not to aggregate and record counter values in memcache. '
    'This allows you to see counter values aggregated across all frontend '
    'application instances. Without recording, you only see counter values '
    'for one frontend instance you are connected to right now. Enabling '
    'aggregation improves quality of performance metrics, but adds a small '
    'amount of latency to all your requests.',
    default_value=False)
def incr_counter_global_value(name, delta):
    """Bumps the memcache-aggregated value of counter `name` by `delta`."""
    if not CAN_AGGREGATE_COUNTERS.value:
        return
    key = 'counter:' + name
    MemcacheManager.incr(
        key, delta, namespace=appengine_config.DEFAULT_NAMESPACE_NAME)
def get_counter_global_value(name):
    """Returns the memcache-aggregated value of counter `name`, or None."""
    if not CAN_AGGREGATE_COUNTERS.value:
        return None
    key = 'counter:' + name
    return MemcacheManager.get(
        key, namespace=appengine_config.DEFAULT_NAMESPACE_NAME)
# Install the memcache-backed aggregation hooks into the counters module.
counters.get_counter_global_value = get_counter_global_value
counters.incr_counter_global_value = incr_counter_global_value
# Whether to share a single student profile across all courses in this
# deployment.  (The previous comment here, about recording tag events,
# described a different setting.)
CAN_SHARE_STUDENT_PROFILE = ConfigProperty(
    'gcb_can_share_student_profile', bool, (
        'Whether or not to share student profile between different courses.'),
    False)
class CollisionError(Exception):
    """Raised when two entities collide within the same namespace."""
class ValidationError(Exception):
    """Raised when data fails a validation check."""
class ContentChunkEntity(BaseEntity):
    """Defines storage for ContentChunk, a blob of opaque content to display."""
    _PROPERTY_EXPORT_BLACKLIST = []  # No PII in ContentChunks.
    # A string that gives the type of the content chunk. At the data layer we
    # make no restrictions on the values that can be used here -- we only
    # require that a type is given. The type here may be independent of any
    # notion of Content-Type in an HTTP header.
    content_type = db.StringProperty(required=True)
    # UTC last modification timestamp.
    last_modified = db.DateTimeProperty(auto_now=True, required=True)
    # Whether or not the chunk supports custom tags. If True, the renderer may
    # be extended to parse and render those tags at display time (this is a stub
    # for future functionality that does not exist yet). If False, the contents
    # of the chunk will be rendered verbatim.
    supports_custom_tags = db.BooleanProperty(default=False)
    # Optional identifier for the chunk in the system it was sourced from.
    # Format is type_id:resource_id where type_id is an identifier that maps to
    # an external system and resource_id is the identifier for a resource within
    # that system (e.g. 'drive:1234' or 'web:http://example.com/index.html').
    # Exact values are up to the caller, but if either type_id or resource_id is
    # given, both must be, they must both be truthy, and type_id cannot contain
    # ':'. Max size is 500B, enforced by datastore.
    # See ContentChunkDAO.make_uid()/_split_uid() for the canonical encoding.
    uid = db.StringProperty(indexed=True)
    # Payload of the chunk. Max size is 1MB, enforced by datastore.
    contents = db.TextProperty()
class ContentChunkDAO(object):
    """Data access object for ContentChunks.

    Reads go through memcache; NO_OBJECT is cached to remember entities that
    are known to be absent from the datastore.
    """
    @classmethod
    def delete(cls, entity_id):
        """Deletes ContentChunkEntity for datastore id int; returns None."""
        memcache_key = cls._get_memcache_key(entity_id)
        entity = ContentChunkEntity.get_by_id(entity_id)
        if entity:
            db.delete(entity)
        # Invalidate unconditionally, in case a stale copy (or NO_OBJECT
        # sentinel) is cached.
        MemcacheManager.delete(memcache_key)
    @classmethod
    def get(cls, entity_id):
        """Gets ContentChunkEntityDTO or None from given datastore id int."""
        if entity_id is None:
            return
        memcache_key = cls._get_memcache_key(entity_id)
        found = MemcacheManager.get(memcache_key)
        # NO_OBJECT is a cached negative lookup.
        if found == NO_OBJECT:
            return None
        elif found:
            return found
        else:
            result = None
            cache_value = NO_OBJECT
            entity = ContentChunkEntity.get_by_id(entity_id)
            if entity:
                result = cls._make_dto(entity)
                cache_value = result
            # Cache either the DTO or the negative sentinel.
            MemcacheManager.set(memcache_key, cache_value)
            return result
    @classmethod
    def get_by_uid(cls, uid):
        """Gets list of DTOs for all entities with given uid string."""
        results = ContentChunkEntity.all().filter(
            ContentChunkEntity.uid.name, uid
        ).fetch(1000)
        # Sort by datastore id for a deterministic ordering.
        return sorted(
            [cls._make_dto(result) for result in results],
            key=lambda dto: dto.id)
    @classmethod
    def make_uid(cls, type_id, resource_id):
        """Makes a uid string (or None) from the given strings (or Nones)."""
        if type_id is None and resource_id is None:
            return None
        # Both parts must be present and truthy; ':' is the separator so it
        # may not appear in type_id.
        assert type_id and resource_id and ':' not in type_id
        return '%s:%s' % (type_id, resource_id)
    @classmethod
    def save(cls, dto):
        """Saves contents of a DTO and returns the key of the saved entity.
        Handles both creating new and updating existing entities. If the id of
        the passed DTO is found, the entity will be updated.
        Note that this method does not refetch the saved entity from the
        datastore after put since this is impossible in a transaction. This
        means the last_modified date we put in the cache skews from the actual
        saved value by however long put took. This is expected datastore
        behavior; we do not at present have a use case for perfect accuracy in
        this value for our getters.
        Args:
            dto: ContentChunkDTO. last_modified will be ignored.
        Returns:
            db.Key of saved ContentChunkEntity.
        """
        if dto.id is None:
            entity = ContentChunkEntity(content_type=dto.content_type)
        else:
            entity = ContentChunkEntity.get_by_id(dto.id)
            # Fall back to creating a fresh entity if the id is stale.
            if entity is None:
                entity = ContentChunkEntity(content_type=dto.content_type)
        entity.contents = dto.contents
        entity.supports_custom_tags = dto.supports_custom_tags
        entity.uid = cls.make_uid(dto.type_id, dto.resource_id)
        entity.put()
        MemcacheManager.set(
            cls._get_memcache_key(entity.key().id()), cls._make_dto(entity))
        return entity.key()
    @classmethod
    def _get_memcache_key(cls, entity_id):
        # Key format: '(ContentChunkEntity:<id>)'.
        assert entity_id is not None
        return '(%s:%s)' % (ContentChunkEntity.kind(), entity_id)
    @classmethod
    def _make_dto(cls, entity):
        # Converts a datastore entity into a detached DTO.
        type_id, resource_id = cls._split_uid(entity.uid)
        return ContentChunkDTO({
            'content_type': entity.content_type,
            'contents': entity.contents,
            'id': entity.key().id(),
            'last_modified': entity.last_modified,
            'resource_id': resource_id,
            'supports_custom_tags': entity.supports_custom_tags,
            'type_id': type_id,
        })
    @classmethod
    def _split_uid(cls, uid):
        # Inverse of make_uid(); returns (None, None) for a missing uid.
        resource_id = None
        type_id = None
        if uid is not None:
            assert ':' in uid
            type_id, resource_id = uid.split(':', 1)
        assert type_id and resource_id
        return type_id, resource_id
class ContentChunkDTO(object):
    """Data transfer object for ContentChunks."""

    # Attributes copied from the entity dict; a missing key yields None.
    _FIELDS = (
        'content_type', 'contents', 'id', 'last_modified', 'resource_id',
        'supports_custom_tags', 'type_id')

    def __init__(self, entity_dict):
        for field in self._FIELDS:
            setattr(self, field, entity_dict.get(field))

    def __eq__(self, other):
        if not isinstance(other, ContentChunkDTO):
            return False
        return all(
            getattr(self, field) == getattr(other, field)
            for field in self._FIELDS)
class PersonalProfile(BaseEntity):
    """Personal information not specific to any course instance."""
    email = db.StringProperty(indexed=False)
    legal_name = db.StringProperty(indexed=False)
    nick_name = db.StringProperty(indexed=False)
    date_of_birth = db.DateProperty(indexed=False)
    # JSON-encoded dict strings (see StudentProfileDAO, which reads/writes
    # them via transforms.loads/dumps).
    enrollment_info = db.TextProperty()
    course_info = db.TextProperty()
    # PII fields excluded from data export.
    _PROPERTY_EXPORT_BLACKLIST = [email, legal_name, nick_name, date_of_birth]
    @property
    def user_id(self):
        # The entity's key name IS the user id.
        return self.key().name()
    @classmethod
    def safe_key(cls, db_key, transform_fn):
        # Returns a key whose name component has been transformed (e.g.
        # anonymized for export).
        return db.Key.from_path(cls.kind(), transform_fn(db_key.name()))
class PersonalProfileDTO(object):
    """Detached, serializable view of a PersonalProfile entity.

    When constructed without an entity, only the JSON-dict defaults exist;
    the identity attributes (user_id, email, ...) are deliberately absent.
    """

    def __init__(self, personal_profile=None):
        # JSON-encoded dicts; default to empty objects.
        self.enrollment_info = '{}'
        self.course_info = '{}'
        if not personal_profile:
            return
        self.user_id = personal_profile.user_id
        self.email = personal_profile.email
        self.legal_name = personal_profile.legal_name
        self.nick_name = personal_profile.nick_name
        self.date_of_birth = personal_profile.date_of_birth
        self.enrollment_info = personal_profile.enrollment_info
        self.course_info = personal_profile.course_info
class StudentProfileDAO(object):
    """All access and mutation methods for PersonalProfile and Student."""
    # Profiles always live in the default namespace so they can be shared
    # across courses.
    TARGET_NAMESPACE = appengine_config.DEFAULT_NAMESPACE_NAME
    @classmethod
    def _memcache_key(cls, key):
        """Makes a memcache key from primary key."""
        return 'entity:personal-profile:%s' % key
    @classmethod
    def _get_profile_by_user_id(cls, user_id):
        """Loads profile given a user_id and returns Entity object."""
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(cls.TARGET_NAMESPACE)
            profile = MemcacheManager.get(
                cls._memcache_key(user_id), namespace=cls.TARGET_NAMESPACE)
            # NO_OBJECT is a cached negative lookup.
            if profile == NO_OBJECT:
                return None
            if profile:
                return profile
            profile = PersonalProfile.get_by_key_name(user_id)
            # Cache the entity, or the negative sentinel on a miss.
            MemcacheManager.set(
                cls._memcache_key(user_id), profile if profile else NO_OBJECT,
                namespace=cls.TARGET_NAMESPACE)
            return profile
        finally:
            namespace_manager.set_namespace(old_namespace)
    @classmethod
    def _add_new_profile(cls, user_id, email):
        """Adds new profile for a user_id and returns Entity object."""
        # Profiles are only created when cross-course sharing is enabled.
        if not CAN_SHARE_STUDENT_PROFILE.value:
            return None
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(cls.TARGET_NAMESPACE)
            profile = PersonalProfile(key_name=user_id)
            profile.email = email
            profile.enrollment_info = '{}'
            profile.put()
            return profile
        finally:
            namespace_manager.set_namespace(old_namespace)
    @classmethod
    def _update_global_profile_attributes(
            cls, profile,
            email=None, legal_name=None, nick_name=None,
            date_of_birth=None, is_enrolled=None, final_grade=None,
            course_info=None):
        """Modifies various attributes of Student's Global Profile.

        Only arguments that are not None are applied; per-course values
        (is_enrolled, final_grade, course_info) are recorded under the
        current request's course namespace.
        """
        # TODO(psimakov): update of email does not work for student
        if email is not None:
            profile.email = email
        if legal_name is not None:
            profile.legal_name = legal_name
        if nick_name is not None:
            profile.nick_name = nick_name
        if date_of_birth is not None:
            profile.date_of_birth = date_of_birth
        if not (is_enrolled is None and final_grade is None and
                course_info is None):
            # Defer to avoid circular import.
            # pylint: disable-msg=g-import-not-at-top
            from controllers import sites
            course = sites.get_course_for_current_request()
            course_namespace = course.get_namespace_name()
            if is_enrolled is not None:
                enrollment_dict = transforms.loads(profile.enrollment_info)
                enrollment_dict[course_namespace] = is_enrolled
                profile.enrollment_info = transforms.dumps(enrollment_dict)
            if final_grade is not None or course_info is not None:
                course_info_dict = {}
                if profile.course_info:
                    course_info_dict = transforms.loads(profile.course_info)
                if course_namespace in course_info_dict.keys():
                    info = course_info_dict[course_namespace]
                else:
                    info = {}
                if final_grade:
                    info['final_grade'] = final_grade
                if course_info:
                    info['info'] = course_info
                course_info_dict[course_namespace] = info
                profile.course_info = transforms.dumps(course_info_dict)
    @classmethod
    def _update_course_profile_attributes(
            cls, student, nick_name=None, is_enrolled=None, labels=None):
        """Modifies various attributes of Student's Course Profile."""
        if nick_name is not None:
            student.name = nick_name
        if is_enrolled is not None:
            student.is_enrolled = is_enrolled
        if labels is not None:
            student.labels = labels
    @classmethod
    def _update_attributes(
            cls, profile, student,
            email=None, legal_name=None, nick_name=None,
            date_of_birth=None, is_enrolled=None, final_grade=None,
            course_info=None, labels=None):
        """Modifies various attributes of Student and Profile.

        Either argument may be None (e.g. no shared profile exists); the
        corresponding update is then skipped.
        """
        if profile:
            cls._update_global_profile_attributes(
                profile, email=email, legal_name=legal_name,
                nick_name=nick_name, date_of_birth=date_of_birth,
                is_enrolled=is_enrolled, final_grade=final_grade,
                course_info=course_info)
        if student:
            cls._update_course_profile_attributes(
                student, nick_name=nick_name, is_enrolled=is_enrolled,
                labels=labels)
    @classmethod
    def _put_profile(cls, profile):
        """Does a put() on profile objects."""
        if not profile:
            return
        profile.put()
        # Invalidate the cached copy so the next read refetches.
        MemcacheManager.delete(
            cls._memcache_key(profile.user_id),
            namespace=cls.TARGET_NAMESPACE)
    @classmethod
    def get_profile_by_user_id(cls, user_id):
        """Loads profile given a user_id and returns DTO object."""
        profile = cls._get_profile_by_user_id(user_id)
        if profile:
            return PersonalProfileDTO(personal_profile=profile)
        return None
    @classmethod
    def add_new_profile(cls, user_id, email):
        # Public wrapper around _add_new_profile().
        return cls._add_new_profile(user_id, email)
    @classmethod
    def add_new_student_for_current_user(
            cls, nick_name, additional_fields, handler, labels=None,
            mock_user=None):
        """Registers the current user as a student and sends a welcome mail."""
        # UM_CODE_START
        if mock_user:
            user = mock_user
        else:
        # UM_CODE_END
            user = users.get_current_user()
        student_by_uid = Student.get_student_by_user_id(user.user_id())
        is_valid_student = (student_by_uid is None or
                            student_by_uid.user_id == user.user_id())
        assert is_valid_student, (
            'Student\'s email and user id do not match.')
        # UM_CODE_START
        student = cls._add_new_student_for_current_user(
            user.user_id(), user.email(), nick_name, additional_fields, labels)
        # UM_CODE_END
        try:
            cls._send_welcome_notification(handler, user.email())
        except Exception, e:  # On purpose. pylint: disable-msg=broad-except
            # Best-effort: registration must succeed even if the welcome
            # notification cannot be sent.
            logging.error(
                'Unable to send welcome notification; error was: ' + str(e))
        # UM_CODE_START
        return student
        # UM_CODE_END
    @classmethod
    @db.transactional(xg=True)
    def _add_new_student_for_current_user(
            cls, user_id, email, nick_name, additional_fields, labels=None):
        """Create new or re-enroll old student."""
        # create profile if does not exist
        profile = cls._get_profile_by_user_id(user_id)
        if not profile:
            profile = cls._add_new_profile(user_id, email)
        # create new student or re-enroll existing
        student = Student.get_by_email(email)
        if not student:
            # TODO(psimakov): we must move to user_id as a key
            student = Student(key_name=email)
        # update profile
        cls._update_attributes(
            profile, student, nick_name=nick_name, is_enrolled=True,
            labels=labels)
        # update student
        student.user_id = user_id
        student.additional_fields = additional_fields
        # put both
        cls._put_profile(profile)
        student.put()
        # UM_CODE_START
        return student
        # UM_CODE_END
    @classmethod
    def _send_welcome_notification(cls, handler, email):
        """Sends the course's welcome email unless disabled or unsubscribed."""
        if not cls._can_send_welcome_notifications(handler):
            return
        if services.unsubscribe.has_unsubscribed(email):
            return
        # Imports don't resolve at top.
        # pylint: disable-msg=g-import-not-at-top
        from controllers import sites
        context = sites.get_course_for_current_request()
        course_title = handler.app_context.get_environ()['course']['title']
        sender = cls._get_welcome_notifications_sender(handler)
        assert sender, 'Must set welcome_notifications_sender in course.yaml'
        subject = 'Welcome to ' + course_title
        # NOTE: 'context' is rebound here from the course object to the
        # template-rendering dict.
        context = {
            'course_title': course_title,
            'course_url': handler.get_base_href(handler),
            'unsubscribe_url': services.unsubscribe.get_unsubscribe_url(
                handler, email)
        }
        jinja_environment = handler.app_context.fs.get_jinja_environ(
            [os.path.join(
                appengine_config.BUNDLE_ROOT, 'views', 'notifications')],
            autoescape=False)
        template = jinja_environment.get_template('welcome.txt')
        services.notifications.send_async(
            email, sender, WELCOME_NOTIFICATION_INTENT,
            template.render(context), subject, audit_trail=context,
        )
    @classmethod
    def _can_send_welcome_notifications(cls, handler):
        # Requires both services to be enabled AND the course setting on.
        return (
            services.notifications.enabled() and services.unsubscribe.enabled()
            and cls._get_send_welcome_notifications(handler))
    @classmethod
    def _get_send_welcome_notifications(cls, handler):
        # Course setting; defaults to False when unset.
        return handler.app_context.get_environ().get(
            'course', {}
        ).get('send_welcome_notifications', False)
    @classmethod
    def _get_welcome_notifications_sender(cls, handler):
        # Course setting; may be None, which asserts upstream.
        return handler.app_context.get_environ().get(
            'course', {}
        ).get('welcome_notifications_sender')
    @classmethod
    def get_enrolled_student_by_email_for(cls, email, app_context):
        """Returns student for a specific course."""
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(app_context.get_namespace_name())
            return Student.get_enrolled_student_by_email(email)
        finally:
            namespace_manager.set_namespace(old_namespace)
    @classmethod
    @db.transactional(xg=True)
    def update(
            cls, user_id, email, legal_name=None, nick_name=None,
            date_of_birth=None, is_enrolled=None, final_grade=None,
            course_info=None, labels=None, profile_only=False):
        """Updates a student and/or their global profile."""
        student = None
        if not profile_only:
            student = Student.get_by_email(email)
            if not student:
                raise Exception('Unable to find student for: %s' % user_id)
        profile = cls._get_profile_by_user_id(user_id)
        if not profile:
            profile = cls.add_new_profile(user_id, email)
        cls._update_attributes(
            profile, student, email=email, legal_name=legal_name,
            nick_name=nick_name, date_of_birth=date_of_birth,
            is_enrolled=is_enrolled, final_grade=final_grade,
            course_info=course_info, labels=labels)
        cls._put_profile(profile)
        if not profile_only:
            student.put()
class Student(BaseEntity):
"""Student data specific to a course instance."""
enrolled_on = db.DateTimeProperty(auto_now_add=True, indexed=True)
user_id = db.StringProperty(indexed=True)
name = db.StringProperty(indexed=False)
additional_fields = db.TextProperty(indexed=False)
is_enrolled = db.BooleanProperty(indexed=False)
# Each of the following is a string representation of a JSON dict.
scores = db.TextProperty(indexed=False)
labels = db.StringProperty(indexed=False)
_PROPERTY_EXPORT_BLACKLIST = [additional_fields, name]
@classmethod
def safe_key(cls, db_key, transform_fn):
return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name()))
def for_export(self, transform_fn):
"""Creates an ExportEntity populated from this entity instance."""
assert not hasattr(self, 'key_by_user_id')
model = super(Student, self).for_export(transform_fn)
model.user_id = transform_fn(self.user_id)
# Add a version of the key that always uses the user_id for the name
# component. This can be used to establish relationships between objects
# where the student key used was created via get_key(). In general,
# this means clients will join exports on this field, not the field made
# from safe_key().
model.key_by_user_id = self.get_key(transform_fn=transform_fn)
return model
@property
def is_transient(self):
return False
@property
def email(self):
return self.key().name()
@property
def profile(self):
return StudentProfileDAO.get_profile_by_user_id(self.user_id)
@classmethod
def _memcache_key(cls, key):
"""Makes a memcache key from primary key."""
return 'entity:student:%s' % key
def put(self):
"""Do the normal put() and also add the object to memcache."""
result = super(Student, self).put()
MemcacheManager.set(self._memcache_key(self.key().name()), self)
return result
def delete(self):
"""Do the normal delete() and also remove the object from memcache."""
super(Student, self).delete()
MemcacheManager.delete(self._memcache_key(self.key().name()))
@classmethod
def add_new_student_for_current_user(
cls, nick_name, additional_fields, handler, labels=None):
return StudentProfileDAO.add_new_student_for_current_user(
nick_name, additional_fields, handler, labels)
@classmethod
def get_by_email(cls, email):
return Student.get_by_key_name(email.encode('utf8'))
@classmethod
def get_enrolled_student_by_email(cls, email):
"""Returns enrolled student or None."""
student = MemcacheManager.get(cls._memcache_key(email))
if NO_OBJECT == student:
return None
if not student:
student = Student.get_by_email(email)
if student:
MemcacheManager.set(cls._memcache_key(email), student)
else:
MemcacheManager.set(cls._memcache_key(email), NO_OBJECT)
if student and student.is_enrolled:
return student
else:
return None
@classmethod
def _get_user_and_student(cls):
"""Loads user and student and asserts both are present."""
user = users.get_current_user()
if not user:
raise Exception('No current user.')
student = Student.get_by_email(user.email())
if not student:
raise Exception('Student instance corresponding to user %s not '
'found.' % user.email())
return user, student
    @classmethod
    def rename_current(cls, new_name):
        """Gives student a new name."""
        # Raises (via _get_user_and_student) when nobody is signed in.
        _, student = cls._get_user_and_student()
        StudentProfileDAO.update(
            student.user_id, student.email, nick_name=new_name)
    @classmethod
    def set_enrollment_status_for_current(cls, is_enrolled):
        """Changes student enrollment status."""
        # Raises (via _get_user_and_student) when nobody is signed in.
        _, student = cls._get_user_and_student()
        StudentProfileDAO.update(
            student.user_id, student.email, is_enrolled=is_enrolled)
    @classmethod
    def set_labels_for_current(cls, labels):
        """Set labels for tracks on the student."""
        # Raises (via _get_user_and_student) when nobody is signed in.
        _, student = cls._get_user_and_student()
        StudentProfileDAO.update(
            student.user_id, student.email, labels=labels)
    def get_key(self, transform_fn=None):
        """Gets a version of the key that uses user_id for the key name.

        Args:
            transform_fn: optional function applied to the user_id before
                building the key (used e.g. to obscure PII on export).
        """
        if not self.user_id:
            raise Exception('Student instance has no user_id set.')
        user_id = transform_fn(self.user_id) if transform_fn else self.user_id
        return db.Key.from_path(Student.kind(), user_id)
    @classmethod
    def get_student_by_user_id(cls, user_id):
        """Returns the single Student with this user_id, or None."""
        # Fetch up to 2 so that a duplicate (which should never happen) is
        # detected and reported rather than silently returning one match.
        students = cls.all().filter(cls.user_id.name, user_id).fetch(limit=2)
        if len(students) == 2:
            raise Exception(
                'There is more than one student with user_id %s' % user_id)
        return students[0] if students else None
    def has_same_key_as(self, key):
        """Checks if the key of the student and the given key are equal."""
        # Compares against the user_id-based key from get_key(), not the
        # email-keyed datastore key.
        return key == self.get_key()
def get_labels_of_type(self, label_type):
if not self.labels:
return set()
label_ids = LabelDAO.get_set_of_ids_of_type(label_type)
return set([int(label) for label in
common_utils.text_to_list(self.labels)
if int(label) in label_ids])
class TransientStudent(object):
    """A transient student (i.e. a user who hasn't logged in or registered)."""

    @property
    def is_transient(self):
        # Marker distinguishing this placeholder from a persisted Student.
        return True

    @property
    def is_enrolled(self):
        # A user who never registered can never be enrolled.
        return False
class EventEntity(BaseEntity):
    """Generic events.

    Each event has a 'source' that defines a place in a code where the event was
    recorded. Each event has a 'user_id' to represent an actor who triggered
    the event. The event 'data' is a JSON object, the format of which is defined
    elsewhere and depends on the type of the event.
    """

    recorded_on = db.DateTimeProperty(auto_now_add=True, indexed=True)
    source = db.StringProperty(indexed=False)
    user_id = db.StringProperty(indexed=False)
    # Each of the following is a string representation of a JSON dict.
    data = db.TextProperty(indexed=False)

    @classmethod
    def record(cls, source, user, data):
        """Records new event into a datastore."""
        event = EventEntity()
        event.source = source
        event.user_id = user.user_id()
        event.data = data
        event.put()

    def for_export(self, transform_fn):
        # user_id is PII; obscure it with transform_fn on export.
        model = super(EventEntity, self).for_export(transform_fn)
        model.user_id = transform_fn(self.user_id)
        return model
class StudentAnswersEntity(BaseEntity):
    """Student answers to the assessments."""

    updated_on = db.DateTimeProperty(indexed=True)
    # Each of the following is a string representation of a JSON dict.
    data = db.TextProperty(indexed=False)

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        # The key's id/name (presumably a student identifier -- confirm
        # against the writer of these rows) is transformed to avoid
        # leaking PII on export.
        return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name()))
class StudentPropertyEntity(BaseEntity):
    """A property of a student, keyed by the string STUDENT_ID-PROPERTY_NAME."""

    updated_on = db.DateTimeProperty(indexed=True)
    name = db.StringProperty()
    # Each of the following is a string representation of a JSON dict.
    value = db.TextProperty()

    @classmethod
    def _memcache_key(cls, key):
        """Makes a memcache key from primary key."""
        return 'entity:student_property:%s' % key

    @classmethod
    def create_key(cls, student_id, property_name):
        """Builds the composite STUDENT_ID-PROPERTY_NAME key name."""
        return '%s-%s' % (student_id, property_name)

    @classmethod
    def create(cls, student, property_name):
        """Creates a new, unsaved property entity for the given student."""
        return cls(
            key_name=cls.create_key(student.user_id, property_name),
            name=property_name)

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Returns a key with the student id obscured by transform_fn."""
        # Split only on the first '-' so property names may contain dashes.
        user_id, name = db_key.name().split('-', 1)
        return db.Key.from_path(
            cls.kind(), '%s-%s' % (transform_fn(user_id), name))

    def put(self):
        """Do the normal put() and also add the object to memcache."""
        result = super(StudentPropertyEntity, self).put()
        MemcacheManager.set(self._memcache_key(self.key().name()), self)
        return result

    def delete(self):
        """Do the normal delete() and also remove the object from memcache."""
        # Bug fix: this previously called super(Student, self).delete();
        # since self is not a Student instance, Python 2 super() raises
        # TypeError, so delete() could never succeed.
        super(StudentPropertyEntity, self).delete()
        MemcacheManager.delete(self._memcache_key(self.key().name()))

    @classmethod
    def get(cls, student, property_name):
        """Loads student property, preferring memcache over the datastore."""
        key = cls.create_key(student.user_id, property_name)
        value = MemcacheManager.get(cls._memcache_key(key))
        # NO_OBJECT is a negative-cache marker: known to be absent.
        if NO_OBJECT == value:
            return None
        if not value:
            value = cls.get_by_key_name(key)
            if value:
                MemcacheManager.set(cls._memcache_key(key), value)
            else:
                MemcacheManager.set(cls._memcache_key(key), NO_OBJECT)
        return value
class BaseJsonDao(object):
    """Base DAO class for entities storing their data in a single JSON blob."""

    class EntityKeyTypeId(object):
        # Key strategy: entities addressed by integer datastore ID.

        @classmethod
        def get_entity_by_key(cls, entity_class, key):
            return entity_class.get_by_id(int(key))

        @classmethod
        def new_entity(cls, entity_class, unused_key):
            return entity_class()  # ID auto-generated when entity is put().

    class EntityKeyTypeName(object):
        # Key strategy: entities addressed by string key name.

        @classmethod
        def get_entity_by_key(cls, entity_class, key):
            return entity_class.get_by_key_name(key)

        @classmethod
        def new_entity(cls, entity_class, key_name):
            return entity_class(key_name=key_name)

    @classmethod
    def _memcache_key(cls, obj_id):
        """Makes a memcache key from datastore id."""
        # Keeping case-sensitivity in kind() because Foo(object) != foo(object).
        return '(entity:%s:%s)' % (cls.ENTITY.kind(), obj_id)

    @classmethod
    def _memcache_all_key(cls):
        """Makes a memcache key for caching get_all()."""
        # Keeping case-sensitivity in kind() because Foo(object) != foo(object).
        return '(entity-get-all:%s)' % cls.ENTITY.kind()

    @classmethod
    def get_all_mapped(cls):
        """Returns a dict of id -> DTO for all rows, memcached as one blob."""
        # try to get from memcache
        entities = MemcacheManager.get(cls._memcache_all_key())
        if entities is not None and entities != NO_OBJECT:
            return entities
        # get from datastore
        result = {dto.id: dto for dto in cls.get_all_iter()}
        # put into memcache; NO_OBJECT marks an empty table so that the
        # miss is also cached (negative caching).
        result_to_cache = NO_OBJECT
        if result:
            result_to_cache = result
        MemcacheManager.set(cls._memcache_all_key(), result_to_cache)
        return result

    @classmethod
    def get_all(cls):
        return cls.get_all_mapped().values()

    @classmethod
    def get_all_iter(cls):
        """Return a generator that will produce all DTOs of a given type.

        Yields:
            A DTO for each row in the Entity type's table.
        """
        # Pages through the table with query cursors; the loop ends on the
        # first empty page.
        prev_cursor = None
        any_records = True
        while any_records:
            any_records = False
            query = cls.ENTITY.all().with_cursor(prev_cursor)
            for entity in query.run():
                any_records = True
                yield cls.DTO(entity.key().id_or_name(),
                              transforms.loads(entity.data))
            prev_cursor = query.cursor()

    @classmethod
    def _maybe_apply_post_load_hooks(cls, dto_list):
        """Run any post-load processing hooks.

        Modules may insert post-load processing hooks (e.g. for i18n
        translation) into the list POST_LOAD_HOOKS defined on the DAO class.
        If the class has this list and any hook functions are present, they
        are passed the list of DTO's for in-place processing.

        Args:
            dto_list: list of DTO objects
        """
        if hasattr(cls, 'POST_LOAD_HOOKS'):
            for hook in cls.POST_LOAD_HOOKS:
                hook(dto_list)

    @classmethod
    def _maybe_apply_post_save_hooks(cls, dto_and_id_list):
        """Run any post-save processing hooks.

        Modules may insert post-save processing hooks (e.g. for i18n
        translation) into the list POST_SAVE_HOOKS defined on the DAO class.
        If the class has this list and any hook functions are present, they
        are passed the list of DTO's for in-place processing.

        Args:
            dto_and_id_list: list of pairs of (id, DTO) objects
        """
        # Rewrap with their saved ids; note the underlying dicts are shared
        # with the originals, not copied.
        dto_list = [
            cls.DTO(dto_id, orig_dto.dict)
            for dto_id, orig_dto in dto_and_id_list]
        if hasattr(cls, 'POST_SAVE_HOOKS'):
            common_utils.run_hooks(cls.POST_SAVE_HOOKS, dto_list)

    @classmethod
    def _load_entity(cls, obj_id):
        # A falsy id can never match an entity.
        if not obj_id:
            return None
        memcache_key = cls._memcache_key(obj_id)
        entity = MemcacheManager.get(memcache_key)
        if NO_OBJECT == entity:
            # Negative-cache hit: known to be absent.
            return None
        if not entity:
            entity = cls.ENTITY_KEY_TYPE.get_entity_by_key(cls.ENTITY, obj_id)
            if entity:
                MemcacheManager.set(memcache_key, entity)
            else:
                MemcacheManager.set(memcache_key, NO_OBJECT)
        return entity

    @classmethod
    def load(cls, obj_id):
        """Loads one DTO by id, returning None when absent."""
        entity = cls._load_entity(obj_id)
        if entity:
            dto = cls.DTO(obj_id, transforms.loads(entity.data))
            cls._maybe_apply_post_load_hooks([dto])
            return dto
        else:
            return None

    @classmethod
    @appengine_config.timeandlog('Models.bulk_load')
    def bulk_load(cls, obj_id_list):
        """Loads many DTOs at once; result list is parallel to obj_id_list.

        Ids with no matching entity produce None entries.
        """
        # fetch from memcache
        memcache_keys = [cls._memcache_key(obj_id) for obj_id in obj_id_list]
        memcache_entities = MemcacheManager.get_multi(memcache_keys)
        # fetch missing from datastore
        both_keys = zip(obj_id_list, memcache_keys)
        datastore_keys = [
            obj_id for obj_id, memcache_key in both_keys
            if memcache_key not in memcache_entities]
        if datastore_keys:
            datastore_entities = dict(zip(
                datastore_keys, db.get([
                    db.Key.from_path(cls.ENTITY.kind(), obj_id)
                    for obj_id in datastore_keys])))
        else:
            datastore_entities = {}
        # weave the results together
        ret = []
        memcache_update = {}
        dtos_for_post_hooks = []
        for obj_id, memcache_key in both_keys:
            entity = datastore_entities.get(obj_id)
            if entity is not None:
                # Freshly read from datastore: cache it, run hooks later.
                dto = cls.DTO(obj_id, transforms.loads(entity.data))
                ret.append(dto)
                dtos_for_post_hooks.append(dto)
                memcache_update[memcache_key] = entity
            elif memcache_key not in memcache_entities:
                # Missing everywhere: negative-cache the miss.
                ret.append(None)
                memcache_update[memcache_key] = NO_OBJECT
            else:
                entity = memcache_entities[memcache_key]
                if NO_OBJECT == entity:
                    ret.append(None)
                else:
                    ret.append(cls.DTO(obj_id, transforms.loads(entity.data)))
        # run hooks
        cls._maybe_apply_post_load_hooks(dtos_for_post_hooks)
        # put into memcache
        if datastore_entities:
            MemcacheManager.set_multi(memcache_update)
        return ret

    @classmethod
    def _create_if_necessary(cls, dto):
        # Reuse the existing entity when present so the datastore id stays
        # stable; either way, overwrite its JSON payload from the DTO.
        entity = cls._load_entity(dto.id)
        if not entity:
            entity = cls.ENTITY_KEY_TYPE.new_entity(cls.ENTITY, dto.id)
        entity.data = transforms.dumps(dto.dict)
        return entity

    @classmethod
    def before_put(cls, dto, entity):
        """Hook for subclasses to adjust the entity just before put()."""
        pass

    @classmethod
    def save(cls, dto):
        """Persists one DTO; returns the entity's id_or_name."""
        entity = cls._create_if_necessary(dto)
        cls.before_put(dto, entity)
        entity.put()
        # The get_all() cache is now stale; drop it.
        MemcacheManager.delete(cls._memcache_all_key())
        id_or_name = entity.key().id_or_name()
        MemcacheManager.set(cls._memcache_key(id_or_name), entity)
        cls._maybe_apply_post_save_hooks([(id_or_name, dto)])
        return id_or_name

    @classmethod
    def save_all(cls, dtos):
        """Performs a block persist of a list of DTO's."""
        entities = []
        for dto in dtos:
            entity = cls._create_if_necessary(dto)
            entities.append(entity)
            cls.before_put(dto, entity)
        keys = db.put(entities)
        MemcacheManager.delete(cls._memcache_all_key())
        for key, entity in zip(keys, entities):
            MemcacheManager.set(cls._memcache_key(key.id_or_name()), entity)
        id_or_name_list = [key.id_or_name() for key in keys]
        cls._maybe_apply_post_save_hooks(zip(id_or_name_list, dtos))
        return id_or_name_list

    @classmethod
    def delete(cls, dto):
        # NOTE(review): assumes the entity exists; if _load_entity returns
        # None this raises AttributeError -- confirm callers guarantee it.
        entity = cls._load_entity(dto.id)
        entity.delete()
        MemcacheManager.delete(cls._memcache_all_key())
        MemcacheManager.delete(cls._memcache_key(entity.key().id_or_name()))

    @classmethod
    def clone(cls, dto):
        """Returns an unsaved deep copy of dto with its id cleared."""
        return cls.DTO(None, copy.deepcopy(dto.dict))
class LastModfiedJsonDao(BaseJsonDao):
    """Base DAO that updates the last_modified field of entities on every save.

    DTOs managed by this DAO must have a settable field last_modified defined.

    NOTE(review): the class name is missing an 'i' ('LastModfied'); it is
    kept as-is because subclasses and super() calls reference this name.
    """

    @classmethod
    def save(cls, dto):
        # Stamp wall-clock seconds-since-epoch before delegating.
        dto.last_modified = time.time()
        return super(LastModfiedJsonDao, cls).save(dto)

    @classmethod
    def save_all(cls, dtos):
        for dto in dtos:
            dto.last_modified = time.time()
        return super(LastModfiedJsonDao, cls).save_all(dtos)
class QuestionEntity(BaseEntity):
    """An object representing a top-level question."""
    # JSON payload; structure is managed by QuestionDTO/QuestionDAO.
    data = db.TextProperty(indexed=False)
class QuestionDTO(object):
    """DTO for question entities."""

    # Discriminators stored under the 'type' key of the wrapped dict.
    MULTIPLE_CHOICE = 0
    SHORT_ANSWER = 1

    def __init__(self, the_id, the_dict):
        self.id = the_id
        self.dict = the_dict

    @property
    def type(self):
        """Question type code (MULTIPLE_CHOICE/SHORT_ANSWER), None if unset."""
        return self.dict.get('type')

    @type.setter
    def type(self, value):
        self.dict['type'] = value

    @property
    def description(self):
        """Human-readable description; empty string when unset/falsy."""
        stored = self.dict.get('description')
        return stored if stored else ''

    @description.setter
    def description(self, value):
        self.dict['description'] = value

    @property
    def last_modified(self):
        """Last-modified timestamp; empty string when unset/falsy."""
        stored = self.dict.get('last_modified')
        return stored if stored else ''

    @last_modified.setter
    def last_modified(self, value):
        self.dict['last_modified'] = value
class QuestionDAO(LastModfiedJsonDao):
    """DAO for QuestionEntity rows, with question-specific helpers."""

    VERSION = '1.5'
    DTO = QuestionDTO
    ENTITY = QuestionEntity
    ENTITY_KEY_TYPE = BaseJsonDao.EntityKeyTypeId
    # Enable other modules to add post-load transformations
    POST_LOAD_HOOKS = []
    # Enable other modules to add post-save transformations
    POST_SAVE_HOOKS = []

    @classmethod
    def used_by(cls, question_id):
        """Returns the question groups using a question.

        Args:
            question_id: int. Identifier of the question we're testing.

        Returns:
            List of question groups. The list of all question groups that use
            the given question.
        """
        # O(num_question_groups), but deserialization of 1 large group takes
        # ~1ms so practically speaking latency is OK for the admin console.
        matches = []
        for group in QuestionGroupDAO.get_all():
            # Add the group the same amount of times as it contains the question
            matches.extend([group] * (
                [long(x) for x in group.question_ids].count(long(question_id))
            ))
        return matches

    @classmethod
    def create_question(cls, question_dict, question_type):
        """Saves a new question of the given type; returns its new id."""
        question = cls.DTO(None, question_dict)
        question.type = question_type
        return cls.save(question)

    @classmethod
    def get_questions_descriptions(cls):
        """Set of descriptions of all existing questions."""
        return set([q.description for q in cls.get_all()])

    @classmethod
    def validate_unique_description(cls, description):
        """Raises CollisionError if a question already uses description."""
        if description in cls.get_questions_descriptions():
            raise CollisionError(
                'Non-unique question description: %s' % description)
        return None
class QuestionImporter(object):
    """Helper class for converting ver. 1.2 questions to ver. 1.3 ones."""

    @classmethod
    def _gen_description(cls, unit, lesson_title, question_number):
        """Builds a provenance description recording where the import came from."""
        return (
            'Imported from unit "%s", lesson "%s" (question #%s)' % (
                unit.title, lesson_title, question_number))

    @classmethod
    def import_freetext(cls, question, description, task):
        """Converts a 1.2 'freetext' question into a 1.3 short-answer dict."""
        QuestionDAO.validate_unique_description(description)
        try:
            response = question.get('correctAnswerRegex')
            response = response.value if response else None
            return {
                'version': QuestionDAO.VERSION,
                'description': description,
                'question': task,
                'hint': question['showAnswerOutput'],
                'graders': [{
                    'score': 1.0,
                    'matcher': 'regex',
                    'response': response,
                    'feedback': question.get('correctAnswerOutput', '')
                }],
                'defaultFeedback': question.get('incorrectAnswerOutput', '')}
        except KeyError as e:
            raise ValidationError('Invalid question: %s, %s' % (description, e))

    @classmethod
    def import_question(
            cls, question, unit, lesson_title, question_number, task):
        """Dispatches on questionType; returns (saved id, new instance id)."""
        question_type = question['questionType']
        task = ''.join(task)
        description = cls._gen_description(unit, lesson_title, question_number)
        if question_type == 'multiple choice':
            question_dict = cls.import_multiple_choice(
                question, description, task)
            qid = QuestionDAO.create_question(
                question_dict, QuestionDAO.DTO.MULTIPLE_CHOICE)
        elif question_type == 'freetext':
            question_dict = cls.import_freetext(question, description, task)
            qid = QuestionDAO.create_question(
                question_dict, QuestionDTO.SHORT_ANSWER)
        elif question_type == 'multiple choice group':
            question_group_dict = cls.import_multiple_choice_group(
                question, description, unit, lesson_title, question_number,
                task)
            qid = QuestionGroupDAO.create_question_group(question_group_dict)
        else:
            raise ValueError('Unknown question type: %s' % question_type)
        return (qid, common_utils.generate_instance_id())

    @classmethod
    def import_multiple_choice(cls, question, description, task):
        """Converts a 1.2 'multiple choice' question into a 1.3 dict."""
        QuestionDAO.validate_unique_description(description)
        task = ''.join(task) if task else ''
        return {
            'version': QuestionDAO.VERSION,
            'description': description,
            'question': task,
            'multiple_selections': False,
            'choices': [
                {
                    'text': choice[0],
                    # choice[1].value truthy marks the correct answer.
                    'score': 1.0 if choice[1].value else 0.0,
                    'feedback': choice[2]
                } for choice in question['choices']]}

    @classmethod
    def import_multiple_choice_group(
            cls, group, description, unit, lesson_title, question_number,
            task):
        """Import a 'multiple choice group' as a question group."""
        QuestionGroupDAO.validate_unique_description(description)
        question_group_dict = {
            'version': QuestionDAO.VERSION,
            'description': description,
            'introduction': task}
        question_list = []
        for index, question in enumerate(group['questionsList']):
            description = (
                'Imported from unit "%s", lesson "%s" (question #%s, part #%s)'
                % (unit.title, lesson_title, question_number, index + 1))
            question_dict = cls.import_multiple_choice_group_question(
                question, description)
            question = QuestionDTO(None, question_dict)
            question.type = QuestionDTO.MULTIPLE_CHOICE
            question_list.append(question)
        qid_list = QuestionDAO.save_all(question_list)
        # Each member question gets equal weight in the group.
        question_group_dict['items'] = [{
            'question': str(quid),
            'weight': 1.0} for quid in qid_list]
        return question_group_dict

    @classmethod
    def import_multiple_choice_group_question(cls, orig_question, description):
        """Import the questions from a group as individual questions."""
        QuestionDAO.validate_unique_description(description)
        # TODO(jorr): Handle allCorrectOutput and someCorrectOutput
        correct_index = orig_question['correctIndex']
        # A list of correct indices means multiple selections are allowed.
        multiple_selections = not isinstance(correct_index, int)
        if multiple_selections:
            # Each correct choice is worth an equal share; wrong picks are
            # penalized with a negative score.
            partial = 1.0 / len(correct_index)
            choices = [{
                'text': text,
                'score': partial if i in correct_index else -1.0
            } for i, text in enumerate(orig_question['choices'])]
        else:
            choices = [{
                'text': text,
                'score': 1.0 if i == correct_index else 0.0
            } for i, text in enumerate(orig_question['choices'])]
        return {
            'version': QuestionDAO.VERSION,
            'description': description,
            'question': orig_question.get('questionHTML') or '',
            'multiple_selections': multiple_selections,
            'choices': choices}

    @classmethod
    def build_short_answer_question_dict(cls, question_html, matcher,
                                         response):
        """Assemble the dict for a short-answer question."""
        return {
            'version': QuestionDAO.VERSION,
            'question': question_html or '',
            'graders': [{
                'score': 1.0,
                'matcher': matcher,
                'response': response,
            }]
        }

    @classmethod
    def build_multiple_choice_question_dict(cls, question):
        """Assemble the dict for a multiple choice question."""
        question_dict = {
            'version': QuestionDAO.VERSION,
            'question': question.get('questionHTML') or '',
            'multiple_selections': False
        }
        choices = []
        for choice in question.get('choices'):
            if isinstance(choice, basestring):
                # A bare string is a zero-score (incorrect) choice...
                text = choice
                score = 0.0
            else:
                # ...a wrapped value is the full-score (correct) one.
                text = choice.value
                score = 1.0
            choices.append({
                'text': text,
                'score': score
            })
        question_dict['choices'] = choices
        return question_dict

    @classmethod
    def import_assessment_question(cls, question):
        """Converts a single 1.2 assessment question into a QuestionDTO."""
        if 'questionHTML' in question:
            question['questionHTML'] = question['questionHTML'].decode(
                'string-escape')
        # Convert a single question into a QuestionDTO.
        if 'choices' in question:
            q_dict = cls.build_multiple_choice_question_dict(
                question)
            question_type = QuestionDTO.MULTIPLE_CHOICE
        elif 'correctAnswerNumeric' in question:
            q_dict = cls.build_short_answer_question_dict(
                question.get('questionHTML'),
                'numeric',
                question.get('correctAnswerNumeric'))
            question_type = QuestionDTO.SHORT_ANSWER
        elif 'correctAnswerString' in question:
            q_dict = cls.build_short_answer_question_dict(
                question.get('questionHTML'),
                'case_insensitive',
                question.get('correctAnswerString'))
            question_type = QuestionDTO.SHORT_ANSWER
        elif 'correctAnswerRegex' in question:
            q_dict = cls.build_short_answer_question_dict(
                question.get('questionHTML'),
                'regex',
                question.get('correctAnswerRegex').value)
            question_type = QuestionDTO.SHORT_ANSWER
        else:
            raise ValueError('Unknown question type')
        question_dto = QuestionDTO(None, q_dict)
        question_dto.type = question_type
        return question_dto

    @classmethod
    def build_question_dtos(cls, assessment_dict, template, unit, errors):
        """Convert the assessment into a list of QuestionDTO's."""
        descriptions = QuestionDAO.get_questions_descriptions()
        question_dtos = []
        try:
            for i, q in enumerate(assessment_dict['questionsList']):
                description = template % (unit.title, (i + 1))
                if description in descriptions:
                    raise CollisionError(
                        'Non-unique question description: %s' % description)
                question_dto = cls.import_assessment_question(q)
                question_dto.dict['description'] = description
                question_dtos.append(question_dto)
        except CollisionError:
            errors.append(
                'This assessment has already been imported. Remove '
                'duplicate questions from the question bank in '
                'order to re-import: %s.' % description)
            return None
        except Exception as ex:
            errors.append('Unable to convert: %s' % ex)
            return None
        return question_dtos
class SaQuestionConstants(object):
    # NOTE(review): presumably the default input-widget size for
    # short-answer ('Sa') questions -- confirm against the UI code.
    DEFAULT_WIDTH_COLUMNS = 100
    DEFAULT_HEIGHT_ROWS = 1
class QuestionGroupEntity(BaseEntity):
    """An object representing a question group in the datastore."""
    # JSON payload; structure is managed by QuestionGroupDTO/DAO.
    data = db.TextProperty(indexed=False)
class QuestionGroupDTO(object):
    """Data transfer object for question groups."""

    def __init__(self, the_id, the_dict):
        self.id = the_id
        self.dict = the_dict

    @property
    def description(self):
        """Group description; empty string when unset/falsy."""
        stored = self.dict.get('description')
        return stored if stored else ''

    @property
    def introduction(self):
        """Introductory text shown before the group; '' when unset/falsy."""
        stored = self.dict.get('introduction')
        return stored if stored else ''

    @property
    def question_ids(self):
        """IDs of member questions, in group order."""
        return [entry['question'] for entry in self.dict.get('items', [])]

    def add_question(self, question_id, weight):
        """Appends a question with the given weight to this group."""
        self.dict['items'].append({'question': question_id, 'weight': weight})

    @property
    def last_modified(self):
        """Last-modified timestamp; empty string when unset/falsy."""
        stored = self.dict.get('last_modified')
        return stored if stored else ''

    @last_modified.setter
    def last_modified(self, value):
        self.dict['last_modified'] = value
class QuestionGroupDAO(LastModfiedJsonDao):
    """DAO for question groups."""

    DTO = QuestionGroupDTO
    ENTITY = QuestionGroupEntity
    ENTITY_KEY_TYPE = BaseJsonDao.EntityKeyTypeId
    # Enable other modules to add post-load transformations
    POST_LOAD_HOOKS = []
    # Enable other modules to add post-save transformations
    POST_SAVE_HOOKS = []

    @classmethod
    def get_question_groups_descriptions(cls):
        """Set of descriptions of all existing question groups."""
        return set([g.description for g in cls.get_all()])

    @classmethod
    def create_question_group(cls, question_group_dict):
        """Saves a new question group; returns its new id."""
        question_group = QuestionGroupDTO(None, question_group_dict)
        return cls.save(question_group)

    @classmethod
    def validate_unique_description(cls, description):
        """Raises CollisionError when the description is already in use."""
        if description in cls.get_question_groups_descriptions():
            raise CollisionError(
                'Non-unique question group description: %s' % description)
class LabelEntity(BaseEntity):
    """A class representing labels that can be applied to Student, Unit, etc."""

    data = db.TextProperty(indexed=False)

    # Single cache entry holding the full list of labels.
    MEMCACHE_KEY = 'labels'
    _PROPERTY_EXPORT_BLACKLIST = []  # No PII in labels.

    def put(self):
        """Save the content to the datastore.

        To support caching the list of all labels, we must invalidate
        the cache on any change to any label.

        Returns:
            Value of entity as modified by put() (i.e., key setting)
        """
        result = super(LabelEntity, self).put()
        MemcacheManager.delete(self.MEMCACHE_KEY)
        return result

    def delete(self):
        """Remove a label from the datastore.

        To support caching the list of all labels, we must invalidate
        the cache on any change to any label.
        """
        super(LabelEntity, self).delete()
        MemcacheManager.delete(self.MEMCACHE_KEY)
class LabelDTO(object):
    """DTO for labels; the wrapped dict holds title/description/type."""

    LABEL_TYPE_GENERAL = 0
    LABEL_TYPE_COURSE_TRACK = 1
    LABEL_TYPE_LOCALE = 2
    # ... etc.
    # If you are extending CourseBuilder, please consider picking
    # a number at 1,000 or over to avoid any potential conflicts
    # with types added by the CourseBuilder team in future releases.

    # Provide consistent naming and labeling for admin UI elements.
    LabelType = collections.namedtuple(
        'LabelType', ['type', 'name', 'title', 'menu_order'])
    USER_EDITABLE_LABEL_TYPES = [
        LabelType(LABEL_TYPE_GENERAL, 'general', 'General', 0),
        LabelType(LABEL_TYPE_COURSE_TRACK, 'course_track', 'Course Track', 1),
    ]
    SYSTEM_EDITABLE_LABEL_TYPES = [
        LabelType(LABEL_TYPE_LOCALE, 'locale', 'Locale', 2),
    ]
    LABEL_TYPES = USER_EDITABLE_LABEL_TYPES + SYSTEM_EDITABLE_LABEL_TYPES

    def __init__(self, the_id, the_dict):
        self.id = the_id
        self.dict = the_dict  # UI layer takes care of sanity-checks.

    @property
    def title(self):
        return self.dict.get('title', '')

    @property
    def description(self):
        return self.dict.get('description', '')

    @property
    def type(self):
        # Defaults to GENERAL when the stored dict has no 'type'.
        return self.dict.get('type', self.LABEL_TYPE_GENERAL)
class LabelManager(caching.RequestScopedSingleton):
    """Class that manages optimized loading of I18N data from datastore."""

    def __init__(self):
        # Lazily-built map of label id -> LabelDTO; None means "not loaded".
        self._key_to_label = None

    def _preload(self):
        """Fills the id -> DTO map from the datastore in one pass."""
        self._key_to_label = {}
        for row in LabelDAO.get_all_iter():
            self._key_to_label[row.id] = row

    def _get_all(self):
        if self._key_to_label is None:
            self._preload()
        return self._key_to_label.values()

    @classmethod
    def get_all(cls):
        """Returns all labels, cached for the lifetime of the request."""
        # pylint: disable-msg=protected-access
        return cls.instance()._get_all()
class LabelDAO(BaseJsonDao):
    """DAO for labels, plus helpers filtering item lists by label."""

    DTO = LabelDTO
    ENTITY = LabelEntity
    ENTITY_KEY_TYPE = BaseJsonDao.EntityKeyTypeId

    @classmethod
    def get_all(cls):
        # Sort by the admin-UI menu order of the label's type, then title.
        items = LabelManager.get_all()
        order = {lt.type: lt.menu_order for lt in LabelDTO.LABEL_TYPES}
        return sorted(items, key=lambda l: (order[l.type], l.title))

    @classmethod
    def get_all_of_type(cls, label_type):
        return [label for label in cls.get_all()
                if label.type == label_type]

    @classmethod
    def get_set_of_ids_of_type(cls, label_type):
        return set([label.id for label in cls.get_all_of_type(label_type)])

    @classmethod
    def _apply_locale_labels_to_locale(cls, locale, items):
        """Filter out items not matching locale labels and current locale."""
        if locale:
            id_to_label = {}
            for label in LabelDAO.get_all_of_type(
                    LabelDTO.LABEL_TYPE_LOCALE):
                id_to_label[int(label.id)] = label
            for item in list(items):
                # Locale-label ids carried by this item.
                item_matches = set([int(label_id) for label_id in
                                    common_utils.text_to_list(item.labels)
                                    if int(label_id) in id_to_label.keys()])
                found = False
                for item_match in item_matches:
                    label = id_to_label[item_match]
                    if id_to_label and label and label.title == locale:
                        found = True
                # Items with no locale labels at all are kept.
                if id_to_label and item_matches and not found:
                    items.remove(item)
        return items

    @classmethod
    def apply_course_track_labels_to_student_labels(
            cls, course, student, items):
        """Filters items by course-track and locale labels; mutates items."""
        MemcacheManager.begin_readonly()
        try:
            items = cls._apply_labels_to_student_labels(
                LabelDTO.LABEL_TYPE_COURSE_TRACK, student, items)
            if course.get_course_setting('can_student_change_locale'):
                return cls._apply_locale_labels_to_locale(
                    course.app_context.get_current_locale(), items)
            else:
                return cls._apply_labels_to_student_labels(
                    LabelDTO.LABEL_TYPE_LOCALE, student, items)
        finally:
            MemcacheManager.end_readonly()

    @classmethod
    def _apply_labels_to_student_labels(cls, label_type, student, items):
        """Filter out items whose labels don't match those on the student.

        If the student has no labels, all items are taken.
        Similarly, if a item has no labels, it is included.

        Args:
            label_type: a label types to consider.
            student: the logged-in Student matching the user for this request.
            items: a list of item instances, each having 'labels' attribute.

        Returns:
            A list of item instances whose labels match those on the student.
        """
        label_ids = LabelDAO.get_set_of_ids_of_type(label_type)
        if student and not student.is_transient:
            student_matches = student.get_labels_of_type(label_type)
            for item in list(items):
                item_matches = set([int(label_id) for label_id in
                                    common_utils.text_to_list(item.labels)
                                    if int(label_id) in label_ids])
                # Drop only when both sides are labeled and share nothing.
                if (student_matches and item_matches and
                        student_matches.isdisjoint(item_matches)):
                    items.remove(item)
        return items
class StudentPreferencesEntity(BaseEntity):
    """A class representing an individual's preferences for a course.

    Note that here, we are using "Student" in the broadest sense possible:
    some human associated with a course. This basically means that we want to
    support preferences that are relevant to a student's view of a course, as
    well as a course administrator's preferences. These will be saved in the
    same object but will be edited in different editors, appropriate to the
    scope of the particular field in the DTO. For example, show_hooks and
    show_jinja_context are edited in the Dashboard, in
    modules/dashboard/admin_preferences_editor.py
    while locale is set by an Ajax widget in base.html.

    Note that this type is indexed by "name" -- the key is the same as
    that of the user.get_current_user().user_id(), which is a string.

    This type is course-specific, so it must be accessed within a namespaced
    context.
    """

    data = db.TextProperty(indexed=False)

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        # Key name is the user_id; transform it to obscure PII on export.
        return db.Key.from_path(cls.kind(), transform_fn(db_key.name()))
class StudentPreferencesDTO(object):
    """Per-user course preferences wrapped around a JSON dict."""

    def __init__(self, the_id, the_dict):
        self.id = the_id
        self.dict = the_dict

    @property
    def show_hooks(self):
        """Show controls to permit editing of HTML inclusions (hook points).

        On course pages, HTML content is inserted at various locations
        (hook points). True shows those locations with controls that let an
        admin edit that HTML; False renders the content as a student sees it.

        Returns:
            True when admin wants to see edit controls, False when he doesn't.
        """
        return self.dict.get('show_hooks', True)

    @show_hooks.setter
    def show_hooks(self, value):
        self.dict['show_hooks'] = value

    @property
    def show_jinja_context(self):
        """Do/don't show dump of Jinja context on bottom of pages."""
        return self.dict.get('show_jinja_context', False)

    @show_jinja_context.setter
    def show_jinja_context(self, value):
        self.dict['show_jinja_context'] = value

    @property
    def locale(self):
        """Preferred locale code, or None when unset."""
        return self.dict.get('locale')

    @locale.setter
    def locale(self, value):
        self.dict['locale'] = value

    @property
    def last_location(self):
        """Most recently visited course page, for bookmark redirects."""
        return self.dict.get('last_location')

    @last_location.setter
    def last_location(self, value):
        self.dict['last_location'] = value
class StudentPreferencesDAO(BaseJsonDao):
    """DAO for per-user preferences, keyed by the user_id string."""

    DTO = StudentPreferencesDTO
    ENTITY = StudentPreferencesEntity
    ENTITY_KEY_TYPE = BaseJsonDao.EntityKeyTypeName
    CURRENT_VERSION = '1.0'

    @classmethod
    def load_or_create(cls):
        """Returns prefs for the current user, creating defaults if absent.

        Returns:
            StudentPreferencesDTO, or None when no user is signed in.
        """
        user = users.get_current_user()
        if not user:
            return None
        user_id = user.user_id()
        prefs = cls.load(user_id)
        if not prefs:
            # First visit: persist a default preferences record.
            prefs = StudentPreferencesDTO(
                user_id, {
                    'version': cls.CURRENT_VERSION,
                    'show_hooks': True,
                    'show_jinja_context': False
                })
            cls.save(prefs)
        return prefs
class RoleEntity(BaseEntity):
    """Datastore entity for a role; payload is a JSON dict in 'data'."""
    data = db.TextProperty(indexed=False)
class RoleDTO(object):
    """Data transfer object for roles."""

    def __init__(self, the_id, the_dict):
        self.id = the_id
        self.dict = the_dict

    def _field(self, key, fallback):
        # Shared lookup with a per-field fallback value.
        return self.dict.get(key, fallback)

    @property
    def name(self):
        """Role display name."""
        return self._field('name', '')

    @property
    def description(self):
        """Human-readable role description."""
        return self._field('description', '')

    @property
    def users(self):
        """List of users holding this role."""
        return self._field('users', [])

    @property
    def permissions(self):
        """Mapping of permissions granted by this role."""
        return self._field('permissions', {})
class RoleDAO(BaseJsonDao):
    """DAO for roles, keyed by numeric datastore id."""
    DTO = RoleDTO
    ENTITY = RoleEntity
    ENTITY_KEY_TYPE = BaseJsonDao.EntityKeyTypeId
|
UniMOOC/AAClassroom
|
models/models.py
|
Python
|
apache-2.0
| 75,333
|
#!/usr/bin/env python
"""
Run subgrid as a python script with colored output
"""
import argparse
import logging
import os
import sys
from python_subgrid.tests.utils import colorlogs
from python_subgrid.tools.scenario import apply_events, clean_events
from python_subgrid.raingrid import AREA_WIDE_RAIN, RainGridContainer
from python_subgrid.tools.scenario import AreaWideGrid
from python_subgrid.tools.scenario import EventContainer
from python_subgrid.tools.scenario import RadarGrid
from python_subgrid.wrapper import SubgridWrapper
logger = logging.getLogger(__name__)
def parse_args():
    """Parse and return the command line arguments for a subgrid run."""
    parser = argparse.ArgumentParser(description='Run subgrid')
    parser.add_argument('mdu', help='mdu files to process')
    parser.add_argument('--tend', type=int,
                        help='timestamp of end of simulation')
    parser.add_argument('--scenariodir', help='scenario directory')
    parser.add_argument('--bui', type=int, help='ontwerpbui from t=0')
    parser.add_argument('--outputdir', help='directory for output files')
    parser.add_argument(
        '--radar',
        help='radar rain from t=0, dt in iso8601 (2013-10-13T00:00:00Z)')
    parser.add_argument('--color', default=False, action='store_true',
                        help='Color logs')
    # USE WITH CARE: THE LOGGING MIGHT BREAK FORTRAN IN CERTAIN
    # CIRCUMSTANCES
    parser.add_argument('--verbose', default=False, action='store_true',
                        help='Verbose output (including subgrid output)')
    return parser.parse_args()
def main():
    """main program: run a subgrid simulation from the command line."""
    arguments = parse_args()
    if arguments.color:
        colorlogs()
    # Silence one stream so we get only 1 output stream under osx.
    # NOTE(review): the original comment said "stdout" but the code
    # replaces sys.stderr -- confirm which was intended.
    f = open(os.devnull, 'w')
    sys.stderr = f
    logger.info('Subgridpy')
    logger.setLevel(logging.DEBUG)
    if arguments.scenariodir:
        logger.info('Using scenario dir: %s' % arguments.scenariodir)
        scenario = EventContainer(arguments.scenariodir)
    # scenario events from arguments
    # NOTE(review): 'scenario' is only bound when --scenariodir is given;
    # every use below raises NameError without it -- confirm intended.
    if arguments.bui:
        if str(arguments.bui) in AREA_WIDE_RAIN.keys():
            scenario.add(
                AreaWideGrid, sim_time_start=0, sim_time_end=None,
                rain_definition=str(arguments.bui), type=None)
    if arguments.radar:
        scenario.add(
            RadarGrid, sim_time_start=0, sim_time_end=None,
            radar_dt=arguments.radar, sync=1, multiplier=1, type=None)
    logger.info('---- Scenario summary ----')
    for line in scenario.summary():
        logger.info(line)
    # Start the wrapped subgrid model.
    subgrid = SubgridWrapper(mdu=arguments.mdu,
                             set_logger=arguments.verbose,
                             output_dir=arguments.outputdir)
    subgrid.start()
    # Should not be needed
    # subgrid.library.initmodel()
    rain_grid_container = RainGridContainer(subgrid)
    if arguments.radar:
        subgrid.subscribe_dataset(rain_grid_container.memcdf_name)
    if arguments.tend:
        t_end = arguments.tend
    else:
        # default: take the end time from the model itself
        t_end = subgrid.get_nd('tend')
    logger.info('End time (seconds): %r', t_end)
    t = subgrid.get_nd('t1')  # by reference
    # Main loop: apply pending scenario events, then let the model advance
    # one internally-chosen timestep (-1).
    while t < t_end:
        apply_events(subgrid, scenario, rain_grid_container)
        subgrid.update(-1)
        t = subgrid.get_nd('t1')  # by reference
    clean_events(scenario, rain_grid_container)
|
nens/python-subgrid
|
python_subgrid/tools/subgridpy.py
|
Python
|
gpl-3.0
| 3,587
|
# Copyright 2017 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Input and output from network interfaces.
This wraps PCap, TunTap, etc., to provide a simple, universal, cooperative
interface to network interfaces.
Currently limited to Linux.
"""
from pox.lib.pxpcap import PCap
from Queue import Queue
from pox.lib.revent import Event, EventMixin
from pox.lib.ioworker.io_loop import ReadLoop
from pox.core import core
import struct
from fcntl import ioctl
import socket
from pox.lib.addresses import EthAddr, IPAddr
from pox.lib.addresses import parse_cidr, cidr_to_netmask
import os
import ctypes
IFNAMESIZ = 16   # interface name buffer length
IFREQ_SIZE = 40  # size of a struct ifreq as packed below

# from linux/if_tun.h
TUNSETIFF = 0x400454ca
TUNGETIFF = 0x800454d2
IFF_TUN = 0x0001
IFF_TAP = 0x0002
IFF_NO_PI = 0x1000
IFF_ONE_QUEUE = 0x2000
IFF_VNET_HDR = 0x4000
IFF_TUN_EXCL = 0x8000
IFF_MULTI_QUEUE = 0x0100
IFF_ATTACH_QUEUE = 0x0200
IFF_DETACH_QUEUE = 0x0400
IFF_PERSIST = 0x0800
IFF_NOFILTER = 0x1000

#from linux/if.h (flags)
IFF_UP = 1<<0
IFF_BROADCAST = 1<<1
IFF_DEBUG = 1<<2
IFF_LOOPBACK = 1<<3
IFF_POINTOPOINT = 1<<4
IFF_NOTRAILERS = 1<<5
IFF_RUNNING = 1<<6
IFF_NOARP = 1<<7
IFF_PROMISC = 1<<8
IFF_ALLMULTI = 1<<9
IFF_MASTER = 1<<10
IFF_SLAVE = 1<<11
IFF_MULTICAST = 1<<12
IFF_PORTSEL = 1<<13
IFF_AUTOMEDIA = 1<<14
IFF_DYNAMIC = 1<<15
IFF_LOWER_UP = 1<<16
IFF_DORMANT = 1<<17
IFF_ECHO = 1<<18

# Unless IFF_NO_PI, there's a header on packets:
#  16 bits of flags
#  16 bits (big endian?) protocol number

# from /usr/include/linux/sockios.h
SIOCGIFHWADDR = 0x8927
SIOCGIFMTU = 0x8921
SIOCSIFMTU = 0x8922
SIOCGIFFLAGS = 0x8913
SIOCSIFFLAGS = 0x8914
SIOCSIFHWADDR = 0x8924
SIOCGIFNETMASK = 0x891b
SIOCSIFNETMASK = 0x891c
SIOCGIFADDR = 0x8915
SIOCSIFADDR = 0x8916
SIOCGIFBRDADDR = 0x8919
SIOCSIFBRDADDR = 0x891a
SIOCSIFNAME = 0x8923
SIOCADDRT = 0x890B # rtentry (route.h) for IPv4, in6_rtmsg for IPv6
SIOCDELRT = 0x890C

# from /usr/include/linux/if_arp.h
ARPHRD_ETHER = 1
# NOTE(review): linux if_arp.h defines ARPHRD_IEEE802 as 6, not 1 -- verify
# before relying on this value (it is not referenced in this file).
ARPHRD_IEEE802 = 1
ARPHRD_IEEE1394 = 24
ARPHRD_EUI64 = 27
ARPHRD_LOOPBACK = 772
ARPHRD_IPGRE = 778
ARPHRD_IEE802_TR = 800
ARPHRD_IEE80211 = 801
ARPHRD_IEE80211_PRISM = 802
ARPHRD_IEE80211_RADIOTAP = 803
ARPHRD_IP6GRE = 823
class rtentry (object):
  """
  Wrapper for Linux rtentry

  Only tries to capture IPv4 usage.
  Possibly better done with ctypes.
  """
  # flags
  RTF_UP = 0x0001 # usable
  RTF_GATEWAY = 0x0002 # dst is gateway
  RTF_HOST = 0x0004 # host route
  RTF_REINSTATE = 0x0008 # reinstate after timeout
  RTF_DYNAMIC = 0x0010 # created dynamically (by redirect)
  RTF_MODIFIED = 0x0020 # modified dynamically (by redirect)
  RTF_MSS = 0x0040 # use specific MSS for this route
  RTF_WINDOW = 0x0080 # use per-route window clamping
  RTF_IRTT = 0x0100 # use initial RTT
  RTF_REJECT = 0x0200 # reject route

  # fields (defaults double as documentation of each slot)
  rt_hash = 0
  rt_dst = IPAddr("0.0.0.0")
  rt_gateway = IPAddr("0.0.0.0")
  rt_genmask = IPAddr("0.0.0.0")
  rt_flags = 0
  rt_refcnt = 0
  rt_use = 0
  rt_ifp = 0 # ptr to struct ifnet
  rt_metric = 0
  rt_dev = None # device name
  rt_mss = 0
  rt_window = 0 # window clamping
  rt_irtt = 0 # initial RTT

  def pack (self):
    # Serialize into the binary layout the SIOCADDRT/SIOCDELRT ioctls
    # expect: hash, three 16-byte sockaddrs (dst/gateway/genmask), flags,
    # refcnt, use, ifp pointer, metric, device-name pointer, mss, window,
    # irtt.  The device name is passed as a raw C pointer obtained via
    # ctypes, which is why the buffer must be kept alive (see below).
    if self.rt_dev:
      s = ctypes.c_char_p(self.rt_dev + "\0") # Null terminator necessary?
      dev = ctypes.cast(s, ctypes.c_void_p).value
      self._buf = s # You must use the resulting packed string before changing
                    # rt_dev!
    else:
      dev = 0
    return struct.pack("L16s16s16shhLPhPLLH",
        self.rt_hash,
        sockaddr_in(self.rt_dst).pack(),
        sockaddr_in(self.rt_gateway).pack(),
        sockaddr_in(self.rt_genmask).pack(),
        self.rt_flags,
        self.rt_refcnt,
        self.rt_use,
        self.rt_ifp,
        self.rt_metric,
        dev,
        self.rt_mss,
        self.rt_window,
        self.rt_irtt)
class sockaddr_in (object):
  """
  Wrapper for a (binary) IPv4 sockaddr_in

  Holds family, port, and address; pack() serializes to the 16-byte
  on-the-wire struct layout.
  """
  sin_family = socket.AF_INET
  sin_port = 0
  sin_addr = IPAddr("0.0.0.0")

  def __init__ (self, addr=None, port=None):
    # Both fields are optional; unset ones keep the class-level defaults.
    if addr is not None:
      self.sin_addr = IPAddr(addr)
    if port is not None:
      self.sin_port = port

  def pack (self):
    # family + port header, 4 address bytes, then zero padding out to the
    # full sockaddr size.
    header = struct.pack("hH", self.sin_family, self.sin_port)
    return header + self.sin_addr.raw + ("\0" * 8)
class Interface (object):
  """
  Simple interface to tun/tap driver

  Currently only for Linux.  IIRC, shouldn't be too hard to adapt for BSD.
  Other OSes will probably need a fair amount of work.

  All getters/setters work by issuing ioctl()s on a throwaway datagram
  socket; nothing is cached except the interface name.
  """
  #TODO: Setters

  def __init__ (self, name):
    self._name = name

  def __str__ (self):
    return "%s('%s')" % (type(self).__name__, self.name)

  @property
  def name (self):
    # The kernel NUL-pads names; strip the padding for Python use.
    return self._name.rstrip("\0")

  @name.setter
  def name (self, value):
    # Rename the interface: old name first, new name appended at the
    # ifr_newname offset (IFNAMESIZ) of the ifreq.
    if len(value) > IFNAMESIZ: raise RuntimeError("Name too long")
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
    ifr += value
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCSIFNAME, ifr)
    self._name = value

  @property
  def ipv6_enabled (self):
    # NOTE(review): file() is Python 2 only (as is much of this module).
    f = file("/proc/sys/net/ipv6/conf/%s/disable_ipv6" % (self.name,), "r")
    with f:
      return f.read()[0] == "0" # Note inversion!

  @ipv6_enabled.setter
  def ipv6_enabled (self, value):
    f = file("/proc/sys/net/ipv6/conf/%s/disable_ipv6" % (self.name,), "w")
    with f:
      f.write("0" if value else "1") # Note inversion!

  @property
  def ip_forwarding (self):
    f = file("/proc/sys/net/ipv4/conf/%s/forwarding" % (self.name,), "r")
    with f:
      return f.read()[0] == "1"

  @ip_forwarding.setter
  def ip_forwarding (self, value):
    f = file("/proc/sys/net/ipv4/conf/%s/forwarding" % (self.name,), "w")
    with f:
      f.write("1" if value else "0")

  @property
  def mtu (self):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCGIFMTU, ifr)
    # MTU is the 32-bit int following the name in the returned ifreq.
    return struct.unpack("I", ret[IFNAMESIZ:][:4])[0]

  @mtu.setter
  def mtu (self, value):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "sI", self.name, value)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCSIFMTU, ifr)

  @property
  def flags (self):
    # Raw IFF_* flag bits for this interface (see constants above).
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCGIFFLAGS, ifr)
    return struct.unpack("H", ret[IFNAMESIZ:IFNAMESIZ+2])[0]

  @flags.setter
  def flags (self, value):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "sH", self.name, value)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCSIFFLAGS, ifr)

  def set_flags (self, flags, on=True):
    # Set (or, with on=False, clear) the given IFF_* bits.
    if on:
      self.flags |= flags
    else:
      self.unset_flags(flags)

  def unset_flags (self, flags):
    # Clear the given IFF_* bits (mask limited to 16 bits).
    self.flags = self.flags & (flags ^ 0xffFF)

  @property
  def promiscuous (self):
    return bool(self.flags & IFF_PROMISC)

  @promiscuous.setter
  def promiscuous (self, value):
    self.set_flags(IFF_PROMISC, value)

  @property
  def is_up (self):
    return (self.flags & IFF_UP) != 0

  @is_up.setter
  def is_up (self, value):
    self.set_flags(IFF_UP, value)

  @property
  def is_running (self):
    return (self.flags & IFF_RUNNING) != 0

  @property
  def arp_enabled (self):
    # Inverted sense: the kernel flag is IFF_NOARP.
    return (self.flags & IFF_NOARP) == 0

  @arp_enabled.setter
  def arp_enabled (self, value):
    self.set_flags(IFF_NOARP, not value)

  @property
  def ip_addr (self):
    try:
      return self._ioctl_get_ipv4(SIOCGIFADDR)
    except IOError as e:
      # errno 99 (EADDRNOTAVAIL): no address assigned -> None.
      if e.errno == 99: return None
      raise

  @ip_addr.setter
  def ip_addr (self, value):
    return self._ioctl_set_ipv4(SIOCSIFADDR, value)

  @property
  def netmask (self):
    try:
      return self._ioctl_get_ipv4(SIOCGIFNETMASK)
    except IOError as e:
      if e.errno == 99: return None
      raise

  @netmask.setter
  def netmask (self, value):
    return self._ioctl_set_ipv4(SIOCSIFNETMASK, value)

  @property
  def broadcast_addr (self):
    try:
      return self._ioctl_get_ipv4(SIOCGIFBRDADDR)
    except IOError as e:
      if e.errno == 99: return None
      raise

  @broadcast_addr.setter
  def broadcast_addr (self, value):
    return self._ioctl_set_ipv4(SIOCSIFBRDADDR, value)

  @property
  def eth_addr (self):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCGIFHWADDR, ifr)
    sa = ret[IFNAMESIZ:] # sockaddr
    return self._get_eth(sa)

  @eth_addr.setter
  def eth_addr (self, value):
    value = EthAddr(value).raw
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "sH", self.name, ARPHRD_ETHER)
    ifr += value # Append to sockaddr
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCSIFHWADDR, ifr)

  def _ioctl_get_ipv4 (self, which):
    # Shared helper for the SIOCGIF* address getters above.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, which, ifr)
    return self._get_ipv4(ret[IFNAMESIZ:])

  def _ioctl_set_ipv4 (self, which, value):
    # Shared helper for the SIOCSIF* address setters above.
    value = IPAddr(value)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "sHHI", self.name, socket.AF_INET, 0,
                      value.toUnsigned(networkOrder=True))
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, which, ifr)

  @staticmethod
  def _get_ipv4 (sa):
    # Extract the 4 address bytes from a packed AF_INET sockaddr.
    sa_family = struct.unpack("H", sa[:2])[0]
    if sa_family == socket.AF_INET:
      return IPAddr(sa[4:8])
    else:
      raise RuntimeError("Unsupported hardware type %s for %s (expected %s)"
                         % (sa_family, self, socket.AF_INET))

  @staticmethod
  def _get_eth (sa):
    # Extract the 6 MAC bytes from a packed ARPHRD_ETHER sockaddr.
    sa_family = struct.unpack("H", sa[:2])[0]
    if sa_family == ARPHRD_ETHER:
      return EthAddr(sa[2:8])
    else:
      raise RuntimeError("Unsupported hardware type %s (expected %s)"
                         % (sa_family, ARPHRD_ETHER))

  def add_default_route (self, *args, **kw):
    return self.add_route("0.0.0.0/0", *args, **kw)

  def add_route (self, network, gateway=None, dev=(), metric=0):
    """
    Add routing table entry

    If dev is unspecified, it defaults to this device
    """
    return self._add_del_route(network, gateway, dev, metric, SIOCADDRT)

  def del_route (self, network, gateway=None, dev=(), metric=0):
    """
    Remove a routing table entry

    If dev is unspecified, it defaults to this device
    """
    return self._add_del_route(network, gateway, dev, metric, SIOCDELRT)

  def _add_del_route (self, network, gateway=None, dev=(), metric=0,
                      command=None):
    """
    Add or remove a routing table entry

    If dev is unspecified, it defaults to this device
    """
    r = rtentry()
    if isinstance(network, tuple):
      # (addr, mask) tuple: normalize into "addr/mask" string form.
      addr,mask = network
      addr = str(addr)
      if isinstance(mask, (int,long)):
        mask = cidr_to_netmask(mask)
      mask = str(mask)
      network = "%s/%s" % (addr,mask)
    host = False
    # A bare address (no "/") is a host route.
    if isinstance(network, IPAddr) or (isinstance(network, str)
                                       and "/" not in network):
      host = True
    network,bits = parse_cidr(network)
    r.rt_dst = network
    r.rt_genmask = cidr_to_netmask(bits)
    if gateway is not None:
      r.rt_gateway = IPAddr(gateway)
      r.rt_flags |= r.RTF_GATEWAY
    r.rt_metric = metric
    # NOTE(review): 'dev is ()' is an identity check against the empty
    # tuple sentinel; it works under CPython 2 but 'dev == ()' would be
    # the safer spelling.
    if dev is (): dev = self
    if isinstance(dev, Interface): dev = dev.name
    if dev: r.rt_dev = dev
    if host: r.rt_flags |= r.RTF_HOST
    r.rt_flags |= r.RTF_UP
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    rv = ioctl(sock, command, r.pack())
class TunTap (object):
  """
  Simple wrapper for tun/tap interfaces

  Looks like a file-like object.  You should be able to read/write it,
  select on it, etc.
  """
  def __init__ (self, name=None, tun=False, raw=False):
    """
    Create tun or tap

    By default, it creates a new tun or tap with a default name.  If you
    specify a name, it will either try to create it (if it doesn't exist),
    or try to use an existing interface (for which you must have permission).
    Defaults to tap (Ethernet) mode.  Specify tun=True for tun (IP) mode.
    Specify raw=True to skip the 32 bits of flag/protocol metadata.
    """
    if name is None: name = ""
    openflags = os.O_RDWR
    # Bug fix: the original assigned to a misspelled 'openflow' variable
    # inside a bare try/except, so O_BINARY was silently never added.
    # O_BINARY only exists on some platforms (not Linux), hence getattr.
    openflags |= getattr(os, "O_BINARY", 0)
    self._f = os.open("/dev/net/tun", openflags)

    # an ifreq is IFREQ_SIZE bytes long, starting with an interface name
    # (IFNAMESIZ bytes) followed by a big union.

    self.is_tun = tun
    self.is_tap = not tun
    self.is_raw = raw

    flags = 0
    if tun: flags |= IFF_TUN
    else: flags |= IFF_TAP
    if raw: flags |= IFF_NO_PI

    # Create/attach the device; the kernel writes the (possibly generated)
    # interface name back at the front of the returned ifreq.
    ifr = struct.pack(str(IFNAMESIZ) + "sH", name, flags)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(self.fileno(), TUNSETIFF, ifr)
    self.name = ret[:IFNAMESIZ]

    # Query the flags back to learn which mode we actually got.
    ifr = struct.pack(str(IFNAMESIZ) + "sH", name, 0)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(self.fileno(), TUNGETIFF, ifr)
    flags = struct.unpack("H", ret[IFNAMESIZ:IFNAMESIZ+2])[0]
    self.is_tun = (flags & IFF_TUN) == IFF_TUN
    self.is_tap = not self.is_tun
    #self.is_raw = (flags & IFF_NO_PI) == IFF_NO_PI

  def fileno (self):
    # Raw fd so this object works with select()/IO loops.
    return self._f

  def write (self, data):
    return os.write(self.fileno(), data)

  def read (self, n):
    return os.read(self.fileno(), n)

  def close (self):
    return os.close(self.fileno())

  @property
  def eth_addr (self):
    # Hardware address, fetched via the generic Interface ioctl wrapper.
    return Interface(self.name).eth_addr
class RXData (Event):
  """
  Event fired when an interface receives data
  """
  def __init__ (self, interface, data):
    # The interface the data arrived on, and the raw packet bytes.
    self.interface = interface
    self.data = data
class PCapInterface (Interface, EventMixin):
  """
  An Interface that captures incoming packets via pcap and raises RXData
  events for them; send() injects packets back out the interface.
  """
  _eventMixin_events = set([
    RXData,
  ])

  def __init__ (self, name):
    Interface.__init__(self, name)
    EventMixin.__init__(self)
    self._q = Queue()
    p = PCap(name, callback=self._pcap_cb, start=False)
    p.set_direction(True, False) # Incoming, not outgoing
    p.start()
    self.pcap = p
    # Shut pcap down cleanly when POX goes down.
    core.add_listener(self._handle_GoingDownEvent)

  def _handle_GoingDownEvent (self, event):
    self.close()

  def send (self, data):
    # Silently a no-op once closed.
    if self.pcap is None: return
    self.pcap.inject(data)

  def _pcap_cb (self, obj, data, sec, usec, length):
    """
    Handles incoming data from pcap

    This may not be on the right thread, so we just push it to a
    thread-safe queue and poke the cooperative thread, which will pop it
    later.
    """
    # Only schedule a drain when the queue was empty; otherwise one is
    # already pending.
    do_read = self._q.empty()
    self._q.put((obj,data))
    if do_read: core.callLater(self._queue_read)

  def _queue_read (self):
    # Drain at most 10 queued packets per cooperative call, re-scheduling
    # ourselves afterwards if we processed anything (more may remain).
    anything = False
    for _ in xrange(10): # as most X at once
      try:
        data = self._q.get(False)
        self._q.task_done()
        anything = True
      except:
        # Queue empty -- stop draining.
        break
      pcap,data = data
      self.raiseEventNoErrors(RXData, self, data)

    if anything:
      # Check for remainders later
      core.callLater(self._queue_read)

  def __del__ (self):
    self.close()

  def close (self):
    # Idempotent: safe to call repeatedly (also invoked from __del__).
    if self.pcap:
      self.pcap.close()
      self.pcap = None
class TapInterface (Interface, EventMixin):
  """
  An Interface backed by a TunTap device, hooked into the IO loop

  Raises RXData for packets read from the tap; send() writes packets,
  adding the 4-byte flags/protocol header unless the tap is raw.
  """
  _eventMixin_events = set([
    RXData,
  ])

  io_loop = None               # ReadLoop that polls our fd
  max_read_size = 1600         # max bytes per read from the tap
  default_send_protocol = None # protocol number used when send() gets none

  def __init__ (self, name="", tun=False, raw=False, protocol=None):
    self.tap = None
    # Header fields of the most recently received packet (non-raw mode).
    self.last_flags = None
    self.last_protocol = None
    if protocol: self.default_send_protocol = protocol
    self.io_loop = ReadLoop.singleton
    Interface.__init__(self, name)
    EventMixin.__init__(self)
    self.tap = TunTap(name, raw=raw, tun=tun)
    # If no name was requested, adopt the kernel-assigned one.
    if not name: self._name = self.tap.name
    self.io_loop.add(self)

  @property
  def is_tap (self):
    return self.tap.is_tap

  @property
  def is_tun (self):
    return self.tap.is_tun

  def send (self, data, flags=0, protocol=None):
    if not self.tap.is_raw:
      if protocol is None: protocol = self.default_send_protocol or 0
      #FIXME: In the "0" case above, should we fall back to using the Etherype
      #       in the packet?
      if flags or protocol:
        flags = struct.pack("!HH", flags, protocol) # Flags reversed?
      else:
        flags = "\0\0\0\0"
      # Prepend the 4-byte flags/protocol header.
      data = flags + data
    self.tap.write(data)

  def _do_rx (self):
    # Called by the IO loop when our fd is readable.
    data = self.tap.read(self.max_read_size)
    if not self.tap.is_raw:
      flags,proto = struct.unpack("!HH", data[:4])
      #FIXME: This may invert the flags...
      self.last_flags = flags
      self.last_protocol = proto
      data = data[4:] # Cut off header
    self.raiseEvent(RXData, self, data)

  def fileno (self):
    # Support fileno so that this can be used in IO loop directly
    return self.tap.fileno()

  def close (self):
    if self.tap:
      self.tap.close()
      self.tap = None
      self.io_loop.remove(self)

  def __del__ (self):
    self.close()
|
MurphyMc/pox
|
pox/lib/interfaceio/__init__.py
|
Python
|
apache-2.0
| 17,797
|
# -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,base64
from resources.lib.libraries import cleantitle
from resources.lib.libraries import client
from resources.lib.libraries import control
from resources.lib import resolvers
class source:
    """
    Primewire scraper source for the Specto add-on (Python 2).

    Each lookup tries the primary site and two mirrors in turn, scrapes
    search results with client.parseDOM, and returns site-relative URL
    paths (byte strings) that get_sources later resolves into hoster links.
    """
    def __init__(self):
        self.base_link = 'http://www.primewire.ag'
        self.key_link = '/index.php?search'
        # Mirrors tried in order until one serves the expected page.
        self.link_1 = 'http://www.primewire.ag'
        self.link_2 = 'http://www.primewire.org'
        self.link_3 = 'http://www.primewire.is'
        self.moviesearch_link = '/index.php?search_keywords=%s&key=%s&search_section=1'
        self.tvsearch_link = '/index.php?search_keywords=%s&key=%s&search_section=2'
        self.headers = {'Connection' : 'keep-alive'}

    def get_movie(self, imdb, title, year):
        # Returns the site-relative path of the movie page, '' on failure.
        try:
            result = ''
            links = [self.link_1, self.link_2, self.link_3]
            # Find a working mirror (its search form carries the session key).
            for base_link in links:
                result = client.request(urlparse.urljoin(base_link, self.key_link), headers=self.headers)
                if 'searchform' in str(result): break
            key = client.parseDOM(result, 'input', ret='value', attrs = {'name': 'key'})[0]
            query = self.moviesearch_link % (urllib.quote_plus(re.sub('\'', '', title)), key)
            result = client.request(urlparse.urljoin(base_link, query), headers=self.headers)
            result = result.decode('iso-8859-1').encode('utf-8')
            result = client.parseDOM(result, 'div', attrs = {'class': 'index_item.+?'})
            title = 'watch' + cleantitle.movie(title)
            # Accept the requested year plus/minus one.
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
            result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'a', ret='title')[0]) for i in result]
            result = [i for i in result if any(x in i[1] for x in years)]
            result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
            # Some links are redirect wrappers with the target in a 'u' param.
            try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0], i[1]) for i in result]
            except: pass
            result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]
            # Exact title matches first; otherwise probe candidates for the
            # imdb id (deduplicated, first five only).
            match = [i[0] for i in result if title == cleantitle.movie(i[1])]
            match2 = [i[0] for i in result]
            match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
            # NOTE(review): returns None here but '' from the except below --
            # callers appear to tolerate both; confirm.
            if match2 == []: return
            for i in match2[:5]:
                try:
                    if len(match) > 0:
                        url = match[0]
                        break
                    result = client.request(base_link + i, headers=self.headers)
                    if str(imdb) in str(result):
                        url = i
                        break
                except:
                    pass
            url = url.encode('utf-8')
            return url
        except:
            return ''

    def get_show(self, imdb, tvdb, tvshowtitle, year):
        # Same flow as get_movie, but against the TV search section.
        try:
            result = ''
            links = [self.link_1, self.link_2, self.link_3]
            for base_link in links:
                result = client.request(urlparse.urljoin(base_link, self.key_link), headers=self.headers)
                if 'searchform' in str(result): break
            key = client.parseDOM(result, 'input', ret='value', attrs = {'name': 'key'})[0]
            query = self.tvsearch_link % (urllib.quote_plus(re.sub('\'', '', tvshowtitle)), key)
            result = client.request(urlparse.urljoin(base_link, query), headers=self.headers)
            result = result.decode('iso-8859-1').encode('utf-8')
            result = client.parseDOM(result, 'div', attrs = {'class': 'index_item.+?'})
            tvshowtitle = 'watch' + cleantitle.tv(tvshowtitle)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
            result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'a', ret='title')[0]) for i in result]
            result = [i for i in result if any(x in i[1] for x in years)]
            result = [(client.replaceHTMLCodes(i[0]), i[1]) for i in result]
            try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0], i[1]) for i in result]
            except: pass
            result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]
            match = [i[0] for i in result if tvshowtitle == cleantitle.tv(i[1])]
            match2 = [i[0] for i in result]
            match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return
            for i in match2[:5]:
                try:
                    if len(match) > 0:
                        url = match[0]
                        break
                    result = client.request(base_link + i, headers=self.headers)
                    if str(imdb) in str(result):
                        url = i
                        break
                except:
                    pass
            url = url.encode('utf-8')
            return url
        except:
            return

    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        # Derive the episode page path from the show path returned above.
        if url == None: return
        url = url.replace('/watch-','/tv-')
        url += '/season-%01d-episode-%01d' % (int(season), int(episode))
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url

    def get_sources(self, url, hosthdDict, hostDict, locDict):
        # Scrape hoster links (base64-encoded in 'domain'/'url' params) from
        # the page's link tables; only CAM and SD qualities are kept.
        try:
            sources = []
            if url == None: return sources
            result = ''
            links = [self.link_1, self.link_2, self.link_3]
            for base_link in links:
                result = client.request(urlparse.urljoin(base_link, url), headers=self.headers)
                if 'choose_tabs' in str(result): break
            result = result.decode('iso-8859-1').encode('utf-8')
            links = client.parseDOM(result, 'tbody')
            for i in links:
                try:
                    u = client.parseDOM(i, 'a', ret='href')[0]
                    u = client.replaceHTMLCodes(u)
                    try: u = urlparse.parse_qs(urlparse.urlparse(u).query)['u'][0]
                    except: pass
                    host = urlparse.parse_qs(urlparse.urlparse(u).query)['domain'][0]
                    host = base64.urlsafe_b64decode(host.encode('utf-8'))
                    #host = host.rsplit('.', 1)[0]
                    #host = host.strip().lower()
                    # Skip hosters the caller does not support.
                    if not host in hostDict: raise Exception()
                    try: host = host.split('.')[0]
                    except: pass
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    url = urlparse.parse_qs(urlparse.urlparse(u).query)['url'][0]
                    url = base64.urlsafe_b64decode(url.encode('utf-8'))
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    quality = client.parseDOM(i, 'span', ret='class')[0]
                    if quality == 'quality_cam' or quality == 'quality_ts': quality = 'CAM'
                    elif quality == 'quality_dvd': quality = 'SD'
                    else: raise Exception()
                    sources.append({'source': host, 'quality': quality, 'provider': 'Primewire', 'url': url})
                except:
                    pass
            return sources
        except:
            return sources

    def resolve(self, url):
        # Hand the hoster URL to the shared resolver chain; None on failure.
        try:
            url = resolvers.request(url)
            return url
        except:
            return
|
repotvsupertuga/repo
|
plugin.video.pancas/resources/lib/sources/primewire_mv_tv.py
|
Python
|
gpl-2.0
| 8,324
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-30 12:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Alters Todo.category: with preserve_default=False, default=1 is used
    # only to populate existing rows during this migration and is not kept
    # on the model afterwards.

    dependencies = [
        ('todo', '0006_auto_20160530_1210'),
    ]

    operations = [
        migrations.AlterField(
            model_name='todo',
            name='category',
            field=models.ForeignKey(blank=True, default=1, on_delete=django.db.models.deletion.DO_NOTHING, to='todo.Category'),
            preserve_default=False,
        ),
    ]
|
Azarn/mytodo
|
todo/migrations/0007_auto_20160530_1233.py
|
Python
|
apache-2.0
| 593
|
# -*- encoding: utf-8 -*-
# Minimal Django settings used only for running the test suite.
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'  # run tests through nose
SECRET_KEY = 'cheese'  # dummy value; acceptable for tests only

# NOTE(review): DATABASE_ENGINE is the pre-Django-1.2 setting; DATABASES
# below supersedes it -- confirm whether it is still needed.
DATABASE_ENGINE = 'sqlite3'
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        # Throwaway in-memory database, recreated per test run.
        'NAME': ':memory:',
    }
}
INSTALLED_APPS = (
    'memcached_memoize',
    'django_nose',
)
ROOT_URLCONF = 'tests.urls'
DEBUG = True
CACHES = {
    'default': {
        # Local-memory cache so tests need no real memcached server.
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    }
}
|
globocom/memcached_memoize
|
tests/settings.py
|
Python
|
bsd-3-clause
| 457
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import numpy as np
import pandas as pd
from pyarrow.compat import unittest, u, unicode_type
import pyarrow as pa
class TestScalars(unittest.TestCase):
    """
    Tests for pyarrow scalar value wrappers: construction guards, per-type
    value access (as_py / equality / repr), hashing, and nested access for
    list, dictionary, and struct values.
    """
    def test_null_singleton(self):
        # NAType must not be directly constructible (pa.NA is a singleton).
        with self.assertRaises(Exception):
            pa.NAType()

    def test_ctor_null_check(self):
        # ARROW-1155
        with pytest.raises(ReferenceError):
            repr(pa.Int16Value())
        with pytest.raises(ReferenceError):
            str(pa.Int16Value())
        with pytest.raises(ReferenceError):
            repr(pa.StringValue())

    def test_bool(self):
        arr = pa.array([True, None, False, None])
        v = arr[0]
        assert isinstance(v, pa.BooleanValue)
        assert repr(v) == "True"
        assert v.as_py() is True
        # Null slots come back as the pa.NA singleton.
        assert arr[1] is pa.NA

    def test_int64(self):
        arr = pa.array([1, 2, None])
        v = arr[0]
        assert isinstance(v, pa.Int64Value)
        assert repr(v) == "1"
        assert v.as_py() == 1
        assert v == 1
        assert arr[2] is pa.NA

    def test_double(self):
        arr = pa.array([1.5, None, 3])
        v = arr[0]
        assert isinstance(v, pa.DoubleValue)
        assert repr(v) == "1.5"
        assert v.as_py() == 1.5
        assert v == 1.5
        assert arr[1] is pa.NA
        v = arr[2]
        # Integer input is widened to float by the double type.
        assert v.as_py() == 3.0

    def test_half_float(self):
        arr = pa.array([np.float16(1.5), None], type=pa.float16())
        v = arr[0]
        assert isinstance(v, pa.HalfFloatValue)
        assert repr(v) == "1.5"
        assert v.as_py() == 1.5
        assert v == 1.5
        assert arr[1] is pa.NA

    def test_string_unicode(self):
        arr = pa.array([u'foo', None, u'mañana'])
        v = arr[0]
        assert isinstance(v, pa.StringValue)
        assert v.as_py() == 'foo'
        assert v == 'foo'
        # Assert that newly created values are equal to the previously created
        # one.
        assert v == arr[0]
        assert arr[1] is pa.NA
        v = arr[2].as_py()
        assert v == u'mañana'
        assert isinstance(v, unicode_type)

    def test_bytes(self):
        arr = pa.array([b'foo', None, u('bar')])
        v = arr[0]
        assert isinstance(v, pa.BinaryValue)
        assert v.as_py() == b'foo'
        assert v == b'foo'
        assert arr[1] is pa.NA
        v = arr[2].as_py()
        # Unicode input to a binary array comes back as bytes.
        assert v == b'bar'
        assert isinstance(v, bytes)

    def test_fixed_size_bytes(self):
        data = [b'foof', None, b'barb']
        arr = pa.array(data, type=pa.binary(4))
        v = arr[0]
        assert isinstance(v, pa.FixedSizeBinaryValue)
        assert v.as_py() == b'foof'
        assert arr[1] is pa.NA
        v = arr[2].as_py()
        assert v == b'barb'
        assert isinstance(v, bytes)

    def test_list(self):
        arr = pa.array([['foo', None], None, ['bar'], []])
        v = arr[0]
        assert len(v) == 2
        assert isinstance(v, pa.ListValue)
        assert repr(v) == "['foo', None]"
        assert v.as_py() == ['foo', None]
        assert v[0].as_py() == 'foo'
        assert v[1] is pa.NA
        # Negative indexing wraps like Python lists...
        assert v[-1] == v[1]
        assert v[-2] == v[0]
        # ...but out-of-range access raises on both ends.
        with pytest.raises(IndexError):
            v[-3]
        with pytest.raises(IndexError):
            v[2]
        assert arr[1] is pa.NA
        v = arr[3]
        assert len(v) == 0

    def test_timestamp(self):
        arr = pd.date_range('2000-01-01 12:34:56', periods=10).values
        units = ['s', 'ms', 'us', 'ns']
        for unit in units:
            # Naive timestamps round-trip to naive pandas Timestamps.
            dtype = 'datetime64[{0}]'.format(unit)
            arrow_arr = pa.Array.from_pandas(arr.astype(dtype))
            expected = pd.Timestamp('2000-01-01 12:34:56')
            assert arrow_arr[0].as_py() == expected
            # Timezone-tagged arrow types come back tz-aware/converted.
            tz = 'America/New_York'
            arrow_type = pa.timestamp(unit, tz=tz)
            dtype = 'datetime64[{0}]'.format(unit)
            arrow_arr = pa.Array.from_pandas(arr.astype(dtype),
                                             type=arrow_type)
            expected = (pd.Timestamp('2000-01-01 12:34:56')
                        .tz_localize('utc')
                        .tz_convert(tz))
            assert arrow_arr[0].as_py() == expected

    def test_dictionary(self):
        colors = ['red', 'green', 'blue']
        colors_dict = {'red': 0, 'green': 1, 'blue': 2}
        values = pd.Series(colors * 4)
        categorical = pd.Categorical(values, categories=colors)
        v = pa.DictionaryArray.from_arrays(categorical.codes,
                                           categorical.categories)
        for i, c in enumerate(values):
            assert v[i].as_py() == c
            assert v[i].dictionary_value == c
            assert v[i].index_value == colors_dict[c]

    def test_int_hash(self):
        # ARROW-640
        int_arr = pa.array([1, 1, 2, 1])
        assert hash(int_arr[0]) == hash(1)

    def test_float_hash(self):
        # ARROW-640
        float_arr = pa.array([1.4, 1.2, 2.5, 1.8])
        assert hash(float_arr[0]) == hash(1.4)

    def test_string_hash(self):
        # ARROW-640
        str_arr = pa.array(["foo", "bar"])
        assert hash(str_arr[1]) == hash("bar")

    def test_bytes_hash(self):
        # ARROW-640
        byte_arr = pa.array([b'foo', None, b'bar'])
        assert hash(byte_arr[2]) == hash(b"bar")

    def test_array_to_set(self):
        # ARROW-640
        arr = pa.array([1, 1, 2, 1])
        set_from_array = set(arr)
        assert isinstance(set_from_array, set)
        assert set_from_array == {1, 2}

    def test_struct_value_subscripting(self):
        ty = pa.struct([pa.field('x', pa.int16()),
                        pa.field('y', pa.float32())])
        arr = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty)
        assert arr[0]['x'] == 1
        assert arr[0]['y'] == 2.5
        assert arr[1]['x'] == 3
        assert arr[1]['y'] == 4.5
        assert arr[2]['x'] == 5
        assert arr[2]['y'] == 6.5
        # Out-of-range row -> IndexError; unknown field name -> KeyError.
        with pytest.raises(IndexError):
            arr[4]['non-existent']
        with pytest.raises(KeyError):
            arr[0]['non-existent']
|
wagavulin/arrow
|
python/pyarrow/tests/test_scalars.py
|
Python
|
apache-2.0
| 6,936
|
# Point Django at the custom AppConfig for this app.
default_app_config = 'website.apps.CoreConfig'
|
jonge-democraten/website
|
website/core/__init__.py
|
Python
|
mit
| 48
|
"""
WSGI config for c_all_in project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already did.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "c_all_in.settings")

# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
|
uwekamper/c-all-in
|
c_all_in/c_all_in/wsgi.py
|
Python
|
mit
| 393
|
from bs4 import BeautifulSoup
import requests
import re
# Scrape name, author, description and cover image for one app from the
# (mobile) Play Store page and print them.
url = "https://play.google.com/store/apps/details?id=com.sillygames.killingSpree"

#using mobile agent to reduce the website size
headers = {
    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 7_1_1 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) CriOS/35.0.1916.38 Mobile/11D201 Safari/9537.53'
}

response = requests.get(url, headers=headers)
# NOTE(review): no explicit parser given -- bs4 picks one and warns;
# consider BeautifulSoup(response.text, 'html.parser').
soup = BeautifulSoup(response.text)

# The CSS class names below are whatever the Play Store used at the time
# of writing -- fragile by nature.
app_name = soup.find_all("div", {"class" : "document-title"})[0].get_text()
author = soup.find_all("a", {"class" : "document-subtitle primary"})[0].get_text()
description = "\n".join(soup.find_all("div", {"class" : "id-app-orig-desc"})[0].strings)
cover = soup.find_all("img", {"class" : "cover-image"})[0].get("src")

print(app_name)
print(author)
print(description)
print(cover)
|
ganeshkamathp/PlayStore-Parser
|
parser.py
|
Python
|
cc0-1.0
| 832
|
""" ============================================================================
File name:
rpy2_ggplot_test.py
Author:
gong-yi@GongTop0
Created on:
2013/12/24 02:17:42
Purpose:
To show
Copyright:
BSD / Apache
---------------------------------------------------------------------------- """
import pandas as pd
import rpy2.robjects as R
from rpy2.robjects.lib import grid, ggplot2
## read in the distances to railroad (we calculated)
neardist = pd.read_csv('data/NearDistance.csv')
## convert to R dataframe, via Python Dictionary data type
neardist_dataf = { 'OBAMA_SHAR': R.FloatVector(neardist['OBAMA_SHAR']),
'NEAR_DIST': R.FloatVector(neardist['NEAR_DIST']) }
RR_distance = R.DataFrame(neardist_dataf)
print(RR_distance.colnames)
## we use R instance of robjects to issue R commands
## load pre-prepared IL map data sets and print contents:
R.r('print(load("data/IL.RData"))')
## loaded data sets can now be accessed through R handle
## note that different from R dot . is not valid for Python variable names!
IL_railroads = R.r('IL.railroads')
IL_final = R.r('IL.final')
## import device driver from R with importr to plot to PNG
## we can then call any function in the grdevices package
grdevices = R.packages.importr('grDevices')
grdevices.png(file='mapplot.png', width=1300, height=1000)
## plot the map
## note that the order matters when we add another layer in ggplot
## (here IL_railroads): first aes, then data, that's different from R
## (see http://permalink.gmane.org/gmane.comp.python.rpy/2349)
## note that we use dictionary to set the opts to be able to set options as
## keywords, for example legend.key.size
p_map = ggplot2.ggplot(IL_final) + \
ggplot2.geom_polygon(ggplot2.aes(x = 'long', \
y = 'lat', \
group = 'group', \
color = 'ObamaShare', \
fill = 'ObamaShare')) + \
ggplot2.scale_fill_gradient(high = 'blue', \
low = 'red') + \
ggplot2.scale_fill_continuous(name = "Obama Vote Share") + \
ggplot2.scale_colour_continuous(name = "Obama Vote Share") + \
ggplot2.theme(**{ 'legend.position': 'left', \
'legend.key.size': R.r.unit(2, 'lines'), \
'legend.title' : ggplot2.element_text(size = 14, hjust=0), \
'legend.text': ggplot2.element_text(size = 12), \
'title' : ggplot2.element_text('Obama Vote Share and Distance to Railroads in IL'), \
'plot.title': ggplot2.element_text(size = 24),
'plot.margin': R.r.unit(R.r.rep(0,4),'lines'), \
'panel.background': ggplot2.element_blank(), \
'panel.grid.minor': ggplot2.element_blank(), \
'panel.grid.major': ggplot2.element_blank(), \
'axis.ticks': ggplot2.element_blank(), \
'axis.title.x': ggplot2.element_blank(), \
'axis.title.y': ggplot2.element_blank(), \
'axis.title.x': ggplot2.element_blank(), \
'axis.title.x': ggplot2.element_blank(), \
'axis.text.x': ggplot2.element_blank(), \
'axis.text.y': ggplot2.element_blank()} ) + \
ggplot2.geom_line(ggplot2.aes(x='long',
y='lat',
group='group'),
data=IL_railroads,
color='grey',
size=0.2) + \
ggplot2.coord_equal()
p_map.plot()
## add the scatterplot
## define layout of subplot with viewports
vp_sub = grid.viewport(x = 0.19, y = 0.2,
width = 0.32, height = 0.4)
p_sub = ggplot2.ggplot(RR_distance) + \
ggplot2.aes_string(x = 'OBAMA_SHAR',
y = 'NEAR_DIST') + \
ggplot2.geom_point(ggplot2.aes(color='OBAMA_SHAR')) + \
ggplot2.stat_smooth(color="black",
method='auto') + \
ggplot2.theme(**{ 'legend.position' : 'none' }) + \
ggplot2.scale_x_continuous("Obama Vote Share") + \
ggplot2.scale_y_continuous("Distance to nearest Railroad")
p_sub.plot(vp=vp_sub)
grdevices.dev_off()
""" ----------------------------------------------------------------------------
End note:
(end note starts here)
============================================================================ """
|
GongYiLiao/Python_Daily
|
2013/Dec/23/rpy2_ggplot_test.py
|
Python
|
mit
| 4,822
|
class TimetableGenerator:
    """Enumerates every shift combination and keeps the 100 best timetables
    under each of two heuristics (gap time and total daily span)."""

    #: Maximum number of candidate timetables retained per heuristic.
    _CAPACITY = 100

    def __init__(self):
        # Best candidates ordered by heuristic (total gap time between lessons).
        self.generated = []
        # Best candidates ordered by heuristic2 (total daily time span).
        self.generated2 = []
        # Number of complete timetables examined so far.
        self.total_combinations = 0

    def store_timetable(self, tt):
        """Record *tt*, keeping only the best _CAPACITY timetables per heuristic.

        Bug fix: the previous version compared new candidates against
        self.generated[99] before the list had ever been sorted, so the first
        eviction decisions were made against an arbitrary element instead of
        the current worst candidate.
        """
        self.total_combinations += 1
        tt.heuristic = tt.total_time()
        tt.heuristic2 = tt.total_time2()
        if len(self.generated) < self._CAPACITY:
            self.generated.append(tt)
            self.generated2.append(tt)
            # Sort both lists the moment they reach capacity so that the last
            # element is always the current worst candidate.
            if len(self.generated) == self._CAPACITY:
                self.generated.sort(key=lambda t: t.heuristic)
                self.generated2.sort(key=lambda t: t.heuristic2)
        else:
            if tt.heuristic < self.generated[-1].heuristic:  # tt is better
                self.generated[-1] = tt
                self.generated.sort(key=lambda t: t.heuristic)
            if tt.heuristic2 < self.generated2[-1].heuristic2:  # tt is better
                self.generated2[-1] = tt
                self.generated2.sort(key=lambda t: t.heuristic2)

    def generate_timetables(self, lesson_blocks):
        """Generate every combination of shifts and return up to 100
        timetables ranked by combined position in both heuristic orderings."""
        self.generate(Timetable(), lesson_blocks)
        self.generated.sort(key=lambda t: t.heuristic)
        self.generated2.sort(key=lambda t: t.heuristic2)
        # Keep only timetables ranked among the best under *both* heuristics.
        best = list(set(self.generated).intersection(set(self.generated2)))
        for tt in best:
            # A timetable scores higher the earlier (better) it appears in
            # each heuristic ordering.
            tt.score += len(self.generated) - self.generated.index(tt)
            tt.score += len(self.generated2) - self.generated2.index(tt)
        best.sort(key=lambda tt: tt.score, reverse=True)
        return best[:self._CAPACITY]

    def generate(self, timetable, lesson_blocks):
        """Depth-first expansion: choose one shift for each lesson block."""
        if not lesson_blocks:
            self.store_timetable(timetable)
        else:
            next_lesson_block = lesson_blocks[0]
            for shift in next_lesson_block.shifts:
                # NOTE(review): overlap filtering (timetable.supports(shift))
                # was commented out in the original; behaviour kept as-is.
                self.generate(timetable.append_shift(shift), lesson_blocks[1:])
class Timetable:
    """A candidate timetable: a flat list of lesson slots plus a score used
    to rank it against other generated timetables."""

    def __init__(self):
        self.lessons = []   # flat list of LessonSlot objects
        self.score = 0      # combined ranking score, filled in by the generator

    def append_shift(self, shift):
        """Return a NEW Timetable extending this one with *shift*'s slots."""
        new_timetable = Timetable()
        new_timetable.lessons.extend(self.lessons)
        new_timetable.lessons.extend(shift.slots)
        return new_timetable

    def supports(self, shift):
        """True if *shift* can be added without overlapping existing slots."""
        for slot in shift.slots:
            for existing in self.lessons:
                if slot.overlaps_with(existing):
                    return False
        return True

    # heuristics for selecting timetables

    def total_time2(self):
        """Sum over days of the span from first lesson start to last lesson end.

        Bug fix: the original iterated range(MONDAY, SUNDAY), which excludes
        Sunday, so Sunday lessons were invisible to both heuristics.
        """
        result = 0
        for weekday in range(Weekday.MONDAY, Weekday.SUNDAY + 1):
            daily_lessons = [slot for slot in self.lessons if slot.day == weekday]
            if daily_lessons:
                earliest_start = min(slot.start.minutes for slot in daily_lessons)
                latest_end = max(slot.end.minutes for slot in daily_lessons)
                result += latest_end - earliest_start
        return result

    def total_time(self):
        """Sum of gaps between consecutive lessons over all days; overlapping
        lessons incur a large penalty (1000 minutes) instead of a gap."""
        result = 0
        for weekday in range(Weekday.MONDAY, Weekday.SUNDAY + 1):
            daily_lessons = [slot for slot in self.lessons if slot.day == weekday]
            if len(daily_lessons) > 1:
                daily_lessons.sort(key=lambda s: s.start.minutes)
                prev = daily_lessons[0].end.minutes
                for slot in daily_lessons[1:]:
                    if slot.start.minutes >= prev:
                        result += slot.start.minutes - prev
                    else:
                        result += 1000  # overlap penalty
                    prev = slot.end.minutes
        return result
class Course(object):
    """A named course holding the lesson blocks it consists of."""

    def __init__(self, name):
        self.name = name
        self.lesson_blocks = []

    def add_lesson_block(self, lesson_block):
        """Attach *lesson_block* to this course and set its back-reference."""
        lesson_block.parent_course = self
        self.lesson_blocks.append(lesson_block)

    def get_block_by_category(self, category):
        """Return the first block with the given category, or None."""
        matches = (b for b in self.lesson_blocks if b.category == category)
        return next(matches, None)
class LessonBlock:
    """A group of interchangeable shifts for one lesson category."""

    def __init__(self, category):
        self.category = category
        self.shifts = []

    def add_shift(self, shift):
        """Register *shift* under this block and set its back-reference."""
        shift.parent_lesson_block = self
        self.shifts.append(shift)
class Shift:
    """A named shift made up of one or more lesson slots."""

    def __init__(self, name):
        self.name = name
        self.slots = []

    def add_lesson_slot(self, lesson_slot):
        """Register *lesson_slot* in this shift and set its back-reference."""
        lesson_slot.parent_shift = self
        self.slots.append(lesson_slot)
class LessonSlot:
    """A single lesson occurrence: a weekday, start/end times and a room."""

    def __init__(self, day, start, end, room):
        self.day = day
        self.start = start
        self.end = end
        self.room = room

    def course_name(self):
        """Name of the course this slot ultimately belongs to."""
        return self.parent_shift.parent_lesson_block.parent_course.name

    def lesson_category(self):
        """Category of the lesson block this slot belongs to."""
        return self.parent_shift.parent_lesson_block.category

    def overlaps_with_group(self, group):
        """True if this slot overlaps any *other* slot in a group of slot lists."""
        return any(
            self.overlaps_with(other)
            for ss in group
            for other in ss
            if self != other
        )

    def overlaps_with(self, other):
        """True when both slots are on the same day and their times intersect."""
        if self.day != other.day:
            return False
        return self.start.is_before(other.end) and self.end.is_after(other.start)
class Weekday:
    """Integer constants for the days of the week (Monday == 0 .. Sunday == 6)."""
    MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = range(7)
class Time:
    """A time of day stored as minutes since midnight."""

    def __init__(self, hour, minute):
        self.minutes = hour * 60 + minute

    def is_before(self, other):
        """True if this time is strictly earlier than *other*."""
        return self.minutes < other.minutes

    def is_after(self, other):
        """True if this time is strictly later than *other*."""
        return self.minutes > other.minutes

    def __str__(self):
        # Use integer divmod: the original used true division (minutes/60),
        # which produced a float and silently relied on %d truncating it.
        hours, mins = divmod(self.minutes, 60)
        return "%d:%02d" % (hours, mins)
|
colobas/gerador-horarios
|
tt_generator.py
|
Python
|
mit
| 4,842
|
# Optional IBM Quantum Experience integration: quant_exp is only functional
# when the IBMQuantumExperience SDK is importable.
try:
    from IBMQuantumExperience import IBMQuantumExperience
    from pprint import pprint
    def quant_exp(args):
        """Print the authenticated user's most recent IBM QX code objects."""
        # The API token is kept out of the source tree in etc/IBMtoken.
        with open('etc/IBMtoken', 'r') as infile:
            token = infile.read().replace('\n', '')
        config = {
            "url": 'https://quantumexperience.ng.bluemix.net/api'
        }
        QuantExp = IBMQuantumExperience.IBMQuantumExperience
        api = QuantExp(token, config)
        for code in api.get_last_codes():
            pprint(code)
        '''
        name = code['name']
        if 'grover' in name.lower():
            print('IBM Results for{}'.format(name))
            pprint(code['executions'][-1]['result'])
        #pprint(code.keys())
        #pprint(api.get_execution(name))
        '''
except ModuleNotFoundError:
    print('IBM suite not installed')
    #1/0
# NOTE(review): this second definition runs unconditionally after the
# try/except and REPLACES the quant_exp defined above, so every call raises
# ZeroDivisionError. It looks like a deliberate stub left in to disable the
# IBM query -- confirm before relying on quant_exp.
def quant_exp(args):
    1/0
|
LSaldyt/qnp
|
scripts/quant_exp.py
|
Python
|
mit
| 901
|
def my_or(x, y):
    # Mirrors Python's `or`: returns x when truthy, otherwise y.
    # NOTE(review): this file is debugger test data (DebuggerProject); its
    # exact layout may be significant to the tests that consume it.
    return x or y
my_or(True, False)
|
DEVSENSE/PTVS
|
Python/Tests/TestData/DebuggerProject/LocalBooleanTest.py
|
Python
|
apache-2.0
| 55
|
import unittest
import sys
import traceback
from util import *
class FakeImporter(Importer):
    """Importer whose do_import_module always raises ImportError.

    Compiling and evaluating ``import doesntexist`` yields a genuine
    ImportError whose traceback points at the fake file name.
    """

    def do_import_module(self, name):
        code = compile('import doesntexist\n', 'not_a_file.py', 'exec')
        eval(code)
class TestImport(unittest.TestCase):
    # Verifies that import_module captures the traceback of a failed import
    # and exposes it on the raised ImportError.
    def test_backtrace(self):
        imp = FakeImporter()
        try:
            result = imp.import_module('dummy')
        # Python 2 "except E, name" syntax: this file targets Python 2.
        except ImportError, inst:
            #print inst.value
            pass
        # NOTE(review): 'inst' is only bound when the exception fires; if
        # import_module ever stops raising, this becomes a NameError.
        self.assertEqual(inst.value, \
            '\n'
            'Traceback (most recent call last):\n' \
            ' File "not_a_file.py", line 1, in <module>\n' \
            'ImportError: No module named doesntexist\n')
|
epronk/pyfit2
|
test_import.py
|
Python
|
gpl-2.0
| 765
|
from __future__ import unicode_literals, print_function
import pytest
import logging
from psd_tools.compression import (
compress, decompress, encode_prediction, decode_prediction,
encode_rle, decode_rle
)
from psd_tools.constants import Compression
logger = logging.getLogger(__name__)
# Raw sample data used as round-trip fixtures; the names encode the
# presumed dimensions and bit depth (e.g. 3x3 pixels at 8 bits per sample).
RAW_IMAGE_3x3_8bit = b'\x00\x01\x02\x01\x01\x01\x01\x00\x00'
RAW_IMAGE_2x2_16bit = b'\x00\x01\x00\x02\x00\x03\x00\x04'
# Parenthesised purely to keep the literal within the line-length limit.
RAW_IMAGE_2x2_32bit = (
    b'\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x04'
)
@pytest.mark.parametrize(
    'fixture, width, height, depth', [
        (bytes(bytearray(range(256))), 128, 2, 8),
        (bytes(bytearray(range(256))), 64, 2, 16),
        (bytes(bytearray(range(256))), 32, 2, 32),
    ]
)
def test_prediction(fixture, width, height, depth):
    """Prediction encoding followed by decoding is the identity."""
    roundtripped = decode_prediction(
        encode_prediction(fixture, width, height, depth), width, height, depth
    )
    assert roundtripped == fixture
@pytest.mark.parametrize(
    'fixture, width, height, depth, version', [
        (bytes(bytearray(range(256))), 128, 2, 8, 1),
        (bytes(bytearray(range(256))), 128, 2, 8, 2),
    ]
)
def test_rle(fixture, width, height, depth, version):
    """RLE encoding followed by decoding is the identity for both versions."""
    roundtripped = decode_rle(
        encode_rle(fixture, width, height, depth, version),
        width, height, depth, version,
    )
    assert roundtripped == fixture
@pytest.mark.parametrize(
    'data, kind, width, height, depth, version', [
        (RAW_IMAGE_3x3_8bit, Compression.RAW, 3, 3, 8, 1),
        (RAW_IMAGE_3x3_8bit, Compression.RLE, 3, 3, 8, 1),
        (RAW_IMAGE_3x3_8bit, Compression.RLE, 3, 3, 8, 2),
        (RAW_IMAGE_3x3_8bit, Compression.ZIP, 3, 3, 8, 1),
        (RAW_IMAGE_3x3_8bit, Compression.ZIP_WITH_PREDICTION, 3, 3, 8, 1),
        (RAW_IMAGE_2x2_16bit, Compression.RAW, 2, 2, 16, 1),
        (RAW_IMAGE_2x2_16bit, Compression.RLE, 2, 2, 16, 1),
        (RAW_IMAGE_2x2_16bit, Compression.RLE, 2, 2, 16, 2),
        (RAW_IMAGE_2x2_16bit, Compression.ZIP, 2, 2, 16, 1),
        (RAW_IMAGE_2x2_16bit, Compression.ZIP_WITH_PREDICTION, 2, 2, 16, 1),
        (RAW_IMAGE_2x2_32bit, Compression.RAW, 2, 2, 32, 1),
        (RAW_IMAGE_2x2_32bit, Compression.RLE, 2, 2, 32, 1),
        (RAW_IMAGE_2x2_32bit, Compression.RLE, 2, 2, 32, 2),
        (RAW_IMAGE_2x2_32bit, Compression.ZIP, 2, 2, 32, 1),
        (RAW_IMAGE_2x2_32bit, Compression.ZIP_WITH_PREDICTION, 2, 2, 32, 1),
    ]
)
def test_compress_decompress(data, kind, width, height, depth, version):
    """compress followed by decompress restores the raw data exactly."""
    restored = decompress(
        compress(data, kind, width, height, depth, version),
        kind, width, height, depth, version,
    )
    assert restored == data, 'output=%r, expected=%r' % (restored, data)
# This will fail due to irreversible zlib compression.
@pytest.mark.xfail
@pytest.mark.parametrize(
    'data, width, height, depth', [(
        b'H\x89\xb2g`8\xc8P\xca\xc0\xd0\xcd\xf0\x85\x81\x81\x87\x01\n\xec1D'
        b'\xed\x0f\x02\xc5\xcc\xba\x81b\xf5<01;\x06F\x06\x86\xf3\x0c\xe9\x8b'
        b'\xe2\xf4\x19\x026\xf9\xcdf(\x9c\x91a\x0f\x920cX\xc4\x10W\xcf\xb0\x89'
        b'\xc1\x8f\x87a\x06C\x06@\x80\x01\x00\x94#\x14\x01', 5, 5, 32
    )]
)
def test_compress_decompress_fail(data, width, height, depth):
    """Decode-then-encode does not reproduce this zlib stream (expected failure)."""
    intermediate = decompress(
        data, Compression.ZIP_WITH_PREDICTION, width, height, depth
    )
    reencoded = compress(
        intermediate, Compression.ZIP_WITH_PREDICTION, width, height, depth
    )
    assert data == reencoded
|
kmike/psd-tools
|
tests/psd_tools/compression/test_compression.py
|
Python
|
mit
| 3,424
|
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
import html
from http import HTTPStatus
from django.conf import settings
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from tcms.tests import LoggedInTestCase
from tcms.tests.factories import UserFactory
class TestAdminView(LoggedInTestCase):
    """Smoke-tests the Django admin for a logged-in staff superuser."""

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.tester.is_staff = True  # can access admin
        cls.tester.is_superuser = True  # has all perms
        cls.tester.save()
        cls.url = reverse("admin:index")

    def test_admin_display(self):
        """Admin index renders and lists every expected app section."""
        self.client.login(  # nosec:B106:hardcoded_password_funcarg
            username=self.tester.username, password="password"
        )
        response = self.client.get(self.url)
        self.assertEqual(HTTPStatus.OK, response.status_code)
        self.assertNotContains(response, "You don't have permission to edit anything")
        # for tcms.management
        self.assertContains(response, _("Builds"))
        self.assertContains(response, "Classifications")
        self.assertContains(response, "Components")
        self.assertContains(response, _("Priorities"))
        self.assertContains(response, "Products")
        self.assertContains(response, _("Tags"))
        self.assertContains(response, "Versions")
        _bugs = _("Bugs")
        # The bugs section is optional: assert it only when the app is installed.
        if "tcms.bugs.apps.AppConfig" in settings.INSTALLED_APPS:
            self.assertContains(
                response,
                f'<a href="/admin/bugs/" class="grp-section">{_bugs}</a>',
                html=True,
            )
            self.assertContains(response, "<strong>Bugs</strong>", html=True)
        # for tcms.testcases
        self.assertContains(response, "Bug trackers")
        self.assertContains(response, _("Categories"))
        self.assertContains(response, _("Test case statuses"))
        self.assertContains(response, "Testcases")
        self.assertContains(response, "Test cases")
        # for tcms.testplans
        self.assertContains(response, "Plan types")
        self.assertContains(response, "Testplans")
        self.assertContains(response, "Test plans")
        # for tcms.testruns
        # b/c French translation contains characters which get HTML escaped
        response_text = html.unescape(
            str(response.content, encoding=settings.DEFAULT_CHARSET)
        )
        self.assertIn(str(_("Test execution statuses")), response_text)
        self.assertContains(response, "Testruns")
        self.assertContains(response, "Test runs")
        # for django_comments
        self.assertNotContains(response, "Django_Comments")
        self.assertNotContains(response, "Comments")
        # for django.contrib.sites
        self.assertContains(response, _("Sites"))

    def test_sites_admin_add(self):
        """Adding a Site is disabled: the view redirects to the single site."""
        self.client.login(  # nosec:B106:hardcoded_password_funcarg
            username=self.tester.username, password="password"
        )
        response = self.client.get(reverse("admin:sites_site_add"))
        self.assertRedirects(
            response, reverse("admin:sites_site_change", args=[settings.SITE_ID])
        )

    def test_sites_admin_delete(self):
        """Deleting a Site is disabled: the view redirects to the single site."""
        self.client.login(  # nosec:B106:hardcoded_password_funcarg
            username=self.tester.username, password="password"
        )
        response = self.client.get(
            reverse("admin:sites_site_delete", args=[settings.SITE_ID])
        )
        self.assertRedirects(
            response, reverse("admin:sites_site_change", args=[settings.SITE_ID])
        )

    def test_users_list_shows_is_superuser_column(self):
        """The user changelist exposes the is_superuser column."""
        self.client.login(  # nosec:B106:hardcoded_password_funcarg
            username=self.tester.username, password="password"
        )
        response = self.client.get(reverse("admin:auth_user_changelist"))
        self.assertContains(response, "column-is_superuser")
class TestUserDeletionViaAdminView(LoggedInTestCase):
    """Permission checks for deleting user accounts through the admin."""

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.superuser = UserFactory()
        cls.superuser.is_staff = True
        cls.superuser.is_superuser = True
        cls.superuser.set_password("password")
        cls.superuser.save()
        cls.regular_user = UserFactory()
        cls.regular_user.is_staff = True
        cls.regular_user.set_password("password")
        cls.regular_user.save()
        # Deletion URL for the regular (non-super) user account.
        cls.url = reverse("admin:auth_user_delete", args=[cls.regular_user.pk])

    def test_regular_user_should_not_delete_another_user(self):
        self.client.login(  # nosec:B106:hardcoded_password_funcarg
            username=self.regular_user.username, password="password"
        )
        response = self.client.get(
            reverse("admin:auth_user_delete", args=[self.superuser.pk])
        )
        # it is not possible to delete other user accounts
        self.assertEqual(HTTPStatus.FORBIDDEN, response.status_code)

    def test_regular_user_should_be_able_to_delete_himself(self):
        self.client.login(  # nosec:B106:hardcoded_password_funcarg
            username=self.regular_user.username, password="password"
        )
        response = self.client.get(self.url)
        # The confirmation page renders, proving the action is permitted.
        self.assertEqual(HTTPStatus.OK, response.status_code)
        self.assertContains(response, _("Yes, I'm sure"))

    def test_superuser_should_be_able_to_delete_any_user(self):
        self.client.login(  # nosec:B106:hardcoded_password_funcarg
            username=self.superuser.username, password="password"
        )
        response = self.client.get(self.url)
        # verify there's the Yes, I'm certain button
        self.assertEqual(HTTPStatus.OK, response.status_code)
        self.assertContains(response, _("Yes, I'm sure"))
        # Confirm the deletion, then verify the account no longer appears.
        response = self.client.post(self.url, {"post": "yes"}, follow=True)
        self.assertEqual(HTTPStatus.OK, response.status_code)
        self.assertNotContains(
            response,
            f'<a href="/admin/auth/user/{self.regular_user.pk}/change/">'
            f"{self.regular_user.username}</a>",
            html=True,
        )
|
kiwitcms/Kiwi
|
tcms/core/tests/test_admin.py
|
Python
|
gpl-2.0
| 6,142
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2011-2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Search the transaction log for history """
from datetime import datetime
from datetime import timedelta
from dateutil.parser import parse
from dateutil.tz import tzutc
from sqlalchemy.sql.expression import asc, desc, exists
from sqlalchemy import or_
from aquilon.exceptions_ import ArgumentError
from aquilon.aqdb.model import Xtn, XtnDetail, XtnEnd
from aquilon.worker.broker import BrokerCommand
# Read-only/introspection commands excluded by the 'rw' command filter.
_IGNORED_COMMANDS = ('show_active_locks', 'show_active_commands', 'cat',
                     'search_audit')
# Commands that change no database rows but are still audited by default.
_NON_DB_CHANGE_TYPE_COMMANDS = ('compile', 'pxeswitch')
class CommandSearchAudit(BrokerCommand):
    """Implements `aq search_audit`: query the transaction (audit) log."""

    required_parameters = []

    def render(self, session, logger, keyword, argument, username, command,
               before, after, forever, return_code, limit, reverse_order, **_):
        """Render the search_audit command.
        Please see the abstract method defined in the superclass of this
        class to better understand the purpose of the render method.
        :param keyword: The value of any argument supplied to a command
                        (str or None)
        :param argument: Look at the value of the specified argument only
                         (str or None)
        :param username: The user name who executed the command
                         (str or None)
        :param command: The name of the command or command type
                        (str or None)
        :param before: Search for transactions started before a specific
                       date/time (str or None)
        :param after: Search for transactions started after a specific
                      date/time (str or None)
        :param forever: Search for transactions throughout the transaction log
                        (a flag, any value that evaluates to True or False)
        :param return_code: Search by an HTTP response code (None or
                            int: 200-505)
        :param limit: Limit the number of rows returned. (None (sets limit
                      to the configured default value) or int))
        :param reverse_order: Output records in reverse chronological order.
                              Also causes the --limit parameter to find the
                              oldest records and not the newest.
                              (a flag, any value that evaluates to True or
                              False)
        :return: a list of all results represented by the computed SQLAlchemy
                 Query object
        """
        q = session.query(Xtn)
        # Command-name / command-class filtering.
        if command is not None:
            if command == 'all':
                # No filter
                pass
            elif command == 'rw':
                # Filter our command list
                q = q.filter(~Xtn.command.in_(_IGNORED_COMMANDS))
            elif command == 'wo':
                # Filter command list to only return manipulate commands to db
                # Old default behaviour
                q = q.filter_by(is_readonly=False)
            elif command == 'ro':
                # Filter our command list to only return readonly commands
                q = q.filter_by(is_readonly=True)
            else:
                q = q.filter_by(command=str(command))
        else:
            # Default: filter out read only, but keep the non-DB-changing
            # commands that are still of interest.
            q = q.filter(or_(Xtn.is_readonly == False,
                             Xtn.command.in_(_NON_DB_CHANGE_TYPE_COMMANDS)))
        if username is not None:
            username = username.strip()
            # 'nobody' is special, it is stored without any realm
            if '@' in username or username == 'nobody':
                q = q.filter_by(username=str(username))
            else:
                # No realm given: match the user in any realm.
                q = q.filter(Xtn.username.like(username + '@%'))
        # TODO: These should be typed in input.xml as datetime and use
        # the standard broker methods for dealing with input validation.
        start, end = self.after_before_to_start_end(after, before)
        if forever:
            if before is not None or after is not None:
                raise ArgumentError(
                    "Cannot specify 'forever' with 'before' or 'after'")
        else:
            # if no end-time set, and no start time set, use now; if
            # start-time if set (and no end), end at +1 year. (unless
            # 'forever' is given)
            if end is None:
                if start is None:
                    # Note: put this 1s in the future so we don't accidently
                    # miss things that were added within the last second (from
                    # zero'd microseconds).
                    end = datetime.utcnow().replace(microsecond=0) +\
                        timedelta(seconds=1)
                else:
                    end = start + timedelta(days=366)
            if not end.tzinfo:
                end = end.replace(tzinfo=tzutc())
            if start is None:
                start = end - timedelta(days=366)
            if not start.tzinfo:
                start = start.replace(tzinfo=tzutc())
            # sanity check: that end is after before.
            if end < start:
                raise ArgumentError(
                    "Time region from after '{}' to before '{}'"
                    " is invalid.".format(after, before))
            q = q.filter(Xtn.start_time > start, Xtn.start_time < end)
        if return_code is not None:
            # A return code of 0 means "never finished": no XtnEnd row exists.
            if return_code == 0:
                q = q.filter(~exists().where(Xtn.id == XtnEnd.xtn_id))
            else:
                q = q.join(XtnEnd)
                q = q.filter(XtnEnd.return_code == return_code)
                q = q.reset_joinpoint()
        # FIXME: Oracle ignores indices if it has to perform unicode -> string
        # conversion, see the discussion at:
        # https://groups.google.com/d/topic/sqlalchemy/8Xn31vBfGKU/discussion
        # We may need a more generic solution like a custom type decorator as
        # suggested in the thread, but for now str() should be enough
        if keyword is not None or argument is not None:
            q = q.join(XtnDetail)
            if keyword is not None:
                q = q.filter_by(value=str(keyword))
            if argument is not None:
                q = q.filter_by(name=str(argument))
            q = q.reset_joinpoint()
        # Set an order by when searching for the records, this controls
        # which records are selected by the limit.
        if reverse_order:
            q = q.order_by(asc(Xtn.start_time))  # N oldest records
        else:
            # default: N most recent
            q = q.order_by(desc(Xtn.start_time))
        # Limit the ordered results.
        if limit is None:
            limit = self.config.getint('broker', 'default_audit_rows')
        if limit > self.config.getint('broker', 'max_audit_rows'):
            raise ArgumentError(
                "Cannot set the limit higher than {}".format(
                    self.config.get('broker', 'max_audit_rows')))
        q = q.limit(limit)
        # Now apply the user preference to the limited output after
        # the outer joins are applied to pull in details and end information.
        if reverse_order:
            q = q.from_self().order_by(desc(Xtn.start_time))
        else:
            q = q.from_self().order_by(asc(Xtn.start_time))
        return q.all()

    @classmethod
    def after_before_to_start_end(cls, after, before):
        """Return a tuple (start, end) with parsed 'after' and 'before'.
        This method parses after and before and returns a tuple (start, end)
        specifying a period of interest, where each item is either a None (if
        initially None) or a datetime object.
        NB: This method purposefully does not ensure that start < end (this is
        done elsewhere).
        :param after: None, datetime or str
        :param before: None, datetime or str
        :return: a tuple (start, end) - each item is either set to None, or an
                 instance of datetime (with UTC timezone if no other timezone
                 given).
        """
        # To protect ourselves against some very unlikely cases when 'after'
        # would be erroneously computed to precede 'before' (for different
        # combinations of 'now' or 'today' (NB: if today is used for either
        # of the parameters, using now or today for another one does not
        # make sense)), and to ensure that, from the user perspective, 'now'
        # means 'now', we must use the same value of 'now' and 'tzinfo' (in
        # this case we only use UTC) for both.
        now = datetime.now()
        tzinfo = tzutc()
        # NOTE(review): 'after today' maps to the END of today (max time) and
        # 'before today' to the START of today (min time), i.e. both exclude
        # today itself. Looks intentional but worth confirming.
        if after == 'today':
            after = now.combine(now.date(), now.max.time())
        if before == 'today':
            before = now.combine(now.date(), now.min.time())
        after = cls._parsed_datetime_or_none(after, now, tzinfo)
        before = cls._parsed_datetime_or_none(before, now, tzinfo)
        return after, before

    @staticmethod
    def _parsed_datetime_or_none(fuzzy_dt, now=None, tzinfo=None):
        """Parse fuzzy_dt and return a datetime object on success, or None."""
        if fuzzy_dt is None:
            return None
        if fuzzy_dt == 'now':
            parsed = now or datetime.now()
        elif isinstance(fuzzy_dt, datetime):
            parsed = fuzzy_dt
        else:
            try:
                # dateutil's fuzzy parser accepts most human date formats.
                parsed = parse(fuzzy_dt)
            except (ValueError, TypeError):
                raise ArgumentError(
                    "Unable to parse date string '{}'".format(fuzzy_dt))
        # Naive datetimes are interpreted in the supplied (default UTC) zone.
        if parsed.tzinfo is None:
            parsed = parsed.replace(tzinfo=tzinfo or tzutc())
        return parsed
|
quattor/aquilon
|
lib/aquilon/worker/commands/search_audit.py
|
Python
|
apache-2.0
| 10,400
|
#! /usr/bin/env python3
import API as pk
import requests
import pygeocoder
def main():
    """Interactive vote bot: repeatedly upvote a chosen yak, or downvote it
    enough times to (presumably) push it below the deletion threshold."""
    print("__________________________________________ \n \n \n Welcome to YikYak upvote Hack")
    # SECURITY NOTE(review): this Google Geocoding API key is hard-coded in
    # the source; it should be rotated and loaded from the environment.
    geocoder = pygeocoder.Geocoder("AIzaSyAGeW6l17ATMZiNTRExwvfa2iuPA1DvJqM")
    currentlist = []
    # Code pulled from the YikYak.py terminal GUI: Google geocoding is used
    # because setting longitude/latitude manually is problematic.
    # If first time using app, ask for preferred location.
    coordlocation = newLocation(geocoder)
    # If location retrieval fails, ask user for coordinates.
    if coordlocation == 0:
        print("Please enter coordinates manually: ")
        currentlatitude = input("Latitude: ")
        currentlongitude = input("Longitude: ")
        coordlocation = pk.Location(currentlatitude, currentlongitude)
    # One yakker fetches the list of yaks; the user then picks which yak to
    # vote on and how many times.
    remoteyakker = pk.Yakker(None, coordlocation, True)
    currentlist = remoteyakker.get_yaks()
    read(currentlist)
    print("If you would like to upvote hack enter U and if you would like to downvote to delete enter D")
    choice = input()
    if choice.upper() == 'U':
        yakID = int(input("Please enter the yak number from above: \n"))
        print("Enter the number of Yik Yak upvotes")
        numberOfVotes = int(input())
        for num in range(0, int(numberOfVotes)):
            print(num)
            # Each vote comes from a brand-new Yakker identity.
            remoteyakker = pk.Yakker(None, coordlocation, True)
            print("User ID: ", remoteyakker.id, "\n")
            upvoted = remoteyakker.upvote_yak(currentlist[yakID-1].message_id)
            if upvoted:
                print("\nUpvote successful :)")
            else:
                # BUG FIX: the original referenced an undefined variable
                # 'posted' here, raising NameError on every failed upvote.
                print("\nUpvote failed :(")
    elif choice.upper() == 'D':
        yakID = int(input("Please enter the yak number from above: \n"))
        numberOfLikes = int(input("Enter the number of likes the Yak currently has:"))
        # Ten extra downvotes beyond the current like count -- presumably to
        # push the score below the deletion threshold (confirm).
        for num in range(0, (numberOfLikes + 10)):
            remoteyakker = pk.Yakker(None, coordlocation, True)
            print("User ID: ", remoteyakker.id, "\n")
            remoteyakker.downvote_yak(currentlist[yakID-1].message_id)
#code pulled from YikYakTerminal
def newLocation(geocoder, address=""):
    """Resolve *address* (prompting for one if empty) to a pk.Location.

    The chosen location is persisted to the 'locationsetting' file.
    Returns 0 when geocoding fails so callers can fall back to manual entry.
    """
    # figure out location latitude and longitude based on address
    if len(address) == 0:
        address = input("Enter college name or address: ")
    try:
        currentlocation = geocoder.geocode(address)
    except Exception:
        # Narrowed from a bare 'except:' which would also swallow
        # KeyboardInterrupt/SystemExit.
        print("\nGoogle Geocoding API has reached the limit of queries.\n")
        return 0
    coordlocation = 0
    try:
        coordlocation = pk.Location(currentlocation.latitude, currentlocation.longitude)
        # Persist the chosen location; 'with' guarantees the file is closed
        # even if a write fails (the original left the handle open on error).
        with open("locationsetting", 'w+') as f:
            coordoutput = str(currentlocation.latitude) + '\n' + str(currentlocation.longitude)
            f.write(coordoutput)
            f.write("\n")
            f.write(address)
    except Exception:
        print("Unable to get location.")
    return coordlocation
def changeLocation(geocoder, address=""):
    """Resolve a new location, falling back to manual coordinate entry."""
    location = newLocation(geocoder, address)
    if location != 0:
        return location
    print("\nPlease enter coordinates manually: ")
    latitude = input("Latitude: ")
    longitude = input("Longitude: ")
    return pk.Location(latitude, longitude)
#read stream of yaks and print Pulled from YikYakTerminal
def read(yaklist):
yakNum = 1
for yak in yaklist:
# line between yaks
print ("_" * 93)
print (yakNum)
yak.print_yak()
commentNum = 1
# comments header
comments = yak.get_comments()
print ("\n\t\tComments:", end='')
print (len(comments))
# print all comments separated by dashes
for comment in comments:
print ("\t {0:>4}".format(commentNum), end=' ')
print ("-" * 77)
comment.print_comment()
commentNum += 1
yakNum += 1
main()
|
MrAjsTech/YakUpVoteHack
|
YikYak.py
|
Python
|
bsd-3-clause
| 4,519
|
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test that the mempool ensures transaction delivery by periodically sending
to peers until a GETDATA is received."""
import time
from test_framework.blocktools import create_confirmed_utxos
from test_framework.p2p import P2PTxInvStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
# 15 minutes in seconds
MAX_INITIAL_BROADCAST_DELAY = 15 * 60
class MempoolUnbroadcastTest(BitcoinTestFramework):
    """Locally submitted transactions stay in the node's "unbroadcast" set
    (and are periodically re-announced) until a peer requests them with
    GETDATA; once delivered or mined they leave the set."""
    def set_test_params(self):
        self.num_nodes = 2
    def skip_test_if_missing_module(self):
        # Both sub-tests create wallet transactions.
        self.skip_if_no_wallet()
    def run_test(self):
        self.test_broadcast()
        self.test_txn_removal()
    def test_broadcast(self):
        """Submit two txns while disconnected; verify they are marked
        unbroadcast, survive a restart, and are delivered on reconnect."""
        self.log.info(
            "Test that mempool reattempts delivery of locally submitted transaction")
        node = self.nodes[0]
        min_relay_fee = node.getnetworkinfo()["relayfee"]
        create_confirmed_utxos(node, 10)
        # Isolate node 0 so its transactions cannot be relayed yet.
        self.disconnect_nodes(node.index, 1)
        self.log.info("Generate transactions that only node 0 knows about")
        # generate a wallet txn
        addr = node.getnewaddress()
        wallet_tx_hsh = node.sendtoaddress(addr, 100)
        utxos = node.listunspent()
        # generate a txn using sendrawtransaction
        us0 = utxos.pop()
        inputs = [{"txid": us0["txid"], "vout": us0["vout"]}]
        outputs = {addr: 100}
        tx = node.createrawtransaction(inputs, outputs)
        node.settxfee(min_relay_fee)
        txF = node.fundrawtransaction(tx)
        txFS = node.signrawtransactionwithwallet(txF["hex"])
        rpc_tx_hsh = node.sendrawtransaction(txFS["hex"])
        # check transactions are in unbroadcast using rpc
        mempoolinfo = self.nodes[0].getmempoolinfo()
        assert_equal(mempoolinfo['unbroadcastcount'], 2)
        mempool = self.nodes[0].getrawmempool(True)
        for tx in mempool:
            assert_equal(mempool[tx]['unbroadcast'], True)
        # check that second node doesn't have these two txns
        mempool = self.nodes[1].getrawmempool()
        assert rpc_tx_hsh not in mempool
        assert wallet_tx_hsh not in mempool
        # ensure that unbroadcast txs are persisted to mempool.dat
        self.restart_node(0)
        self.log.info("Reconnect nodes & check if they are sent to node 1")
        self.connect_nodes(node.index, 1)
        # fast forward into the future & ensure that the second node has the
        # txns
        node.mockscheduler(MAX_INITIAL_BROADCAST_DELAY)
        self.sync_mempools(timeout=30)
        mempool = self.nodes[1].getrawmempool()
        assert rpc_tx_hsh in mempool
        assert wallet_tx_hsh in mempool
        # check that transactions are no longer in first node's unbroadcast set
        mempool = self.nodes[0].getrawmempool(True)
        for tx in mempool:
            assert_equal(mempool[tx]['unbroadcast'], False)
        self.log.info(
            "Add another connection & ensure transactions aren't broadcast again")
        conn = node.add_p2p_connection(P2PTxInvStore())
        node.mockscheduler(MAX_INITIAL_BROADCAST_DELAY)
        # allow sufficient time for possibility of broadcast
        time.sleep(2)
        assert_equal(len(conn.get_invs()), 0)
        self.disconnect_nodes(node.index, 1)
        node.disconnect_p2ps()
    def test_txn_removal(self):
        """Verify a txn leaves the unbroadcast set when it is mined."""
        self.log.info(
            "Test that transactions removed from mempool are removed from unbroadcast set")
        node = self.nodes[0]
        # since the node doesn't have any connections, it will not receive
        # any GETDATAs & thus the transaction will remain in the unbroadcast
        # set.
        addr = node.getnewaddress()
        txhsh = node.sendtoaddress(addr, 100)
        # check transaction was removed from unbroadcast set due to presence in
        # a block
        removal_reason = "Removed {} from set of unbroadcast txns before " \
                         "confirmation that txn was sent out".format(txhsh)
        with node.assert_debug_log([removal_reason]):
            node.generate(1)
# Run the functional test when invoked as a script.
if __name__ == "__main__":
    MempoolUnbroadcastTest().main()
|
Bitcoin-ABC/bitcoin-abc
|
test/functional/mempool_unbroadcast.py
|
Python
|
mit
| 4,377
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 13 09:11:25 2015
@author: paulinkenbrandt
"""
import py_compile
# Byte-compile snake.py in place.  NOTE(review): the path is absolute and
# machine-specific -- this helper only works on the original author's checkout.
py_compile.compile("C:\\Users\\PAULINKENBRANDT\\Documents\\GitHub\\Snake_Valley\\snake.py")
|
inkenbrandt/Snake_Valley
|
untitled1.py
|
Python
|
gpl-2.0
| 205
|
import unittest
from roadhouse import parser
class PortParseTest(unittest.TestCase):
    """Tests for the `ports` grammar: comma-separated ports and port ranges."""
    @classmethod
    def setUpClass(cls):
        cls.parse = parser.ports.parseString
    def test_port_and_range(self):
        # A single port normalizes to a (port, port) tuple.
        tmp = self.parse("22, 80-100")
        self.assertEqual(tmp.ports[0], (22, 22))
        self.assertEqual(tmp.ports[1], (80, 100))
    def test_double_range(self):
        # Two explicit ranges parse as-is.
        tmp = self.parse("10-20, 80-100")
        self.assertEqual(tmp.ports[0], (10, 20))
        self.assertEqual(tmp.ports[1], (80, 100))
class RuleParseTest(unittest.TestCase):
    """Tests for parser.Rule.parse: single rules, ICMP, group names, SG ids."""
    def test_single_rule(self):
        result = parser.Rule.parse("tcp port 80 127.0.0.1/32")
        self.assertEqual(len(result), 1)
        tmp = result[0]
        self.assertIsInstance(tmp, parser.Rule)
        # A single port must normalize to from_port == to_port.
        self.assertEqual(tmp.from_port, 80)
        self.assertEqual(tmp.to_port, 80)
    def test_single_icmp_rule(self):
        result = parser.Rule.parse("icmp port 0 192.168.1.1/32")
        self.assertEqual(len(result), 1)
        tmp = result[0]
        self.assertIsInstance(tmp, parser.Rule)
        self.assertEqual(tmp.from_port, 0)
    def test_group_name_parse(self):
        # Group names may contain underscores, dashes and dots.
        # (Removed a dead initial parse of "web_server" that was immediately
        # overwritten by the loop, and renamed the loop variable so it no
        # longer shadows the builtin `input`.)
        for group_name in ["web_server", "web-server", "web.server"]:
            result = parser.Rule.parse("tcp port 80 {}".format(group_name))[0]
            self.assertIsInstance(result, parser.Rule)
    def test_sg_parse(self):
        # An explicit security-group id is preserved verbatim on the rule.
        sg = "sg-edcd9784"
        result = parser.Rule.parse("tcp port 80 {}".format(sg))[0]
        self.assertEqual(result.group, sg)
class RulesParsingTest(unittest.TestCase):
    """Tests for the top-level `parser` grammar: protocol, ports, ip/mask."""
    @classmethod
    def setUpClass(cls):
        cls.parse = parser.parser.parseString
    def test_tcp_with_ip(self):
        result = self.parse("tcp port 80 192.168.1.1/32")
        self.assertEqual(result.protocol, "tcp")
        self.assertEqual(result.ip_and_mask, "192.168.1.1/32")
        self.assertEqual(result.ports[0], (80, 80))
    def test_icmp(self):
        result = self.parse("icmp port 0 192.168.1.1/32")
        self.assertEqual(result.protocol, "icmp")
    def test_tcp_port_zero(self):
        # Port 0 is a valid parse and keeps the (0, 0) normalized form.
        result = self.parse("tcp port 0 192.168.1.1/32")
        self.assertEqual(result.ports[0], (0,0))
    def test_multiple_ports(self):
        result = self.parse("tcp port 80, 100 192.168.1.1/32")
        self.assertEqual(result.ports[0], (80,80))
        self.assertEqual(result.ports[1], (100,100))
    # Disabled: the grammar currently requires an explicit protocol.
    # def test_no_tcp_specified(self):
    #     tmp = self.parse("port 80 192.168.1.1")
    #     self.assertEqual("192.168.1.1", tmp.ip)
    #     self.assertEqual(tmp.ports[0], (80,80))
class IPTest(unittest.TestCase):
    """Tests for the `ip` grammar element."""
    @classmethod
    def setUpClass(cls):
        cls.parse = parser.ip.parseString
    def test_ip_no_mask(self):
        # ensures we get the mask added as /32
        tmp = self.parse("192.168.1.1")[0]
        self.assertEqual("192.168.1.1/32", tmp)
    def test_ip_with_mask(self):
        # An explicit mask passes through unchanged.
        tmp = self.parse("192.168.1.1/32")[0]
        self.assertEqual("192.168.1.1/32", tmp)
class MaskTest(unittest.TestCase):
    """Tests for the `mask` grammar element (/NN suffix → int)."""
    def test_mask(self):
        result = parser.mask.parseString("/32")
        self.assertEqual(result.mask, 32)
class SimplePortParseTest(unittest.TestCase):
    """Tests for `normalized_port_range`: "80" and "80-100" forms."""
    @classmethod
    def setUpClass(cls):
        cls.parse = parser.normalized_port_range.parseString
    def test_single_port(self):
        # A bare port normalizes to an identical (lo, hi) pair.
        tmp = self.parse("80")
        self.assertEqual(tmp[0], (80, 80))
    def test_port_range(self):
        tmp = self.parse("80-100")
        self.assertEqual(tmp[0], (80, 100))
|
awsroadhouse/roadhouse
|
roadhouse/tests/test_parser.py
|
Python
|
bsd-2-clause
| 3,629
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet replace-by-fee capabilities in conjunction with the fallbackfee."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_raises_rpc_error
class WalletRBFTest(BitcoinTestFramework):
    """With fee estimation unavailable and the fallback fee disabled
    (-fallbackfee=0), every wallet send path must fail with an RPC error."""
    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def run_test(self):
        # Mature some coinbase outputs so the wallet has spendable funds.
        self.nodes[0].generate(101)
        # sending a transaction without fee estimations must be possible by default on regtest
        self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # test sending a tx with disabled fallback fee (must fail)
        self.restart_node(0, extra_args=["-fallbackfee=0"])
        assert_raises_rpc_error(-4, "Fee estimation failed", lambda: self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1))
        assert_raises_rpc_error(-4, "Fee estimation failed", lambda: self.nodes[0].fundrawtransaction(self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})))
        assert_raises_rpc_error(-6, "Fee estimation failed", lambda: self.nodes[0].sendmany("", {self.nodes[0].getnewaddress(): 1}))
# Run the functional test when invoked as a script.
if __name__ == '__main__':
    WalletRBFTest().main()
|
litecoin-project/litecoin
|
test/functional/wallet_fallbackfee.py
|
Python
|
mit
| 1,487
|
import string
import tensorflow as tf
import numpy as np
import os
import time
from tempfile import TemporaryFile
class Seq2Seq:
    """Character-level RNN language model built on stacked GRU cells
    (teaching skeleton).

    Text is encoded against a fixed character vocabulary (index 0 reserved
    for padding), fed through the network in overlapping windows, and can
    be sampled one character at a time via online_inference().

    NOTE(review): several graph-construction steps are left as TODO
    exercises and currently assign ``None`` placeholders -- the graph will
    not build/run until they are completed.
    """
    def __init__(self, input_file, window_size, overlap_size, batch_size, hidden_size, no_stacked_cells=3, vocab=None):
        # Default vocabulary: printable ASCII plus Croatian diacritics.
        # NOTE(review): `vocab == None` should idiomatically be `vocab is None`.
        if vocab == None:
            vocab = string.ascii_letters + string.digits + string.punctuation + string.whitespace + 'ČĆŽĐŠ' + 'ćčžđš'
        self.vocab = vocab
        self.input_file_name = input_file
        # window_size: characters per training window;
        # overlap_size: shared characters between consecutive windows.
        self.window_size = window_size
        self.overlap_size = overlap_size
        self.batch_size = batch_size
        # hidden_size: GRU state width per layer; no_stacked_cells: depth.
        self.hidden_size = hidden_size
        self.no_stacked_cells = no_stacked_cells
        # checkpoint stuff
        self.checkpoint_base_path = 'checkpoints/'
        # summary stuff
        self.summary_train_path = 'graphs/train'
    def vocab_encode(self, text):
        """Map each character to a 1-based vocab index; characters outside
        the vocabulary map to the index of '*' (0 is reserved for padding)."""
        return [self.vocab.index(x)+1 if x in self.vocab else self.vocab.index('*')+1 for x in text]
    def vocab_decode(self, seq):
        """Inverse of vocab_encode; padding zeros are dropped."""
        text_array = [self.vocab[i-1] for i in seq if i>0]
        return ''.join(text_array)
    def read_data(self, input=None, shuffle=True):
        """Yield fixed-length encoded windows of the corpus, zero-padded
        on the right.  NOTE(review): the parameter name `input` shadows the
        builtin."""
        # if input is not provided, we will open a file and read all data into it
        # this should work for "small" datasets - 100MB is small :)
        if not input:
            with open(self.input_file_name, encoding='utf-8') as f:
                input = f.read()
        seq = self.vocab_encode(input)
        for i in range(0, len(seq), self.window_size - self.overlap_size):
            if shuffle:
                # NOTE(review): np.random.random_integers is deprecated
                # (removed in recent NumPy; use np.random.randint), and the
                # inclusive upper bound len(seq) allows a window starting
                # past the last element (yields an all-padding chunk) --
                # confirm intent.
                j = np.random.random_integers(0, len(seq))
            else:
                j = i
            chunk = seq[j:j+self.window_size]
            # right-pad the final/short chunk with the 0 padding symbol
            chunk += [0] * (self.window_size-len(chunk))
            yield chunk
    def read_batch(self, stream):
        """Group an iterable of windows into lists of self.batch_size
        (the final batch may be smaller)."""
        batch = []
        for x in stream:
            batch.append(x)
            if len(batch) == self.batch_size:
                yield batch
                batch = []
        if len(batch) > 0:
            yield batch
    def _seq2one_hot(self, seq):
        """
        Encodes input tensor into one hot tensor of depth len(self.vocab) + 1. The size of vocabulary is increased by
        one because we will pad shorter sequences with zeros.
        :param seq: tensor (should be placeholder for input batch in our case)
        :return: one hot tensor of depth len(vocab)+1
        """
        # we will use 0 for padding so our vocabulary size will increase by one
        vocab_len = len(self.vocab)
        one_hot_seq = tf.one_hot(seq, depth=vocab_len+1)
        return one_hot_seq
    def _lenghts(self, seq):
        """
        :param seq: one hot encoding
        :return: sequence of batch_size where elements are lengths of the sequence in the batch
        """
        # Non-padding entries are positive, so sign() marks them 1 and the
        # row sum counts real (unpadded) positions.
        return tf.reduce_sum(tf.sign(seq), 1)
    def _create_cell(self, seq, no_stacked_cells):
        """
        Creates GRU cell
        :param seq: placeholder of the input batch
        :return: cell and placeholder for its internal state
        """
        batch_size = tf.shape(seq)[0]
        ##########################################################################################################
        #
        # TODO: Create a stacked MultiRNNCell from GRU cells
        #       First, you have to use tf.contrib.rnn.GRUCell() to construct cells
        #       Since around May 2017, there is new way of constructing MultiRNNCell and you need to create
        #       one cell for each layer. Old code snippets that used [cell * no_stacked_cells] that you can
        #       find online might not work with the latest Tensorflow
        #
        #       After construction GRUCell objects, use it to construct tf.contrib.rnn.MultiRNNCell().
        #
        # YOUR CODE BEGIN
        #
        ##########################################################################################################
        cell = None  # you
        ##########################################################################################################
        #
        # YOUR CODE END
        #
        ##########################################################################################################
        # NOTE(review): cell is None until the TODO above is completed, so
        # the zero_state call below will raise AttributeError as shipped.
        multi_cell_zero_state = cell.zero_state(batch_size, tf.float32)
        # NOTE(review): in_state_shape is computed but never used.
        in_state_shape = tuple([None, self.hidden_size] for _ in range(no_stacked_cells))
        in_state = tuple(tf.placeholder_with_default(cell_zero_state, [None, self.hidden_size], name='in_state') for cell_zero_state in multi_cell_zero_state)
        return cell, in_state
    def _create_rnn(self):
        """Build the unrolled RNN over self.seq (TODO exercise)."""
        with tf.name_scope('RNN_cell'):
            seq = self.seq
            self.cell, self.in_state = self._create_cell(seq, self.no_stacked_cells)
            self.lenghts = self._lenghts(seq)
            self.one_hot_seq = self._seq2one_hot(seq)
            ##########################################################################################################
            #
            # TODO: Create a dynamically unrolled RNN from previously created stacked GRU cell
            #
            #       First, we created a stacked GRU cell.
            #       Next step was to get a actual length of input sequence and then encode it into one hot vector.
            #
            #       Now, you have to use tf.nn.dynamic_rnn to create dynamically unrolled RNN.
            #
            # YOUR CODE BEGIN
            #
            ##########################################################################################################
            # NOTE(review): unpacking None raises TypeError at graph build
            # time; replace with the tf.nn.dynamic_rnn call per the TODO.
            self.output, self.out_state = None
            ##########################################################################################################
            #
            # YOUR CODE END
            #
            ##########################################################################################################
            return self.output, self.in_state, self.out_state
    def _create_placeholders(self, batch_size=None, window_size=None):
        """Create input-batch and sampling-temperature placeholders (TODO)."""
        ##########################################################################################################
        #
        # TODO: Create a placeholder for input data and for sampling temperature
        #
        #       Input data shape is [batch_size, window_size].
        #       Temperature is scalar of tf.float32 type
        #
        #       Use tf.placeholder() to create them
        #
        # YOUR CODE BEGIN
        #
        ##########################################################################################################
        with tf.name_scope('input_data'):
            self.seq = None
        with tf.name_scope('model_params'):
            self.temp = None
        ##########################################################################################################
        #
        # YOUR CODE END
        #
        ##########################################################################################################
        return self.seq, self.temp
    def _create_model(self):
        """Build logits, loss and the next-character sampler (TODO)."""
        depth = len(self.vocab)+1
        self._create_rnn()
        ##########################################################################################################
        #
        # TODO: calculate loss
        #
        #       First, you have to caluculate logits. Use tf.contrib.layers.fully_connected() to create a fully
        #       connected layer together with its variables.
        #
        #       Then you can calculate labels by one hot encoding input sequence.
        #
        #       Finally, use tf.nn.softmax_cross_entropy_with_logits and tf.reduce_mean to calculate loss.
        #       Remember to slide labels by one position to match predicted logits.
        #
        # YOUR CODE BEGIN
        #
        ##########################################################################################################
        with tf.name_scope('loss'):
            self.logits = None
            self.labels = None
            self.loss = None
        ##########################################################################################################
        #
        # YOUR CODE END
        #
        ##########################################################################################################
        ##########################################################################################################
        #
        # TODO: Given logits, sample the next character
        #
        #       Hint: Use tf.multinomial() function.
        #
        # YOUR CODE BEGIN
        #
        ##########################################################################################################
        with tf.name_scope('sample'):
            # Temperature-scaled multinomial draw from the last time step.
            self.sample = tf.multinomial(self.logits[:, -1] / self.temp, 1)[:, 0]
        ##########################################################################################################
        #
        # YOUR CODE END
        #
        ##########################################################################################################
        return self.loss, self.sample, self.in_state, self.out_state
    def _create_optimizer(self, learning_rate, decay_steps, decay_rate):
        """Create Adam optimizer with exponentially decaying LR (TODO)."""
        # we'll use this for logging and tensorboard naming
        self.learning_rate = learning_rate
        self.decay_steps = decay_steps
        self.decay_rate = decay_rate
        ##########################################################################################################
        #
        # TODO: Create Adam optimizer with decaying learning rate
        #
        #       First create a global step and then use it to create decaying learning rate with
        #       tf.train.exponential_decay(). Finally, create AdamOptimizer.
        #
        # YOUR CODE BEGIN
        #
        ##########################################################################################################
        with tf.name_scope('optimizer'):
            # global step is needed for logging and tensorboard
            self.global_step = None
            # first we create a decaying learning rate tensor
            self.lr = None
            # and then we create optimizer (Adam is the default choice)
            self.optimizer = None
        ##########################################################################################################
        #
        # YOUR CODE END
        #
        ##########################################################################################################
        return self.optimizer, self.global_step
    def _create_summaries(self):
        """Register loss and learning-rate scalars for TensorBoard."""
        with tf.name_scope('summaries'):
            tf.summary.scalar('loss', self.loss)
            tf.summary.scalar('lr', self.lr)
            self.summary = tf.summary.merge_all()
            return self.summary
    def online_inference(self, sess, len, temp, seed=None):
        """Sample `len` characters from the model, starting from the
        single seed character.  NOTE(review): the parameter name `len`
        shadows the builtin."""
        next_input = self.vocab_encode([seed])
        next_state = None
        sampled_sentence = seed
        for _ in range(len-1):
            feed_dict = {self.seq: [next_input], self.temp: temp}
            # After the first step, carry the RNN state forward.
            if next_state is not None:
                feed_dict[self.in_state] = next_state
            next_input, next_state = sess.run([self.sample, self.out_state], feed_dict=feed_dict)
            sampled_sentence += self.vocab_decode(next_input)[:1]
        return sampled_sentence
    def create_net(self, learning_rate, decay_steps, decay_rate):
        """
        Creates complete graph and everything that can without tf.Session.
        :param learning_rate:
        :param decay_steps:
        :param decay_rate:
        :return:
        """
        self._create_placeholders()
        self._create_model()
        self._create_optimizer(learning_rate, decay_steps, decay_rate)
        self._create_summaries()
        self._create_summary_writer()
    def train(self, epochs, skip_steps, seed, temp):
        """Run the training loop, checkpointing and sampling every
        `skip_steps` iterations."""
        with tf.Session() as sess:
            # restore checkpoint if possible
            # if not, initialize variables and start from beginning
            self._create_checkpoint_saver()
            if not self._restore_checkpoint(sess):
                sess.run(tf.global_variables_initializer())
            start = time.time()
            for i in range(epochs):
                for batch in self.read_batch(self.read_data()):
                    feed_dict = {
                        self.seq: batch
                    }
                    batch_loss, _, iteration = sess.run([self.loss, self.optimizer, self.global_step], feed_dict=feed_dict)
                    if iteration % skip_steps == 1:
                        # write summaries and save checkpoint
                        print('#' * 64)
                        print('[step={0:04d}] loss = {1:.1f} time = {2:.1f} sec'.format(iteration-1, batch_loss, time.time() - start))
                        self._save_checkpoint(sess)
                        self._add_summary(sess, feed_dict)
                        # make inference just for fun
                        sample = self.online_inference(sess,
                                                       len=self.window_size * 4,
                                                       temp=temp,
                                                       seed = np.random.choice(seed))
                        print('sample: {}'.format(sample))
    # Boring stuff bellow :)
    # used to generate names for checkpoints (needs refactoring)
    def _model_extension(self):
        """Name fragment identifying the dataset."""
        if self.input_file_name:
            dataset_name = os.path.basename(self.input_file_name)
        else:
            dataset_name = 'dummy_dataset'
        return '-{}'.format(dataset_name)
    def _arch_extension(self):
        """Name fragment identifying the architecture hyper-parameters."""
        return '-stacked_layers{}-hidden_size{}-window_size{}-overlap_size{}'.format(
            self.no_stacked_cells, self.hidden_size, self.window_size, self.overlap_size
        )
    def _train_extension(self):
        """Name fragment identifying the optimizer hyper-parameters."""
        return '-lr{}-dr{}-ds{}'.format(self.learning_rate, self.decay_rate, self.decay_steps)
    def _name_extension(self):
        """Full run-identifier suffix used for checkpoints and summaries."""
        return self._model_extension() + self._arch_extension() + self._train_extension()
    def _create_checkpoint_saver(self):
        """Create the checkpoint directory (if needed) and a tf Saver."""
        self.checkpoint_namebase = os.path.join(self.checkpoint_base_path, 'seq2seq{}/checkpoint'.format(self._name_extension()))
        self.checkpoint_path = os.path.dirname(self.checkpoint_namebase)
        os.makedirs(self.checkpoint_path, exist_ok=True)
        print('Checkpoint directory is:', os.path.abspath(self.checkpoint_path))
        self.saver = tf.train.Saver()
        return self.saver
    def _save_checkpoint(self, sess):
        """Save current session state under the run's checkpoint name."""
        saver = self.saver
        saved_path = saver.save(sess, self.checkpoint_namebase, global_step=self.global_step)
        return saved_path
    def _restore_checkpoint(self, sess):
        """Restore the latest checkpoint; return True on success."""
        saver = self.saver
        ckpt = tf.train.get_checkpoint_state(self.checkpoint_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            return True
        return False
    def _create_summary_writer(self):
        """Create a FileWriter bound to the default graph for TensorBoard."""
        graph = tf.get_default_graph()
        summary_dir = os.path.join(self.summary_train_path, 'graph{}'.format(self._name_extension()))
        self.writer = tf.summary.FileWriter(summary_dir, graph)
    def _add_summary(self, sess, feed_dict):
        """Evaluate merged summaries and write them at the current step."""
        summary, global_step = sess.run([self.summary, self.global_step], feed_dict=feed_dict)
        self.writer.add_summary(summary, global_step=global_step)
|
wecliqued/deep_learning
|
seq2seq/seq2seq.py
|
Python
|
mit
| 15,669
|
#coding: utf-8
from django import template
# Template-tag registry for this module.
register = template.Library()
@register.simple_tag
def upload_js():
    """Return the client-side tmpl templates (upload/download table rows)
    used by jQuery-File-Upload.

    The string is emitted verbatim into the page; the {% %} markers inside
    are JavaScript-tmpl syntax evaluated in the browser, NOT Django
    template syntax.
    """
    return """
    <!-- The template to display files available for upload -->
    <script id="template-upload" type="text/x-tmpl">
    {% for (var i=0, file; file=o.files[i]; i++) { %}
        <tr class="template-upload fade">
            <td class="name" ><span>{%=file.name%}</span></td>
            <td class="type"><span>{%=file.type%}</span></td>
            {% if (file.error) { %}
                <td class="error" colspan="2"><span class="label label-danger">{%=locale.fileupload.error%}</span> {%=locale.fileupload.errors[file.error] || file.error%}</td>
            {% } else if (o.files.valid && !i) { %}
                <td>
                    <div class="progress progress-success progress-striped active"><div class="bar" style="width:0%;"></div></div>
                </td>
                <td class="start">{% if (!o.options.autoUpload) { %}
                    <button class="btn btn-primary" rel="fileupload_operation">
                        <i class="glyphicon glyphicon-arrow-up icon-white"></i>
                        <span>上传</span>
                    </button>
                {% } %}</td>
            {% } else { %}
                <td colspan="2"></td>
            {% } %}
            <td class="cancel">{% if (!i) { %}
                <button class="btn " rel="fileupload_operation"
                style="height:40px">
                    <i class="glyphicon glyphicon-remove icon-white"></i>
                    <span>取消</span>
                </button>
            {% } %}</td>
        </tr>
    {% } %}
    </script>
    <!-- The template to display files available for download -->
    <script id="template-download" type="text/x-tmpl">
    {% for (var i=0, file; file=o.files[i]; i++) { %}
        <tr class="template-download fade">
            {% if (file.error) { %}
                <td class="name" ><span>{%=file.name%}</span></td>
                <td class="type"><span>{%=file.type%}</span></td>
                <td class="error" colspan="2"><span class="label label-danger">{%=locale.fileupload.error%}</span> {%=locale.fileupload.errors[file.error] || file.error%}</td>
            {% } else { %}
                <td class="name">
                    <a href="$" title="{%=file.name%}" fid="{%=file.id%}">{%=file.name%}</a>
                </td>
                <td class="type"><span>{%=file.type%}</span></td>
                <td colspan="2"></td>
            {% } %}
            <td class="delete">
                <button class="btn btn-danger" data-type="{%=file.delete_type%}" data-url="{%=file.delete_url%}" rel="fileupload_operation">
                    <i class="glyphicon glyphicon-trash icon-white"></i>
                    <span>删除</span>
                </button>
            </td>
        </tr>
    {% } %}
    </script>
    """
|
tianweidut/ChemToolsWebService
|
chemistry/templatetags/upload_tags.py
|
Python
|
agpl-3.0
| 2,694
|
#!/usr/bin/python
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
__all__ = [ "transport" ]
|
marcellodesales/svnedge-console
|
ext/windows/pkg-toolkit/pkg/vendor-packages/pkg/client/transport/__init__.py
|
Python
|
agpl-3.0
| 946
|
# from context import sample as pump
import context as pump
import time
import sys
import getopt
def main(argv):
    """Parse command-line options and run a PiUMP production-cycle test.

    Options:
        -f / --fabrica   factory number (effectively required)
        -w               overwrite previous records for this factory
                         (write mode instead of append)
        -v / --veces     number of times to repeat the measurement
        -s / --segundos  seconds to wait between measurements
        -h               print usage and exit

    Exits with status 2 on a bad option; plain exit on -h.
    """
    # Single source of truth for the usage text (was duplicated verbatim).
    usage = ("Uso: test_curva.py -f <Numero fabrica> {opcional: -w "
             "<sobreescribe registros anteriores de esta fabrica> -v "
             "<Veces a repetir medida> -s <segundos de espera>}")
    fabrica = 0
    ntimes = 1
    sec = 5.0
    WorA = 'a'
    try:
        opts, args = getopt.getopt(
            argv,
            "hwf:v:s:",
            ["fabrica=", "veces=", "segundos="],
        )
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(usage)
            sys.exit()
        elif opt in ("-f", "--fabrica"):
            fabrica = arg
        elif opt in ("-v", "--veces"):
            ntimes = int(arg)
        elif opt in ("-s", "--segundos"):
            # Accept fractional waits; int(arg) previously truncated them
            # even though the default (5.0) is a float.
            sec = float(arg)
        elif opt in ("-w", "--write"):
            WorA = 'w'
    # Sensor map: measurement kind -> {sensor model: [channels]}.
    sensors = {'pressure': {'SMC01': [2]}}
    # Pre-bind so the KeyboardInterrupt handler below cannot hit a
    # NameError if construction itself is interrupted.
    classe = None
    try:
        classe = pump.PiUMP(
            typetest="prod_cycles_knf",
            numFabrica=fabrica,
            WorA=WorA,
            ntimes=ntimes,
            sec=sec,
            sensors=sensors,
        )
        classe.main()
    except KeyboardInterrupt:
        if classe is not None:
            classe.close_gpio()
        print('Stop')
# Run with CLI arguments (program name stripped) when invoked as a script.
if __name__ == '__main__':
    main(sys.argv[1:])
|
xavigisbeg/ELECTROAD_PiUMP
|
tests/test_knf.py
|
Python
|
apache-2.0
| 1,987
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for ACL management.
"""
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from weblate.trans.tests.test_views import ViewTestCase
class ACLViewTest(ViewTestCase):
    """Views tests for per-project ACLs: access denial, user management
    visibility, and adding/removing users and owners."""
    def setUp(self):
        # Enable ACL on the test project and create a second user to be
        # added/removed through the management views.
        super(ACLViewTest, self).setUp()
        self.project.enable_acl = True
        self.project.save()
        self.project_url = reverse('project', kwargs=self.kw_project)
        self.second_user = User.objects.create_user(
            'seconduser',
            'noreply@example.org',
            'testpassword'
        )
    def add_acl(self):
        """
        Adds user to ACL.
        """
        self.project.add_user(self.user)
    def test_acl_denied(self):
        """No access to the project without ACL.
        """
        response = self.client.get(self.project_url)
        self.assertEqual(response.status_code, 403)
    def test_acl(self):
        """Regular user should not have access to user management.
        """
        self.add_acl()
        response = self.client.get(self.project_url)
        self.assertNotContains(response, 'Manage users')
    def test_edit_acl(self):
        """Manager should have access to user management.
        """
        self.add_acl()
        self.make_manager()
        response = self.client.get(self.project_url)
        self.assertContains(response, 'Manage users')
    def test_edit_acl_owner(self):
        """Owner should have access to user management.
        """
        self.add_acl()
        self.project.owners.add(self.user)
        response = self.client.get(self.project_url)
        self.assertContains(response, 'Manage users')
    def add_user(self):
        """Helper: as owner, add second_user via the add-user view and
        verify they appear on the project page."""
        self.add_acl()
        self.project.owners.add(self.user)
        # Add user
        response = self.client.post(
            reverse('add-user', kwargs=self.kw_project),
            {'name': self.second_user.username}
        )
        self.assertRedirects(response, '{0}#acl'.format(self.project_url))
        # Ensure user is now listed
        response = self.client.get(self.project_url)
        self.assertContains(response, self.second_user.username)
        self.assertContains(response, self.second_user.email)
    def remove_user(self):
        """Helper: remove second_user via the delete-user view and verify
        they no longer appear on the project page."""
        # Remove user
        response = self.client.post(
            reverse('delete-user', kwargs=self.kw_project),
            {'name': self.second_user.username}
        )
        self.assertRedirects(response, '{0}#acl'.format(self.project_url))
        # Ensure user is now not listed
        response = self.client.get(self.project_url)
        self.assertNotContains(response, self.second_user.username)
        self.assertNotContains(response, self.second_user.email)
    def test_add_acl(self):
        """Adding and removing user from the ACL project.
        """
        self.add_user()
        self.remove_user()
    def test_add_owner(self):
        """Adding and removing owner from the ACL project.
        """
        self.add_user()
        self.client.post(
            reverse('make-owner', kwargs=self.kw_project),
            {'name': self.second_user.username}
        )
        self.assertTrue(
            self.project.owners.filter(
                username=self.second_user.username
            ).exists()
        )
        self.client.post(
            reverse('revoke-owner', kwargs=self.kw_project),
            {'name': self.second_user.username}
        )
        self.assertFalse(
            self.project.owners.filter(
                username=self.second_user.username
            ).exists()
        )
        self.remove_user()
    def test_delete_owner(self):
        """Adding and deleting owner from the ACL project.
        """
        self.add_user()
        self.client.post(
            reverse('make-owner', kwargs=self.kw_project),
            {'name': self.second_user.username}
        )
        self.remove_user()
        # Deleting the user must also revoke their ownership.
        self.assertFalse(
            self.project.owners.filter(
                username=self.second_user.username
            ).exists()
        )
    def test_denied_owner_delete(self):
        """Test that deleting last owner does not work."""
        self.project.owners.add(self.user)
        self.client.post(
            reverse('revoke-owner', kwargs=self.kw_project),
            {'name': self.second_user.username}
        )
        self.assertTrue(
            self.project.owners.filter(
                username=self.user.username
            ).exists()
        )
        self.client.post(
            reverse('delete-user', kwargs=self.kw_project),
            {'name': self.second_user.username}
        )
        self.assertTrue(
            self.project.owners.filter(
                username=self.user.username
            ).exists()
        )
    def test_nonexisting_user(self):
        """Test adding non existing user."""
        self.project.owners.add(self.user)
        response = self.client.post(
            reverse('add-user', kwargs=self.kw_project),
            {'name': 'nonexisging'},
            follow=True
        )
        self.assertContains(response, 'No matching user found!')
|
leohmoraes/weblate
|
weblate/trans/tests/test_acl.py
|
Python
|
gpl-3.0
| 5,904
|
"""Implementation of JSONEncoder
"""
import re
try:
from _json import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
c_encode_basestring_ascii = None
try:
from _json import encode_basestring as c_encode_basestring
except ImportError:
c_encode_basestring = None
try:
from _json import make_encoder as c_make_encoder
except ImportError:
c_make_encoder = None
# Characters that must be escaped inside a JSON string (controls, quote,
# backslash).
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
# Everything that is not printable ASCII -- used for ensure_ascii output.
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
# Detects non-ASCII bytes in a bytes object.
HAS_UTF8 = re.compile(b'[\x80-\xff]')
# Fixed two-character escapes; remaining control chars get \uXXXX below.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
for i in range(0x20):
    ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
    #ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
INFINITY = float('inf')
def py_encode_basestring(s):
    """Return *s* as a quoted JSON string literal, escaping control
    characters, double quotes and backslashes via ESCAPE_DCT."""
    return '"' + ESCAPE.sub(lambda match: ESCAPE_DCT[match.group(0)], s) + '"'
encode_basestring = (c_encode_basestring or py_encode_basestring)
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON representation of a Python string
    """
    def replace(match):
        # Map one matched character to its escape sequence.
        s = match.group(0)
        try:
            return ESCAPE_DCT[s]
        except KeyError:
            n = ord(s)
            if n < 0x10000:
                # Basic Multilingual Plane: a single \uXXXX escape.
                return '\\u{0:04x}'.format(n)
                #return '\\u%04x' % (n,)
            else:
                # surrogate pair
                n -= 0x10000
                s1 = 0xd800 | ((n >> 10) & 0x3ff)
                s2 = 0xdc00 | (n & 0x3ff)
                return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
    return '"' + ESCAPE_ASCII.sub(replace, s) + '"'
# Prefer the C implementation when the _json accelerator is available.
encode_basestring_ascii = (
    c_encode_basestring_ascii or py_encode_basestring_ascii)
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.
    Supports the following objects and types by default:
    +-------------------+---------------+
    | Python | JSON |
    +===================+===============+
    | dict | object |
    +-------------------+---------------+
    | list, tuple | array |
    +-------------------+---------------+
    | str | string |
    +-------------------+---------------+
    | int, float | number |
    +-------------------+---------------+
    | True | true |
    +-------------------+---------------+
    | False | false |
    +-------------------+---------------+
    | None | null |
    +-------------------+---------------+
    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).
    """
    # Class-level separator defaults; __init__ may override them per instance.
    item_separator = ', '
    key_separator = ': '
    def __init__(self, *, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, default=None):
        """Constructor for JSONEncoder, with sensible defaults.
        If skipkeys is false, then it is a TypeError to attempt
        encoding of keys that are not str, int, float or None. If
        skipkeys is True, such items are simply skipped.
        If ensure_ascii is true, the output is guaranteed to be str
        objects with all incoming non-ASCII characters escaped. If
        ensure_ascii is false, the output can contain non-ASCII characters.
        If check_circular is true, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.
        If allow_nan is true, then NaN, Infinity, and -Infinity will be
        encoded as such. This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.
        If sort_keys is true, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.
        If indent is a non-negative integer, then JSON array
        elements and object members will be pretty-printed with that
        indent level. An indent level of 0 will only insert newlines.
        None is the most compact representation.
        If specified, separators should be an (item_separator, key_separator)
        tuple. The default is (', ', ': ') if *indent* is ``None`` and
        (',', ': ') otherwise. To get the most compact JSON representation,
        you should specify (',', ':') to eliminate whitespace.
        If specified, default is a function that gets called for objects
        that can't otherwise be serialized. It should return a JSON encodable
        version of the object or raise a ``TypeError``.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.indent = indent
        # Explicit separators always win; otherwise drop the space after the
        # item separator when pretty-printing so lines don't end in a blank.
        if separators is not None:
            self.item_separator, self.key_separator = separators
        elif indent is not None:
            self.item_separator = ','
        if default is not None:
            self.default = default
    def default(self, o):
        """Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).
        For example, to support arbitrary iterators, you could
        implement default like this::
            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                # Let the base class default method raise the TypeError
                return JSONEncoder.default(self, o)
        """
        raise TypeError("Object of type '%s' is not JSON serializable" %
                        o.__class__.__name__)
    def encode(self, o):
        """Return a JSON string representation of a Python data structure.
        >>> from json.encoder import JSONEncoder
        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, str):
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed. The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        return ''.join(chunks)
    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.
        For example::
            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)
        """
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring
        # Local float formatter: renders NaN/Infinity names, or raises when
        # allow_nan is false; plain floats use float.__repr__.
        def floatstr(o, allow_nan=self.allow_nan,
                _repr=float.__repr__, _inf=INFINITY, _neginf=-INFINITY):
            # Check for specials. Note that this type of test is processor
            # and/or platform-specific, so do tests which don't depend on the
            # internals.
            if o != o:
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                return _repr(o)
            if not allow_nan:
                raise ValueError(
                    "Out of range float values are not JSON compliant: " +
                    repr(o))
            return text
        # Use the C-accelerated one-shot encoder only when available and no
        # pretty-printing is requested; otherwise fall back to pure Python.
        if (_one_shot and c_make_encoder is not None
                and self.indent is None):
            _iterencode = c_make_encoder(
                markers, self.default, _encoder, self.indent,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, self.allow_nan)
        else:
            _iterencode = _make_iterencode(
                markers, self.default, _encoder, self.indent, floatstr,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, _one_shot)
        return _iterencode(o, 0)
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
        _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        ## HACK: hand-optimized bytecode; turn globals into locals
        ValueError=ValueError,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        str=str,
        tuple=tuple,
        _intstr=int.__str__,
    ):
    """Build and return a pure-Python ``_iterencode(o, indent_level)``
    generator function.  The encoder configuration is captured by the
    closures below; the builtins are re-bound as keyword defaults purely
    as a CPython lookup-speed optimization.
    """
    # An integer *indent* means that many spaces per nesting level.
    if _indent is not None and not isinstance(_indent, str):
        _indent = ' ' * _indent
    # Yields the textual pieces of a JSON array; *markers* detects cycles.
    def _iterencode_list(lst, _current_indent_level):
        if not lst:
            yield '[]'
            return
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + _indent * _current_indent_level
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                buf = separator
            if isinstance(value, str):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, int):
                # Subclasses of int/float may override __str__, but we still
                # want to encode them as integers/floats in JSON. One example
                # within the standard library is IntEnum.
                yield buf + _intstr(value)
            elif isinstance(value, float):
                # see comment above for int
                yield buf + _floatstr(value)
            else:
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                yield from chunks
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + _indent * _current_indent_level
        yield ']'
        if markers is not None:
            del markers[markerid]
    # Yields the textual pieces of a JSON object; non-string keys are
    # coerced (or skipped/rejected) before being encoded.
    def _iterencode_dict(dct, _current_indent_level):
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + _indent * _current_indent_level
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            items = sorted(dct.items(), key=lambda kv: kv[0])
        else:
            items = dct.items()
        for key, value in items:
            if isinstance(key, str):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them. Many encoders seem to do something like this.
            elif isinstance(key, float):
                # see comment for int/float in _make_iterencode
                key = _floatstr(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, int):
                # see comment for int/float in _make_iterencode
                key = _intstr(key)
            elif _skipkeys:
                continue
            else:
                raise TypeError("key " + repr(key) + " is not a string")
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, str):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, int):
                # see comment for int/float in _make_iterencode
                yield _intstr(value)
            elif isinstance(value, float):
                # see comment for int/float in _make_iterencode
                yield _floatstr(value)
            else:
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                yield from chunks
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + _indent * _current_indent_level
        yield '}'
        if markers is not None:
            del markers[markerid]
    # Top-level dispatcher: scalars directly, containers via the helpers,
    # anything else through the user-supplied *_default* hook.
    def _iterencode(o, _current_indent_level):
        if isinstance(o, str):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, int):
            # see comment for int/float in _make_iterencode
            yield _intstr(o)
        elif isinstance(o, float):
            # see comment for int/float in _make_iterencode
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            yield from _iterencode_list(o, _current_indent_level)
        elif isinstance(o, dict):
            yield from _iterencode_dict(o, _current_indent_level)
        else:
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            o = _default(o)
            yield from _iterencode(o, _current_indent_level)
            if markers is not None:
                del markers[markerid]
    return _iterencode
|
prefetchnta/questlab
|
bin/x64bin/python/36/Lib/json/encoder.py
|
Python
|
lgpl-2.1
| 16,461
|
#!/usr/bin/env python
import datetime
import os
import time
import bs4
import psycopg2
import requests
# Maps required environment variables to their libpq connection-string keys.
ENV_VARS = {
    "FERRY_DBNAME": "dbname",
    "FERRY_DBUSER": "user",
    "FERRY_DBPWD": "password",
}
# Connection string such as "dbname=ferries user=me password=secret", built
# from whichever of the variables above are present in the environment.
# Iterating .items() avoids the keys()-then-reindex round trip.
DB_CREDS = " ".join(
    key + "=" + os.environ[var]
    for var, key in ENV_VARS.items()
    if var in os.environ
)
# MMSI identifier -> vessel name for the Staten Island Ferry fleet.
FERRIES = {
    "366952890": "Spirit of America",
    "366952870": "Sen. John J Marchi",
    "366952790": "Guy V Molinari",
    "367000140": "Samuel I Newhouse",
    "367000150": "Andrew J Barberi",
    "367000190": "John F Kennedy",
    "367000110": "John Noble",
    "367000120": "Alice Austen",
}
def get(mmsi):
    """Fetch the AISHub vessel-information page for the vessel *mmsi*.

    Returns the response body as text (HTML).
    """
    response = requests.get(
        'http://www.aishub.net/ais-hub-vessel-information.html',
        params={'mmsi': mmsi},
    )
    return response.text
def html_to_dict(html):
    """Parse the vessel-information page table into property/value pairs.

    Returns an empty dict when the page has no table.
    """
    soup = bs4.BeautifulSoup(html)
    if not soup.table:
        return {}
    # Skip the header row, then turn each remaining row's cells into a
    # (key, value) pair, coercing selected values via fmt().
    rows = list(soup.table.find_all("tr"))[1:]
    pairs = []
    for row in rows:
        cells = [tag_to_string(td) for td in row.children
                 if type(td) == bs4.element.Tag]
        pairs.append(fmt(cells))
    return dict(pairs)
def tag_to_string(tag):
    """Return the string contained by a (possibly nested) bs4.element.Tag.

    Descends through nested tags until the first non-tag child (the text
    node) is reached.
    """
    # Builtin next() instead of the generator's .next() method: .next() is
    # Python-2-only (removed in Python 3), while next() works on both.
    elem = next(tag.children)
    if type(elem) == bs4.element.Tag:
        return tag_to_string(elem)
    else:
        return elem
def fmt(pair):
    """Coerce selected values from string to an appropriate type.

    *pair* is a [key, value] sequence; known keys get their value converted
    (degrees -> float, MMSI -> int, timestamp -> datetime shifted -2h),
    unknown keys pass through unchanged.
    """
    def to_degrees(v):
        return float(v.replace(u' \xb0', u''))

    def to_timestamp(v):
        parsed = datetime.datetime.strptime(v, '%d-%m-%Y %H:%M')
        return parsed + datetime.timedelta(hours=-2)

    converters = {u'LATITUDE:': to_degrees,
                  u'LONGITUDE:': to_degrees,
                  u'MMSI': int,
                  u'LAST UPDATE:': to_timestamp}
    key, value = pair
    if key in converters:
        return [key, converters[key](value)]
    return pair
def latest_update(cur, mmsi):
    """Return the most recent ``last_update`` stored for *mmsi* (None if no rows)."""
    query = 'SELECT MAX(last_update) FROM locations WHERE mmsi=%s;'
    cur.execute(query, (mmsi,))
    rows = cur.fetchall()
    return rows[0][0]
def push_to_db(d):
    """Insert the vessel position in *d* into ``locations``.

    The row is skipped when its timestamp matches the newest update already
    stored for that vessel (avoids duplicate rows between polls).
    """
    conn = psycopg2.connect(DB_CREDS)
    cur = conn.cursor()
    cols = (u'MMSI', u'LATITUDE:', u'LONGITUDE:', u'LAST UPDATE:')
    vals = [d[c] for c in cols]
    previous = latest_update(cur, d[u'MMSI'])
    if d[u'LAST UPDATE:'] != previous:
        cur.execute('INSERT INTO locations VALUES (%s, %s, %s, %s);', vals)
        conn.commit()
    cur.close()
    conn.close()
def main():
    """Poll each ferry's AIS page, storing fresh positions, 5s apart."""
    for mmsi in FERRIES:
        vessel_info = html_to_dict(get(mmsi))
        if vessel_info:
            push_to_db(vessel_info)
        time.sleep(5)
if __name__ == '__main__':
    main()
|
mikemcbrearty/ferry-app
|
py/scrape.py
|
Python
|
mit
| 2,701
|
# -*- test-case-name: twisted.test.test_process -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
http://isometric.sixsided.org/_/gates_in_the_head/
"""
# pylint: disable-msg=F0401,E1101
import os
# Win32 imports
import win32api
import win32con
import win32event
import win32file
import win32pipe
import win32process
import win32security
import win32job
from win32con import CREATE_SUSPENDED
from time import time
import pywintypes
# security attributes for pipes: bInheritHandle=1 marks handles created with
# these attributes as inheritable by child processes.
PIPE_ATTRS_INHERITABLE = win32security.SECURITY_ATTRIBUTES()
PIPE_ATTRS_INHERITABLE.bInheritHandle = 1
from zope.interface import implements
from twisted.internet.interfaces import IProcessTransport, IConsumer, IProducer
from twisted.python.win32 import quoteArguments
from twisted.internet import error
from twisted.python import failure
from twisted.internet import _pollingfile
from twisted.internet._baseprocess import BaseProcess
def debug(msg):
    # Ad-hoc debugging helper: print *msg* and flush immediately so output
    # is not lost if the process dies (Python 2 print statement).
    import sys
    print msg
    sys.stdout.flush()
class _Reaper(_pollingfile._PollableResource):
    """Polls the child process handle and reports its exit to the Process."""

    def __init__(self, proc):
        self.proc = proc

    def checkWork(self):
        # A zero-timeout wait is a non-blocking "has it exited yet?" probe.
        handle = self.proc.hProcess
        if win32event.WaitForSingleObject(handle, 0) != win32event.WAIT_OBJECT_0:
            return 0
        status = win32process.GetExitCodeProcess(handle)
        self.deactivate()
        self.proc.processEnded(status)
        return 0
def _findShebang(filename):
"""
Look for a #! line, and return the value following the #! if one exists, or
None if this file is not a script.
I don't know if there are any conventions for quoting in Windows shebang
lines, so this doesn't support any; therefore, you may not pass any
arguments to scripts invoked as filters. That's probably wrong, so if
somebody knows more about the cultural expectations on Windows, please feel
free to fix.
This shebang line support was added in support of the CGI tests;
appropriately enough, I determined that shebang lines are culturally
accepted in the Windows world through this page::
http://www.cgi101.com/learn/connect/winxp.html
@param filename: str representing a filename
@return: a str representing another filename.
"""
f = file(filename, 'ru')
if f.read(2) == '#!':
exe = f.readline(1024).strip('\n')
return exe
def _invalidWin32App(pywinerr):
"""
Determine if a pywintypes.error is telling us that the given process is
'not a valid win32 application', i.e. not a PE format executable.
@param pywinerr: a pywintypes.error instance raised by CreateProcess
@return: a boolean
"""
# Let's do this better in the future, but I have no idea what this error
# is; MSDN doesn't mention it, and there is no symbolic constant in
# win32process module that represents 193.
return pywinerr.args[0] == 193
class Process(_pollingfile._PollingTimer, BaseProcess):
    """A process that integrates with the Twisted event loop.
    If your subprocess is a python program, you need to:
    - Run python.exe with the '-u' command line option - this turns on
    unbuffered I/O. Buffering stdout/err/in can cause problems, see e.g.
    http://support.microsoft.com/default.aspx?scid=kb;EN-US;q1903
    - If you don't want Windows messing with data passed over
    stdin/out/err, set the pipes to be in binary mode::
    import os, sys, mscvrt
    msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
    msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
    msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
    """
    implements(IProcessTransport, IConsumer, IProducer)
    # Number of close notifications received so far (stdin, stdout, stderr).
    closedNotifies = 0
    def __init__(self, reactor, protocol, command, args, environment, path):
        _pollingfile._PollingTimer.__init__(self, reactor)
        BaseProcess.__init__(self, protocol)
        # security attributes for pipes
        sAttrs = win32security.SECURITY_ATTRIBUTES()
        sAttrs.bInheritHandle = 1
        # create the pipes which will connect to the secondary process
        self.hStdoutR, hStdoutW = win32pipe.CreatePipe(sAttrs, 0)
        self.hStderrR, hStderrW = win32pipe.CreatePipe(sAttrs, 0)
        hStdinR, self.hStdinW = win32pipe.CreatePipe(sAttrs, 0)
        # Non-blocking writes, so polling the write pipe never stalls us.
        win32pipe.SetNamedPipeHandleState(self.hStdinW,
                                          win32pipe.PIPE_NOWAIT,
                                          None,
                                          None)
        # set the info structure for the new process.
        StartupInfo = win32process.STARTUPINFO()
        StartupInfo.hStdOutput = hStdoutW
        StartupInfo.hStdError = hStderrW
        StartupInfo.hStdInput = hStdinR
        StartupInfo.dwFlags = win32process.STARTF_USESTDHANDLES
        # Create new handles whose inheritance property is false
        currentPid = win32api.GetCurrentProcess()
        tmp = win32api.DuplicateHandle(currentPid, self.hStdoutR, currentPid, 0, 0,
                                       win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(self.hStdoutR)
        self.hStdoutR = tmp
        tmp = win32api.DuplicateHandle(currentPid, self.hStderrR, currentPid, 0, 0,
                                       win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(self.hStderrR)
        self.hStderrR = tmp
        tmp = win32api.DuplicateHandle(currentPid, self.hStdinW, currentPid, 0, 0,
                                       win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(self.hStdinW)
        self.hStdinW = tmp
        # Add the specified environment to the current environment - this is
        # necessary because certain operations are only supported on Windows
        # if certain environment variables are present.
        env = os.environ.copy()
        env.update(environment or {})
        cmdline = quoteArguments(args)
        # TODO: error detection here.
        def doCreate():
            # The job object groups the child with any processes it spawns,
            # so signalProcess() can terminate the whole tree at once.  The
            # child starts CREATE_SUSPENDED so it cannot exit before being
            # assigned to the job.
            self.job = win32job.CreateJobObject(None, str(time()))
            self.hProcess, self.hThread, self.pid, dwTid = win32process.CreateProcess(
                command, cmdline, None, None, 1, CREATE_SUSPENDED, env, path, StartupInfo)
            win32job.AssignProcessToJobObject(self.job, self.hProcess)
            win32process.ResumeThread(self.hThread)
        try:
            doCreate()
        except pywintypes.error, pwte:
            if not _invalidWin32App(pwte):
                # This behavior isn't _really_ documented, but let's make it
                # consistent with the behavior that is documented.
                raise OSError(pwte)
            else:
                # look for a shebang line.  Insert the original 'command'
                # (actually a script) into the new arguments list.
                sheb = _findShebang(command)
                if sheb is None:
                    raise OSError(
                        "%r is neither a Windows executable, "
                        "nor a script with a shebang line" % command)
                else:
                    args = list(args)
                    args.insert(0, command)
                    cmdline = quoteArguments(args)
                    origcmd = command
                    command = sheb
                    try:
                        # Let's try again.
                        doCreate()
                    except pywintypes.error, pwte2:
                        # d'oh, failed again!
                        if _invalidWin32App(pwte2):
                            raise OSError(
                                "%r has an invalid shebang line: "
                                "%r is not a valid executable" % (
                                    origcmd, sheb))
                        raise OSError(pwte2)
        win32file.CloseHandle(self.hThread)
        # close handles which only the child will use
        win32file.CloseHandle(hStderrW)
        win32file.CloseHandle(hStdoutW)
        win32file.CloseHandle(hStdinR)
        # set up everything
        self.stdout = _pollingfile._PollableReadPipe(
            self.hStdoutR,
            lambda data: self.proto.childDataReceived(1, data),
            self.outConnectionLost)
        self.stderr = _pollingfile._PollableReadPipe(
            self.hStderrR,
            lambda data: self.proto.childDataReceived(2, data),
            self.errConnectionLost)
        self.stdin = _pollingfile._PollableWritePipe(
            self.hStdinW, self.inConnectionLost)
        for pipewatcher in self.stdout, self.stderr, self.stdin:
            self._addPollableResource(pipewatcher)
        # notify protocol
        self.proto.makeConnection(self)
        self._addPollableResource(_Reaper(self))
    def signalProcess(self, signalID):
        # Windows has no POSIX signals; INT/TERM/KILL all hard-terminate
        # the entire job (the child plus anything it spawned).
        if self.pid is None:
            raise error.ProcessExitedAlready()
        if signalID in ("INT", "TERM", "KILL"):
            #win32process.TerminateProcess(self.hProcess, 1)
            win32job.TerminateJobObject(self.job, 1)
    def _getReason(self, status):
        # Map the Windows exit code onto Twisted's done/terminated failures.
        if status == 0:
            return error.ProcessDone(status)
        return error.ProcessTerminated(status)
    def write(self, data):
        """Write data to the process' stdin."""
        self.stdin.write(data)
    def writeSequence(self, seq):
        """Write data to the process' stdin."""
        self.stdin.writeSequence(seq)
    def closeChildFD(self, fd):
        # Only the three standard descriptors exist for win32 children.
        if fd == 0:
            self.closeStdin()
        elif fd == 1:
            self.closeStdout()
        elif fd == 2:
            self.closeStderr()
        else:
            raise NotImplementedError("Only standard-IO file descriptors available on win32")
    def closeStdin(self):
        """Close the process' stdin.
        """
        self.stdin.close()
    def closeStderr(self):
        self.stderr.close()
    def closeStdout(self):
        self.stdout.close()
    def loseConnection(self):
        """Close the process' stdout, in and err."""
        self.closeStdin()
        self.closeStdout()
        self.closeStderr()
    def outConnectionLost(self):
        self.proto.childConnectionLost(1)
        self.connectionLostNotify()
    def errConnectionLost(self):
        self.proto.childConnectionLost(2)
        self.connectionLostNotify()
    def inConnectionLost(self):
        self.proto.childConnectionLost(0)
        self.connectionLostNotify()
    def connectionLostNotify(self):
        """
        Will be called 3 times, by stdout/err threads and process handle.
        """
        self.closedNotifies += 1
        self.maybeCallProcessEnded()
    def maybeCallProcessEnded(self):
        # Only report the end once all three pipes have closed AND the
        # process handle itself has been reaped.
        if self.closedNotifies == 3 and self.lostProcess:
            BaseProcess.maybeCallProcessEnded(self)
    # IConsumer
    def registerProducer(self, producer, streaming):
        self.stdin.registerProducer(producer, streaming)
    def unregisterProducer(self):
        self.stdin.unregisterProducer()
    # IProducer
    def pauseProducing(self):
        self._pause()
    def resumeProducing(self):
        self._unpause()
    def stopProducing(self):
        self.loseConnection()
    def __repr__(self):
        """
        Return a string representation of the process.
        """
        return "<%s pid=%s>" % (self.__class__.__name__, self.pid)
|
TaskEvolution/Task-Coach-Evolution
|
taskcoach/buildbot/slaves/WinXPSP3/_dumbwin32proc.py
|
Python
|
gpl-3.0
| 11,364
|
"""Test AdaNet single graph subnetwork implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from adanet.subnetwork.report import Report
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
# pylint: enable=g-direct-tensorflow-import
class ReportTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for constructing and validating adanet subnetwork Reports."""
  # pylint: disable=g-long-lambda
  @parameterized.named_parameters(
      {
          "testcase_name": "empty",
          "hparams": {},
          "attributes": lambda: {},
          "metrics": lambda: {},
      }, {
          "testcase_name": "non_empty",
          "hparams": {
              "hoo": 1
          },
          "attributes": lambda: {
              "aoo": tf.constant(1)
          },
          "metrics": lambda: {
              "moo": (tf.constant(1), tf.constant(1))
          },
      }, {
          "testcase_name": "non_tensor_update_op",
          "hparams": {
              "hoo": 1
          },
          "attributes": lambda: {
              "aoo": tf.constant(1)
          },
          "metrics": lambda: {
              "moo": (tf.constant(1), tf.no_op())
          },
      })
  # pylint: enable=g-long-lambda
  @test_util.run_in_graph_and_eager_modes
  def test_new(self, hparams, attributes, metrics):
    # attributes/metrics arrive as lambdas so the tensors are created inside
    # this test's own graph, not at parameter-definition time.
    with context.graph_mode():
      _ = tf.constant(0)  # Just to have a non-empty graph.
      report = Report(
          hparams=hparams, attributes=attributes(), metrics=metrics())
      self.assertEqual(hparams, report.hparams)
      self.assertEqual(
          self.evaluate(attributes()), self.evaluate(report.attributes))
      self.assertEqual(self.evaluate(metrics()), self.evaluate(report.metrics))
  @test_util.run_in_graph_and_eager_modes
  def test_drop_non_scalar_metric(self):
    """Tests b/118632346."""
    hparams = {"hoo": 1}
    attributes = {"aoo": tf.constant(1)}
    metrics = {
        "moo1": (tf.constant(1), tf.constant(1)),
        "moo2": (tf.constant([1, 1]), tf.constant([1, 1])),
    }
    # The non-scalar metric "moo2" must be dropped silently by Report.
    want_metrics = metrics.copy()
    del want_metrics["moo2"]
    with self.test_session():
      report = Report(hparams=hparams, attributes=attributes, metrics=metrics)
      self.assertEqual(hparams, report.hparams)
      self.assertEqual(attributes, report.attributes)
      self.assertEqual(want_metrics, report.metrics)
  @parameterized.named_parameters(
      {
          "testcase_name": "tensor_hparams",
          "hparams": {
              "hoo": tf.constant(1)
          },
          "attributes": {},
          "metrics": {},
      }, {
          "testcase_name": "non_tensor_attributes",
          "hparams": {},
          "attributes": {
              "aoo": 1,
          },
          "metrics": {},
      }, {
          "testcase_name": "non_tuple_metrics",
          "hparams": {},
          "attributes": {},
          "metrics": {
              "moo": tf.constant(1)
          },
      }, {
          "testcase_name": "one_item_tuple_metrics",
          "hparams": {},
          "attributes": {},
          "metrics": {
              "moo": (tf.constant(1),)
          },
      })
  @test_util.run_in_graph_and_eager_modes
  def test_new_errors(self, hparams, attributes, metrics):
    # Each malformed argument combination must be rejected at construction.
    with self.assertRaises(ValueError):
      Report(hparams=hparams, attributes=attributes, metrics=metrics)
# Allow running this test module directly.
if __name__ == "__main__":
  tf.test.main()
|
tensorflow/adanet
|
adanet/subnetwork/report_test.py
|
Python
|
apache-2.0
| 4,130
|
# -*- coding: utf-8 -*-
"""
Generator.py
create (generate) annotated data
H. Déjean
copyright Xerox 2017
READ project
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from dataGenerator.generator import Generator
# Python 2/3 compatibility: ``basestring`` only exists on Python 2; on
# Python 3 fall back to ``str`` so the isinstance/type checks below work.
try:basestring
except NameError:basestring = str
class listGenerator(Generator):
    """
    A generator for lists: ``objGen`` is instantiated ``nbMaxGen`` times
    (the count itself being generated), and each numbered instance is
    collected into ``self._instance``.
    """
    def __init__(self,config,configKey,objGen,nbMaxGen):
        Generator.__init__(self,config,configKey)
        self.configObj=configKey
        # Class (or factory) used to build each list element.
        self.myObjectGen = objGen
        # Generator producing the number of elements to create.
        self.nbMax = nbMaxGen

    def getValuedNb(self):
        """Return the generated number of elements."""
        return self.nbMax._generation

    def instantiate(self):
        """Create ``nbMax._generation`` numbered element generators."""
        self._instance = []
        self.nbMax.generate()
        for i in range(self.nbMax._generation):
            try:
                o = self.myObjectGen(self.getConfig(),self.configObj)
                o.instantiate()
            except TypeError:
                # Some element generators take their configuration unpacked.
                o = self.myObjectGen(*self.getConfig(),self.configObj)
                o.instantiate()
            o.setNumber(i)
            self._instance.append(o)
        return self

    def exportAnnotatedData(self,foo):
        """Export (generation, label) pairs for every generated element."""
        self._GT=[]
        for obj in self._generation:
            generation = obj._generation
            # BUG FIX: the original tested ``type(x) == basestring``, which
            # can never be true on Python 2 (basestring is abstract), and
            # ``type(obj) == int`` where obj is always a Generator -- both
            # branches were dead and every element fell through to the
            # recursive case.  Test the generated value with isinstance.
            if isinstance(generation, basestring):
                self._GT.append((generation,[obj.getLabel()]))
            elif isinstance(generation, int):
                self._GT.append((generation,[obj.getLabel()]))
            else:
                self._GT.append((obj.exportAnnotatedData([]),obj))
        return self._GT
# Small self-test / demo when the module is run directly.
if __name__ == "__main__":
    from dataGenerator.numericalGenerator import integerGenerator
    integerGenerator(10,0)
    # NOTE(review): listGenerator.__init__ takes four arguments
    # (config, configKey, objGen, nbMaxGen) but only three are passed here,
    # so this call would raise a TypeError; presumably a config/configKey
    # argument is missing.  Confirm intended usage before relying on it.
    lG = listGenerator((5,4),integerGenerator, integerGenerator(10,0))
    lG.instantiate()
    lG.generate()
    print(lG._generation)
|
Transkribus/TranskribusDU
|
TranskribusDU/dataGenerator/listGenerator.py
|
Python
|
bsd-3-clause
| 2,135
|
"""
Extended docstrings for functions.py
"""
pi = r"""
`\pi`, roughly equal to 3.141592654, represents the area of the unit
circle, the half-period of trigonometric functions, and many other
things in mathematics.
Mpmath can evaluate `\pi` to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +pi
3.1415926535897932384626433832795028841971693993751
This shows digits 99991-100000 of `\pi`::
>>> mp.dps = 100000
>>> str(pi)[-10:]
'5549362464'
**Possible issues**
:data:`pi` always rounds to the nearest floating-point
number when used. This means that exact mathematical identities
involving `\pi` will generally not be preserved in floating-point
arithmetic. In particular, multiples of :data:`pi` (except for
the trivial case ``0*pi``) are *not* the exact roots of
:func:`~mpmath.sin`, but differ roughly by the current epsilon::
>>> mp.dps = 15
>>> sin(pi)
1.22464679914735e-16
One solution is to use the :func:`~mpmath.sinpi` function instead::
>>> sinpi(1)
0.0
See the documentation of trigonometric functions for additional
details.
"""
degree = r"""
Represents one degree of angle, `1^{\circ} = \pi/180`, or
about 0.01745329. This constant may be evaluated to arbitrary
precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +degree
0.017453292519943295769236907684886127134428718885417
The :data:`degree` object is convenient for conversion
to radians::
>>> sin(30 * degree)
0.5
>>> asin(0.5) / degree
30.0
"""
e = r"""
The transcendental number `e` = 2.718281828... is the base of the
natural logarithm (:func:`~mpmath.ln`) and of the exponential function
(:func:`~mpmath.exp`).
Mpmath can be evaluate `e` to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +e
2.7182818284590452353602874713526624977572470937
This shows digits 99991-100000 of `e`::
>>> mp.dps = 100000
>>> str(e)[-10:]
'2100427165'
**Possible issues**
:data:`e` always rounds to the nearest floating-point number
when used, and mathematical identities involving `e` may not
hold in floating-point arithmetic. For example, ``ln(e)``
might not evaluate exactly to 1.
In particular, don't use ``e**x`` to compute the exponential
function. Use ``exp(x)`` instead; this is both faster and more
accurate.
"""
phi = r"""
Represents the golden ratio `\phi = (1+\sqrt 5)/2`,
approximately equal to 1.6180339887. To high precision,
its value is::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +phi
1.6180339887498948482045868343656381177203091798058
Formulas for the golden ratio include the following::
>>> (1+sqrt(5))/2
1.6180339887498948482045868343656381177203091798058
>>> findroot(lambda x: x**2-x-1, 1)
1.6180339887498948482045868343656381177203091798058
>>> limit(lambda n: fib(n+1)/fib(n), inf)
1.6180339887498948482045868343656381177203091798058
"""
euler = r"""
Euler's constant or the Euler-Mascheroni constant `\gamma`
= 0.57721566... is a number of central importance to
number theory and special functions. It is defined as the limit
.. math ::
\gamma = \lim_{n\to\infty} H_n - \log n
where `H_n = 1 + \frac{1}{2} + \ldots + \frac{1}{n}` is a harmonic
number (see :func:`~mpmath.harmonic`).
Evaluation of `\gamma` is supported at arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +euler
0.57721566490153286060651209008240243104215933593992
We can also compute `\gamma` directly from the definition,
although this is less efficient::
>>> limit(lambda n: harmonic(n)-log(n), inf)
0.57721566490153286060651209008240243104215933593992
This shows digits 9991-10000 of `\gamma`::
>>> mp.dps = 10000
>>> str(euler)[-10:]
'4679858165'
Integrals, series, and representations for `\gamma` in terms of
special functions include the following (there are many others)::
>>> mp.dps = 25
>>> -quad(lambda x: exp(-x)*log(x), [0,inf])
0.5772156649015328606065121
>>> quad(lambda x,y: (x-1)/(1-x*y)/log(x*y), [0,1], [0,1])
0.5772156649015328606065121
>>> nsum(lambda k: 1/k-log(1+1/k), [1,inf])
0.5772156649015328606065121
>>> nsum(lambda k: (-1)**k*zeta(k)/k, [2,inf])
0.5772156649015328606065121
>>> -diff(gamma, 1)
0.5772156649015328606065121
>>> limit(lambda x: 1/x-gamma(x), 0)
0.5772156649015328606065121
>>> limit(lambda x: zeta(x)-1/(x-1), 1)
0.5772156649015328606065121
>>> (log(2*pi*nprod(lambda n:
... exp(-2+2/n)*(1+2/n)**n, [1,inf]))-3)/2
0.5772156649015328606065121
For generalizations of the identities `\gamma = -\Gamma'(1)`
and `\gamma = \lim_{x\to1} \zeta(x)-1/(x-1)`, see
:func:`~mpmath.psi` and :func:`~mpmath.stieltjes` respectively.
"""
catalan = r"""
Catalan's constant `K` = 0.91596559... is given by the infinite
series
.. math ::
K = \sum_{k=0}^{\infty} \frac{(-1)^k}{(2k+1)^2}.
Mpmath can evaluate it to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +catalan
0.91596559417721901505460351493238411077414937428167
One can also compute `K` directly from the definition, although
this is significantly less efficient::
>>> nsum(lambda k: (-1)**k/(2*k+1)**2, [0, inf])
0.91596559417721901505460351493238411077414937428167
This shows digits 9991-10000 of `K`::
>>> mp.dps = 10000
>>> str(catalan)[-10:]
'9537871503'
Catalan's constant has numerous integral representations::
>>> mp.dps = 50
>>> quad(lambda x: -log(x)/(1+x**2), [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> quad(lambda x: atan(x)/x, [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> quad(lambda x: ellipk(x**2)/2, [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> quad(lambda x,y: 1/(1+(x*y)**2), [0, 1], [0, 1])
0.91596559417721901505460351493238411077414937428167
As well as series representations::
>>> pi*log(sqrt(3)+2)/8 + 3*nsum(lambda n:
... (fac(n)/(2*n+1))**2/fac(2*n), [0, inf])/8
0.91596559417721901505460351493238411077414937428167
>>> 1-nsum(lambda n: n*zeta(2*n+1)/16**n, [1,inf])
0.91596559417721901505460351493238411077414937428167
"""
khinchin = r"""
Khinchin's constant `K` = 2.68542... is a number that
appears in the theory of continued fractions. Mpmath can evaluate
it to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +khinchin
2.6854520010653064453097148354817956938203822939945
An integral representation is::
>>> I = quad(lambda x: log((1-x**2)/sincpi(x))/x/(1+x), [0, 1])
>>> 2*exp(1/log(2)*I)
2.6854520010653064453097148354817956938203822939945
The computation of ``khinchin`` is based on an efficient
implementation of the following series::
>>> f = lambda n: (zeta(2*n)-1)/n*sum((-1)**(k+1)/mpf(k)
... for k in range(1,2*int(n)))
>>> exp(nsum(f, [1,inf])/log(2))
2.6854520010653064453097148354817956938203822939945
"""
glaisher = r"""
Glaisher's constant `A`, also known as the Glaisher-Kinkelin
constant, is a number approximately equal to 1.282427129 that
sometimes appears in formulas related to gamma and zeta functions.
It is also related to the Barnes G-function (see :func:`~mpmath.barnesg`).
The constant is defined as `A = \exp(1/12-\zeta'(-1))` where
`\zeta'(s)` denotes the derivative of the Riemann zeta function
(see :func:`~mpmath.zeta`).
Mpmath can evaluate Glaisher's constant to arbitrary precision:
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +glaisher
1.282427129100622636875342568869791727767688927325
We can verify that the value computed by :data:`glaisher` is
correct using mpmath's facilities for numerical
differentiation and arbitrary evaluation of the zeta function:
>>> exp(mpf(1)/12 - diff(zeta, -1))
1.282427129100622636875342568869791727767688927325
Here is an example of an integral that can be evaluated in
terms of Glaisher's constant:
>>> mp.dps = 15
>>> quad(lambda x: log(gamma(x)), [1, 1.5])
-0.0428537406502909
>>> -0.5 - 7*log(2)/24 + log(pi)/4 + 3*log(glaisher)/2
-0.042853740650291
Mpmath computes Glaisher's constant by applying Euler-Maclaurin
summation to a slowly convergent series. The implementation is
reasonably efficient up to about 10,000 digits. See the source
code for additional details.
References:
http://mathworld.wolfram.com/Glaisher-KinkelinConstant.html
"""
apery = r"""
Represents Apery's constant, which is the irrational number
approximately equal to 1.2020569 given by
.. math ::
\zeta(3) = \sum_{k=1}^\infty\frac{1}{k^3}.
The calculation is based on an efficient hypergeometric
series. To 50 decimal places, the value is given by::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +apery
1.2020569031595942853997381615114499907649862923405
Other ways to evaluate Apery's constant using mpmath
include::
>>> zeta(3)
1.2020569031595942853997381615114499907649862923405
>>> -psi(2,1)/2
1.2020569031595942853997381615114499907649862923405
>>> 8*nsum(lambda k: 1/(2*k+1)**3, [0,inf])/7
1.2020569031595942853997381615114499907649862923405
>>> f = lambda k: 2/k**3/(exp(2*pi*k)-1)
>>> 7*pi**3/180 - nsum(f, [1,inf])
1.2020569031595942853997381615114499907649862923405
This shows digits 9991-10000 of Apery's constant::
>>> mp.dps = 10000
>>> str(apery)[-10:]
'3189504235'
"""
mertens = r"""
Represents the Mertens or Meissel-Mertens constant, which is the
prime number analog of Euler's constant:
.. math ::
B_1 = \lim_{N\to\infty}
\left(\sum_{p_k \le N} \frac{1}{p_k} - \log \log N \right)
Here `p_k` denotes the `k`-th prime number. Other names for this
constant include the Hadamard-de la Vallee-Poussin constant or
the prime reciprocal constant.
The following gives the Mertens constant to 50 digits::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +mertens
0.2614972128476427837554268386086958590515666482612
References:
http://mathworld.wolfram.com/MertensConstant.html
"""
twinprime = r"""
Represents the twin prime constant, which is the factor `C_2`
featuring in the Hardy-Littlewood conjecture for the growth of the
twin prime counting function,
.. math ::
\pi_2(n) \sim 2 C_2 \frac{n}{\log^2 n}.
It is given by the product over primes
.. math ::
C_2 = \prod_{p\ge3} \frac{p(p-2)}{(p-1)^2} \approx 0.66016
Computing `C_2` to 50 digits::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +twinprime
0.66016181584686957392781211001455577843262336028473
References:
http://mathworld.wolfram.com/TwinPrimesConstant.html
"""
ln = r"""
Computes the natural logarithm of `x`, `\ln x`.
See :func:`~mpmath.log` for additional documentation."""
sqrt = r"""
``sqrt(x)`` gives the principal square root of `x`, `\sqrt x`.
For positive real numbers, the principal root is simply the
positive square root. For arbitrary complex numbers, the principal
square root is defined to satisfy `\sqrt x = \exp(\log(x)/2)`.
The function thus has a branch cut along the negative half real axis.
For all mpmath numbers ``x``, calling ``sqrt(x)`` is equivalent to
performing ``x**0.5``.
**Examples**
Basic examples and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> sqrt(10)
3.16227766016838
>>> sqrt(100)
10.0
>>> sqrt(-4)
(0.0 + 2.0j)
>>> sqrt(1+1j)
(1.09868411346781 + 0.455089860562227j)
>>> sqrt(inf)
+inf
Square root evaluation is fast at huge precision::
>>> mp.dps = 50000
>>> a = sqrt(3)
>>> str(a)[-10:]
'9329332814'
:func:`mpmath.iv.sqrt` supports interval arguments::
>>> iv.dps = 15; iv.pretty = True
>>> iv.sqrt([16,100])
[4.0, 10.0]
>>> iv.sqrt(2)
[1.4142135623730949234, 1.4142135623730951455]
>>> iv.sqrt(2) ** 2
[1.9999999999999995559, 2.0000000000000004441]
"""
cbrt = r"""
``cbrt(x)`` computes the cube root of `x`, `x^{1/3}`. This
function is faster and more accurate than raising to a floating-point
fraction::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> 125**(mpf(1)/3)
mpf('4.9999999999999991')
>>> cbrt(125)
mpf('5.0')
Every nonzero complex number has three cube roots. This function
returns the cube root defined by `\exp(\log(x)/3)` where the
principal branch of the natural logarithm is used. Note that this
does not give a real cube root for negative real numbers::
>>> mp.pretty = True
>>> cbrt(-1)
(0.5 + 0.866025403784439j)
"""
exp = r"""
Computes the exponential function,
.. math ::
\exp(x) = e^x = \sum_{k=0}^{\infty} \frac{x^k}{k!}.
For complex numbers, the exponential function also satisfies
.. math ::
\exp(x+yi) = e^x (\cos y + i \sin y).
**Basic examples**
Some values of the exponential function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> exp(0)
1.0
>>> exp(1)
2.718281828459045235360287
>>> exp(-1)
0.3678794411714423215955238
>>> exp(inf)
+inf
>>> exp(-inf)
0.0
Arguments can be arbitrarily large::
>>> exp(10000)
8.806818225662921587261496e+4342
>>> exp(-10000)
1.135483865314736098540939e-4343
Evaluation is supported for interval arguments via
:func:`mpmath.iv.exp`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.exp([-inf,0])
[0.0, 1.0]
>>> iv.exp([0,1])
[1.0, 2.71828182845904523536028749558]
The exponential function can be evaluated efficiently to arbitrary
precision::
>>> mp.dps = 10000
>>> exp(pi) #doctest: +ELLIPSIS
23.140692632779269005729...8984304016040616
**Functional properties**
Numerical verification of Euler's identity for the complex
exponential function::
>>> mp.dps = 15
>>> exp(j*pi)+1
(0.0 + 1.22464679914735e-16j)
>>> chop(exp(j*pi)+1)
0.0
This recovers the coefficients (reciprocal factorials) in the
Maclaurin series expansion of exp::
>>> nprint(taylor(exp, 0, 5))
[1.0, 1.0, 0.5, 0.166667, 0.0416667, 0.00833333]
The exponential function is its own derivative and antiderivative::
>>> exp(pi)
23.1406926327793
>>> diff(exp, pi)
23.1406926327793
>>> quad(exp, [-inf, pi])
23.1406926327793
The exponential function can be evaluated using various methods,
including direct summation of the series, limits, and solving
the defining differential equation::
>>> nsum(lambda k: pi**k/fac(k), [0,inf])
23.1406926327793
>>> limit(lambda k: (1+pi/k)**k, inf)
23.1406926327793
>>> odefun(lambda t, x: x, 0, 1)(pi)
23.1406926327793
"""
cosh = r"""
Computes the hyperbolic cosine of `x`,
`\cosh(x) = (e^x + e^{-x})/2`. Values and limits include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> cosh(0)
1.0
>>> cosh(1)
1.543080634815243778477906
>>> cosh(-inf), cosh(+inf)
(+inf, +inf)
The hyperbolic cosine is an even, convex function with
a global minimum at `x = 0`, having a Maclaurin series
that starts::
>>> nprint(chop(taylor(cosh, 0, 5)))
[1.0, 0.0, 0.5, 0.0, 0.0416667, 0.0]
Generalized to complex numbers, the hyperbolic cosine is
equivalent to a cosine with the argument rotated
in the imaginary direction, or `\cosh x = \cos ix`::
>>> cosh(2+3j)
(-3.724545504915322565473971 + 0.5118225699873846088344638j)
>>> cos(3-2j)
(-3.724545504915322565473971 + 0.5118225699873846088344638j)
"""
sinh = r"""
Computes the hyperbolic sine of `x`,
`\sinh(x) = (e^x - e^{-x})/2`. Values and limits include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> sinh(0)
0.0
>>> sinh(1)
1.175201193643801456882382
>>> sinh(-inf), sinh(+inf)
(-inf, +inf)
The hyperbolic sine is an odd function, with a Maclaurin
series that starts::
>>> nprint(chop(taylor(sinh, 0, 5)))
[0.0, 1.0, 0.0, 0.166667, 0.0, 0.00833333]
Generalized to complex numbers, the hyperbolic sine is
essentially a sine with a rotation `i` applied to
the argument; more precisely, `\sinh x = -i \sin ix`::
>>> sinh(2+3j)
(-3.590564589985779952012565 + 0.5309210862485198052670401j)
>>> j*sin(3-2j)
(-3.590564589985779952012565 + 0.5309210862485198052670401j)
"""
tanh = r"""
Computes the hyperbolic tangent of `x`,
`\tanh(x) = \sinh(x)/\cosh(x)`. Values and limits include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> tanh(0)
0.0
>>> tanh(1)
0.7615941559557648881194583
>>> tanh(-inf), tanh(inf)
(-1.0, 1.0)
The hyperbolic tangent is an odd, sigmoidal function, similar
to the inverse tangent and error function. Its Maclaurin
series is::
>>> nprint(chop(taylor(tanh, 0, 5)))
[0.0, 1.0, 0.0, -0.333333, 0.0, 0.133333]
Generalized to complex numbers, the hyperbolic tangent is
essentially a tangent with a rotation `i` applied to
the argument; more precisely, `\tanh x = -i \tan ix`::
>>> tanh(2+3j)
(0.9653858790221331242784803 - 0.009884375038322493720314034j)
>>> j*tan(3-2j)
(0.9653858790221331242784803 - 0.009884375038322493720314034j)
"""
cos = r"""
Computes the cosine of `x`, `\cos(x)`.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> cos(pi/3)
0.5
>>> cos(100000001)
-0.9802850113244713353133243
>>> cos(2+3j)
(-4.189625690968807230132555 - 9.109227893755336597979197j)
>>> cos(inf)
nan
>>> nprint(chop(taylor(cos, 0, 6)))
[1.0, 0.0, -0.5, 0.0, 0.0416667, 0.0, -0.00138889]
Intervals are supported via :func:`mpmath.iv.cos`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.cos([0,1])
[0.540302305868139717400936602301, 1.0]
>>> iv.cos([0,2])
[-0.41614683654714238699756823214, 1.0]
"""
sin = r"""
Computes the sine of `x`, `\sin(x)`.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> sin(pi/3)
0.8660254037844386467637232
>>> sin(100000001)
0.1975887055794968911438743
>>> sin(2+3j)
(9.1544991469114295734673 - 4.168906959966564350754813j)
>>> sin(inf)
nan
>>> nprint(chop(taylor(sin, 0, 6)))
[0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333, 0.0]
Intervals are supported via :func:`mpmath.iv.sin`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.sin([0,1])
[0.0, 0.841470984807896506652502331201]
>>> iv.sin([0,2])
[0.0, 1.0]
"""
tan = r"""
Computes the tangent of `x`, `\tan(x) = \frac{\sin(x)}{\cos(x)}`.
The tangent function is singular at `x = (n+1/2)\pi`, but
``tan(x)`` always returns a finite result since `(n+1/2)\pi`
cannot be represented exactly using floating-point arithmetic.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> tan(pi/3)
1.732050807568877293527446
>>> tan(100000001)
-0.2015625081449864533091058
>>> tan(2+3j)
(-0.003764025641504248292751221 + 1.003238627353609801446359j)
>>> tan(inf)
nan
>>> nprint(chop(taylor(tan, 0, 6)))
[0.0, 1.0, 0.0, 0.333333, 0.0, 0.133333, 0.0]
Intervals are supported via :func:`mpmath.iv.tan`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.tan([0,1])
[0.0, 1.55740772465490223050697482944]
>>> iv.tan([0,2]) # Interval includes a singularity
[-inf, +inf]
"""
sec = r"""
Computes the secant of `x`, `\mathrm{sec}(x) = \frac{1}{\cos(x)}`.
The secant function is singular at `x = (n+1/2)\pi`, but
``sec(x)`` always returns a finite result since `(n+1/2)\pi`
cannot be represented exactly using floating-point arithmetic.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> sec(pi/3)
2.0
>>> sec(10000001)
-1.184723164360392819100265
>>> sec(2+3j)
(-0.04167496441114427004834991 + 0.0906111371962375965296612j)
>>> sec(inf)
nan
>>> nprint(chop(taylor(sec, 0, 6)))
[1.0, 0.0, 0.5, 0.0, 0.208333, 0.0, 0.0847222]
Intervals are supported via :func:`mpmath.iv.sec`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.sec([0,1])
[1.0, 1.85081571768092561791175326276]
>>> iv.sec([0,2]) # Interval includes a singularity
[-inf, +inf]
"""
csc = r"""
Computes the cosecant of `x`, `\mathrm{csc}(x) = \frac{1}{\sin(x)}`.
This cosecant function is singular at `x = n \pi`, but with the
exception of the point `x = 0`, ``csc(x)`` returns a finite result
since `n \pi` cannot be represented exactly using floating-point
arithmetic.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> csc(pi/3)
1.154700538379251529018298
>>> csc(10000001)
-1.864910497503629858938891
>>> csc(2+3j)
(0.09047320975320743980579048 + 0.04120098628857412646300981j)
>>> csc(inf)
nan
Intervals are supported via :func:`mpmath.iv.csc`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.csc([0,1]) # Interval includes a singularity
[1.18839510577812121626159943988, +inf]
>>> iv.csc([0,2])
[1.0, +inf]
"""
cot = r"""
Computes the cotangent of `x`,
`\mathrm{cot}(x) = \frac{1}{\tan(x)} = \frac{\cos(x)}{\sin(x)}`.
This cotangent function is singular at `x = n \pi`, but with the
exception of the point `x = 0`, ``cot(x)`` returns a finite result
since `n \pi` cannot be represented exactly using floating-point
arithmetic.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> cot(pi/3)
0.5773502691896257645091488
>>> cot(10000001)
1.574131876209625656003562
>>> cot(2+3j)
(-0.003739710376336956660117409 - 0.9967577965693583104609688j)
>>> cot(inf)
nan
Intervals are supported via :func:`mpmath.iv.cot`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.cot([0,1]) # Interval includes a singularity
[0.642092615934330703006419974862, +inf]
>>> iv.cot([1,2])
[-inf, +inf]
"""
acos = r"""
Computes the inverse cosine or arccosine of `x`, `\cos^{-1}(x)`.
Since `-1 \le \cos(x) \le 1` for real `x`, the inverse
cosine is real-valued only for `-1 \le x \le 1`. On this interval,
:func:`~mpmath.acos` is defined to be a monotonically decreasing
function assuming values between `+\pi` and `0`.
Basic values are::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> acos(-1)
3.141592653589793238462643
>>> acos(0)
1.570796326794896619231322
>>> acos(1)
0.0
>>> nprint(chop(taylor(acos, 0, 6)))
[1.5708, -1.0, 0.0, -0.166667, 0.0, -0.075, 0.0]
:func:`~mpmath.acos` is defined so as to be a proper inverse function of
`\cos(\theta)` for `0 \le \theta < \pi`.
We have `\cos(\cos^{-1}(x)) = x` for all `x`, but
`\cos^{-1}(\cos(x)) = x` only for `0 \le \Re[x] < \pi`::
>>> for x in [1, 10, -1, 2+3j, 10+3j]:
... print("%s %s" % (cos(acos(x)), acos(cos(x))))
...
1.0 1.0
(10.0 + 0.0j) 2.566370614359172953850574
-1.0 1.0
(2.0 + 3.0j) (2.0 + 3.0j)
(10.0 + 3.0j) (2.566370614359172953850574 - 3.0j)
The inverse cosine has two branch points: `x = \pm 1`. :func:`~mpmath.acos`
places the branch cuts along the line segments `(-\infty, -1)` and
`(+1, +\infty)`. In general,
.. math ::
\cos^{-1}(x) = \frac{\pi}{2} + i \log\left(ix + \sqrt{1-x^2} \right)
where the principal-branch log and square root are implied.
"""
asin = r"""
Computes the inverse sine or arcsine of `x`, `\sin^{-1}(x)`.
Since `-1 \le \sin(x) \le 1` for real `x`, the inverse
sine is real-valued only for `-1 \le x \le 1`.
On this interval, it is defined to be a monotonically increasing
function assuming values between `-\pi/2` and `\pi/2`.
Basic values are::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> asin(-1)
-1.570796326794896619231322
>>> asin(0)
0.0
>>> asin(1)
1.570796326794896619231322
>>> nprint(chop(taylor(asin, 0, 6)))
[0.0, 1.0, 0.0, 0.166667, 0.0, 0.075, 0.0]
:func:`~mpmath.asin` is defined so as to be a proper inverse function of
`\sin(\theta)` for `-\pi/2 < \theta < \pi/2`.
We have `\sin(\sin^{-1}(x)) = x` for all `x`, but
`\sin^{-1}(\sin(x)) = x` only for `-\pi/2 < \Re[x] < \pi/2`::
>>> for x in [1, 10, -1, 1+3j, -2+3j]:
... print("%s %s" % (chop(sin(asin(x))), asin(sin(x))))
...
1.0 1.0
10.0 -0.5752220392306202846120698
-1.0 -1.0
(1.0 + 3.0j) (1.0 + 3.0j)
(-2.0 + 3.0j) (-1.141592653589793238462643 - 3.0j)
The inverse sine has two branch points: `x = \pm 1`. :func:`~mpmath.asin`
places the branch cuts along the line segments `(-\infty, -1)` and
`(+1, +\infty)`. In general,
.. math ::
\sin^{-1}(x) = -i \log\left(ix + \sqrt{1-x^2} \right)
where the principal-branch log and square root are implied.
"""
atan = r"""
Computes the inverse tangent or arctangent of `x`, `\tan^{-1}(x)`.
This is a real-valued function for all real `x`, with range
`(-\pi/2, \pi/2)`.
Basic values are::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> atan(-inf)
-1.570796326794896619231322
>>> atan(-1)
-0.7853981633974483096156609
>>> atan(0)
0.0
>>> atan(1)
0.7853981633974483096156609
>>> atan(inf)
1.570796326794896619231322
>>> nprint(chop(taylor(atan, 0, 6)))
[0.0, 1.0, 0.0, -0.333333, 0.0, 0.2, 0.0]
The inverse tangent is often used to compute angles. However,
the atan2 function is often better for this as it preserves sign
(see :func:`~mpmath.atan2`).
:func:`~mpmath.atan` is defined so as to be a proper inverse function of
`\tan(\theta)` for `-\pi/2 < \theta < \pi/2`.
We have `\tan(\tan^{-1}(x)) = x` for all `x`, but
`\tan^{-1}(\tan(x)) = x` only for `-\pi/2 < \Re[x] < \pi/2`::
>>> mp.dps = 25
>>> for x in [1, 10, -1, 1+3j, -2+3j]:
... print("%s %s" % (tan(atan(x)), atan(tan(x))))
...
1.0 1.0
10.0 0.5752220392306202846120698
-1.0 -1.0
(1.0 + 3.0j) (1.000000000000000000000001 + 3.0j)
(-2.0 + 3.0j) (1.141592653589793238462644 + 3.0j)
The inverse tangent has two branch points: `x = \pm i`. :func:`~mpmath.atan`
places the branch cuts along the line segments `(-i \infty, -i)` and
`(+i, +i \infty)`. In general,
.. math ::
\tan^{-1}(x) = \frac{i}{2}\left(\log(1-ix)-\log(1+ix)\right)
where the principal-branch log is implied.
"""
acot = r"""Computes the inverse cotangent of `x`,
`\mathrm{cot}^{-1}(x) = \tan^{-1}(1/x)`."""
asec = r"""Computes the inverse secant of `x`,
`\mathrm{sec}^{-1}(x) = \cos^{-1}(1/x)`."""
acsc = r"""Computes the inverse cosecant of `x`,
`\mathrm{csc}^{-1}(x) = \sin^{-1}(1/x)`."""
coth = r"""Computes the hyperbolic cotangent of `x`,
`\mathrm{coth}(x) = \frac{\cosh(x)}{\sinh(x)}`.
"""
sech = r"""Computes the hyperbolic secant of `x`,
`\mathrm{sech}(x) = \frac{1}{\cosh(x)}`.
"""
csch = r"""Computes the hyperbolic cosecant of `x`,
`\mathrm{csch}(x) = \frac{1}{\sinh(x)}`.
"""
acosh = r"""Computes the inverse hyperbolic cosine of `x`,
`\mathrm{cosh}^{-1}(x) = \log(x+\sqrt{x+1}\sqrt{x-1})`.
"""
asinh = r"""Computes the inverse hyperbolic sine of `x`,
`\mathrm{sinh}^{-1}(x) = \log(x+\sqrt{1+x^2})`.
"""
atanh = r"""Computes the inverse hyperbolic tangent of `x`,
`\mathrm{tanh}^{-1}(x) = \frac{1}{2}\left(\log(1+x)-\log(1-x)\right)`.
"""
acoth = r"""Computes the inverse hyperbolic cotangent of `x`,
`\mathrm{coth}^{-1}(x) = \tanh^{-1}(1/x)`."""
asech = r"""Computes the inverse hyperbolic secant of `x`,
`\mathrm{sech}^{-1}(x) = \cosh^{-1}(1/x)`."""
acsch = r"""Computes the inverse hyperbolic cosecant of `x`,
`\mathrm{csch}^{-1}(x) = \sinh^{-1}(1/x)`."""
sinpi = r"""
Computes `\sin(\pi x)`, more accurately than the expression
``sin(pi*x)``::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> sinpi(10**10), sin(pi*(10**10))
(0.0, -2.23936276195592e-6)
>>> sinpi(10**10+0.5), sin(pi*(10**10+0.5))
(1.0, 0.999999999998721)
"""
cospi = r"""
Computes `\cos(\pi x)`, more accurately than the expression
``cos(pi*x)``::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> cospi(10**10), cos(pi*(10**10))
(1.0, 0.999999999997493)
>>> cospi(10**10+0.5), cos(pi*(10**10+0.5))
(0.0, 1.59960492420134e-6)
"""
sinc = r"""
``sinc(x)`` computes the unnormalized sinc function, defined as
.. math ::
\mathrm{sinc}(x) = \begin{cases}
\sin(x)/x, & \mbox{if } x \ne 0 \\
1, & \mbox{if } x = 0.
\end{cases}
See :func:`~mpmath.sincpi` for the normalized sinc function.
Simple values and limits include::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> sinc(0)
1.0
>>> sinc(1)
0.841470984807897
>>> sinc(inf)
0.0
The integral of the sinc function is the sine integral Si::
>>> quad(sinc, [0, 1])
0.946083070367183
>>> si(1)
0.946083070367183
"""
sincpi = r"""
``sincpi(x)`` computes the normalized sinc function, defined as
.. math ::
\mathrm{sinc}_{\pi}(x) = \begin{cases}
\sin(\pi x)/(\pi x), & \mbox{if } x \ne 0 \\
1, & \mbox{if } x = 0.
\end{cases}
Equivalently, we have
`\mathrm{sinc}_{\pi}(x) = \mathrm{sinc}(\pi x)`.
The normalization entails that the function integrates
to unity over the entire real line::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> quadosc(sincpi, [-inf, inf], period=2.0)
1.0
Like :func:`~mpmath.sinpi`, :func:`~mpmath.sincpi` is evaluated accurately
at its roots::
>>> sincpi(10)
0.0
"""
expj = r"""
Convenience function for computing `e^{ix}`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> expj(0)
(1.0 + 0.0j)
>>> expj(-1)
(0.5403023058681397174009366 - 0.8414709848078965066525023j)
>>> expj(j)
(0.3678794411714423215955238 + 0.0j)
>>> expj(1+j)
(0.1987661103464129406288032 + 0.3095598756531121984439128j)
"""
expjpi = r"""
Convenience function for computing `e^{i \pi x}`.
Evaluation is accurate near zeros (see also :func:`~mpmath.cospi`,
:func:`~mpmath.sinpi`)::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> expjpi(0)
(1.0 + 0.0j)
>>> expjpi(1)
(-1.0 + 0.0j)
>>> expjpi(0.5)
(0.0 + 1.0j)
>>> expjpi(-1)
(-1.0 + 0.0j)
>>> expjpi(j)
(0.04321391826377224977441774 + 0.0j)
>>> expjpi(1+j)
(-0.04321391826377224977441774 + 0.0j)
"""
floor = r"""
Computes the floor of `x`, `\lfloor x \rfloor`, defined as
the largest integer less than or equal to `x`::
>>> from mpmath import *
>>> mp.pretty = False
>>> floor(3.5)
mpf('3.0')
.. note ::
:func:`~mpmath.floor`, :func:`~mpmath.ceil` and :func:`~mpmath.nint` return a
floating-point number, not a Python ``int``. If `\lfloor x \rfloor` is
too large to be represented exactly at the present working precision,
the result will be rounded, not necessarily in the direction
implied by the mathematical definition of the function.
To avoid rounding, use *prec=0*::
>>> mp.dps = 15
>>> print(int(floor(10**30+1)))
1000000000000000019884624838656
>>> print(int(floor(10**30+1, prec=0)))
1000000000000000000000000000001
The floor function is defined for complex numbers and
acts on the real and imaginary parts separately::
>>> floor(3.25+4.75j)
mpc(real='3.0', imag='4.0')
"""
ceil = r"""
Computes the ceiling of `x`, `\lceil x \rceil`, defined as
the smallest integer greater than or equal to `x`::
>>> from mpmath import *
>>> mp.pretty = False
>>> ceil(3.5)
mpf('4.0')
The ceiling function is defined for complex numbers and
acts on the real and imaginary parts separately::
>>> ceil(3.25+4.75j)
mpc(real='4.0', imag='5.0')
See notes about rounding for :func:`~mpmath.floor`.
"""
nint = r"""
Evaluates the nearest integer function, `\mathrm{nint}(x)`.
This gives the nearest integer to `x`; on a tie, it
gives the nearest even integer::
>>> from mpmath import *
>>> mp.pretty = False
>>> nint(3.2)
mpf('3.0')
>>> nint(3.8)
mpf('4.0')
>>> nint(3.5)
mpf('4.0')
>>> nint(4.5)
mpf('4.0')
The nearest integer function is defined for complex numbers and
acts on the real and imaginary parts separately::
>>> nint(3.25+4.75j)
mpc(real='3.0', imag='5.0')
See notes about rounding for :func:`~mpmath.floor`.
"""
frac = r"""
Gives the fractional part of `x`, defined as
`\mathrm{frac}(x) = x - \lfloor x \rfloor` (see :func:`~mpmath.floor`).
In effect, this computes `x` modulo 1, or `x+n` where
`n \in \mathbb{Z}` is such that `x+n \in [0,1)`::
>>> from mpmath import *
>>> mp.pretty = False
>>> frac(1.25)
mpf('0.25')
>>> frac(3)
mpf('0.0')
>>> frac(-1.25)
mpf('0.75')
For a complex number, the fractional part function applies to
the real and imaginary parts separately::
>>> frac(2.25+3.75j)
mpc(real='0.25', imag='0.75')
Plotted, the fractional part function gives a sawtooth
wave. The Fourier series coefficients have a simple
form::
>>> mp.dps = 15
>>> nprint(fourier(lambda x: frac(x)-0.5, [0,1], 4))
([0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -0.31831, -0.159155, -0.106103, -0.0795775])
>>> nprint([-1/(pi*k) for k in range(1,5)])
[-0.31831, -0.159155, -0.106103, -0.0795775]
.. note::
The fractional part is sometimes defined as a symmetric
function, i.e. returning `-\mathrm{frac}(-x)` if `x < 0`.
This convention is used, for instance, by Mathematica's
``FractionalPart``.
"""
sign = r"""
Returns the sign of `x`, defined as `\mathrm{sign}(x) = x / |x|`
(with the special case `\mathrm{sign}(0) = 0`)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> sign(10)
mpf('1.0')
>>> sign(-10)
mpf('-1.0')
>>> sign(0)
mpf('0.0')
Note that the sign function is also defined for complex numbers,
for which it gives the projection onto the unit circle::
>>> mp.dps = 15; mp.pretty = True
>>> sign(1+j)
(0.707106781186547 + 0.707106781186547j)
"""
arg = r"""
Computes the complex argument (phase) of `x`, defined as the
signed angle between the positive real axis and `x` in the
complex plane::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> arg(3)
0.0
>>> arg(3+3j)
0.785398163397448
>>> arg(3j)
1.5707963267949
>>> arg(-3)
3.14159265358979
>>> arg(-3j)
-1.5707963267949
The angle is defined to satisfy `-\pi < \arg(x) \le \pi` and
with the sign convention that a nonnegative imaginary part
results in a nonnegative argument.
The value returned by :func:`~mpmath.arg` is an ``mpf`` instance.
"""
fabs = r"""
Returns the absolute value of `x`, `|x|`. Unlike :func:`abs`,
:func:`~mpmath.fabs` converts non-mpmath numbers (such as ``int``)
into mpmath numbers::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fabs(3)
mpf('3.0')
>>> fabs(-3)
mpf('3.0')
>>> fabs(3+4j)
mpf('5.0')
"""
re = r"""
Returns the real part of `x`, `\Re(x)`. Unlike ``x.real``,
:func:`~mpmath.re` converts `x` to a mpmath number::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> re(3)
mpf('3.0')
>>> re(-1+4j)
mpf('-1.0')
"""
im = r"""
Returns the imaginary part of `x`, `\Im(x)`. Unlike ``x.imag``,
:func:`~mpmath.im` converts `x` to a mpmath number::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> im(3)
mpf('0.0')
>>> im(-1+4j)
mpf('4.0')
"""
conj = r"""
Returns the complex conjugate of `x`, `\overline{x}`. Unlike
``x.conjugate()``, :func:`~mpmath.conj` converts `x` to a mpmath number::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> conj(3)
mpf('3.0')
>>> conj(-1+4j)
mpc(real='-1.0', imag='-4.0')
"""
polar = r"""
Returns the polar representation of the complex number `z`
as a pair `(r, \phi)` such that `z = r e^{i \phi}`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> polar(-2)
(2.0, 3.14159265358979)
>>> polar(3-4j)
(5.0, -0.927295218001612)
"""
rect = r"""
Returns the complex number represented by polar
coordinates `(r, \phi)`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> chop(rect(2, pi))
-2.0
>>> rect(sqrt(2), -pi/4)
(1.0 - 1.0j)
"""
expm1 = r"""
Computes `e^x - 1`, accurately for small `x`.
Unlike the expression ``exp(x) - 1``, ``expm1(x)`` does not suffer from
potentially catastrophic cancellation::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> exp(1e-10)-1; print(expm1(1e-10))
1.00000008274037e-10
1.00000000005e-10
>>> exp(1e-20)-1; print(expm1(1e-20))
0.0
1.0e-20
>>> 1/(exp(1e-20)-1)
Traceback (most recent call last):
...
ZeroDivisionError
>>> 1/expm1(1e-20)
1.0e+20
Evaluation works for extremely tiny values::
>>> expm1(0)
0.0
>>> expm1('1e-10000000')
1.0e-10000000
"""
powm1 = r"""
Computes `x^y - 1`, accurately when `x^y` is very close to 1.
This avoids potentially catastrophic cancellation::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> power(0.99999995, 1e-10) - 1
0.0
>>> powm1(0.99999995, 1e-10)
-5.00000012791934e-18
Powers exactly equal to 1, and only those powers, yield 0 exactly::
>>> powm1(-j, 4)
(0.0 + 0.0j)
>>> powm1(3, 0)
0.0
>>> powm1(fadd(-1, 1e-100, exact=True), 4)
-4.0e-100
Evaluation works for extremely tiny `y`::
>>> powm1(2, '1e-100000')
6.93147180559945e-100001
>>> powm1(j, '1e-1000')
(-1.23370055013617e-2000 + 1.5707963267949e-1000j)
"""
root = r"""
``root(z, n, k=0)`` computes an `n`-th root of `z`, i.e. returns a number
`r` that (up to possible approximation error) satisfies `r^n = z`.
(``nthroot`` is available as an alias for ``root``.)
Every complex number `z \ne 0` has `n` distinct `n`-th roots, which are
equidistant points on a circle with radius `|z|^{1/n}`, centered around the
origin. A specific root may be selected using the optional index
`k`. The roots are indexed counterclockwise, starting with `k = 0` for the root
closest to the positive real half-axis.
The `k = 0` root is the so-called principal `n`-th root, often denoted by
`\sqrt[n]{z}` or `z^{1/n}`, and also given by `\exp(\log(z) / n)`. If `z` is
a positive real number, the principal root is just the unique positive
`n`-th root of `z`. Under some circumstances, non-principal real roots exist:
for positive real `z`, `n` even, there is a negative root given by `k = n/2`;
for negative real `z`, `n` odd, there is a negative root given by `k = (n-1)/2`.
To obtain all roots with a simple expression, use
``[root(z,n,k) for k in range(n)]``.
An important special case, ``root(1, n, k)`` returns the `k`-th `n`-th root of
unity, `\zeta_k = e^{2 \pi i k / n}`. Alternatively, :func:`~mpmath.unitroots`
provides a slightly more convenient way to obtain the roots of unity,
including the option to compute only the primitive roots of unity.
Both `k` and `n` should be integers; `k` outside of ``range(n)`` will be
reduced modulo `n`. If `n` is negative, `x^{-1/n} = 1/x^{1/n}` (or
the equivalent reciprocal for a non-principal root with `k \ne 0`) is computed.
:func:`~mpmath.root` is implemented to use Newton's method for small
`n`. At high precision, this makes `x^{1/n}` not much more
expensive than the regular exponentiation, `x^n`. For very large
`n`, :func:`~mpmath.nthroot` falls back to use the exponential function.
**Examples**
:func:`~mpmath.nthroot`/:func:`~mpmath.root` is faster and more accurate than raising to a
floating-point fraction::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> 16807 ** (mpf(1)/5)
mpf('7.0000000000000009')
>>> root(16807, 5)
mpf('7.0')
>>> nthroot(16807, 5) # Alias
mpf('7.0')
A high-precision root::
>>> mp.dps = 50; mp.pretty = True
>>> nthroot(10, 5)
1.584893192461113485202101373391507013269442133825
>>> nthroot(10, 5) ** 5
10.0
Computing principal and non-principal square and cube roots::
>>> mp.dps = 15
>>> root(10, 2)
3.16227766016838
>>> root(10, 2, 1)
-3.16227766016838
>>> root(-10, 3)
(1.07721734501594 + 1.86579517236206j)
>>> root(-10, 3, 1)
-2.15443469003188
>>> root(-10, 3, 2)
(1.07721734501594 - 1.86579517236206j)
All the 7th roots of a complex number::
>>> for r in [root(3+4j, 7, k) for k in range(7)]:
... print("%s %s" % (r, r**7))
...
(1.24747270589553 + 0.166227124177353j) (3.0 + 4.0j)
(0.647824911301003 + 1.07895435170559j) (3.0 + 4.0j)
(-0.439648254723098 + 1.17920694574172j) (3.0 + 4.0j)
(-1.19605731775069 + 0.391492658196305j) (3.0 + 4.0j)
(-1.05181082538903 - 0.691023585965793j) (3.0 + 4.0j)
(-0.115529328478668 - 1.25318497558335j) (3.0 + 4.0j)
(0.907748109144957 - 0.871672518271819j) (3.0 + 4.0j)
Cube roots of unity::
>>> for k in range(3): print(root(1, 3, k))
...
1.0
(-0.5 + 0.866025403784439j)
(-0.5 - 0.866025403784439j)
Some exact high order roots::
>>> root(75**210, 105)
5625.0
>>> root(1, 128, 96)
(0.0 - 1.0j)
>>> root(4**128, 128, 96)
(0.0 - 4.0j)
"""
unitroots = r"""
``unitroots(n)`` returns `\zeta_0, \zeta_1, \ldots, \zeta_{n-1}`,
all the distinct `n`-th roots of unity, as a list. If the option
*primitive=True* is passed, only the primitive roots are returned.
Every `n`-th root of unity satisfies `(\zeta_k)^n = 1`. There are `n` distinct
roots for each `n` (`\zeta_k` and `\zeta_j` are the same when
`k = j \pmod n`), which form a regular polygon with vertices on the unit
circle. They are ordered counterclockwise with increasing `k`, starting
with `\zeta_0 = 1`.
**Examples**
The roots of unity up to `n = 4`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint(unitroots(1))
[1.0]
>>> nprint(unitroots(2))
[1.0, -1.0]
>>> nprint(unitroots(3))
[1.0, (-0.5 + 0.866025j), (-0.5 - 0.866025j)]
>>> nprint(unitroots(4))
[1.0, (0.0 + 1.0j), -1.0, (0.0 - 1.0j)]
Roots of unity form a geometric series that sums to 0::
>>> mp.dps = 50
>>> chop(fsum(unitroots(25)))
0.0
Primitive roots up to `n = 4`::
>>> mp.dps = 15
>>> nprint(unitroots(1, primitive=True))
[1.0]
>>> nprint(unitroots(2, primitive=True))
[-1.0]
>>> nprint(unitroots(3, primitive=True))
[(-0.5 + 0.866025j), (-0.5 - 0.866025j)]
>>> nprint(unitroots(4, primitive=True))
[(0.0 + 1.0j), (0.0 - 1.0j)]
There are only four primitive 12th roots::
>>> nprint(unitroots(12, primitive=True))
[(0.866025 + 0.5j), (-0.866025 + 0.5j), (-0.866025 - 0.5j), (0.866025 - 0.5j)]
The `n`-th roots of unity form a group, the cyclic group of order `n`.
Any primitive root `r` is a generator for this group, meaning that
`r^0, r^1, \ldots, r^{n-1}` gives the whole set of unit roots (in
some permuted order)::
>>> for r in unitroots(6): print(r)
...
1.0
(0.5 + 0.866025403784439j)
(-0.5 + 0.866025403784439j)
-1.0
(-0.5 - 0.866025403784439j)
(0.5 - 0.866025403784439j)
>>> r = unitroots(6, primitive=True)[1]
>>> for k in range(6): print(chop(r**k))
...
1.0
(0.5 - 0.866025403784439j)
(-0.5 - 0.866025403784439j)
-1.0
(-0.5 + 0.866025403784438j)
(0.5 + 0.866025403784438j)
The number of primitive roots equals the Euler totient function `\phi(n)`::
>>> [len(unitroots(n, primitive=True)) for n in range(1,20)]
[1, 1, 2, 2, 4, 2, 6, 4, 6, 4, 10, 4, 12, 6, 8, 8, 16, 6, 18]
"""
log = r"""
Computes the base-`b` logarithm of `x`, `\log_b(x)`. If `b` is
unspecified, :func:`~mpmath.log` computes the natural (base `e`) logarithm
and is equivalent to :func:`~mpmath.ln`. In general, the base `b` logarithm
is defined in terms of the natural logarithm as
`\log_b(x) = \ln(x)/\ln(b)`.
By convention, we take `\log(0) = -\infty`.
The natural logarithm is real if `x > 0` and complex if `x < 0` or if
`x` is complex. The principal branch of the complex logarithm is
used, meaning that `\Im(\ln(x)) = \arg(x)` with `-\pi < \arg(x) \le \pi`.
**Examples**
Some basic values and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> log(1)
0.0
>>> log(2)
0.693147180559945
>>> log(1000,10)
3.0
>>> log(4, 16)
0.5
>>> log(j)
(0.0 + 1.5707963267949j)
>>> log(-1)
(0.0 + 3.14159265358979j)
>>> log(0)
-inf
>>> log(inf)
+inf
The natural logarithm is the antiderivative of `1/x`::
>>> quad(lambda x: 1/x, [1, 5])
1.6094379124341
>>> log(5)
1.6094379124341
>>> diff(log, 10)
0.1
The Taylor series expansion of the natural logarithm around
`x = 1` has coefficients `(-1)^{n+1}/n`::
>>> nprint(taylor(log, 1, 7))
[0.0, 1.0, -0.5, 0.333333, -0.25, 0.2, -0.166667, 0.142857]
:func:`~mpmath.log` supports arbitrary precision evaluation::
>>> mp.dps = 50
>>> log(pi)
1.1447298858494001741434273513530587116472948129153
>>> log(pi, pi**3)
0.33333333333333333333333333333333333333333333333333
>>> mp.dps = 25
>>> log(3+4j)
(1.609437912434100374600759 + 0.9272952180016122324285125j)
"""
log10 = r"""
Computes the base-10 logarithm of `x`, `\log_{10}(x)`. ``log10(x)``
is equivalent to ``log(x, 10)``.
"""
fmod = r"""
Converts `x` and `y` to mpmath numbers and returns `x \mod y`.
For mpmath numbers, this is equivalent to ``x % y``.
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> fmod(100, pi)
2.61062773871641
You can use :func:`~mpmath.fmod` to compute fractional parts of numbers::
>>> fmod(10.25, 1)
0.25
"""
radians = r"""
Converts the degree angle `x` to radians::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> radians(60)
1.0471975511966
"""
degrees = r"""
Converts the radian angle `x` to a degree angle::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> degrees(pi/3)
60.0
"""
atan2 = r"""
Computes the two-argument arctangent, `\mathrm{atan2}(y, x)`,
giving the signed angle between the positive `x`-axis and the
point `(x, y)` in the 2D plane. This function is defined for
real `x` and `y` only.
The two-argument arctangent essentially computes
`\mathrm{atan}(y/x)`, but accounts for the signs of both
`x` and `y` to give the angle for the correct quadrant. The
following examples illustrate the difference::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> atan2(1,1), atan(1/1.)
(0.785398163397448, 0.785398163397448)
>>> atan2(1,-1), atan(1/-1.)
(2.35619449019234, -0.785398163397448)
>>> atan2(-1,1), atan(-1/1.)
(-0.785398163397448, -0.785398163397448)
>>> atan2(-1,-1), atan(-1/-1.)
(-2.35619449019234, 0.785398163397448)
The angle convention is the same as that used for the complex
argument; see :func:`~mpmath.arg`.
"""
fibonacci = r"""
``fibonacci(n)`` computes the `n`-th Fibonacci number, `F(n)`. The
Fibonacci numbers are defined by the recurrence `F(n) = F(n-1) + F(n-2)`
with the initial values `F(0) = 0`, `F(1) = 1`. :func:`~mpmath.fibonacci`
extends this definition to arbitrary real and complex arguments
using the formula
.. math ::
F(z) = \frac{\phi^z - \cos(\pi z) \phi^{-z}}{\sqrt 5}
where `\phi` is the golden ratio. :func:`~mpmath.fibonacci` also uses this
continuous formula to compute `F(n)` for extremely large `n`, where
calculating the exact integer would be wasteful.
For convenience, :func:`~mpmath.fib` is available as an alias for
:func:`~mpmath.fibonacci`.
**Basic examples**
Some small Fibonacci numbers are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for i in range(10):
... print(fibonacci(i))
...
0.0
1.0
1.0
2.0
3.0
5.0
8.0
13.0
21.0
34.0
>>> fibonacci(50)
12586269025.0
The recurrence for `F(n)` extends backwards to negative `n`::
>>> for i in range(10):
... print(fibonacci(-i))
...
0.0
1.0
-1.0
2.0
-3.0
5.0
-8.0
13.0
-21.0
34.0
Large Fibonacci numbers will be computed approximately unless
the precision is set high enough::
>>> fib(200)
2.8057117299251e+41
>>> mp.dps = 45
>>> fib(200)
280571172992510140037611932413038677189525.0
:func:`~mpmath.fibonacci` can compute approximate Fibonacci numbers
of stupendous size::
>>> mp.dps = 15
>>> fibonacci(10**25)
3.49052338550226e+2089876402499787337692720
**Real and complex arguments**
The extended Fibonacci function is an analytic function. The
property `F(z) = F(z-1) + F(z-2)` holds for arbitrary `z`::
>>> mp.dps = 15
>>> fib(pi)
2.1170270579161
>>> fib(pi-1) + fib(pi-2)
2.1170270579161
>>> fib(3+4j)
(-5248.51130728372 - 14195.962288353j)
>>> fib(2+4j) + fib(1+4j)
(-5248.51130728372 - 14195.962288353j)
The Fibonacci function has infinitely many roots on the
negative half-real axis. The first root is at 0, the second is
close to -0.18, and then there are infinitely many roots that
asymptotically approach `-n+1/2`::
>>> findroot(fib, -0.2)
-0.183802359692956
>>> findroot(fib, -2)
-1.57077646820395
>>> findroot(fib, -17)
-16.4999999596115
>>> findroot(fib, -24)
-23.5000000000479
**Mathematical relationships**
For large `n`, `F(n+1)/F(n)` approaches the golden ratio::
>>> mp.dps = 50
>>> fibonacci(101)/fibonacci(100)
1.6180339887498948482045868343656381177203127439638
>>> +phi
1.6180339887498948482045868343656381177203091798058
The sum of reciprocal Fibonacci numbers converges to an irrational
number for which no closed form expression is known::
>>> mp.dps = 15
>>> nsum(lambda n: 1/fib(n), [1, inf])
3.35988566624318
Amazingly, however, the sum of odd-index reciprocal Fibonacci
numbers can be expressed in terms of a Jacobi theta function::
>>> nsum(lambda n: 1/fib(2*n+1), [0, inf])
1.82451515740692
>>> sqrt(5)*jtheta(2,0,(3-sqrt(5))/2)**2/4
1.82451515740692
Some related sums can be done in closed form::
>>> nsum(lambda k: 1/(1+fib(2*k+1)), [0, inf])
1.11803398874989
>>> phi - 0.5
1.11803398874989
>>> f = lambda k:(-1)**(k+1) / sum(fib(n)**2 for n in range(1,int(k+1)))
>>> nsum(f, [1, inf])
0.618033988749895
>>> phi-1
0.618033988749895
**References**
1. http://mathworld.wolfram.com/FibonacciNumber.html
"""
altzeta = r"""
Gives the Dirichlet eta function, `\eta(s)`, also known as the
alternating zeta function. This function is defined in analogy
with the Riemann zeta function as providing the sum of the
alternating series
.. math ::
\eta(s) = \sum_{k=0}^{\infty} \frac{(-1)^k}{(k+1)^s}
= 1-\frac{1}{2^s}+\frac{1}{3^s}-\frac{1}{4^s}+\ldots
The eta function, unlike the Riemann zeta function, is an entire
function, having a finite value for all complex `s`. The special case
`\eta(1) = \log(2)` gives the value of the alternating harmonic series.
The alternating zeta function may be expressed using the Riemann zeta function
as `\eta(s) = (1 - 2^{1-s}) \zeta(s)`. It can also be expressed
in terms of the Hurwitz zeta function, for example using
:func:`~mpmath.dirichlet` (see documentation for that function).
**Examples**
Some special values are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> altzeta(1)
0.693147180559945
>>> altzeta(0)
0.5
>>> altzeta(-1)
0.25
>>> altzeta(-2)
0.0
An example of a sum that can be computed more accurately and
efficiently via :func:`~mpmath.altzeta` than via numerical summation::
>>> sum(-(-1)**n / mpf(n)**2.5 for n in range(1, 100))
0.867204951503984
>>> altzeta(2.5)
0.867199889012184
At positive even integers, the Dirichlet eta function
evaluates to a rational multiple of a power of `\pi`::
>>> altzeta(2)
0.822467033424113
>>> pi**2/12
0.822467033424113
Like the Riemann zeta function, `\eta(s)`, approaches 1
as `s` approaches positive infinity, although it does
so from below rather than from above::
>>> altzeta(30)
0.999999999068682
>>> altzeta(inf)
1.0
>>> mp.pretty = False
>>> altzeta(1000, rounding='d')
mpf('0.99999999999999989')
>>> altzeta(1000, rounding='u')
mpf('1.0')
**References**
1. http://mathworld.wolfram.com/DirichletEtaFunction.html
2. http://en.wikipedia.org/wiki/Dirichlet_eta_function
"""
factorial = r"""
Computes the factorial, `x!`. For integers `n \ge 0`, we have
`n! = 1 \cdot 2 \cdots (n-1) \cdot n` and more generally the factorial
is defined for real or complex `x` by `x! = \Gamma(x+1)`.
**Examples**
Basic values and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for k in range(6):
... print("%s %s" % (k, fac(k)))
...
0 1.0
1 1.0
2 2.0
3 6.0
4 24.0
5 120.0
>>> fac(inf)
+inf
>>> fac(0.5), sqrt(pi)/2
(0.886226925452758, 0.886226925452758)
For large positive `x`, `x!` can be approximated by
Stirling's formula::
>>> x = 10**10
>>> fac(x)
2.32579620567308e+95657055186
>>> sqrt(2*pi*x)*(x/e)**x
2.32579597597705e+95657055186
:func:`~mpmath.fac` supports evaluation for astronomically large values::
>>> fac(10**30)
6.22311232304258e+29565705518096748172348871081098
Reciprocal factorials appear in the Taylor series of the
exponential function (among many other contexts)::
>>> nsum(lambda k: 1/fac(k), [0, inf]), exp(1)
(2.71828182845905, 2.71828182845905)
>>> nsum(lambda k: pi**k/fac(k), [0, inf]), exp(pi)
(23.1406926327793, 23.1406926327793)
"""
gamma = r"""
Computes the gamma function, `\Gamma(x)`. The gamma function is a
shifted version of the ordinary factorial, satisfying
`\Gamma(n) = (n-1)!` for integers `n > 0`. More generally, it
is defined by
.. math ::
\Gamma(x) = \int_0^{\infty} t^{x-1} e^{-t}\, dt
for any real or complex `x` with `\Re(x) > 0` and for `\Re(x) < 0`
by analytic continuation.
**Examples**
Basic values and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for k in range(1, 6):
... print("%s %s" % (k, gamma(k)))
...
1 1.0
2 1.0
3 2.0
4 6.0
5 24.0
>>> gamma(inf)
+inf
>>> gamma(0)
Traceback (most recent call last):
...
ValueError: gamma function pole
The gamma function of a half-integer is a rational multiple of
`\sqrt{\pi}`::
>>> gamma(0.5), sqrt(pi)
(1.77245385090552, 1.77245385090552)
>>> gamma(1.5), sqrt(pi)/2
(0.886226925452758, 0.886226925452758)
We can check the integral definition::
>>> gamma(3.5)
3.32335097044784
>>> quad(lambda t: t**2.5*exp(-t), [0,inf])
3.32335097044784
:func:`~mpmath.gamma` supports arbitrary-precision evaluation and
complex arguments::
>>> mp.dps = 50
>>> gamma(sqrt(3))
0.91510229697308632046045539308226554038315280564184
>>> mp.dps = 25
>>> gamma(2j)
(0.009902440080927490985955066 - 0.07595200133501806872408048j)
Arguments can also be large. Note that the gamma function grows
very quickly::
>>> mp.dps = 15
>>> gamma(10**20)
1.9328495143101e+1956570551809674817225
"""
psi = r"""
Gives the polygamma function of order `m` of `z`, `\psi^{(m)}(z)`.
Special cases are known as the *digamma function* (`\psi^{(0)}(z)`),
the *trigamma function* (`\psi^{(1)}(z)`), etc. The polygamma
functions are defined as the logarithmic derivatives of the gamma
function:
.. math ::
\psi^{(m)}(z) = \left(\frac{d}{dz}\right)^{m+1} \log \Gamma(z)
In particular, `\psi^{(0)}(z) = \Gamma'(z)/\Gamma(z)`. In the
present implementation of :func:`~mpmath.psi`, the order `m` must be a
nonnegative integer, while the argument `z` may be an arbitrary
complex number (with exception for the polygamma function's poles
at `z = 0, -1, -2, \ldots`).
**Examples**
For various rational arguments, the polygamma function reduces to
a combination of standard mathematical constants::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> psi(0, 1), -euler
(-0.5772156649015328606065121, -0.5772156649015328606065121)
>>> psi(1, '1/4'), pi**2+8*catalan
(17.19732915450711073927132, 17.19732915450711073927132)
>>> psi(2, '1/2'), -14*apery
(-16.82879664423431999559633, -16.82879664423431999559633)
The polygamma functions are derivatives of each other::
>>> diff(lambda x: psi(3, x), pi), psi(4, pi)
(-0.1105749312578862734526952, -0.1105749312578862734526952)
>>> quad(lambda x: psi(4, x), [2, 3]), psi(3,3)-psi(3,2)
(-0.375, -0.375)
The digamma function diverges logarithmically as `z \to \infty`,
while higher orders tend to zero::
>>> psi(0,inf), psi(1,inf), psi(2,inf)
(+inf, 0.0, 0.0)
Evaluation for a complex argument::
>>> psi(2, -1-2j)
(0.03902435405364952654838445 + 0.1574325240413029954685366j)
Evaluation is supported for large orders `m` and/or large
arguments `z`::
>>> psi(3, 10**100)
2.0e-300
>>> psi(250, 10**30+10**20*j)
(-1.293142504363642687204865e-7010 + 3.232856260909107391513108e-7018j)
**Application to infinite series**
Any infinite series where the summand is a rational function of
the index `k` can be evaluated in closed form in terms of polygamma
functions of the roots and poles of the summand::
>>> a = sqrt(2)
>>> b = sqrt(3)
>>> nsum(lambda k: 1/((k+a)**2*(k+b)), [0, inf])
0.4049668927517857061917531
>>> (psi(0,a)-psi(0,b)-a*psi(1,a)+b*psi(1,a))/(a-b)**2
0.4049668927517857061917531
This follows from the series representation (`m > 0`)
.. math ::
\psi^{(m)}(z) = (-1)^{m+1} m! \sum_{k=0}^{\infty}
\frac{1}{(z+k)^{m+1}}.
Since the roots of a polynomial may be complex, it is sometimes
necessary to use the complex polygamma function to evaluate
an entirely real-valued sum::
>>> nsum(lambda k: 1/(k**2-2*k+3), [0, inf])
1.694361433907061256154665
>>> nprint(polyroots([1,-2,3]))
[(1.0 - 1.41421j), (1.0 + 1.41421j)]
>>> r1 = 1-sqrt(2)*j
>>> r2 = r1.conjugate()
>>> (psi(0,-r2)-psi(0,-r1))/(r1-r2)
(1.694361433907061256154665 + 0.0j)
"""
digamma = r"""
Shortcut for ``psi(0,z)``.
"""
harmonic = r"""
If `n` is an integer, ``harmonic(n)`` gives a floating-point
approximation of the `n`-th harmonic number `H(n)`, defined as
.. math ::
H(n) = 1 + \frac{1}{2} + \frac{1}{3} + \ldots + \frac{1}{n}
The first few harmonic numbers are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(8):
... print("%s %s" % (n, harmonic(n)))
...
0 0.0
1 1.0
2 1.5
3 1.83333333333333
4 2.08333333333333
5 2.28333333333333
6 2.45
7 2.59285714285714
The infinite harmonic series `1 + 1/2 + 1/3 + \ldots` diverges::
>>> harmonic(inf)
+inf
:func:`~mpmath.harmonic` is evaluated using the digamma function rather
than by summing the harmonic series term by term. It can therefore
be computed quickly for arbitrarily large `n`, and even for
nonintegral arguments::
>>> harmonic(10**100)
230.835724964306
>>> harmonic(0.5)
0.613705638880109
>>> harmonic(3+4j)
(2.24757548223494 + 0.850502209186044j)
:func:`~mpmath.harmonic` supports arbitrary precision evaluation::
>>> mp.dps = 50
>>> harmonic(11)
3.0198773448773448773448773448773448773448773448773
>>> harmonic(pi)
1.8727388590273302654363491032336134987519132374152
The harmonic series diverges, but at a glacial pace. It is possible
to calculate the exact number of terms required before the sum
exceeds a given amount, say 100::
>>> mp.dps = 50
>>> v = 10**findroot(lambda x: harmonic(10**x) - 100, 10)
>>> v
15092688622113788323693563264538101449859496.864101
>>> v = int(ceil(v))
>>> print(v)
15092688622113788323693563264538101449859497
>>> harmonic(v-1)
99.999999999999999999999999999999999999999999942747
>>> harmonic(v)
100.000000000000000000000000000000000000000000009
"""
bernoulli = r"""
Computes the nth Bernoulli number, `B_n`, for any integer `n \ge 0`.
The Bernoulli numbers are rational numbers, but this function
returns a floating-point approximation. To obtain an exact
fraction, use :func:`~mpmath.bernfrac` instead.
**Examples**
Numerical values of the first few Bernoulli numbers::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(15):
... print("%s %s" % (n, bernoulli(n)))
...
0 1.0
1 -0.5
2 0.166666666666667
3 0.0
4 -0.0333333333333333
5 0.0
6 0.0238095238095238
7 0.0
8 -0.0333333333333333
9 0.0
10 0.0757575757575758
11 0.0
12 -0.253113553113553
13 0.0
14 1.16666666666667
Bernoulli numbers can be approximated with arbitrary precision::
>>> mp.dps = 50
>>> bernoulli(100)
-2.8382249570693706959264156336481764738284680928013e+78
Arbitrarily large `n` are supported::
>>> mp.dps = 15
>>> bernoulli(10**20 + 2)
3.09136296657021e+1876752564973863312327
The Bernoulli numbers are related to the Riemann zeta function
at integer arguments::
>>> -bernoulli(8) * (2*pi)**8 / (2*fac(8))
1.00407735619794
>>> zeta(8)
1.00407735619794
**Algorithm**
For small `n` (`n < 3000`) :func:`~mpmath.bernoulli` uses a recurrence
formula due to Ramanujan. All results in this range are cached,
so sequential computation of small Bernoulli numbers is
guaranteed to be fast.
For larger `n`, `B_n` is evaluated in terms of the Riemann zeta
function.
"""
stieltjes = r"""
For a nonnegative integer `n`, ``stieltjes(n)`` computes the
`n`-th Stieltjes constant `\gamma_n`, defined as the
`n`-th coefficient in the Laurent series expansion of the
Riemann zeta function around the pole at `s = 1`. That is,
we have:
.. math ::
\zeta(s) = \frac{1}{s-1} \sum_{n=0}^{\infty}
\frac{(-1)^n}{n!} \gamma_n (s-1)^n
More generally, ``stieltjes(n, a)`` gives the corresponding
coefficient `\gamma_n(a)` for the Hurwitz zeta function
`\zeta(s,a)` (with `\gamma_n = \gamma_n(1)`).
**Examples**
The zeroth Stieltjes constant is just Euler's constant `\gamma`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> stieltjes(0)
0.577215664901533
Some more values are::
>>> stieltjes(1)
-0.0728158454836767
>>> stieltjes(10)
0.000205332814909065
>>> stieltjes(30)
0.00355772885557316
>>> stieltjes(1000)
-1.57095384420474e+486
>>> stieltjes(2000)
2.680424678918e+1109
>>> stieltjes(1, 2.5)
-0.23747539175716
An alternative way to compute `\gamma_1`::
>>> diff(extradps(15)(lambda x: 1/(x-1) - zeta(x)), 1)
-0.0728158454836767
:func:`~mpmath.stieltjes` supports arbitrary precision evaluation::
>>> mp.dps = 50
>>> stieltjes(2)
-0.0096903631928723184845303860352125293590658061013408
**Algorithm**
:func:`~mpmath.stieltjes` numerically evaluates the integral in
the following representation due to Ainsworth, Howell and
Coffey [1], [2]:
.. math ::
\gamma_n(a) = \frac{\log^n a}{2a} - \frac{\log^{n+1}(a)}{n+1} +
\frac{2}{a} \Re \int_0^{\infty}
\frac{(x/a-i)\log^n(a-ix)}{(1+x^2/a^2)(e^{2\pi x}-1)} dx.
For some reference values with `a = 1`, see e.g. [4].
**References**
1. O. R. Ainsworth & L. W. Howell, "An integral representation of
the generalized Euler-Mascheroni constants", NASA Technical
Paper 2456 (1985),
http://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19850014994_1985014994.pdf
2. M. W. Coffey, "The Stieltjes constants, their relation to the
`\eta_j` coefficients, and representation of the Hurwitz
zeta function", arXiv:0706.0343v1 http://arxiv.org/abs/0706.0343
3. http://mathworld.wolfram.com/StieltjesConstants.html
4. http://pi.lacim.uqam.ca/piDATA/stieltjesgamma.txt
"""
gammaprod = r"""
Given iterables `a` and `b`, ``gammaprod(a, b)`` computes the
product / quotient of gamma functions:
.. math ::
\frac{\Gamma(a_0) \Gamma(a_1) \cdots \Gamma(a_p)}
{\Gamma(b_0) \Gamma(b_1) \cdots \Gamma(b_q)}
Unlike direct calls to :func:`~mpmath.gamma`, :func:`~mpmath.gammaprod` considers
the entire product as a limit and evaluates this limit properly if
any of the numerator or denominator arguments are nonpositive
integers such that poles of the gamma function are encountered.
That is, :func:`~mpmath.gammaprod` evaluates
.. math ::
\lim_{\epsilon \to 0}
\frac{\Gamma(a_0+\epsilon) \Gamma(a_1+\epsilon) \cdots
\Gamma(a_p+\epsilon)}
{\Gamma(b_0+\epsilon) \Gamma(b_1+\epsilon) \cdots
\Gamma(b_q+\epsilon)}
In particular:
* If there are equally many poles in the numerator and the
denominator, the limit is a rational number times the remaining,
regular part of the product.
* If there are more poles in the numerator, :func:`~mpmath.gammaprod`
returns ``+inf``.
* If there are more poles in the denominator, :func:`~mpmath.gammaprod`
returns 0.
**Examples**
The reciprocal gamma function `1/\Gamma(x)` evaluated at `x = 0`::
>>> from mpmath import *
>>> mp.dps = 15
>>> gammaprod([], [0])
0.0
A limit::
>>> gammaprod([-4], [-3])
-0.25
>>> limit(lambda x: gamma(x-1)/gamma(x), -3, direction=1)
-0.25
>>> limit(lambda x: gamma(x-1)/gamma(x), -3, direction=-1)
-0.25
"""
beta = r"""
Computes the beta function,
`B(x,y) = \Gamma(x) \Gamma(y) / \Gamma(x+y)`.
The beta function is also commonly defined by the integral
representation
.. math ::
B(x,y) = \int_0^1 t^{x-1} (1-t)^{y-1} \, dt
**Examples**
For integer and half-integer arguments where all three gamma
functions are finite, the beta function becomes either a rational
number or a rational multiple of `\pi`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> beta(5, 2)
0.0333333333333333
>>> beta(1.5, 2)
0.266666666666667
>>> 16*beta(2.5, 1.5)
3.14159265358979
Where appropriate, :func:`~mpmath.beta` evaluates limits. A pole
of the beta function is taken to result in ``+inf``::
>>> beta(-0.5, 0.5)
0.0
>>> beta(-3, 3)
-0.333333333333333
>>> beta(-2, 3)
+inf
>>> beta(inf, 1)
0.0
>>> beta(inf, 0)
nan
:func:`~mpmath.beta` supports complex numbers and arbitrary precision
evaluation::
>>> beta(1, 2+j)
(0.4 - 0.2j)
>>> mp.dps = 25
>>> beta(j,0.5)
(1.079424249270925780135675 - 1.410032405664160838288752j)
>>> mp.dps = 50
>>> beta(pi, e)
0.037890298781212201348153837138927165984170287886464
Various integrals can be computed by means of the
beta function::
>>> mp.dps = 15
>>> quad(lambda t: t**2.5*(1-t)**2, [0, 1])
0.0230880230880231
>>> beta(3.5, 3)
0.0230880230880231
>>> quad(lambda t: sin(t)**4 * sqrt(cos(t)), [0, pi/2])
0.319504062596158
>>> beta(2.5, 0.75)/2
0.319504062596158
"""
betainc = r"""
``betainc(a, b, x1=0, x2=1, regularized=False)`` gives the generalized
incomplete beta function,
.. math ::
I_{x_1}^{x_2}(a,b) = \int_{x_1}^{x_2} t^{a-1} (1-t)^{b-1} dt.
When `x_1 = 0, x_2 = 1`, this reduces to the ordinary (complete)
beta function `B(a,b)`; see :func:`~mpmath.beta`.
With the keyword argument ``regularized=True``, :func:`~mpmath.betainc`
computes the regularized incomplete beta function
`I_{x_1}^{x_2}(a,b) / B(a,b)`. This is the cumulative distribution of the
beta distribution with parameters `a`, `b`.
.. note ::
Implementations of the incomplete beta function in some other
software use a different argument order. For example, Mathematica uses the
reversed argument order ``Beta[x1,x2,a,b]``. For the equivalent of SciPy's
three-argument incomplete beta integral (implicitly with `x1 = 0`), use
``betainc(a,b,0,x2,regularized=True)``.
**Examples**
Verifying that :func:`~mpmath.betainc` computes the integral in the
definition::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> x,y,a,b = 3, 4, 0, 6
>>> betainc(x, y, a, b)
-4010.4
>>> quad(lambda t: t**(x-1) * (1-t)**(y-1), [a, b])
-4010.4
The arguments may be arbitrary complex numbers::
>>> betainc(0.75, 1-4j, 0, 2+3j)
(0.2241657956955709603655887 + 0.3619619242700451992411724j)
With regularization::
>>> betainc(1, 2, 0, 0.25, regularized=True)
0.4375
>>> betainc(pi, e, 0, 1, regularized=True) # Complete
1.0
The beta integral satisfies some simple argument transformation
symmetries::
>>> mp.dps = 15
>>> betainc(2,3,4,5), -betainc(2,3,5,4), betainc(3,2,1-5,1-4)
(56.0833333333333, 56.0833333333333, 56.0833333333333)
The beta integral can often be evaluated analytically. For integer and
rational arguments, the incomplete beta function typically reduces to a
simple algebraic-logarithmic expression::
>>> mp.dps = 25
>>> identify(chop(betainc(0, 0, 3, 4)))
'-(log((9/8)))'
>>> identify(betainc(2, 3, 4, 5))
'(673/12)'
>>> identify(betainc(1.5, 1, 1, 2))
'((-12+sqrt(1152))/18)'
"""
binomial = r"""
Computes the binomial coefficient
.. math ::
{n \choose k} = \frac{n!}{k!(n-k)!}.
The binomial coefficient gives the number of ways that `k` items
can be chosen from a set of `n` items. More generally, the binomial
coefficient is a well-defined function of arbitrary real or
complex `n` and `k`, via the gamma function.
**Examples**
Generate Pascal's triangle::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(5):
... nprint([binomial(n,k) for k in range(n+1)])
...
[1.0]
[1.0, 1.0]
[1.0, 2.0, 1.0]
[1.0, 3.0, 3.0, 1.0]
[1.0, 4.0, 6.0, 4.0, 1.0]
There is 1 way to select 0 items from the empty set, and 0 ways to
select 1 item from the empty set::
>>> binomial(0, 0)
1.0
>>> binomial(0, 1)
0.0
:func:`~mpmath.binomial` supports large arguments::
>>> binomial(10**20, 10**20-5)
8.33333333333333e+97
>>> binomial(10**20, 10**10)
2.60784095465201e+104342944813
Nonintegral binomial coefficients find use in series
expansions::
>>> nprint(taylor(lambda x: (1+x)**0.25, 0, 4))
[1.0, 0.25, -0.09375, 0.0546875, -0.0375977]
>>> nprint([binomial(0.25, k) for k in range(5)])
[1.0, 0.25, -0.09375, 0.0546875, -0.0375977]
An integral representation::
>>> n, k = 5, 3
>>> f = lambda t: exp(-j*k*t)*(1+exp(j*t))**n
>>> chop(quad(f, [-pi,pi])/(2*pi))
10.0
>>> binomial(n,k)
10.0
"""
rf = r"""
Computes the rising factorial or Pochhammer symbol,
.. math ::
x^{(n)} = x (x+1) \cdots (x+n-1) = \frac{\Gamma(x+n)}{\Gamma(x)}
where the rightmost expression is valid for nonintegral `n`.
**Examples**
For integral `n`, the rising factorial is a polynomial::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(5):
... nprint(taylor(lambda x: rf(x,n), 0, n))
...
[1.0]
[0.0, 1.0]
[0.0, 1.0, 1.0]
[0.0, 2.0, 3.0, 1.0]
[0.0, 6.0, 11.0, 6.0, 1.0]
Evaluation is supported for arbitrary arguments::
>>> rf(2+3j, 5.5)
(-7202.03920483347 - 3777.58810701527j)
"""
ff = r"""
Computes the falling factorial,
.. math ::
(x)_n = x (x-1) \cdots (x-n+1) = \frac{\Gamma(x+1)}{\Gamma(x-n+1)}
where the rightmost expression is valid for nonintegral `n`.
**Examples**
For integral `n`, the falling factorial is a polynomial::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(5):
... nprint(taylor(lambda x: ff(x,n), 0, n))
...
[1.0]
[0.0, 1.0]
[0.0, -1.0, 1.0]
[0.0, 2.0, -3.0, 1.0]
[0.0, -6.0, 11.0, -6.0, 1.0]
Evaluation is supported for arbitrary arguments::
>>> ff(2+3j, 5.5)
(-720.41085888203 + 316.101124983878j)
"""
fac2 = r"""
Computes the double factorial `x!!`, defined for integers
`x > 0` by
.. math ::
x!! = \begin{cases}
1 \cdot 3 \cdots (x-2) \cdot x & x \;\mathrm{odd} \\
2 \cdot 4 \cdots (x-2) \cdot x & x \;\mathrm{even}
\end{cases}
and more generally by [1]
.. math ::
x!! = 2^{x/2} \left(\frac{\pi}{2}\right)^{(\cos(\pi x)-1)/4}
\Gamma\left(\frac{x}{2}+1\right).
**Examples**
The integer sequence of double factorials begins::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint([fac2(n) for n in range(10)])
[1.0, 1.0, 2.0, 3.0, 8.0, 15.0, 48.0, 105.0, 384.0, 945.0]
For large `x`, double factorials follow a Stirling-like asymptotic
approximation::
>>> x = mpf(10000)
>>> fac2(x)
5.97272691416282e+17830
>>> sqrt(pi)*x**((x+1)/2)*exp(-x/2)
5.97262736954392e+17830
The recurrence formula `x!! = x (x-2)!!` can be reversed to
define the double factorial of negative odd integers (but
not negative even integers)::
>>> fac2(-1), fac2(-3), fac2(-5), fac2(-7)
(1.0, -1.0, 0.333333333333333, -0.0666666666666667)
>>> fac2(-2)
Traceback (most recent call last):
...
ValueError: gamma function pole
With the exception of the poles at negative even integers,
:func:`~mpmath.fac2` supports evaluation for arbitrary complex arguments.
The recurrence formula is valid generally::
>>> fac2(pi+2j)
(-1.3697207890154e-12 + 3.93665300979176e-12j)
>>> (pi+2j)*fac2(pi-2+2j)
(-1.3697207890154e-12 + 3.93665300979176e-12j)
Double factorials should not be confused with nested factorials,
which are immensely larger::
>>> fac(fac(20))
5.13805976125208e+43675043585825292774
>>> fac2(20)
3715891200.0
Double factorials appear, among other things, in series expansions
of Gaussian functions and the error function. Infinite series
include::
>>> nsum(lambda k: 1/fac2(k), [0, inf])
3.05940740534258
>>> sqrt(e)*(1+sqrt(pi/2)*erf(sqrt(2)/2))
3.05940740534258
>>> nsum(lambda k: 2**k/fac2(2*k-1), [1, inf])
4.06015693855741
>>> e * erf(1) * sqrt(pi)
4.06015693855741
A beautiful Ramanujan sum::
>>> nsum(lambda k: (-1)**k*(fac2(2*k-1)/fac2(2*k))**3, [0,inf])
0.90917279454693
>>> (gamma('9/8')/gamma('5/4')/gamma('7/8'))**2
0.90917279454693
**References**
1. http://functions.wolfram.com/GammaBetaErf/Factorial2/27/01/0002/
2. http://mathworld.wolfram.com/DoubleFactorial.html
"""
hyper = r"""
Evaluates the generalized hypergeometric function
.. math ::
\,_pF_q(a_1,\ldots,a_p; b_1,\ldots,b_q; z) =
\sum_{n=0}^\infty \frac{(a_1)_n (a_2)_n \ldots (a_p)_n}
{(b_1)_n(b_2)_n\ldots(b_q)_n} \frac{z^n}{n!}
where `(x)_n` denotes the rising factorial (see :func:`~mpmath.rf`).
The parameter lists ``a_s`` and ``b_s`` may contain integers,
real numbers, complex numbers, as well as exact fractions given in
the form of tuples `(p, q)`. :func:`~mpmath.hyper` is optimized to handle
integers and fractions more efficiently than arbitrary
floating-point parameters (since rational parameters are by
far the most common).
**Examples**
Verifying that :func:`~mpmath.hyper` gives the sum in the definition, by
comparison with :func:`~mpmath.nsum`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a,b,c,d = 2,3,4,5
>>> x = 0.25
>>> hyper([a,b],[c,d],x)
1.078903941164934876086237
>>> fn = lambda n: rf(a,n)*rf(b,n)/rf(c,n)/rf(d,n)*x**n/fac(n)
>>> nsum(fn, [0, inf])
1.078903941164934876086237
The parameters can be any combination of integers, fractions,
floats and complex numbers::
>>> a, b, c, d, e = 1, (-1,2), pi, 3+4j, (2,3)
>>> x = 0.2j
>>> hyper([a,b],[c,d,e],x)
(0.9923571616434024810831887 - 0.005753848733883879742993122j)
>>> b, e = -0.5, mpf(2)/3
>>> fn = lambda n: rf(a,n)*rf(b,n)/rf(c,n)/rf(d,n)/rf(e,n)*x**n/fac(n)
>>> nsum(fn, [0, inf])
(0.9923571616434024810831887 - 0.005753848733883879742993122j)
The `\,_0F_0` and `\,_1F_0` series are just elementary functions::
>>> a, z = sqrt(2), +pi
>>> hyper([],[],z)
23.14069263277926900572909
>>> exp(z)
23.14069263277926900572909
>>> hyper([a],[],z)
(-0.09069132879922920160334114 + 0.3283224323946162083579656j)
>>> (1-z)**(-a)
(-0.09069132879922920160334114 + 0.3283224323946162083579656j)
If any `a_k` coefficient is a nonpositive integer, the series terminates
into a finite polynomial::
>>> hyper([1,1,1,-3],[2,5],1)
0.7904761904761904761904762
>>> identify(_)
'(83/105)'
If any `b_k` is a nonpositive integer, the function is undefined (unless the
series terminates before the division by zero occurs)::
>>> hyper([1,1,1,-3],[-2,5],1)
Traceback (most recent call last):
...
ZeroDivisionError: pole in hypergeometric series
>>> hyper([1,1,1,-1],[-2,5],1)
1.1
Except for polynomial cases, the radius of convergence `R` of the hypergeometric
series is either `R = \infty` (if `p \le q`), `R = 1` (if `p = q+1`), or
`R = 0` (if `p > q+1`).
The analytic continuations of the functions with `p = q+1`, i.e. `\,_2F_1`,
`\,_3F_2`, `\,_4F_3`, etc, are all implemented and therefore these functions
can be evaluated for `|z| \ge 1`. The shortcuts :func:`~mpmath.hyp2f1`, :func:`~mpmath.hyp3f2`
are available to handle the most common cases (see their documentation),
but functions of higher degree are also supported via :func:`~mpmath.hyper`::
>>> hyper([1,2,3,4], [5,6,7], 1) # 4F3 at finite-valued branch point
1.141783505526870731311423
>>> hyper([4,5,6,7], [1,2,3], 1) # 4F3 at pole
+inf
>>> hyper([1,2,3,4,5], [6,7,8,9], 10) # 5F4
(1.543998916527972259717257 - 0.5876309929580408028816365j)
>>> hyper([1,2,3,4,5,6], [7,8,9,10,11], 1j) # 6F5
(0.9996565821853579063502466 + 0.0129721075905630604445669j)
Near `z = 1` with noninteger parameters::
>>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','41/8'], 1)
2.219433352235586121250027
>>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','5/4'], 1)
+inf
>>> eps1 = extradps(6)(lambda: 1 - mpf('1e-6'))()
>>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','5/4'], eps1)
2923978034.412973409330956
Please note that, as currently implemented, evaluation of `\,_pF_{p-1}`
with `p \ge 3` may be slow or inaccurate when `|z-1|` is small,
for some parameter values.
When `p > q+1`, ``hyper`` computes the (iterated) Borel sum of the divergent
series. For `\,_2F_0` the Borel sum has an analytic solution and can be
computed efficiently (see :func:`~mpmath.hyp2f0`). For higher degrees, the function
is evaluated first by attempting to sum it directly as an asymptotic
series (this only works for tiny `|z|`), and then by evaluating the Borel
regularized sum using numerical integration. Except for
special parameter combinations, this can be extremely slow.
>>> hyper([1,1], [], 0.5) # regularization of 2F0
(1.340965419580146562086448 + 0.8503366631752726568782447j)
>>> hyper([1,1,1,1], [1], 0.5) # regularization of 4F1
(1.108287213689475145830699 + 0.5327107430640678181200491j)
With the following magnitude of argument, the asymptotic series for `\,_3F_1`
gives only a few digits. Using Borel summation, ``hyper`` can produce
a value with full accuracy::
>>> mp.dps = 15
>>> hyper([2,0.5,4], [5.25], '0.08', force_series=True)
Traceback (most recent call last):
...
NoConvergence: Hypergeometric series converges too slowly. Try increasing maxterms.
>>> hyper([2,0.5,4], [5.25], '0.08', asymp_tol=1e-4)
1.0725535790737
>>> hyper([2,0.5,4], [5.25], '0.08')
(1.07269542893559 + 5.54668863216891e-5j)
>>> hyper([2,0.5,4], [5.25], '-0.08', asymp_tol=1e-4)
0.946344925484879
>>> hyper([2,0.5,4], [5.25], '-0.08')
0.946312503737771
>>> mp.dps = 25
>>> hyper([2,0.5,4], [5.25], '-0.08')
0.9463125037377662296700858
Note that with the positive `z` value, there is a complex part in the
correct result, which falls below the tolerance of the asymptotic series.
"""
hypercomb = r"""
Computes a weighted combination of hypergeometric functions
.. math ::
\sum_{r=1}^N \left[ \prod_{k=1}^{l_r} {w_{r,k}}^{c_{r,k}}
\frac{\prod_{k=1}^{m_r} \Gamma(\alpha_{r,k})}{\prod_{k=1}^{n_r}
\Gamma(\beta_{r,k})}
\,_{p_r}F_{q_r}(a_{r,1},\ldots,a_{r,p}; b_{r,1},
\ldots, b_{r,q}; z_r)\right].
Typically the parameters are linear combinations of a small set of base
parameters; :func:`~mpmath.hypercomb` permits computing a correct value in
the case that some of the `\alpha`, `\beta`, `b` turn out to be
nonpositive integers, or if division by zero occurs for some `w^c`,
assuming that there are opposing singularities that cancel out.
The limit is computed by evaluating the function with the base
parameters perturbed, at a higher working precision.
The first argument should be a function that takes the perturbable
base parameters ``params`` as input and returns `N` tuples
``(w, c, alpha, beta, a, b, z)``, where the coefficients ``w``, ``c``,
gamma factors ``alpha``, ``beta``, and hypergeometric coefficients
``a``, ``b`` each should be lists of numbers, and ``z`` should be a single
number.
**Examples**
The following evaluates
.. math ::
(a-1) \frac{\Gamma(a-3)}{\Gamma(a-4)} \,_1F_1(a,a-1,z) = e^z(a-4)(a+z-1)
with `a=1, z=3`. There is a zero factor, two gamma function poles, and
the 1F1 function is singular; all singularities cancel out to give a finite
value::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> hypercomb(lambda a: [([a-1],[1],[a-3],[a-4],[a],[a-1],3)], [1])
-180.769832308689
>>> -9*exp(3)
-180.769832308689
"""
hyp0f1 = r"""
Gives the hypergeometric function `\,_0F_1`, sometimes known as the
confluent limit function, defined as
.. math ::
\,_0F_1(a,z) = \sum_{k=0}^{\infty} \frac{1}{(a)_k} \frac{z^k}{k!}.
This function satisfies the differential equation `z f''(z) + a f'(z) = f(z)`,
and is related to the Bessel function of the first kind (see :func:`~mpmath.besselj`).
``hyp0f1(a,z)`` is equivalent to ``hyper([],[a],z)``; see documentation for
:func:`~mpmath.hyper` for more information.
**Examples**
Evaluation for arbitrary arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyp0f1(2, 0.25)
1.130318207984970054415392
>>> hyp0f1((1,2), 1234567)
6.27287187546220705604627e+964
>>> hyp0f1(3+4j, 1000000j)
(3.905169561300910030267132e+606 + 3.807708544441684513934213e+606j)
Evaluation is supported for arbitrarily large values of `z`,
using asymptotic expansions::
>>> hyp0f1(1, 10**50)
2.131705322874965310390701e+8685889638065036553022565
>>> hyp0f1(1, -10**50)
1.115945364792025420300208e-13
Verifying the differential equation::
>>> a = 2.5
>>> f = lambda z: hyp0f1(a,z)
>>> for z in [0, 10, 3+4j]:
... chop(z*diff(f,z,2) + a*diff(f,z) - f(z))
...
0.0
0.0
0.0
"""
hyp1f1 = r"""
Gives the confluent hypergeometric function of the first kind,
.. math ::
\,_1F_1(a,b,z) = \sum_{k=0}^{\infty} \frac{(a)_k}{(b)_k} \frac{z^k}{k!},
also known as Kummer's function and sometimes denoted by `M(a,b,z)`. This
function gives one solution to the confluent (Kummer's) differential equation
.. math ::
z f''(z) + (b-z) f'(z) - af(z) = 0.
A second solution is given by the `U` function; see :func:`~mpmath.hyperu`.
Solutions are also given in an alternate form by the Whittaker
functions (:func:`~mpmath.whitm`, :func:`~mpmath.whitw`).
``hyp1f1(a,b,z)`` is equivalent
to ``hyper([a],[b],z)``; see documentation for :func:`~mpmath.hyper` for more
information.
**Examples**
Evaluation for real and complex values of the argument `z`, with
fixed parameters `a = 2, b = -1/3`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyp1f1(2, (-1,3), 3.25)
-2815.956856924817275640248
>>> hyp1f1(2, (-1,3), -3.25)
-1.145036502407444445553107
>>> hyp1f1(2, (-1,3), 1000)
-8.021799872770764149793693e+441
>>> hyp1f1(2, (-1,3), -1000)
0.000003131987633006813594535331
>>> hyp1f1(2, (-1,3), 100+100j)
(-3.189190365227034385898282e+48 - 1.106169926814270418999315e+49j)
Parameters may be complex::
>>> hyp1f1(2+3j, -1+j, 10j)
(261.8977905181045142673351 + 160.8930312845682213562172j)
Arbitrarily large values of `z` are supported::
>>> hyp1f1(3, 4, 10**20)
3.890569218254486878220752e+43429448190325182745
>>> hyp1f1(3, 4, -10**20)
6.0e-60
>>> hyp1f1(3, 4, 10**20*j)
(-1.935753855797342532571597e-20 - 2.291911213325184901239155e-20j)
Verifying the differential equation::
>>> a, b = 1.5, 2
>>> f = lambda z: hyp1f1(a,b,z)
>>> for z in [0, -10, 3, 3+4j]:
... chop(z*diff(f,z,2) + (b-z)*diff(f,z) - a*f(z))
...
0.0
0.0
0.0
0.0
An integral representation::
>>> a, b = 1.5, 3
>>> z = 1.5
>>> hyp1f1(a,b,z)
2.269381460919952778587441
>>> g = lambda t: exp(z*t)*t**(a-1)*(1-t)**(b-a-1)
>>> gammaprod([b],[a,b-a])*quad(g, [0,1])
2.269381460919952778587441
"""
hyp1f2 = r"""
Gives the hypergeometric function `\,_1F_2(a_1;b_1,b_2;z)`.
The call ``hyp1f2(a1,b1,b2,z)`` is equivalent to
``hyper([a1],[b1,b2],z)``.
Evaluation works for complex and arbitrarily large arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a, b, c = 1.5, (-1,3), 2.25
>>> hyp1f2(a, b, c, 10**20)
-1.159388148811981535941434e+8685889639
>>> hyp1f2(a, b, c, -10**20)
-12.60262607892655945795907
>>> hyp1f2(a, b, c, 10**20*j)
(4.237220401382240876065501e+6141851464 - 2.950930337531768015892987e+6141851464j)
>>> hyp1f2(2+3j, -2j, 0.5j, 10-20j)
(135881.9905586966432662004 - 86681.95885418079535738828j)
"""
hyp2f2 = r"""
Gives the hypergeometric function `\,_2F_2(a_1,a_2;b_1,b_2; z)`.
The call ``hyp2f2(a1,a2,b1,b2,z)`` is equivalent to
``hyper([a1,a2],[b1,b2],z)``.
Evaluation works for complex and arbitrarily large arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a, b, c, d = 1.5, (-1,3), 2.25, 4
>>> hyp2f2(a, b, c, d, 10**20)
-5.275758229007902299823821e+43429448190325182663
>>> hyp2f2(a, b, c, d, -10**20)
2561445.079983207701073448
>>> hyp2f2(a, b, c, d, 10**20*j)
(2218276.509664121194836667 - 1280722.539991603850462856j)
>>> hyp2f2(2+3j, -2j, 0.5j, 4j, 10-20j)
(80500.68321405666957342788 - 20346.82752982813540993502j)
"""
hyp2f3 = r"""
Gives the hypergeometric function `\,_2F_3(a_1,a_2;b_1,b_2,b_3; z)`.
The call ``hyp2f3(a1,a2,b1,b2,b3,z)`` is equivalent to
``hyper([a1,a2],[b1,b2,b3],z)``.
Evaluation works for arbitrarily large arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a1,a2,b1,b2,b3 = 1.5, (-1,3), 2.25, 4, (1,5)
>>> hyp2f3(a1,a2,b1,b2,b3,10**20)
-4.169178177065714963568963e+8685889590
>>> hyp2f3(a1,a2,b1,b2,b3,-10**20)
7064472.587757755088178629
>>> hyp2f3(a1,a2,b1,b2,b3,10**20*j)
(-5.163368465314934589818543e+6141851415 + 1.783578125755972803440364e+6141851416j)
>>> hyp2f3(2+3j, -2j, 0.5j, 4j, -1-j, 10-20j)
(-2280.938956687033150740228 + 13620.97336609573659199632j)
>>> hyp2f3(2+3j, -2j, 0.5j, 4j, -1-j, 10000000-20000000j)
(4.849835186175096516193e+3504 - 3.365981529122220091353633e+3504j)
"""
hyp2f1 = r"""
Gives the Gauss hypergeometric function `\,_2F_1` (often simply referred to as
*the* hypergeometric function), defined for `|z| < 1` as
.. math ::
\,_2F_1(a,b,c,z) = \sum_{k=0}^{\infty}
\frac{(a)_k (b)_k}{(c)_k} \frac{z^k}{k!}.
and for `|z| \ge 1` by analytic continuation, with a branch cut on `(1, \infty)`
when necessary.
Special cases of this function include many of the orthogonal polynomials as
well as the incomplete beta function and other functions. Properties of the
Gauss hypergeometric function are documented comprehensively in many references,
for example Abramowitz & Stegun, section 15.
The implementation supports the analytic continuation as well as evaluation
close to the unit circle where `|z| \approx 1`. The syntax ``hyp2f1(a,b,c,z)``
is equivalent to ``hyper([a,b],[c],z)``.
**Examples**
Evaluation with `z` inside, outside and on the unit circle, for
fixed parameters::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyp2f1(2, (1,2), 4, 0.75)
1.303703703703703703703704
>>> hyp2f1(2, (1,2), 4, -1.75)
0.7431290566046919177853916
>>> hyp2f1(2, (1,2), 4, 1.75)
(1.418075801749271137026239 - 1.114976146679907015775102j)
>>> hyp2f1(2, (1,2), 4, 1)
1.6
>>> hyp2f1(2, (1,2), 4, -1)
0.8235498012182875315037882
>>> hyp2f1(2, (1,2), 4, j)
(0.9144026291433065674259078 + 0.2050415770437884900574923j)
>>> hyp2f1(2, (1,2), 4, 2+j)
(0.9274013540258103029011549 + 0.7455257875808100868984496j)
>>> hyp2f1(2, (1,2), 4, 0.25j)
(0.9931169055799728251931672 + 0.06154836525312066938147793j)
Evaluation with complex parameter values::
>>> hyp2f1(1+j, 0.75, 10j, 1+5j)
(0.8834833319713479923389638 + 0.7053886880648105068343509j)
Evaluation with `z = 1`::
>>> hyp2f1(-2.5, 3.5, 1.5, 1)
0.0
>>> hyp2f1(-2.5, 3, 4, 1)
0.06926406926406926406926407
>>> hyp2f1(2, 3, 4, 1)
+inf
Evaluation for huge arguments::
>>> hyp2f1((-1,3), 1.75, 4, '1e100')
(7.883714220959876246415651e+32 + 1.365499358305579597618785e+33j)
>>> hyp2f1((-1,3), 1.75, 4, '1e1000000')
(7.883714220959876246415651e+333332 + 1.365499358305579597618785e+333333j)
>>> hyp2f1((-1,3), 1.75, 4, '1e1000000j')
(1.365499358305579597618785e+333333 - 7.883714220959876246415651e+333332j)
An integral representation::
>>> a,b,c,z = -0.5, 1, 2.5, 0.25
>>> g = lambda t: t**(b-1) * (1-t)**(c-b-1) * (1-t*z)**(-a)
>>> gammaprod([c],[b,c-b]) * quad(g, [0,1])
0.9480458814362824478852618
>>> hyp2f1(a,b,c,z)
0.9480458814362824478852618
Verifying the hypergeometric differential equation::
>>> f = lambda z: hyp2f1(a,b,c,z)
>>> chop(z*(1-z)*diff(f,z,2) + (c-(a+b+1)*z)*diff(f,z) - a*b*f(z))
0.0
"""
hyp3f2 = r"""
Gives the generalized hypergeometric function `\,_3F_2`, defined for `|z| < 1`
as
.. math ::
\,_3F_2(a_1,a_2,a_3,b_1,b_2,z) = \sum_{k=0}^{\infty}
\frac{(a_1)_k (a_2)_k (a_3)_k}{(b_1)_k (b_2)_k} \frac{z^k}{k!}.
and for `|z| \ge 1` by analytic continuation. The analytic structure of this
function is similar to that of `\,_2F_1`, generally with a singularity at
`z = 1` and a branch cut on `(1, \infty)`.
Evaluation is supported inside, on, and outside
the circle of convergence `|z| = 1`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyp3f2(1,2,3,4,5,0.25)
1.083533123380934241548707
>>> hyp3f2(1,2+2j,3,4,5,-10+10j)
(0.1574651066006004632914361 - 0.03194209021885226400892963j)
>>> hyp3f2(1,2,3,4,5,-10)
0.3071141169208772603266489
>>> hyp3f2(1,2,3,4,5,10)
(-0.4857045320523947050581423 - 0.5988311440454888436888028j)
>>> hyp3f2(0.25,1,1,2,1.5,1)
1.157370995096772047567631
>>> (8-pi-2*ln2)/3
1.157370995096772047567631
>>> hyp3f2(1+j,0.5j,2,1,-2j,-1)
(1.74518490615029486475959 + 0.1454701525056682297614029j)
>>> hyp3f2(1+j,0.5j,2,1,-2j,sqrt(j))
(0.9829816481834277511138055 - 0.4059040020276937085081127j)
>>> hyp3f2(-3,2,1,-5,4,1)
1.41
>>> hyp3f2(-3,2,1,-5,4,2)
2.12
Evaluation very close to the unit circle::
>>> hyp3f2(1,2,3,4,5,'1.0001')
(1.564877796743282766872279 - 3.76821518787438186031973e-11j)
>>> hyp3f2(1,2,3,4,5,'1+0.0001j')
(1.564747153061671573212831 + 0.0001305757570366084557648482j)
>>> hyp3f2(1,2,3,4,5,'0.9999')
1.564616644881686134983664
>>> hyp3f2(1,2,3,4,5,'-0.9999')
0.7823896253461678060196207
.. note ::
Evaluation for `|z-1|` small can currently be inaccurate or slow
for some parameter combinations.
For various parameter combinations, `\,_3F_2` admits representation in terms
of hypergeometric functions of lower degree, or in terms of
simpler functions::
>>> for a, b, z in [(1,2,-1), (2,0.5,1)]:
... hyp2f1(a,b,a+b+0.5,z)**2
... hyp3f2(2*a,a+b,2*b,a+b+0.5,2*a+2*b,z)
...
0.4246104461966439006086308
0.4246104461966439006086308
7.111111111111111111111111
7.111111111111111111111111
>>> z = 2+3j
>>> hyp3f2(0.5,1,1.5,2,2,z)
(0.7621440939243342419729144 + 0.4249117735058037649915723j)
>>> 4*(pi-2*ellipe(z))/(pi*z)
(0.7621440939243342419729144 + 0.4249117735058037649915723j)
"""
hyperu = r"""
Gives the Tricomi confluent hypergeometric function `U`, also known as
the Kummer or confluent hypergeometric function of the second kind. This
function gives a second linearly independent solution to the confluent
hypergeometric differential equation (the first is provided by `\,_1F_1` --
see :func:`~mpmath.hyp1f1`).
**Examples**
Evaluation for arbitrary complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyperu(2,3,4)
0.0625
>>> hyperu(0.25, 5, 1000)
0.1779949416140579573763523
>>> hyperu(0.25, 5, -1000)
(0.1256256609322773150118907 - 0.1256256609322773150118907j)
The `U` function may be singular at `z = 0`::
>>> hyperu(1.5, 2, 0)
+inf
>>> hyperu(1.5, -2, 0)
0.1719434921288400112603671
Verifying the differential equation::
>>> a, b = 1.5, 2
>>> f = lambda z: hyperu(a,b,z)
>>> for z in [-10, 3, 3+4j]:
... chop(z*diff(f,z,2) + (b-z)*diff(f,z) - a*f(z))
...
0.0
0.0
0.0
An integral representation::
>>> a,b,z = 2, 3.5, 4.25
>>> hyperu(a,b,z)
0.06674960718150520648014567
>>> quad(lambda t: exp(-z*t)*t**(a-1)*(1+t)**(b-a-1),[0,inf]) / gamma(a)
0.06674960718150520648014567
[1] http://people.math.sfu.ca/~cbm/aands/page_504.htm
"""
hyp2f0 = r"""
Gives the hypergeometric function `\,_2F_0`, defined formally by the
series
.. math ::
\,_2F_0(a,b;;z) = \sum_{n=0}^{\infty} (a)_n (b)_n \frac{z^n}{n!}.
This series usually does not converge. For small enough `z`, it can be viewed
as an asymptotic series that may be summed directly with an appropriate
truncation. When this is not the case, :func:`~mpmath.hyp2f0` gives a regularized sum,
or equivalently, it uses a representation in terms of the
hypergeometric U function [1]. The series also converges when either `a` or `b`
is a nonpositive integer, as it then terminates into a polynomial
after `-a` or `-b` terms.
**Examples**
Evaluation is supported for arbitrary complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyp2f0((2,3), 1.25, -100)
0.07095851870980052763312791
>>> hyp2f0((2,3), 1.25, 100)
(-0.03254379032170590665041131 + 0.07269254613282301012735797j)
>>> hyp2f0(-0.75, 1-j, 4j)
(-0.3579987031082732264862155 - 3.052951783922142735255881j)
Even with real arguments, the regularized value of 2F0 is often complex-valued,
but the imaginary part decreases exponentially as `z \to 0`. In the following
example, the first call uses complex evaluation while the second has a small
enough `z` to evaluate using the direct series and thus the returned value
is strictly real (this should be taken to indicate that the imaginary
part is less than ``eps``)::
>>> mp.dps = 15
>>> hyp2f0(1.5, 0.5, 0.05)
(1.04166637647907 + 8.34584913683906e-8j)
>>> hyp2f0(1.5, 0.5, 0.0005)
1.00037535207621
The imaginary part can be retrieved by increasing the working precision::
>>> mp.dps = 80
>>> nprint(hyp2f0(1.5, 0.5, 0.009).imag)
1.23828e-46
In the polynomial case (the series terminating), 2F0 can evaluate exactly::
>>> mp.dps = 15
>>> hyp2f0(-6,-6,2)
291793.0
>>> identify(hyp2f0(-2,1,0.25))
'(5/8)'
The coefficients of the polynomials can be recovered using Taylor expansion::
>>> nprint(taylor(lambda x: hyp2f0(-3,0.5,x), 0, 10))
[1.0, -1.5, 2.25, -1.875, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
>>> nprint(taylor(lambda x: hyp2f0(-4,0.5,x), 0, 10))
[1.0, -2.0, 4.5, -7.5, 6.5625, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
[1] http://people.math.sfu.ca/~cbm/aands/page_504.htm
"""
gammainc = r"""
``gammainc(z, a=0, b=inf)`` computes the (generalized) incomplete
gamma function with integration limits `[a, b]`:
.. math ::
\Gamma(z,a,b) = \int_a^b t^{z-1} e^{-t} \, dt
The generalized incomplete gamma function reduces to the
following special cases when one or both endpoints are fixed:
* `\Gamma(z,0,\infty)` is the standard ("complete")
gamma function, `\Gamma(z)` (available directly
as the mpmath function :func:`~mpmath.gamma`)
* `\Gamma(z,a,\infty)` is the "upper" incomplete gamma
function, `\Gamma(z,a)`
* `\Gamma(z,0,b)` is the "lower" incomplete gamma
function, `\gamma(z,b)`.
Of course, we have
`\Gamma(z,0,x) + \Gamma(z,x,\infty) = \Gamma(z)`
for all `z` and `x`.
Note however that some authors reverse the order of the
arguments when defining the lower and upper incomplete
gamma function, so one should be careful to get the correct
definition.
If also given the keyword argument ``regularized=True``,
:func:`~mpmath.gammainc` computes the "regularized" incomplete gamma
function
.. math ::
P(z,a,b) = \frac{\Gamma(z,a,b)}{\Gamma(z)}.
**Examples**
We can compare with numerical quadrature to verify that
:func:`~mpmath.gammainc` computes the integral in the definition::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> gammainc(2+3j, 4, 10)
(0.00977212668627705160602312 - 0.0770637306312989892451977j)
>>> quad(lambda t: t**(2+3j-1) * exp(-t), [4, 10])
(0.00977212668627705160602312 - 0.0770637306312989892451977j)
Argument symmetries follow directly from the integral definition::
>>> gammainc(3, 4, 5) + gammainc(3, 5, 4)
0.0
>>> gammainc(3,0,2) + gammainc(3,2,4); gammainc(3,0,4)
1.523793388892911312363331
1.523793388892911312363331
>>> findroot(lambda z: gammainc(2,z,3), 1)
3.0
Evaluation for arbitrarily large arguments::
>>> gammainc(10, 100)
4.083660630910611272288592e-26
>>> gammainc(10, 10000000000000000)
5.290402449901174752972486e-4342944819032375
>>> gammainc(3+4j, 1000000+1000000j)
(-1.257913707524362408877881e-434284 + 2.556691003883483531962095e-434284j)
Evaluation of a generalized incomplete gamma function automatically chooses
the representation that gives a more accurate result, depending on which
parameter is larger::
>>> gammainc(10000000, 3) - gammainc(10000000, 2) # Bad
0.0
>>> gammainc(10000000, 2, 3) # Good
1.755146243738946045873491e+4771204
>>> gammainc(2, 0, 100000001) - gammainc(2, 0, 100000000) # Bad
0.0
>>> gammainc(2, 100000000, 100000001) # Good
4.078258353474186729184421e-43429441
The incomplete gamma functions satisfy simple recurrence
relations::
>>> mp.dps = 25
>>> z, a = mpf(3.5), mpf(2)
>>> gammainc(z+1, a); z*gammainc(z,a) + a**z*exp(-a)
10.60130296933533459267329
10.60130296933533459267329
>>> gammainc(z+1,0,a); z*gammainc(z,0,a) - a**z*exp(-a)
1.030425427232114336470932
1.030425427232114336470932
Evaluation at integers and poles::
>>> gammainc(-3, -4, -5)
(-0.2214577048967798566234192 + 0.0j)
>>> gammainc(-3, 0, 5)
+inf
If `z` is an integer, the recurrence reduces the incomplete gamma
function to `P(a) \exp(-a) + Q(b) \exp(-b)` where `P` and
`Q` are polynomials::
>>> gammainc(1, 2); exp(-2)
0.1353352832366126918939995
0.1353352832366126918939995
>>> mp.dps = 50
>>> identify(gammainc(6, 1, 2), ['exp(-1)', 'exp(-2)'])
'(326*exp(-1) + (-872)*exp(-2))'
The incomplete gamma functions reduce to functions such as
the exponential integral Ei and the error function for special
arguments::
>>> mp.dps = 25
>>> gammainc(0, 4); -ei(-4)
0.00377935240984890647887486
0.00377935240984890647887486
>>> gammainc(0.5, 0, 2); sqrt(pi)*erf(sqrt(2))
1.691806732945198336509541
1.691806732945198336509541
"""
erf = r"""
Computes the error function, `\mathrm{erf}(x)`. The error
function is the normalized antiderivative of the Gaussian function
`\exp(-t^2)`. More precisely,
.. math::
\mathrm{erf}(x) = \frac{2}{\sqrt \pi} \int_0^x \exp(-t^2) \,dt
**Basic examples**
Simple values and limits include::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> erf(0)
0.0
>>> erf(1)
0.842700792949715
>>> erf(-1)
-0.842700792949715
>>> erf(inf)
1.0
>>> erf(-inf)
-1.0
For large real `x`, `\mathrm{erf}(x)` approaches 1 very
rapidly::
>>> erf(3)
0.999977909503001
>>> erf(5)
0.999999999998463
The error function is an odd function::
>>> nprint(chop(taylor(erf, 0, 5)))
[0.0, 1.12838, 0.0, -0.376126, 0.0, 0.112838]
:func:`~mpmath.erf` implements arbitrary-precision evaluation and
supports complex numbers::
>>> mp.dps = 50
>>> erf(0.5)
0.52049987781304653768274665389196452873645157575796
>>> mp.dps = 25
>>> erf(1+j)
(1.316151281697947644880271 + 0.1904534692378346862841089j)
Evaluation is supported for large arguments::
>>> mp.dps = 25
>>> erf('1e1000')
1.0
>>> erf('-1e1000')
-1.0
>>> erf('1e-1000')
1.128379167095512573896159e-1000
>>> erf('1e7j')
(0.0 + 8.593897639029319267398803e+43429448190317j)
>>> erf('1e7+1e7j')
(0.9999999858172446172631323 + 3.728805278735270407053139e-8j)
**Related functions**
See also :func:`~mpmath.erfc`, which is more accurate for large `x`,
and :func:`~mpmath.erfi` which gives the antiderivative of
`\exp(t^2)`.
The Fresnel integrals :func:`~mpmath.fresnels` and :func:`~mpmath.fresnelc`
are also related to the error function.
"""
erfc = r"""
Computes the complementary error function,
`\mathrm{erfc}(x) = 1-\mathrm{erf}(x)`.
This function avoids cancellation that occurs when naively
computing the complementary error function as ``1-erf(x)``::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> 1 - erf(10)
0.0
>>> erfc(10)
2.08848758376254e-45
:func:`~mpmath.erfc` works accurately even for ludicrously large
arguments::
>>> erfc(10**10)
4.3504398860243e-43429448190325182776
Complex arguments are supported::
>>> erfc(500+50j)
(1.19739830969552e-107492 + 1.46072418957528e-107491j)
"""
erfi = r"""
Computes the imaginary error function, `\mathrm{erfi}(x)`.
The imaginary error function is defined in analogy with the
error function, but with a positive sign in the integrand:
.. math ::
\mathrm{erfi}(x) = \frac{2}{\sqrt \pi} \int_0^x \exp(t^2) \,dt
Whereas the error function rapidly converges to 1 as `x` grows,
the imaginary error function rapidly diverges to infinity.
The functions are related as
`\mathrm{erfi}(x) = -i\,\mathrm{erf}(ix)` for all complex
numbers `x`.
**Examples**
Basic values and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> erfi(0)
0.0
>>> erfi(1)
1.65042575879754
>>> erfi(-1)
-1.65042575879754
>>> erfi(inf)
+inf
>>> erfi(-inf)
-inf
Note the symmetry between erf and erfi::
>>> erfi(3j)
(0.0 + 0.999977909503001j)
>>> erf(3)
0.999977909503001
>>> erf(1+2j)
(-0.536643565778565 - 5.04914370344703j)
>>> erfi(2+1j)
(-5.04914370344703 - 0.536643565778565j)
Large arguments are supported::
>>> erfi(1000)
1.71130938718796e+434291
>>> erfi(10**10)
7.3167287567024e+43429448190325182754
>>> erfi(-10**10)
-7.3167287567024e+43429448190325182754
>>> erfi(1000-500j)
(2.49895233563961e+325717 + 2.6846779342253e+325717j)
>>> erfi(100000j)
(0.0 + 1.0j)
>>> erfi(-100000j)
(0.0 - 1.0j)
"""
erfinv = r"""
Computes the inverse error function, satisfying
.. math ::
\mathrm{erf}(\mathrm{erfinv}(x)) =
\mathrm{erfinv}(\mathrm{erf}(x)) = x.
This function is defined only for `-1 \le x \le 1`.
**Examples**
Special values include::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> erfinv(0)
0.0
>>> erfinv(1)
+inf
>>> erfinv(-1)
-inf
The domain is limited to the standard interval::
>>> erfinv(2)
Traceback (most recent call last):
...
ValueError: erfinv(x) is defined only for -1 <= x <= 1
It is simple to check that :func:`~mpmath.erfinv` computes inverse values of
:func:`~mpmath.erf` as promised::
>>> erf(erfinv(0.75))
0.75
>>> erf(erfinv(-0.995))
-0.995
:func:`~mpmath.erfinv` supports arbitrary-precision evaluation::
>>> mp.dps = 50
>>> x = erf(2)
>>> x
0.99532226501895273416206925636725292861089179704006
>>> erfinv(x)
2.0
A definite integral involving the inverse error function::
>>> mp.dps = 15
>>> quad(erfinv, [0, 1])
0.564189583547756
>>> 1/sqrt(pi)
0.564189583547756
The inverse error function can be used to generate random numbers
with a Gaussian distribution (although this is a relatively
inefficient algorithm)::
>>> nprint([erfinv(2*rand()-1) for n in range(6)]) # doctest: +SKIP
[-0.586747, 1.10233, -0.376796, 0.926037, -0.708142, -0.732012]
"""
npdf = r"""
``npdf(x, mu=0, sigma=1)`` evaluates the probability density
function of a normal distribution with mean value `\mu`
and variance `\sigma^2`.
Elementary properties of the probability distribution can
be verified using numerical integration::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> quad(npdf, [-inf, inf])
1.0
>>> quad(lambda x: npdf(x, 3), [3, inf])
0.5
>>> quad(lambda x: npdf(x, 3, 2), [3, inf])
0.5
See also :func:`~mpmath.ncdf`, which gives the cumulative
distribution.
"""
ncdf = r"""
``ncdf(x, mu=0, sigma=1)`` evaluates the cumulative distribution
function of a normal distribution with mean value `\mu`
and variance `\sigma^2`.
See also :func:`~mpmath.npdf`, which gives the probability density.
Elementary properties include::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> ncdf(pi, mu=pi)
0.5
>>> ncdf(-inf)
0.0
>>> ncdf(+inf)
1.0
The cumulative distribution is the integral of the density
function having identical mu and sigma::
>>> mp.dps = 15
>>> diff(ncdf, 2)
0.053990966513188
>>> npdf(2)
0.053990966513188
>>> diff(lambda x: ncdf(x, 1, 0.5), 0)
0.107981933026376
>>> npdf(0, 1, 0.5)
0.107981933026376
"""
expint = r"""
:func:`~mpmath.expint(n,z)` gives the generalized exponential integral
or En-function,
.. math ::
\mathrm{E}_n(z) = \int_1^{\infty} \frac{e^{-zt}}{t^n} dt,
where `n` and `z` may both be complex numbers. The case with `n = 1` is
also given by :func:`~mpmath.e1`.
**Examples**
Evaluation at real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> expint(1, 6.25)
0.0002704758872637179088496194
>>> expint(-3, 2+3j)
(0.00299658467335472929656159 + 0.06100816202125885450319632j)
>>> expint(2+3j, 4-5j)
(0.001803529474663565056945248 - 0.002235061547756185403349091j)
At negative integer values of `n`, `E_n(z)` reduces to a
rational-exponential function::
>>> f = lambda n, z: fac(n)*sum(z**k/fac(k-1) for k in range(1,n+2))/\
... exp(z)/z**(n+2)
>>> n = 3
>>> z = 1/pi
>>> expint(-n,z)
584.2604820613019908668219
>>> f(n,z)
584.2604820613019908668219
>>> n = 5
>>> expint(-n,z)
115366.5762594725451811138
>>> f(n,z)
115366.5762594725451811138
"""
e1 = r"""
Computes the exponential integral `\mathrm{E}_1(z)`, given by
.. math ::
\mathrm{E}_1(z) = \int_z^{\infty} \frac{e^{-t}}{t} dt.
This is equivalent to :func:`~mpmath.expint` with `n = 1`.
**Examples**
Two ways to evaluate this function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> e1(6.25)
0.0002704758872637179088496194
>>> expint(1,6.25)
0.0002704758872637179088496194
The E1-function is essentially the same as the Ei-function (:func:`~mpmath.ei`)
with negated argument, except for an imaginary branch cut term::
>>> e1(2.5)
0.02491491787026973549562801
>>> -ei(-2.5)
0.02491491787026973549562801
>>> e1(-2.5)
(-7.073765894578600711923552 - 3.141592653589793238462643j)
>>> -ei(2.5)
-7.073765894578600711923552
"""
ei = r"""
Computes the exponential integral or Ei-function, `\mathrm{Ei}(x)`.
The exponential integral is defined as
.. math ::
\mathrm{Ei}(x) = \int_{-\infty\,}^x \frac{e^t}{t} \, dt.
When the integration range includes `t = 0`, the exponential
integral is interpreted as providing the Cauchy principal value.
For real `x`, the Ei-function behaves roughly like
`\mathrm{Ei}(x) \approx \exp(x) + \log(|x|)`.
The Ei-function is related to the more general family of exponential
integral functions denoted by `E_n`, which are available as :func:`~mpmath.expint`.
**Basic examples**
Some basic values and limits are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> ei(0)
-inf
>>> ei(1)
1.89511781635594
>>> ei(inf)
+inf
>>> ei(-inf)
0.0
For `x < 0`, the defining integral can be evaluated
numerically as a reference::
>>> ei(-4)
-0.00377935240984891
>>> quad(lambda t: exp(t)/t, [-inf, -4])
-0.00377935240984891
:func:`~mpmath.ei` supports complex arguments and arbitrary
precision evaluation::
>>> mp.dps = 50
>>> ei(pi)
10.928374389331410348638445906907535171566338835056
>>> mp.dps = 25
>>> ei(3+4j)
(-4.154091651642689822535359 + 4.294418620024357476985535j)
**Related functions**
The exponential integral is closely related to the logarithmic
integral. See :func:`~mpmath.li` for additional information.
The exponential integral is related to the hyperbolic
and trigonometric integrals (see :func:`~mpmath.chi`, :func:`~mpmath.shi`,
:func:`~mpmath.ci`, :func:`~mpmath.si`) similarly to how the ordinary
exponential function is related to the hyperbolic and
trigonometric functions::
>>> mp.dps = 15
>>> ei(3)
9.93383257062542
>>> chi(3) + shi(3)
9.93383257062542
>>> chop(ci(3j) - j*si(3j) - pi*j/2)
9.93383257062542
Beware that logarithmic corrections, as in the last example
above, are required to obtain the correct branch in general.
For details, see [1].
The exponential integral is also a special case of the
hypergeometric function `\,_2F_2`::
>>> z = 0.6
>>> z*hyper([1,1],[2,2],z) + (ln(z)-ln(1/z))/2 + euler
0.769881289937359
>>> ei(z)
0.769881289937359
**References**
1. Relations between Ei and other functions:
http://functions.wolfram.com/GammaBetaErf/ExpIntegralEi/27/01/
2. Abramowitz & Stegun, section 5:
http://people.math.sfu.ca/~cbm/aands/page_228.htm
3. Asymptotic expansion for Ei:
http://mathworld.wolfram.com/En-Function.html
"""
li = r"""
Computes the logarithmic integral or li-function
`\mathrm{li}(x)`, defined by
.. math ::
\mathrm{li}(x) = \int_0^x \frac{1}{\log t} \, dt
The logarithmic integral has a singularity at `x = 1`.
Alternatively, ``li(x, offset=True)`` computes the offset
logarithmic integral (used in number theory)
.. math ::
\mathrm{Li}(x) = \int_2^x \frac{1}{\log t} \, dt.
These two functions are related via the simple identity
`\mathrm{Li}(x) = \mathrm{li}(x) - \mathrm{li}(2)`.
The logarithmic integral should also not be confused with
the polylogarithm (also denoted by Li), which is implemented
as :func:`~mpmath.polylog`.
**Examples**
Some basic values and limits::
>>> from mpmath import *
>>> mp.dps = 30; mp.pretty = True
>>> li(0)
0.0
>>> li(1)
-inf
>>> li(2)
1.04516378011749278484458888919
>>> findroot(li, 2)
1.45136923488338105028396848589
>>> li(inf)
+inf
>>> li(2, offset=True)
0.0
>>> li(1, offset=True)
-inf
>>> li(0, offset=True)
-1.04516378011749278484458888919
>>> li(10, offset=True)
5.12043572466980515267839286347
The logarithmic integral can be evaluated for arbitrary
complex arguments::
>>> mp.dps = 20
>>> li(3+4j)
(3.1343755504645775265 + 2.6769247817778742392j)
The logarithmic integral is related to the exponential integral::
>>> ei(log(3))
2.1635885946671919729
>>> li(3)
2.1635885946671919729
The logarithmic integral grows like `O(x/\log(x))`::
>>> mp.dps = 15
>>> x = 10**100
>>> x/log(x)
4.34294481903252e+97
>>> li(x)
4.3619719871407e+97
The prime number theorem states that the number of primes less
than `x` is asymptotic to `\mathrm{Li}(x)` (equivalently
`\mathrm{li}(x)`). For example, it is known that there are
exactly 1,925,320,391,606,803,968,923 prime numbers less than
`10^{23}` [1]. The logarithmic integral provides a very
accurate estimate::
>>> li(10**23, offset=True)
1.92532039161405e+21
A definite integral is::
>>> quad(li, [0, 1])
-0.693147180559945
>>> -ln(2)
-0.693147180559945
**References**
1. http://mathworld.wolfram.com/PrimeCountingFunction.html
2. http://mathworld.wolfram.com/LogarithmicIntegral.html
"""
ci = r"""
Computes the cosine integral,
.. math ::
\mathrm{Ci}(x) = -\int_x^{\infty} \frac{\cos t}{t}\,dt
= \gamma + \log x + \int_0^x \frac{\cos t - 1}{t}\,dt
**Examples**
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> ci(0)
-inf
>>> ci(1)
0.3374039229009681346626462
>>> ci(pi)
0.07366791204642548599010096
>>> ci(inf)
0.0
>>> ci(-inf)
(0.0 + 3.141592653589793238462643j)
>>> ci(2+3j)
(1.408292501520849518759125 - 2.983617742029605093121118j)
The cosine integral behaves roughly like the sinc function
(see :func:`~mpmath.sinc`) for large real `x`::
>>> ci(10**10)
-4.875060251748226537857298e-11
>>> sinc(10**10)
-4.875060250875106915277943e-11
>>> chop(limit(ci, inf))
0.0
It has infinitely many roots on the positive real axis::
>>> findroot(ci, 1)
0.6165054856207162337971104
>>> findroot(ci, 2)
3.384180422551186426397851
Evaluation is supported for `z` anywhere in the complex plane::
>>> ci(10**6*(1+j))
(4.449410587611035724984376e+434287 + 9.75744874290013526417059e+434287j)
We can evaluate the defining integral as a reference::
>>> mp.dps = 15
>>> -quadosc(lambda t: cos(t)/t, [5, inf], omega=1)
-0.190029749656644
>>> ci(5)
-0.190029749656644
Some infinite series can be evaluated using the
cosine integral::
>>> nsum(lambda k: (-1)**k/(fac(2*k)*(2*k)), [1,inf])
-0.239811742000565
>>> ci(1) - euler
-0.239811742000565
"""
si = r"""
Computes the sine integral,
.. math ::
\mathrm{Si}(x) = \int_0^x \frac{\sin t}{t}\,dt.
The sine integral is thus the antiderivative of the sinc
function (see :func:`~mpmath.sinc`).
**Examples**
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> si(0)
0.0
>>> si(1)
0.9460830703671830149413533
>>> si(-1)
-0.9460830703671830149413533
>>> si(pi)
1.851937051982466170361053
>>> si(inf)
1.570796326794896619231322
>>> si(-inf)
-1.570796326794896619231322
>>> si(2+3j)
(4.547513889562289219853204 + 1.399196580646054789459839j)
The sine integral approaches `\pi/2` for large real `x`::
>>> si(10**10)
1.570796326707584656968511
>>> pi/2
1.570796326794896619231322
Evaluation is supported for `z` anywhere in the complex plane::
>>> si(10**6*(1+j))
(-9.75744874290013526417059e+434287 + 4.449410587611035724984376e+434287j)
We can evaluate the defining integral as a reference::
>>> mp.dps = 15
>>> quad(sinc, [0, 5])
1.54993124494467
>>> si(5)
1.54993124494467
Some infinite series can be evaluated using the
sine integral::
>>> nsum(lambda k: (-1)**k/(fac(2*k+1)*(2*k+1)), [0,inf])
0.946083070367183
>>> si(1)
0.946083070367183
"""
chi = r"""
Computes the hyperbolic cosine integral, defined
in analogy with the cosine integral (see :func:`~mpmath.ci`) as
.. math ::
\mathrm{Chi}(x) = -\int_x^{\infty} \frac{\cosh t}{t}\,dt
= \gamma + \log x + \int_0^x \frac{\cosh t - 1}{t}\,dt
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> chi(0)
-inf
>>> chi(1)
0.8378669409802082408946786
>>> chi(inf)
+inf
>>> findroot(chi, 0.5)
0.5238225713898644064509583
>>> chi(2+3j)
(-0.1683628683277204662429321 + 2.625115880451325002151688j)
Evaluation is supported for `z` anywhere in the complex plane::
>>> chi(10**6*(1+j))
(4.449410587611035724984376e+434287 - 9.75744874290013526417059e+434287j)
"""
shi = r"""
Computes the hyperbolic sine integral, defined
in analogy with the sine integral (see :func:`~mpmath.si`) as
.. math ::
\mathrm{Shi}(x) = \int_0^x \frac{\sinh t}{t}\,dt.
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> shi(0)
0.0
>>> shi(1)
1.057250875375728514571842
>>> shi(-1)
-1.057250875375728514571842
>>> shi(inf)
+inf
>>> shi(2+3j)
(-0.1931890762719198291678095 + 2.645432555362369624818525j)
Evaluation is supported for `z` anywhere in the complex plane::
>>> shi(10**6*(1+j))
(4.449410587611035724984376e+434287 - 9.75744874290013526417059e+434287j)
"""
fresnels = r"""
Computes the Fresnel sine integral
.. math ::
S(x) = \int_0^x \sin\left(\frac{\pi t^2}{2}\right) \,dt
Note that some sources define this function
without the normalization factor `\pi/2`.
**Examples**
Some basic values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> fresnels(0)
0.0
>>> fresnels(inf)
0.5
>>> fresnels(-inf)
-0.5
>>> fresnels(1)
0.4382591473903547660767567
>>> fresnels(1+2j)
(36.72546488399143842838788 + 15.58775110440458732748279j)
Comparing with the definition::
>>> fresnels(3)
0.4963129989673750360976123
>>> quad(lambda t: sin(pi*t**2/2), [0,3])
0.4963129989673750360976123
"""
fresnelc = r"""
Computes the Fresnel cosine integral
.. math ::
C(x) = \int_0^x \cos\left(\frac{\pi t^2}{2}\right) \,dt
Note that some sources define this function
without the normalization factor `\pi/2`.
**Examples**
Some basic values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> fresnelc(0)
0.0
>>> fresnelc(inf)
0.5
>>> fresnelc(-inf)
-0.5
>>> fresnelc(1)
0.7798934003768228294742064
>>> fresnelc(1+2j)
(16.08787137412548041729489 - 36.22568799288165021578758j)
Comparing with the definition::
>>> fresnelc(3)
0.6057207892976856295561611
>>> quad(lambda t: cos(pi*t**2/2), [0,3])
0.6057207892976856295561611
"""
airyai = r"""
Computes the Airy function `\operatorname{Ai}(z)`, which is
the solution of the Airy differential equation `f''(z) - z f(z) = 0`
with initial conditions
.. math ::
\operatorname{Ai}(0) =
\frac{1}{3^{2/3}\Gamma\left(\frac{2}{3}\right)}
\operatorname{Ai}'(0) =
-\frac{1}{3^{1/3}\Gamma\left(\frac{1}{3}\right)}.
Other common ways of defining the Ai-function include
integrals such as
.. math ::
\operatorname{Ai}(x) = \frac{1}{\pi}
\int_0^{\infty} \cos\left(\frac{1}{3}t^3+xt\right) dt
\qquad x \in \mathbb{R}
\operatorname{Ai}(z) = \frac{\sqrt{3}}{2\pi}
\int_0^{\infty}
\exp\left(-\frac{t^3}{3}-\frac{z^3}{3t^3}\right) dt.
The Ai-function is an entire function with a turning point,
behaving roughly like a slowly decaying sine wave for `z < 0` and
like a rapidly decreasing exponential for `z > 0`.
A second solution of the Airy differential equation
is given by `\operatorname{Bi}(z)` (see :func:`~mpmath.airybi`).
Optionally, with *derivative=alpha*, :func:`airyai` can compute the
`\alpha`-th order fractional derivative with respect to `z`.
For `\alpha = n = 1,2,3,\ldots` this gives the derivative
`\operatorname{Ai}^{(n)}(z)`, and for `\alpha = -n = -1,-2,-3,\ldots`
this gives the `n`-fold iterated integral
.. math ::
f_0(z) = \operatorname{Ai}(z)
f_n(z) = \int_0^z f_{n-1}(t) dt.
The Ai-function has infinitely many zeros, all located along the
negative half of the real axis. They can be computed with
:func:`~mpmath.airyaizero`.
**Plots**
.. literalinclude :: /plots/ai.py
.. image :: /plots/ai.png
.. literalinclude :: /plots/ai_c.py
.. image :: /plots/ai_c.png
**Basic examples**
Limits and values include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> airyai(0); 1/(power(3,'2/3')*gamma('2/3'))
0.3550280538878172392600632
0.3550280538878172392600632
>>> airyai(1)
0.1352924163128814155241474
>>> airyai(-1)
0.5355608832923521187995166
>>> airyai(inf); airyai(-inf)
0.0
0.0
Evaluation is supported for large magnitudes of the argument::
>>> airyai(-100)
0.1767533932395528780908311
>>> airyai(100)
2.634482152088184489550553e-291
>>> airyai(50+50j)
(-5.31790195707456404099817e-68 - 1.163588003770709748720107e-67j)
>>> airyai(-50+50j)
(1.041242537363167632587245e+158 + 3.347525544923600321838281e+157j)
Huge arguments are also fine::
>>> airyai(10**10)
1.162235978298741779953693e-289529654602171
>>> airyai(-10**10)
0.0001736206448152818510510181
>>> w = airyai(10**10*(1+j))
>>> w.real
5.711508683721355528322567e-186339621747698
>>> w.imag
1.867245506962312577848166e-186339621747697
The first root of the Ai-function is::
>>> findroot(airyai, -2)
-2.338107410459767038489197
>>> airyaizero(1)
-2.338107410459767038489197
**Properties and relations**
Verifying the Airy differential equation::
>>> for z in [-3.4, 0, 2.5, 1+2j]:
... chop(airyai(z,2) - z*airyai(z))
...
0.0
0.0
0.0
0.0
The first few terms of the Taylor series expansion around `z = 0`
(every third term is zero)::
>>> nprint(taylor(airyai, 0, 5))
[0.355028, -0.258819, 0.0, 0.0591713, -0.0215683, 0.0]
The Airy functions satisfy the Wronskian relation
`\operatorname{Ai}(z) \operatorname{Bi}'(z) -
\operatorname{Ai}'(z) \operatorname{Bi}(z) = 1/\pi`::
>>> z = -0.5
>>> airyai(z)*airybi(z,1) - airyai(z,1)*airybi(z)
0.3183098861837906715377675
>>> 1/pi
0.3183098861837906715377675
The Airy functions can be expressed in terms of Bessel
functions of order `\pm 1/3`. For `\Re[z] \le 0`, we have::
>>> z = -3
>>> airyai(z)
-0.3788142936776580743472439
>>> y = 2*power(-z,'3/2')/3
>>> (sqrt(-z) * (besselj('1/3',y) + besselj('-1/3',y)))/3
-0.3788142936776580743472439
**Derivatives and integrals**
Derivatives of the Ai-function (directly and using :func:`~mpmath.diff`)::
>>> airyai(-3,1); diff(airyai,-3)
0.3145837692165988136507873
0.3145837692165988136507873
>>> airyai(-3,2); diff(airyai,-3,2)
1.136442881032974223041732
1.136442881032974223041732
>>> airyai(1000,1); diff(airyai,1000)
-2.943133917910336090459748e-9156
-2.943133917910336090459748e-9156
Several derivatives at `z = 0`::
>>> airyai(0,0); airyai(0,1); airyai(0,2)
0.3550280538878172392600632
-0.2588194037928067984051836
0.0
>>> airyai(0,3); airyai(0,4); airyai(0,5)
0.3550280538878172392600632
-0.5176388075856135968103671
0.0
>>> airyai(0,15); airyai(0,16); airyai(0,17)
1292.30211615165475090663
-3188.655054727379756351861
0.0
The integral of the Ai-function::
>>> airyai(3,-1); quad(airyai, [0,3])
0.3299203760070217725002701
0.3299203760070217725002701
>>> airyai(-10,-1); quad(airyai, [0,-10])
-0.765698403134212917425148
-0.765698403134212917425148
Integrals of high or fractional order::
>>> airyai(-2,0.5); differint(airyai,-2,0.5,0)
(0.0 + 0.2453596101351438273844725j)
(0.0 + 0.2453596101351438273844725j)
>>> airyai(-2,-4); differint(airyai,-2,-4,0)
0.2939176441636809580339365
0.2939176441636809580339365
>>> airyai(0,-1); airyai(0,-2); airyai(0,-3)
0.0
0.0
0.0
Integrals of the Ai-function can be evaluated at limit points::
>>> airyai(-1000000,-1); airyai(-inf,-1)
-0.6666843728311539978751512
-0.6666666666666666666666667
>>> airyai(10,-1); airyai(+inf,-1)
0.3333333332991690159427932
0.3333333333333333333333333
>>> airyai(+inf,-2); airyai(+inf,-3)
+inf
+inf
>>> airyai(-1000000,-2); airyai(-inf,-2)
666666.4078472650651209742
+inf
>>> airyai(-1000000,-3); airyai(-inf,-3)
-333333074513.7520264995733
-inf
**References**
1. [DLMF]_ Chapter 9: Airy and Related Functions
2. [WolframFunctions]_ section: Bessel-Type Functions
"""
airybi = r"""
Computes the Airy function `\operatorname{Bi}(z)`, which is
the solution of the Airy differential equation `f''(z) - z f(z) = 0`
with initial conditions
.. math ::
\operatorname{Bi}(0) =
\frac{1}{3^{1/6}\Gamma\left(\frac{2}{3}\right)}
\operatorname{Bi}'(0) =
\frac{3^{1/6}}{\Gamma\left(\frac{1}{3}\right)}.
Like the Ai-function (see :func:`~mpmath.airyai`), the Bi-function
is oscillatory for `z < 0`, but it grows rather than decreases
for `z > 0`.
Optionally, as for :func:`~mpmath.airyai`, derivatives, integrals
and fractional derivatives can be computed with the *derivative*
parameter.
The Bi-function has infinitely many zeros along the negative
half-axis, as well as complex zeros, which can all be computed
with :func:`~mpmath.airybizero`.
**Plots**
.. literalinclude :: /plots/bi.py
.. image :: /plots/bi.png
.. literalinclude :: /plots/bi_c.py
.. image :: /plots/bi_c.png
**Basic examples**
Limits and values include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> airybi(0); 1/(power(3,'1/6')*gamma('2/3'))
0.6149266274460007351509224
0.6149266274460007351509224
>>> airybi(1)
1.207423594952871259436379
>>> airybi(-1)
0.10399738949694461188869
>>> airybi(inf); airybi(-inf)
+inf
0.0
Evaluation is supported for large magnitudes of the argument::
>>> airybi(-100)
0.02427388768016013160566747
>>> airybi(100)
6.041223996670201399005265e+288
>>> airybi(50+50j)
(-5.322076267321435669290334e+63 + 1.478450291165243789749427e+65j)
>>> airybi(-50+50j)
(-3.347525544923600321838281e+157 + 1.041242537363167632587245e+158j)
Huge arguments::
>>> airybi(10**10)
1.369385787943539818688433e+289529654602165
>>> airybi(-10**10)
0.001775656141692932747610973
>>> w = airybi(10**10*(1+j))
>>> w.real
-6.559955931096196875845858e+186339621747689
>>> w.imag
-6.822462726981357180929024e+186339621747690
The first real root of the Bi-function is::
>>> findroot(airybi, -1); airybizero(1)
-1.17371322270912792491998
-1.17371322270912792491998
**Properties and relations**
Verifying the Airy differential equation::
>>> for z in [-3.4, 0, 2.5, 1+2j]:
... chop(airybi(z,2) - z*airybi(z))
...
0.0
0.0
0.0
0.0
The first few terms of the Taylor series expansion around `z = 0`
(every third term is zero)::
>>> nprint(taylor(airybi, 0, 5))
[0.614927, 0.448288, 0.0, 0.102488, 0.0373574, 0.0]
The Airy functions can be expressed in terms of Bessel
functions of order `\pm 1/3`. For `\Re[z] \le 0`, we have::
>>> z = -3
>>> airybi(z)
-0.1982896263749265432206449
>>> p = 2*power(-z,'3/2')/3
>>> sqrt(-mpf(z)/3)*(besselj('-1/3',p) - besselj('1/3',p))
-0.1982896263749265432206449
**Derivatives and integrals**
Derivatives of the Bi-function (directly and using :func:`~mpmath.diff`)::
>>> airybi(-3,1); diff(airybi,-3)
-0.675611222685258537668032
-0.675611222685258537668032
>>> airybi(-3,2); diff(airybi,-3,2)
0.5948688791247796296619346
0.5948688791247796296619346
>>> airybi(1000,1); diff(airybi,1000)
1.710055114624614989262335e+9156
1.710055114624614989262335e+9156
Several derivatives at `z = 0`::
>>> airybi(0,0); airybi(0,1); airybi(0,2)
0.6149266274460007351509224
0.4482883573538263579148237
0.0
>>> airybi(0,3); airybi(0,4); airybi(0,5)
0.6149266274460007351509224
0.8965767147076527158296474
0.0
>>> airybi(0,15); airybi(0,16); airybi(0,17)
2238.332923903442675949357
5522.912562599140729510628
0.0
The integral of the Bi-function::
>>> airybi(3,-1); quad(airybi, [0,3])
10.06200303130620056316655
10.06200303130620056316655
>>> airybi(-10,-1); quad(airybi, [0,-10])
-0.01504042480614002045135483
-0.01504042480614002045135483
Integrals of high or fractional order::
>>> airybi(-2,0.5); differint(airybi, -2, 0.5, 0)
(0.0 + 0.5019859055341699223453257j)
(0.0 + 0.5019859055341699223453257j)
>>> airybi(-2,-4); differint(airybi,-2,-4,0)
0.2809314599922447252139092
0.2809314599922447252139092
>>> airybi(0,-1); airybi(0,-2); airybi(0,-3)
0.0
0.0
0.0
Integrals of the Bi-function can be evaluated at limit points::
>>> airybi(-1000000,-1); airybi(-inf,-1)
0.000002191261128063434047966873
0.0
>>> airybi(10,-1); airybi(+inf,-1)
147809803.1074067161675853
+inf
>>> airybi(+inf,-2); airybi(+inf,-3)
+inf
+inf
>>> airybi(-1000000,-2); airybi(-inf,-2)
0.4482883750599908479851085
0.4482883573538263579148237
>>> gamma('2/3')*power(3,'2/3')/(2*pi)
0.4482883573538263579148237
>>> airybi(-100000,-3); airybi(-inf,-3)
-44828.52827206932872493133
-inf
>>> airybi(-100000,-4); airybi(-inf,-4)
2241411040.437759489540248
+inf
"""
airyaizero = r"""
Gives the `k`-th zero of the Airy Ai-function,
i.e. the `k`-th number `a_k` ordered by magnitude for which
`\operatorname{Ai}(a_k) = 0`.
Optionally, with *derivative=1*, the corresponding
zero `a'_k` of the derivative function, i.e.
`\operatorname{Ai}'(a'_k) = 0`, is computed.
**Examples**
Some values of `a_k`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> airyaizero(1)
-2.338107410459767038489197
>>> airyaizero(2)
-4.087949444130970616636989
>>> airyaizero(3)
-5.520559828095551059129856
>>> airyaizero(1000)
-281.0315196125215528353364
Some values of `a'_k`::
>>> airyaizero(1,1)
-1.018792971647471089017325
>>> airyaizero(2,1)
-3.248197582179836537875424
>>> airyaizero(3,1)
-4.820099211178735639400616
>>> airyaizero(1000,1)
-280.9378080358935070607097
Verification::
>>> chop(airyai(airyaizero(1)))
0.0
>>> chop(airyai(airyaizero(1,1),1))
0.0
"""
airybizero = r"""
With *complex=False*, gives the `k`-th real zero of the Airy Bi-function,
i.e. the `k`-th number `b_k` ordered by magnitude for which
`\operatorname{Bi}(b_k) = 0`.
With *complex=True*, gives the `k`-th complex zero in the upper
half plane `\beta_k`. Also the conjugate `\overline{\beta_k}`
is a zero.
Optionally, with *derivative=1*, the corresponding
zero `b'_k` or `\beta'_k` of the derivative function, i.e.
`\operatorname{Bi}'(b'_k) = 0` or `\operatorname{Bi}'(\beta'_k) = 0`,
is computed.
**Examples**
Some values of `b_k`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> airybizero(1)
-1.17371322270912792491998
>>> airybizero(2)
-3.271093302836352715680228
>>> airybizero(3)
-4.830737841662015932667709
>>> airybizero(1000)
-280.9378112034152401578834
Some values of `b'_k`::
>>> airybizero(1,1)
-2.294439682614123246622459
>>> airybizero(2,1)
-4.073155089071828215552369
>>> airybizero(3,1)
-5.512395729663599496259593
>>> airybizero(1000,1)
-281.0315164471118527161362
Some values of `\beta_k`::
>>> airybizero(1,complex=True)
(0.9775448867316206859469927 + 2.141290706038744575749139j)
>>> airybizero(2,complex=True)
(1.896775013895336346627217 + 3.627291764358919410440499j)
>>> airybizero(3,complex=True)
(2.633157739354946595708019 + 4.855468179979844983174628j)
>>> airybizero(1000,complex=True)
(140.4978560578493018899793 + 243.3907724215792121244867j)
Some values of `\beta'_k`::
>>> airybizero(1,1,complex=True)
(0.2149470745374305676088329 + 1.100600143302797880647194j)
>>> airybizero(2,1,complex=True)
(1.458168309223507392028211 + 2.912249367458445419235083j)
>>> airybizero(3,1,complex=True)
(2.273760763013482299792362 + 4.254528549217097862167015j)
>>> airybizero(1000,1,complex=True)
(140.4509972835270559730423 + 243.3096175398562811896208j)
Verification::
>>> chop(airybi(airybizero(1)))
0.0
>>> chop(airybi(airybizero(1,1),1))
0.0
>>> u = airybizero(1,complex=True)
>>> chop(airybi(u))
0.0
>>> chop(airybi(conj(u)))
0.0
The complex zeros (in the upper and lower half-planes respectively)
asymptotically approach the rays `z = R \exp(\pm i \pi /3)`::
>>> arg(airybizero(1,complex=True))
1.142532510286334022305364
>>> arg(airybizero(1000,complex=True))
1.047271114786212061583917
>>> arg(airybizero(1000000,complex=True))
1.047197624741816183341355
>>> pi/3
1.047197551196597746154214
"""
ellipk = r"""
Evaluates the complete elliptic integral of the first kind,
`K(m)`, defined by
.. math ::
K(m) = \int_0^{\pi/2} \frac{dt}{\sqrt{1-m \sin^2 t}} \, = \,
\frac{\pi}{2} \,_2F_1\left(\frac{1}{2}, \frac{1}{2}, 1, m\right).
Note that the argument is the parameter `m = k^2`,
not the modulus `k` which is sometimes used.
**Plots**
.. literalinclude :: /plots/ellipk.py
.. image :: /plots/ellipk.png
**Examples**
Values and limits include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> ellipk(0)
1.570796326794896619231322
>>> ellipk(inf)
(0.0 + 0.0j)
>>> ellipk(-inf)
0.0
>>> ellipk(1)
+inf
>>> ellipk(-1)
1.31102877714605990523242
>>> ellipk(2)
(1.31102877714605990523242 - 1.31102877714605990523242j)
Verifying the defining integral and hypergeometric
representation::
>>> ellipk(0.5)
1.85407467730137191843385
>>> quad(lambda t: (1-0.5*sin(t)**2)**-0.5, [0, pi/2])
1.85407467730137191843385
>>> pi/2*hyp2f1(0.5,0.5,1,0.5)
1.85407467730137191843385
Evaluation is supported for arbitrary complex `m`::
>>> ellipk(3+4j)
(0.9111955638049650086562171 + 0.6313342832413452438845091j)
A definite integral::
>>> quad(ellipk, [0, 1])
2.0
"""
agm = r"""
``agm(a, b)`` computes the arithmetic-geometric mean of `a` and
`b`, defined as the limit of the following iteration:
.. math ::
a_0 = a
b_0 = b
a_{n+1} = \frac{a_n+b_n}{2}
b_{n+1} = \sqrt{a_n b_n}
This function can be called with a single argument, computing
`\mathrm{agm}(a,1) = \mathrm{agm}(1,a)`.
**Examples**
It is a well-known theorem that the geometric mean of
two distinct positive numbers is less than the arithmetic
mean. It follows that the arithmetic-geometric mean lies
between the two means::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> a = mpf(3)
>>> b = mpf(4)
>>> sqrt(a*b)
3.46410161513775
>>> agm(a,b)
3.48202767635957
>>> (a+b)/2
3.5
The arithmetic-geometric mean is scale-invariant::
>>> agm(10*e, 10*pi)
29.261085515723
>>> 10*agm(e, pi)
29.261085515723
As an order-of-magnitude estimate, `\mathrm{agm}(1,x) \approx x`
for large `x`::
>>> agm(10**10)
643448704.760133
>>> agm(10**50)
1.34814309345871e+48
For tiny `x`, `\mathrm{agm}(1,x) \approx -\pi/(2 \log(x/4))`::
>>> agm('0.01')
0.262166887202249
>>> -pi/2/log('0.0025')
0.262172347753122
The arithmetic-geometric mean can also be computed for complex
numbers::
>>> agm(3, 2+j)
(2.51055133276184 + 0.547394054060638j)
The AGM iteration converges very quickly (each step doubles
the number of correct digits), so :func:`~mpmath.agm` supports efficient
high-precision evaluation::
>>> mp.dps = 10000
>>> a = agm(1,2)
>>> str(a)[-10:]
'1679581912'
**Mathematical relations**
The arithmetic-geometric mean may be used to evaluate the
following two parametric definite integrals:
.. math ::
I_1 = \int_0^{\infty}
\frac{1}{\sqrt{(x^2+a^2)(x^2+b^2)}} \,dx
I_2 = \int_0^{\pi/2}
\frac{1}{\sqrt{a^2 \cos^2(x) + b^2 \sin^2(x)}} \,dx
We have::
>>> mp.dps = 15
>>> a = 3
>>> b = 4
>>> f1 = lambda x: ((x**2+a**2)*(x**2+b**2))**-0.5
>>> f2 = lambda x: ((a*cos(x))**2 + (b*sin(x))**2)**-0.5
>>> quad(f1, [0, inf])
0.451115405388492
>>> quad(f2, [0, pi/2])
0.451115405388492
>>> pi/(2*agm(a,b))
0.451115405388492
A formula for `\Gamma(1/4)`::
>>> gamma(0.25)
3.62560990822191
>>> sqrt(2*sqrt(2*pi**3)/agm(1,sqrt(2)))
3.62560990822191
**Possible issues**
The branch cut chosen for complex `a` and `b` is somewhat
arbitrary.
"""
gegenbauer = r"""
Evaluates the Gegenbauer polynomial, or ultraspherical polynomial,
.. math ::
C_n^{(a)}(z) = {n+2a-1 \choose n} \,_2F_1\left(-n, n+2a;
a+\frac{1}{2}; \frac{1}{2}(1-z)\right).
When `n` is a nonnegative integer, this formula gives a polynomial
in `z` of degree `n`, but all parameters are permitted to be
complex numbers. With `a = 1/2`, the Gegenbauer polynomial
reduces to a Legendre polynomial.
**Examples**
Evaluation for arbitrary arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> gegenbauer(3, 0.5, -10)
-2485.0
>>> gegenbauer(1000, 10, 100)
3.012757178975667428359374e+2322
>>> gegenbauer(2+3j, -0.75, -1000j)
(-5038991.358609026523401901 + 9414549.285447104177860806j)
Evaluation at negative integer orders::
>>> gegenbauer(-4, 2, 1.75)
-1.0
>>> gegenbauer(-4, 3, 1.75)
0.0
>>> gegenbauer(-4, 2j, 1.75)
0.0
>>> gegenbauer(-7, 0.5, 3)
8989.0
The Gegenbauer polynomials solve the differential equation::
>>> n, a = 4.5, 1+2j
>>> f = lambda z: gegenbauer(n, a, z)
>>> for z in [0, 0.75, -0.5j]:
... chop((1-z**2)*diff(f,z,2) - (2*a+1)*z*diff(f,z) + n*(n+2*a)*f(z))
...
0.0
0.0
0.0
The Gegenbauer polynomials have generating function
`(1-2zt+t^2)^{-a}`::
>>> a, z = 2.5, 1
>>> taylor(lambda t: (1-2*z*t+t**2)**(-a), 0, 3)
[1.0, 5.0, 15.0, 35.0]
>>> [gegenbauer(n,a,z) for n in range(4)]
[1.0, 5.0, 15.0, 35.0]
The Gegenbauer polynomials are orthogonal on `[-1, 1]` with respect
to the weight `(1-z^2)^{a-\frac{1}{2}}`::
>>> a, n, m = 2.5, 4, 5
>>> Cn = lambda z: gegenbauer(n, a, z, zeroprec=1000)
>>> Cm = lambda z: gegenbauer(m, a, z, zeroprec=1000)
>>> chop(quad(lambda z: Cn(z)*Cm(z)*(1-z**2)**(a-0.5), [-1, 1]))
0.0
"""
# Doc text for laguerre(); rendered verbatim in the generated API reference.
# Fixed: the hypergeometric definition used `b` (undefined here) instead of `a`,
# and the L_2 example disagreed with the integer-normalized doctest [2, -4, 1].
laguerre = r"""
Gives the generalized (associated) Laguerre polynomial, defined by
.. math ::
L_n^a(z) = \frac{\Gamma(n+a+1)}{\Gamma(a+1) \Gamma(n+1)}
\,_1F_1(-n, a+1, z).
With `a = 0` and `n` a nonnegative integer, this reduces to an ordinary
Laguerre polynomial, the sequence of which begins
`L_0(z) = 1, L_1(z) = 1-z, L_2(z) = 1-2z+z^2/2, \ldots`.
The Laguerre polynomials are orthogonal with respect to the weight
`z^a e^{-z}` on `[0, \infty)`.
**Plots**
.. literalinclude :: /plots/laguerre.py
.. image :: /plots/laguerre.png
**Examples**
Evaluation for arbitrary arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> laguerre(5, 0, 0.25)
0.03726399739583333333333333
>>> laguerre(1+j, 0.5, 2+3j)
(4.474921610704496808379097 - 11.02058050372068958069241j)
>>> laguerre(2, 0, 10000)
49980001.0
>>> laguerre(2.5, 0, 10000)
-9.327764910194842158583189e+4328
The first few Laguerre polynomials, normalized to have integer
coefficients::
>>> for n in range(7):
... chop(taylor(lambda z: fac(n)*laguerre(n, 0, z), 0, n))
...
[1.0]
[1.0, -1.0]
[2.0, -4.0, 1.0]
[6.0, -18.0, 9.0, -1.0]
[24.0, -96.0, 72.0, -16.0, 1.0]
[120.0, -600.0, 600.0, -200.0, 25.0, -1.0]
[720.0, -4320.0, 5400.0, -2400.0, 450.0, -36.0, 1.0]
Verifying orthogonality::
>>> Lm = lambda t: laguerre(m,a,t)
>>> Ln = lambda t: laguerre(n,a,t)
>>> a, n, m = 2.5, 2, 3
>>> chop(quad(lambda t: exp(-t)*t**a*Lm(t)*Ln(t), [0,inf]))
0.0
"""
# Doc text for hermite(); rendered verbatim in the generated API reference.
hermite = r"""
Evaluates the Hermite polynomial `H_n(z)`, which may be defined using
the recurrence
.. math ::
H_0(z) = 1
H_1(z) = 2z
H_{n+1} = 2z H_n(z) - 2n H_{n-1}(z).
The Hermite polynomials are orthogonal on `(-\infty, \infty)` with
respect to the weight `e^{-z^2}`. More generally, allowing arbitrary complex
values of `n`, the Hermite function `H_n(z)` is defined as
.. math ::
H_n(z) = (2z)^n \,_2F_0\left(-\frac{n}{2}, \frac{1-n}{2},
-\frac{1}{z^2}\right)
for `\Re{z} > 0`, or generally
.. math ::
H_n(z) = 2^n \sqrt{\pi} \left(
\frac{1}{\Gamma\left(\frac{1-n}{2}\right)}
\,_1F_1\left(-\frac{n}{2}, \frac{1}{2}, z^2\right) -
\frac{2z}{\Gamma\left(-\frac{n}{2}\right)}
\,_1F_1\left(\frac{1-n}{2}, \frac{3}{2}, z^2\right)
\right).
**Plots**
.. literalinclude :: /plots/hermite.py
.. image :: /plots/hermite.png
**Examples**
Evaluation for arbitrary arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hermite(0, 10)
1.0
>>> hermite(1, 10); hermite(2, 10)
20.0
398.0
>>> hermite(10000, 2)
4.950440066552087387515653e+19334
>>> hermite(3, -10**8)
-7999999999999998800000000.0
>>> hermite(-3, -10**8)
1.675159751729877682920301e+4342944819032534
>>> hermite(2+3j, -1+2j)
(-0.07652130602993513389421901 - 0.1084662449961914580276007j)
Coefficients of the first few Hermite polynomials are::
>>> for n in range(7):
... chop(taylor(lambda z: hermite(n, z), 0, n))
...
[1.0]
[0.0, 2.0]
[-2.0, 0.0, 4.0]
[0.0, -12.0, 0.0, 8.0]
[12.0, 0.0, -48.0, 0.0, 16.0]
[0.0, 120.0, 0.0, -160.0, 0.0, 32.0]
[-120.0, 0.0, 720.0, 0.0, -480.0, 0.0, 64.0]
Values at `z = 0`::
>>> for n in range(-5, 9):
... hermite(n, 0)
...
0.02769459142039868792653387
0.08333333333333333333333333
0.2215567313631895034122709
0.5
0.8862269254527580136490837
1.0
0.0
-2.0
0.0
12.0
0.0
-120.0
0.0
1680.0
Hermite functions satisfy the differential equation::
>>> n = 4
>>> f = lambda z: hermite(n, z)
>>> z = 1.5
>>> chop(diff(f,z,2) - 2*z*diff(f,z) + 2*n*f(z))
0.0
Verifying orthogonality::
>>> chop(quad(lambda t: hermite(2,t)*hermite(4,t)*exp(-t**2), [-inf,inf]))
0.0
"""
# Doc text for jacobi(); rendered verbatim in the generated API reference.
jacobi = r"""
``jacobi(n, a, b, x)`` evaluates the Jacobi polynomial
`P_n^{(a,b)}(x)`. The Jacobi polynomials are a special
case of the hypergeometric function `\,_2F_1` given by:
.. math ::
P_n^{(a,b)}(x) = {n+a \choose n}
\,_2F_1\left(-n,1+a+b+n,a+1,\frac{1-x}{2}\right).
Note that this definition generalizes to nonintegral values
of `n`. When `n` is an integer, the hypergeometric series
terminates after a finite number of terms, giving
a polynomial in `x`.
**Evaluation of Jacobi polynomials**
A special evaluation is `P_n^{(a,b)}(1) = {n+a \choose n}`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> jacobi(4, 0.5, 0.25, 1)
2.4609375
>>> binomial(4+0.5, 4)
2.4609375
A Jacobi polynomial of degree `n` is equal to its
Taylor polynomial of degree `n`. The explicit
coefficients of Jacobi polynomials can therefore
be recovered easily using :func:`~mpmath.taylor`::
>>> for n in range(5):
... nprint(taylor(lambda x: jacobi(n,1,2,x), 0, n))
...
[1.0]
[-0.5, 2.5]
[-0.75, -1.5, 5.25]
[0.5, -3.5, -3.5, 10.5]
[0.625, 2.5, -11.25, -7.5, 20.625]
For nonintegral `n`, the Jacobi "polynomial" is no longer
a polynomial::
>>> nprint(taylor(lambda x: jacobi(0.5,1,2,x), 0, 4))
[0.309983, 1.84119, -1.26933, 1.26699, -1.34808]
**Orthogonality**
The Jacobi polynomials are orthogonal on the interval
`[-1, 1]` with respect to the weight function
`w(x) = (1-x)^a (1+x)^b`. That is,
`w(x) P_n^{(a,b)}(x) P_m^{(a,b)}(x)` integrates to
zero if `m \ne n` and to a nonzero number if `m = n`.
The orthogonality is easy to verify using numerical
quadrature::
>>> P = jacobi
>>> f = lambda x: (1-x)**a * (1+x)**b * P(m,a,b,x) * P(n,a,b,x)
>>> a = 2
>>> b = 3
>>> m, n = 3, 4
>>> chop(quad(f, [-1, 1]), 1)
0.0
>>> m, n = 4, 4
>>> quad(f, [-1, 1])
1.9047619047619
**Differential equation**
The Jacobi polynomials are solutions of the differential
equation
.. math ::
(1-x^2) y'' + (b-a-(a+b+2)x) y' + n (n+a+b+1) y = 0.
We can verify that :func:`~mpmath.jacobi` approximately satisfies
this equation::
>>> from mpmath import *
>>> mp.dps = 15
>>> a = 2.5
>>> b = 4
>>> n = 3
>>> y = lambda x: jacobi(n,a,b,x)
>>> x = pi
>>> A0 = n*(n+a+b+1)*y(x)
>>> A1 = (b-a-(a+b+2)*x)*diff(y,x)
>>> A2 = (1-x**2)*diff(y,x,2)
>>> nprint(A2 + A1 + A0, 1)
4.0e-12
The difference of order `10^{-12}` is as close to zero as
it could be at 15-digit working precision, since the terms
are large::
>>> A0, A1, A2
(26560.2328981879, -21503.7641037294, -5056.46879445852)
"""
# Doc text for legendre(); rendered verbatim in the generated API reference.
# Fixed: the differential equation read `n(n+1) y' = 0` but must be `n(n+1) y = 0`,
# matching the numerical verification below (B = n*(n+1)*P(n,x)).
legendre = r"""
``legendre(n, x)`` evaluates the Legendre polynomial `P_n(x)`.
The Legendre polynomials are given by the formula
.. math ::
P_n(x) = \frac{1}{2^n n!} \frac{d^n}{dx^n} (x^2 -1)^n.
Alternatively, they can be computed recursively using
.. math ::
P_0(x) = 1
P_1(x) = x
(n+1) P_{n+1}(x) = (2n+1) x P_n(x) - n P_{n-1}(x).
A third definition is in terms of the hypergeometric function
`\,_2F_1`, whereby they can be generalized to arbitrary `n`:
.. math ::
P_n(x) = \,_2F_1\left(-n, n+1, 1, \frac{1-x}{2}\right)
**Plots**
.. literalinclude :: /plots/legendre.py
.. image :: /plots/legendre.png
**Basic evaluation**
The Legendre polynomials assume fixed values at the points
`x = -1` and `x = 1`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint([legendre(n, 1) for n in range(6)])
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
>>> nprint([legendre(n, -1) for n in range(6)])
[1.0, -1.0, 1.0, -1.0, 1.0, -1.0]
The coefficients of Legendre polynomials can be recovered
using degree-`n` Taylor expansion::
>>> for n in range(5):
... nprint(chop(taylor(lambda x: legendre(n, x), 0, n)))
...
[1.0]
[0.0, 1.0]
[-0.5, 0.0, 1.5]
[0.0, -1.5, 0.0, 2.5]
[0.375, 0.0, -3.75, 0.0, 4.375]
The roots of Legendre polynomials are located symmetrically
on the interval `[-1, 1]`::
>>> for n in range(5):
... nprint(polyroots(taylor(lambda x: legendre(n, x), 0, n)[::-1]))
...
[]
[0.0]
[-0.57735, 0.57735]
[-0.774597, 0.0, 0.774597]
[-0.861136, -0.339981, 0.339981, 0.861136]
An example of an evaluation for arbitrary `n`::
>>> legendre(0.75, 2+4j)
(1.94952805264875 + 2.1071073099422j)
**Orthogonality**
The Legendre polynomials are orthogonal on `[-1, 1]` with respect
to the trivial weight `w(x) = 1`. That is, `P_m(x) P_n(x)`
integrates to zero if `m \ne n` and to `2/(2n+1)` if `m = n`::
>>> m, n = 3, 4
>>> quad(lambda x: legendre(m,x)*legendre(n,x), [-1, 1])
0.0
>>> m, n = 4, 4
>>> quad(lambda x: legendre(m,x)*legendre(n,x), [-1, 1])
0.222222222222222
**Differential equation**
The Legendre polynomials satisfy the differential equation
.. math ::
((1-x^2) y')' + n(n+1) y = 0.
We can verify this numerically::
>>> n = 3.6
>>> x = 0.73
>>> P = legendre
>>> A = diff(lambda t: (1-t**2)*diff(lambda u: P(n,u), t), x)
>>> B = n*(n+1)*P(n,x)
>>> nprint(A+B,1)
9.0e-16
"""
# Doc text for legenp(); rendered verbatim in the generated API reference.
legenp = r"""
Calculates the (associated) Legendre function of the first kind of
degree *n* and order *m*, `P_n^m(z)`. Taking `m = 0` gives the ordinary
Legendre function of the first kind, `P_n(z)`. The parameters may be
complex numbers.
In terms of the Gauss hypergeometric function, the (associated) Legendre
function is defined as
.. math ::
P_n^m(z) = \frac{1}{\Gamma(1-m)} \frac{(1+z)^{m/2}}{(1-z)^{m/2}}
\,_2F_1\left(-n, n+1, 1-m, \frac{1-z}{2}\right).
With *type=3* instead of *type=2*, the alternative
definition
.. math ::
\hat{P}_n^m(z) = \frac{1}{\Gamma(1-m)} \frac{(z+1)^{m/2}}{(z-1)^{m/2}}
\,_2F_1\left(-n, n+1, 1-m, \frac{1-z}{2}\right).
is used. These functions correspond respectively to ``LegendreP[n,m,2,z]``
and ``LegendreP[n,m,3,z]`` in Mathematica.
The general solution of the (associated) Legendre differential equation
.. math ::
(1-z^2) f''(z) - 2zf'(z) + \left(n(n+1)-\frac{m^2}{1-z^2}\right)f(z) = 0
is given by `C_1 P_n^m(z) + C_2 Q_n^m(z)` for arbitrary constants
`C_1`, `C_2`, where `Q_n^m(z)` is a Legendre function of the
second kind as implemented by :func:`~mpmath.legenq`.
**Examples**
Evaluation for arbitrary parameters and arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> legenp(2, 0, 10); legendre(2, 10)
149.5
149.5
>>> legenp(-2, 0.5, 2.5)
(1.972260393822275434196053 - 1.972260393822275434196053j)
>>> legenp(2+3j, 1-j, -0.5+4j)
(-3.335677248386698208736542 - 5.663270217461022307645625j)
>>> chop(legenp(3, 2, -1.5, type=2))
28.125
>>> chop(legenp(3, 2, -1.5, type=3))
-28.125
Verifying the associated Legendre differential equation::
>>> n, m = 2, -0.5
>>> C1, C2 = 1, -3
>>> f = lambda z: C1*legenp(n,m,z) + C2*legenq(n,m,z)
>>> deq = lambda z: (1-z**2)*diff(f,z,2) - 2*z*diff(f,z) + \
... (n*(n+1)-m**2/(1-z**2))*f(z)
>>> for z in [0, 2, -1.5, 0.5+2j]:
... chop(deq(mpmathify(z)))
...
0.0
0.0
0.0
0.0
"""
# Doc text for legenq(); rendered verbatim in the generated API reference.
# Fixed: "The parameters may complex numbers" was missing "be"
# (legenp uses the same sentence, correctly phrased).
legenq = r"""
Calculates the (associated) Legendre function of the second kind of
degree *n* and order *m*, `Q_n^m(z)`. Taking `m = 0` gives the ordinary
Legendre function of the second kind, `Q_n(z)`. The parameters may be
complex numbers.
The Legendre functions of the second kind give a second set of
solutions to the (associated) Legendre differential equation.
(See :func:`~mpmath.legenp`.)
Unlike the Legendre functions of the first kind, they are not
polynomials of `z` for integer `n`, `m` but rational or logarithmic
functions with poles at `z = \pm 1`.
There are various ways to define Legendre functions of
the second kind, giving rise to different complex structure.
A version can be selected using the *type* keyword argument.
The *type=2* and *type=3* functions are given respectively by
.. math ::
Q_n^m(z) = \frac{\pi}{2 \sin(\pi m)}
\left( \cos(\pi m) P_n^m(z) -
\frac{\Gamma(1+m+n)}{\Gamma(1-m+n)} P_n^{-m}(z)\right)
\hat{Q}_n^m(z) = \frac{\pi}{2 \sin(\pi m)} e^{\pi i m}
\left( \hat{P}_n^m(z) -
\frac{\Gamma(1+m+n)}{\Gamma(1-m+n)} \hat{P}_n^{-m}(z)\right)
where `P` and `\hat{P}` are the *type=2* and *type=3* Legendre functions
of the first kind. The formulas above should be understood as limits
when `m` is an integer.
These functions correspond to ``LegendreQ[n,m,2,z]`` (or ``LegendreQ[n,m,z]``)
and ``LegendreQ[n,m,3,z]`` in Mathematica. The *type=3* function
is essentially the same as the function defined in
Abramowitz & Stegun (eq. 8.1.3) but with `(z+1)^{m/2}(z-1)^{m/2}` instead
of `(z^2-1)^{m/2}`, giving slightly different branches.
**Examples**
Evaluation for arbitrary parameters and arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> legenq(2, 0, 0.5)
-0.8186632680417568557122028
>>> legenq(-1.5, -2, 2.5)
(0.6655964618250228714288277 + 0.3937692045497259717762649j)
>>> legenq(2-j, 3+4j, -6+5j)
(-10001.95256487468541686564 - 6011.691337610097577791134j)
Different versions of the function::
>>> legenq(2, 1, 0.5)
0.7298060598018049369381857
>>> legenq(2, 1, 1.5)
(-7.902916572420817192300921 + 0.1998650072605976600724502j)
>>> legenq(2, 1, 0.5, type=3)
(2.040524284763495081918338 - 0.7298060598018049369381857j)
>>> chop(legenq(2, 1, 1.5, type=3))
-0.1998650072605976600724502
"""
# Doc text for chebyt(); rendered verbatim in the generated API reference.
# Fixed: doubled word "using using" (compare the legendre docstring's phrasing).
chebyt = r"""
``chebyt(n, x)`` evaluates the Chebyshev polynomial of the first
kind `T_n(x)`, defined by the identity
.. math ::
T_n(\cos x) = \cos(n x).
The Chebyshev polynomials of the first kind are a special
case of the Jacobi polynomials, and by extension of the
hypergeometric function `\,_2F_1`. They can thus also be
evaluated for nonintegral `n`.
**Plots**
.. literalinclude :: /plots/chebyt.py
.. image :: /plots/chebyt.png
**Basic evaluation**
The coefficients of the `n`-th polynomial can be recovered
using degree-`n` Taylor expansion::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(5):
... nprint(chop(taylor(lambda x: chebyt(n, x), 0, n)))
...
[1.0]
[0.0, 1.0]
[-1.0, 0.0, 2.0]
[0.0, -3.0, 0.0, 4.0]
[1.0, 0.0, -8.0, 0.0, 8.0]
**Orthogonality**
The Chebyshev polynomials of the first kind are orthogonal
on the interval `[-1, 1]` with respect to the weight
function `w(x) = 1/\sqrt{1-x^2}`::
>>> f = lambda x: chebyt(m,x)*chebyt(n,x)/sqrt(1-x**2)
>>> m, n = 3, 4
>>> nprint(quad(f, [-1, 1]),1)
0.0
>>> m, n = 4, 4
>>> quad(f, [-1, 1])
1.57079632596448
"""
# Doc text for chebyu(); rendered verbatim in the generated API reference.
# Fixed: doubled word "using using" (compare the legendre docstring's phrasing).
chebyu = r"""
``chebyu(n, x)`` evaluates the Chebyshev polynomial of the second
kind `U_n(x)`, defined by the identity
.. math ::
U_n(\cos x) = \frac{\sin((n+1)x)}{\sin(x)}.
The Chebyshev polynomials of the second kind are a special
case of the Jacobi polynomials, and by extension of the
hypergeometric function `\,_2F_1`. They can thus also be
evaluated for nonintegral `n`.
**Plots**
.. literalinclude :: /plots/chebyu.py
.. image :: /plots/chebyu.png
**Basic evaluation**
The coefficients of the `n`-th polynomial can be recovered
using degree-`n` Taylor expansion::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(5):
... nprint(chop(taylor(lambda x: chebyu(n, x), 0, n)))
...
[1.0]
[0.0, 2.0]
[-1.0, 0.0, 4.0]
[0.0, -4.0, 0.0, 8.0]
[1.0, 0.0, -12.0, 0.0, 16.0]
**Orthogonality**
The Chebyshev polynomials of the second kind are orthogonal
on the interval `[-1, 1]` with respect to the weight
function `w(x) = \sqrt{1-x^2}`::
>>> f = lambda x: chebyu(m,x)*chebyu(n,x)*sqrt(1-x**2)
>>> m, n = 3, 4
>>> quad(f, [-1, 1])
0.0
>>> m, n = 4, 4
>>> quad(f, [-1, 1])
1.5707963267949
"""
# Doc text for besselj(); rendered verbatim in the generated API reference.
besselj = r"""
``besselj(n, x, derivative=0)`` gives the Bessel function of the first kind
`J_n(x)`. Bessel functions of the first kind are defined as
solutions of the differential equation
.. math ::
x^2 y'' + x y' + (x^2 - n^2) y = 0
which appears, among other things, when solving the radial
part of Laplace's equation in cylindrical coordinates. This
equation has two solutions for given `n`, where the
`J_n`-function is the solution that is nonsingular at `x = 0`.
For positive integer `n`, `J_n(x)` behaves roughly like a sine
(odd `n`) or cosine (even `n`) multiplied by a magnitude factor
that decays slowly as `x \to \pm\infty`.
Generally, `J_n` is a special case of the hypergeometric
function `\,_0F_1`:
.. math ::
J_n(x) = \frac{x^n}{2^n \Gamma(n+1)}
\,_0F_1\left(n+1,-\frac{x^2}{4}\right)
With *derivative* = `m \ne 0`, the `m`-th derivative
.. math ::
\frac{d^m}{dx^m} J_n(x)
is computed.
**Plots**
.. literalinclude :: /plots/besselj.py
.. image :: /plots/besselj.png
.. literalinclude :: /plots/besselj_c.py
.. image :: /plots/besselj_c.png
**Examples**
Evaluation is supported for arbitrary arguments, and at
arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> besselj(2, 1000)
-0.024777229528606
>>> besselj(4, 0.75)
0.000801070086542314
>>> besselj(2, 1000j)
(-2.48071721019185e+432 + 6.41567059811949e-437j)
>>> mp.dps = 25
>>> besselj(0.75j, 3+4j)
(-2.778118364828153309919653 - 1.5863603889018621585533j)
>>> mp.dps = 50
>>> besselj(1, pi)
0.28461534317975275734531059968613140570981118184947
Arguments may be large::
>>> mp.dps = 25
>>> besselj(0, 10000)
-0.007096160353388801477265164
>>> besselj(0, 10**10)
0.000002175591750246891726859055
>>> besselj(2, 10**100)
7.337048736538615712436929e-51
>>> besselj(2, 10**5*j)
(-3.540725411970948860173735e+43426 + 4.4949812409615803110051e-43433j)
The Bessel functions of the first kind satisfy simple
symmetries around `x = 0`::
>>> mp.dps = 15
>>> nprint([besselj(n,0) for n in range(5)])
[1.0, 0.0, 0.0, 0.0, 0.0]
>>> nprint([besselj(n,pi) for n in range(5)])
[-0.304242, 0.284615, 0.485434, 0.333458, 0.151425]
>>> nprint([besselj(n,-pi) for n in range(5)])
[-0.304242, -0.284615, 0.485434, -0.333458, 0.151425]
Roots of Bessel functions are often used::
>>> nprint([findroot(j0, k) for k in [2, 5, 8, 11, 14]])
[2.40483, 5.52008, 8.65373, 11.7915, 14.9309]
>>> nprint([findroot(j1, k) for k in [3, 7, 10, 13, 16]])
[3.83171, 7.01559, 10.1735, 13.3237, 16.4706]
The roots are not periodic, but the distance between successive
roots asymptotically approaches `2 \pi`. Bessel functions of
the first kind have the following normalization::
>>> quadosc(j0, [0, inf], period=2*pi)
1.0
>>> quadosc(j1, [0, inf], period=2*pi)
1.0
For `n = 1/2` or `n = -1/2`, the Bessel function reduces to a
trigonometric function::
>>> x = 10
>>> besselj(0.5, x), sqrt(2/(pi*x))*sin(x)
(-0.13726373575505, -0.13726373575505)
>>> besselj(-0.5, x), sqrt(2/(pi*x))*cos(x)
(-0.211708866331398, -0.211708866331398)
Derivatives of any order can be computed (negative orders
correspond to integration)::
>>> mp.dps = 25
>>> besselj(0, 7.5, 1)
-0.1352484275797055051822405
>>> diff(lambda x: besselj(0,x), 7.5)
-0.1352484275797055051822405
>>> besselj(0, 7.5, 10)
-0.1377811164763244890135677
>>> diff(lambda x: besselj(0,x), 7.5, 10)
-0.1377811164763244890135677
>>> besselj(0,7.5,-1) - besselj(0,3.5,-1)
-0.1241343240399987693521378
>>> quad(j0, [3.5, 7.5])
-0.1241343240399987693521378
Differentiation with a noninteger order gives the fractional derivative
in the sense of the Riemann-Liouville differintegral, as computed by
:func:`~mpmath.differint`::
>>> mp.dps = 15
>>> besselj(1, 3.5, 0.75)
-0.385977722939384
>>> differint(lambda x: besselj(1, x), 3.5, 0.75)
-0.385977722939384
"""
# Doc text for besseli(); rendered verbatim in the generated API reference.
besseli = r"""
``besseli(n, x, derivative=0)`` gives the modified Bessel function of the
first kind,
.. math ::
I_n(x) = i^{-n} J_n(ix).
With *derivative* = `m \ne 0`, the `m`-th derivative
.. math ::
\frac{d^m}{dx^m} I_n(x)
is computed.
**Plots**
.. literalinclude :: /plots/besseli.py
.. image :: /plots/besseli.png
.. literalinclude :: /plots/besseli_c.py
.. image :: /plots/besseli_c.png
**Examples**
Some values of `I_n(x)`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> besseli(0,0)
1.0
>>> besseli(1,0)
0.0
>>> besseli(0,1)
1.266065877752008335598245
>>> besseli(3.5, 2+3j)
(-0.2904369752642538144289025 - 0.4469098397654815837307006j)
Arguments may be large::
>>> besseli(2, 1000)
2.480717210191852440616782e+432
>>> besseli(2, 10**10)
4.299602851624027900335391e+4342944813
>>> besseli(2, 6000+10000j)
(-2.114650753239580827144204e+2603 + 4.385040221241629041351886e+2602j)
For integers `n`, the following integral representation holds::
>>> mp.dps = 15
>>> n = 3
>>> x = 2.3
>>> quad(lambda t: exp(x*cos(t))*cos(n*t), [0,pi])/pi
0.349223221159309
>>> besseli(n,x)
0.349223221159309
Derivatives and antiderivatives of any order can be computed::
>>> mp.dps = 25
>>> besseli(2, 7.5, 1)
195.8229038931399062565883
>>> diff(lambda x: besseli(2,x), 7.5)
195.8229038931399062565883
>>> besseli(2, 7.5, 10)
153.3296508971734525525176
>>> diff(lambda x: besseli(2,x), 7.5, 10)
153.3296508971734525525176
>>> besseli(2,7.5,-1) - besseli(2,3.5,-1)
202.5043900051930141956876
>>> quad(lambda x: besseli(2,x), [3.5, 7.5])
202.5043900051930141956876
"""
# Doc text for bessely(); rendered verbatim in the generated API reference.
bessely = r"""
``bessely(n, x, derivative=0)`` gives the Bessel function of the second kind,
.. math ::
Y_n(x) = \frac{J_n(x) \cos(\pi n) - J_{-n}(x)}{\sin(\pi n)}.
For `n` an integer, this formula should be understood as a
limit. With *derivative* = `m \ne 0`, the `m`-th derivative
.. math ::
\frac{d^m}{dx^m} Y_n(x)
is computed.
**Plots**
.. literalinclude :: /plots/bessely.py
.. image :: /plots/bessely.png
.. literalinclude :: /plots/bessely_c.py
.. image :: /plots/bessely_c.png
**Examples**
Some values of `Y_n(x)`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> bessely(0,0), bessely(1,0), bessely(2,0)
(-inf, -inf, -inf)
>>> bessely(1, pi)
0.3588729167767189594679827
>>> bessely(0.5, 3+4j)
(9.242861436961450520325216 - 3.085042824915332562522402j)
Arguments may be large::
>>> bessely(0, 10000)
0.00364780555898660588668872
>>> bessely(2.5, 10**50)
-4.8952500412050989295774e-26
>>> bessely(2.5, -10**50)
(0.0 + 4.8952500412050989295774e-26j)
Derivatives and antiderivatives of any order can be computed::
>>> bessely(2, 3.5, 1)
0.3842618820422660066089231
>>> diff(lambda x: bessely(2, x), 3.5)
0.3842618820422660066089231
>>> bessely(0.5, 3.5, 1)
-0.2066598304156764337900417
>>> diff(lambda x: bessely(0.5, x), 3.5)
-0.2066598304156764337900417
>>> diff(lambda x: bessely(2, x), 0.5, 10)
-208173867409.5547350101511
>>> bessely(2, 0.5, 10)
-208173867409.5547350101511
>>> bessely(2, 100.5, 100)
0.02668487547301372334849043
>>> quad(lambda x: bessely(2,x), [1,3])
-1.377046859093181969213262
>>> bessely(2,3,-1) - bessely(2,1,-1)
-1.377046859093181969213262
"""
# Doc text for besselk(); rendered verbatim in the generated API reference.
besselk = r"""
``besselk(n, x)`` gives the modified Bessel function of the
second kind,
.. math ::
K_n(x) = \frac{\pi}{2} \frac{I_{-n}(x)-I_{n}(x)}{\sin(\pi n)}
For `n` an integer, this formula should be understood as a
limit.
**Plots**
.. literalinclude :: /plots/besselk.py
.. image :: /plots/besselk.png
.. literalinclude :: /plots/besselk_c.py
.. image :: /plots/besselk_c.png
**Examples**
Evaluation is supported for arbitrary complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> besselk(0,1)
0.4210244382407083333356274
>>> besselk(0, -1)
(0.4210244382407083333356274 - 3.97746326050642263725661j)
>>> besselk(3.5, 2+3j)
(-0.02090732889633760668464128 + 0.2464022641351420167819697j)
>>> besselk(2+3j, 0.5)
(0.9615816021726349402626083 + 0.1918250181801757416908224j)
Arguments may be large::
>>> besselk(0, 100)
4.656628229175902018939005e-45
>>> besselk(1, 10**6)
4.131967049321725588398296e-434298
>>> besselk(1, 10**6*j)
(0.001140348428252385844876706 - 0.0005200017201681152909000961j)
>>> besselk(4.5, fmul(10**50, j, exact=True))
(1.561034538142413947789221e-26 + 1.243554598118700063281496e-25j)
The point `x = 0` is a singularity (logarithmic if `n = 0`)::
>>> besselk(0,0)
+inf
>>> besselk(1,0)
+inf
>>> for n in range(-4, 5):
... print(besselk(n, '1e-1000'))
...
4.8e+4001
8.0e+3000
2.0e+2000
1.0e+1000
2302.701024509704096466802
1.0e+1000
2.0e+2000
8.0e+3000
4.8e+4001
"""
# Doc text for hankel1(); rendered verbatim in the generated API reference.
hankel1 = r"""
``hankel1(n,x)`` computes the Hankel function of the first kind,
which is the complex combination of Bessel functions given by
.. math ::
H_n^{(1)}(x) = J_n(x) + i Y_n(x).
**Plots**
.. literalinclude :: /plots/hankel1.py
.. image :: /plots/hankel1.png
.. literalinclude :: /plots/hankel1_c.py
.. image :: /plots/hankel1_c.png
**Examples**
The Hankel function is generally complex-valued::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hankel1(2, pi)
(0.4854339326315091097054957 - 0.0999007139290278787734903j)
>>> hankel1(3.5, pi)
(0.2340002029630507922628888 - 0.6419643823412927142424049j)
"""
# Doc text for hankel2(); rendered verbatim in the generated API reference.
hankel2 = r"""
``hankel2(n,x)`` computes the Hankel function of the second kind,
which is the complex combination of Bessel functions given by
.. math ::
H_n^{(2)}(x) = J_n(x) - i Y_n(x).
**Plots**
.. literalinclude :: /plots/hankel2.py
.. image :: /plots/hankel2.png
.. literalinclude :: /plots/hankel2_c.py
.. image :: /plots/hankel2_c.png
**Examples**
The Hankel function is generally complex-valued::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hankel2(2, pi)
(0.4854339326315091097054957 + 0.0999007139290278787734903j)
>>> hankel2(3.5, pi)
(0.2340002029630507922628888 + 0.6419643823412927142424049j)
"""
# Doc text for lambertw(); rendered verbatim in the generated API reference.
lambertw = r"""
The Lambert W function `W(z)` is defined as the inverse function
of `w \exp(w)`. In other words, the value of `W(z)` is such that
`z = W(z) \exp(W(z))` for any complex number `z`.
The Lambert W function is a multivalued function with infinitely
many branches `W_k(z)`, indexed by `k \in \mathbb{Z}`. Each branch
gives a different solution `w` of the equation `z = w \exp(w)`.
All branches are supported by :func:`~mpmath.lambertw`:
* ``lambertw(z)`` gives the principal solution (branch 0)
* ``lambertw(z, k)`` gives the solution on branch `k`
The Lambert W function has two partially real branches: the
principal branch (`k = 0`) is real for real `z > -1/e`, and the
`k = -1` branch is real for `-1/e < z < 0`. All branches except
`k = 0` have a logarithmic singularity at `z = 0`.
The definition, implementation and choice of branches
is based on [Corless]_.
**Plots**
.. literalinclude :: /plots/lambertw.py
.. image :: /plots/lambertw.png
.. literalinclude :: /plots/lambertw_c.py
.. image :: /plots/lambertw_c.png
**Basic examples**
The Lambert W function is the inverse of `w \exp(w)`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> w = lambertw(1)
>>> w
0.5671432904097838729999687
>>> w*exp(w)
1.0
Any branch gives a valid inverse::
>>> w = lambertw(1, k=3)
>>> w
(-2.853581755409037807206819 + 17.11353553941214591260783j)
>>> w = lambertw(1, k=25)
>>> w
(-5.047020464221569709378686 + 155.4763860949415867162066j)
>>> chop(w*exp(w))
1.0
**Applications to equation-solving**
The Lambert W function may be used to solve various kinds of
equations, such as finding the value of the infinite power
tower `z^{z^{z^{\ldots}}}`::
>>> def tower(z, n):
... if n == 0:
... return z
... return z ** tower(z, n-1)
...
>>> tower(mpf(0.5), 100)
0.6411857445049859844862005
>>> -lambertw(-log(0.5))/log(0.5)
0.6411857445049859844862005
**Properties**
The Lambert W function grows roughly like the natural logarithm
for large arguments::
>>> lambertw(1000); log(1000)
5.249602852401596227126056
6.907755278982137052053974
>>> lambertw(10**100); log(10**100)
224.8431064451185015393731
230.2585092994045684017991
The principal branch of the Lambert W function has a rational
Taylor series expansion around `z = 0`::
>>> nprint(taylor(lambertw, 0, 6), 10)
[0.0, 1.0, -1.0, 1.5, -2.666666667, 5.208333333, -10.8]
Some special values and limits are::
>>> lambertw(0)
0.0
>>> lambertw(1)
0.5671432904097838729999687
>>> lambertw(e)
1.0
>>> lambertw(inf)
+inf
>>> lambertw(0, k=-1)
-inf
>>> lambertw(0, k=3)
-inf
>>> lambertw(inf, k=2)
(+inf + 12.56637061435917295385057j)
>>> lambertw(inf, k=3)
(+inf + 18.84955592153875943077586j)
>>> lambertw(-inf, k=3)
(+inf + 21.9911485751285526692385j)
The `k = 0` and `k = -1` branches join at `z = -1/e` where
`W(z) = -1` for both branches. Since `-1/e` can only be represented
approximately with binary floating-point numbers, evaluating the
Lambert W function at this point only gives `-1` approximately::
>>> lambertw(-1/e, 0)
-0.9999999999998371330228251
>>> lambertw(-1/e, -1)
-1.000000000000162866977175
If `-1/e` happens to round in the negative direction, there might be
a small imaginary part::
>>> mp.dps = 15
>>> lambertw(-1/e)
(-1.0 + 8.22007971483662e-9j)
>>> lambertw(-1/e+eps)
-0.999999966242188
**References**
1. [Corless]_
"""
# Doc text for barnesg(); rendered verbatim in the generated API reference.
# Fixed: doubled word "have have" in the superfactorial relation sentence.
barnesg = r"""
Evaluates the Barnes G-function, which generalizes the
superfactorial (:func:`~mpmath.superfac`) and by extension also the
hyperfactorial (:func:`~mpmath.hyperfac`) to the complex numbers
in an analogous way to how the gamma function generalizes
the ordinary factorial.
The Barnes G-function may be defined in terms of a Weierstrass
product:
.. math ::
G(z+1) = (2\pi)^{z/2} e^{-[z(z+1)+\gamma z^2]/2}
\prod_{n=1}^\infty
\left[\left(1+\frac{z}{n}\right)^ne^{-z+z^2/(2n)}\right]
For positive integers `n`, we have the relation to superfactorials
`G(n) = \mathrm{sf}(n-2) = 0! \cdot 1! \cdots (n-2)!`.
**Examples**
Some elementary values and limits of the Barnes G-function::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> barnesg(1), barnesg(2), barnesg(3)
(1.0, 1.0, 1.0)
>>> barnesg(4)
2.0
>>> barnesg(5)
12.0
>>> barnesg(6)
288.0
>>> barnesg(7)
34560.0
>>> barnesg(8)
24883200.0
>>> barnesg(inf)
+inf
>>> barnesg(0), barnesg(-1), barnesg(-2)
(0.0, 0.0, 0.0)
Closed-form values are known for some rational arguments::
>>> barnesg('1/2')
0.603244281209446
>>> sqrt(exp(0.25+log(2)/12)/sqrt(pi)/glaisher**3)
0.603244281209446
>>> barnesg('1/4')
0.29375596533861
>>> nthroot(exp('3/8')/exp(catalan/pi)/
... gamma(0.25)**3/sqrt(glaisher)**9, 4)
0.29375596533861
The Barnes G-function satisfies the functional equation
`G(z+1) = \Gamma(z) G(z)`::
>>> z = pi
>>> barnesg(z+1)
2.39292119327948
>>> gamma(z)*barnesg(z)
2.39292119327948
The asymptotic growth rate of the Barnes G-function is related to
the Glaisher-Kinkelin constant::
>>> limit(lambda n: barnesg(n+1)/(n**(n**2/2-mpf(1)/12)*
... (2*pi)**(n/2)*exp(-3*n**2/4)), inf)
0.847536694177301
>>> exp('1/12')/glaisher
0.847536694177301
The Barnes G-function can be differentiated in closed form::
>>> z = 3
>>> diff(barnesg, z)
0.264507203401607
>>> barnesg(z)*((z-1)*psi(0,z)-z+(log(2*pi)+1)/2)
0.264507203401607
Evaluation is supported for arbitrary arguments and at arbitrary
precision::
>>> barnesg(6.5)
2548.7457695685
>>> barnesg(-pi)
0.00535976768353037
>>> barnesg(3+4j)
(-0.000676375932234244 - 4.42236140124728e-5j)
>>> mp.dps = 50
>>> barnesg(1/sqrt(2))
0.81305501090451340843586085064413533788206204124732
>>> q = barnesg(10j)
>>> q.real
0.000000000021852360840356557241543036724799812371995850552234
>>> q.imag
-0.00000000000070035335320062304849020654215545839053210041457588
>>> mp.dps = 15
>>> barnesg(100)
3.10361006263698e+6626
>>> barnesg(-101)
0.0
>>> barnesg(-10.5)
5.94463017605008e+25
>>> barnesg(-10000.5)
-6.14322868174828e+167480422
>>> barnesg(1000j)
(5.21133054865546e-1173597 + 4.27461836811016e-1173597j)
>>> barnesg(-1000+1000j)
(2.43114569750291e+1026623 + 2.24851410674842e+1026623j)
**References**
1. Whittaker & Watson, *A Course of Modern Analysis*,
Cambridge University Press, 4th edition (1927), p.264
2. http://en.wikipedia.org/wiki/Barnes_G-function
3. http://mathworld.wolfram.com/BarnesG-Function.html
"""
# Doc text for superfac(); rendered verbatim in the generated API reference.
superfac = r"""
Computes the superfactorial, defined as the product of
consecutive factorials
.. math ::
\mathrm{sf}(n) = \prod_{k=1}^n k!
For general complex `z`, `\mathrm{sf}(z)` is defined
in terms of the Barnes G-function (see :func:`~mpmath.barnesg`).
**Examples**
The first few superfactorials are (OEIS A000178)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(10):
... print("%s %s" % (n, superfac(n)))
...
0 1.0
1 1.0
2 2.0
3 12.0
4 288.0
5 34560.0
6 24883200.0
7 125411328000.0
8 5.05658474496e+15
9 1.83493347225108e+21
Superfactorials grow very rapidly::
>>> superfac(1000)
3.24570818422368e+1177245
>>> superfac(10**10)
2.61398543581249e+467427913956904067453
Evaluation is supported for arbitrary arguments::
>>> mp.dps = 25
>>> superfac(pi)
17.20051550121297985285333
>>> superfac(2+3j)
(-0.005915485633199789627466468 + 0.008156449464604044948738263j)
>>> diff(superfac, 1)
0.2645072034016070205673056
**References**
1. http://oeis.org/A000178
"""
# Doc text for hyperfac(); rendered verbatim in the generated API reference.
hyperfac = r"""
Computes the hyperfactorial, defined for integers as the product
.. math ::
H(n) = \prod_{k=1}^n k^k.
The hyperfactorial satisfies the recurrence formula `H(z) = z^z H(z-1)`.
It can be defined more generally in terms of the Barnes G-function (see
:func:`~mpmath.barnesg`) and the gamma function by the formula
.. math ::
H(z) = \frac{\Gamma(z+1)^z}{G(z)}.
The extension to complex numbers can also be done via
the integral representation
.. math ::
H(z) = (2\pi)^{-z/2} \exp \left[
{z+1 \choose 2} + \int_0^z \log(t!)\,dt
\right].
**Examples**
The rapidly-growing sequence of hyperfactorials begins
(OEIS A002109)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(10):
... print("%s %s" % (n, hyperfac(n)))
...
0 1.0
1 1.0
2 4.0
3 108.0
4 27648.0
5 86400000.0
6 4031078400000.0
7 3.3197663987712e+18
8 5.56964379417266e+25
9 2.15779412229419e+34
Some even larger hyperfactorials are::
>>> hyperfac(1000)
5.46458120882585e+1392926
>>> hyperfac(10**10)
4.60408207642219e+489142638002418704309
The hyperfactorial can be evaluated for arbitrary arguments::
>>> hyperfac(0.5)
0.880449235173423
>>> diff(hyperfac, 1)
0.581061466795327
>>> hyperfac(pi)
205.211134637462
>>> hyperfac(-10+1j)
(3.01144471378225e+46 - 2.45285242480185e+46j)
The recurrence property of the hyperfactorial holds
generally::
>>> z = 3-4*j
>>> hyperfac(z)
(-4.49795891462086e-7 - 6.33262283196162e-7j)
>>> z**z * hyperfac(z-1)
(-4.49795891462086e-7 - 6.33262283196162e-7j)
>>> z = mpf(-0.6)
>>> chop(z**z * hyperfac(z-1))
1.28170142849352
>>> hyperfac(z)
1.28170142849352
The hyperfactorial may also be computed using the integral
definition::
>>> z = 2.5
>>> hyperfac(z)
15.9842119922237
>>> (2*pi)**(-z/2)*exp(binomial(z+1,2) +
... quad(lambda t: loggamma(t+1), [0, z]))
15.9842119922237
:func:`~mpmath.hyperfac` supports arbitrary-precision evaluation::
>>> mp.dps = 50
>>> hyperfac(10)
215779412229418562091680268288000000000000000.0
>>> hyperfac(1/sqrt(2))
0.89404818005227001975423476035729076375705084390942
**References**
1. http://oeis.org/A002109
2. http://mathworld.wolfram.com/Hyperfactorial.html
"""
# Doc text for rgamma(); rendered verbatim in the generated API reference.
rgamma = r"""
Computes the reciprocal of the gamma function, `1/\Gamma(z)`. This
function evaluates to zero at the poles
of the gamma function, `z = 0, -1, -2, \ldots`.
**Examples**
Basic examples::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> rgamma(1)
1.0
>>> rgamma(4)
0.1666666666666666666666667
>>> rgamma(0); rgamma(-1)
0.0
0.0
>>> rgamma(1000)
2.485168143266784862783596e-2565
>>> rgamma(inf)
0.0
A definite integral that can be evaluated in terms of elementary
integrals::
>>> quad(rgamma, [0,inf])
2.807770242028519365221501
>>> e + quad(lambda t: exp(-t)/(pi**2+log(t)**2), [0,inf])
2.807770242028519365221501
"""
loggamma = r"""
Computes the principal branch of the log-gamma function,
`\ln \Gamma(z)`. Unlike `\ln(\Gamma(z))`, which has infinitely many
complex branch cuts, the principal log-gamma function only has a single
branch cut along the negative half-axis. The principal branch
continuously matches the asymptotic Stirling expansion
.. math ::
\ln \Gamma(z) \sim \frac{\ln(2 \pi)}{2} +
\left(z-\frac{1}{2}\right) \ln(z) - z + O(z^{-1}).
The real parts of both functions agree, but their imaginary
parts generally differ by `2 n \pi` for some `n \in \mathbb{Z}`.
They coincide for `z \in \mathbb{R}, z > 0`.
Computationally, it is advantageous to use :func:`~mpmath.loggamma`
instead of :func:`~mpmath.gamma` for extremely large arguments.
**Examples**
Comparing with `\ln(\Gamma(z))`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> loggamma('13.2'); log(gamma('13.2'))
20.49400419456603678498394
20.49400419456603678498394
>>> loggamma(3+4j)
(-1.756626784603784110530604 + 4.742664438034657928194889j)
>>> log(gamma(3+4j))
(-1.756626784603784110530604 - 1.540520869144928548730397j)
>>> log(gamma(3+4j)) + 2*pi*j
(-1.756626784603784110530604 + 4.742664438034657928194889j)
Note the imaginary parts for negative arguments::
>>> loggamma(-0.5); loggamma(-1.5); loggamma(-2.5)
(1.265512123484645396488946 - 3.141592653589793238462643j)
(0.8600470153764810145109327 - 6.283185307179586476925287j)
(-0.05624371649767405067259453 - 9.42477796076937971538793j)
Some special values::
>>> loggamma(1); loggamma(2)
0.0
0.0
>>> loggamma(3); +ln2
0.6931471805599453094172321
0.6931471805599453094172321
>>> loggamma(3.5); log(15*sqrt(pi)/8)
1.200973602347074224816022
1.200973602347074224816022
>>> loggamma(inf)
+inf
Huge arguments are permitted::
>>> loggamma('1e30')
6.807755278982137052053974e+31
>>> loggamma('1e300')
6.897755278982137052053974e+302
>>> loggamma('1e3000')
6.906755278982137052053974e+3003
>>> loggamma('1e100000000000000000000')
2.302585092994045684007991e+100000000000000000020
>>> loggamma('1e30j')
(-1.570796326794896619231322e+30 + 6.807755278982137052053974e+31j)
>>> loggamma('1e300j')
(-1.570796326794896619231322e+300 + 6.897755278982137052053974e+302j)
>>> loggamma('1e3000j')
(-1.570796326794896619231322e+3000 + 6.906755278982137052053974e+3003j)
The log-gamma function can be integrated analytically
on any interval of unit length::
>>> z = 0
>>> quad(loggamma, [z,z+1]); log(2*pi)/2
0.9189385332046727417803297
0.9189385332046727417803297
>>> z = 3+4j
>>> quad(loggamma, [z,z+1]); (log(z)-1)*z + log(2*pi)/2
(-0.9619286014994750641314421 + 5.219637303741238195688575j)
(-0.9619286014994750641314421 + 5.219637303741238195688575j)
The derivatives of the log-gamma function are given by the
polygamma function (:func:`~mpmath.psi`)::
>>> diff(loggamma, -4+3j); psi(0, -4+3j)
(1.688493531222971393607153 + 2.554898911356806978892748j)
(1.688493531222971393607153 + 2.554898911356806978892748j)
>>> diff(loggamma, -4+3j, 2); psi(1, -4+3j)
(-0.1539414829219882371561038 - 0.1020485197430267719746479j)
(-0.1539414829219882371561038 - 0.1020485197430267719746479j)
The log-gamma function satisfies an additive form of the
recurrence relation for the ordinary gamma function::
>>> z = 2+3j
>>> loggamma(z); loggamma(z+1) - log(z)
(-2.092851753092733349564189 + 2.302396543466867626153708j)
(-2.092851753092733349564189 + 2.302396543466867626153708j)
"""
siegeltheta = r"""
Computes the Riemann-Siegel theta function,
.. math ::
\theta(t) = \frac{
\log\Gamma\left(\frac{1+2it}{4}\right) -
\log\Gamma\left(\frac{1-2it}{4}\right)
}{2i} - \frac{\log \pi}{2} t.
The Riemann-Siegel theta function is important in
providing the phase factor for the Z-function
(see :func:`~mpmath.siegelz`). Evaluation is supported for real and
complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> siegeltheta(0)
0.0
>>> siegeltheta(inf)
+inf
>>> siegeltheta(-inf)
-inf
>>> siegeltheta(1)
-1.767547952812290388302216
>>> siegeltheta(10+0.25j)
(-3.068638039426838572528867 + 0.05804937947429712998395177j)
Arbitrary derivatives may be computed using the optional *derivative* keyword argument::
>>> siegeltheta(1234, derivative=2)
0.0004051864079114053109473741
>>> diff(siegeltheta, 1234, n=2)
0.0004051864079114053109473741
The Riemann-Siegel theta function has odd symmetry around `t = 0`,
two local extreme points and three real roots including 0 (located
symmetrically)::
>>> nprint(chop(taylor(siegeltheta, 0, 5)))
[0.0, -2.68609, 0.0, 2.69433, 0.0, -6.40218]
>>> findroot(diffun(siegeltheta), 7)
6.28983598883690277966509
>>> findroot(siegeltheta, 20)
17.84559954041086081682634
For large `t`, there is a famous asymptotic formula
for `\theta(t)`, to first order given by::
>>> t = mpf(10**6)
>>> siegeltheta(t)
5488816.353078403444882823
>>> -t*log(2*pi/t)/2-t/2
5488816.745777464310273645
"""
grampoint = r"""
Gives the `n`-th Gram point `g_n`, defined as the solution
to the equation `\theta(g_n) = \pi n` where `\theta(t)`
is the Riemann-Siegel theta function (:func:`~mpmath.siegeltheta`).
The first few Gram points are::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> grampoint(0)
17.84559954041086081682634
>>> grampoint(1)
23.17028270124630927899664
>>> grampoint(2)
27.67018221781633796093849
>>> grampoint(3)
31.71797995476405317955149
Checking the definition::
>>> siegeltheta(grampoint(3))
9.42477796076937971538793
>>> 3*pi
9.42477796076937971538793
A large Gram point::
>>> grampoint(10**10)
3293531632.728335454561153
Gram points are useful when studying the Z-function
(:func:`~mpmath.siegelz`). See the documentation of that function
for additional examples.
:func:`~mpmath.grampoint` can solve the defining equation for
nonintegral `n`. There is a fixed point where `g(x) = x`::
>>> findroot(lambda x: grampoint(x) - x, 10000)
9146.698193171459265866198
**References**
1. http://mathworld.wolfram.com/GramPoint.html
"""
siegelz = r"""
Computes the Z-function, also known as the Riemann-Siegel Z function,
.. math ::
Z(t) = e^{i \theta(t)} \zeta(1/2+it)
where `\zeta(s)` is the Riemann zeta function (:func:`~mpmath.zeta`)
and where `\theta(t)` denotes the Riemann-Siegel theta function
(see :func:`~mpmath.siegeltheta`).
Evaluation is supported for real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> siegelz(1)
-0.7363054628673177346778998
>>> siegelz(3+4j)
(-0.1852895764366314976003936 - 0.2773099198055652246992479j)
The first four derivatives are supported, using the
optional *derivative* keyword argument::
>>> siegelz(1234567, derivative=3)
56.89689348495089294249178
>>> diff(siegelz, 1234567, n=3)
56.89689348495089294249178
The Z-function has a Maclaurin expansion::
>>> nprint(chop(taylor(siegelz, 0, 4)))
[-1.46035, 0.0, 2.73588, 0.0, -8.39357]
The Z-function `Z(t)` is equal to `\pm |\zeta(s)|` on the
critical line `s = 1/2+it` (i.e. for real arguments `t`
to `Z`). Its zeros coincide with those of the Riemann zeta
function::
>>> findroot(siegelz, 14)
14.13472514173469379045725
>>> findroot(siegelz, 20)
21.02203963877155499262848
>>> findroot(zeta, 0.5+14j)
(0.5 + 14.13472514173469379045725j)
>>> findroot(zeta, 0.5+20j)
(0.5 + 21.02203963877155499262848j)
Since the Z-function is real-valued on the critical line
(and unlike `|\zeta(s)|` analytic), it is useful for
investigating the zeros of the Riemann zeta function.
For example, one can use a root-finding algorithm based
on sign changes::
>>> findroot(siegelz, [100, 200], solver='bisect')
176.4414342977104188888926
To locate roots, Gram points `g_n` which can be computed
by :func:`~mpmath.grampoint` are useful. If `(-1)^n Z(g_n)` is
positive for two consecutive `n`, then `Z(t)` must have
a zero between those points::
>>> g10 = grampoint(10)
>>> g11 = grampoint(11)
>>> (-1)**10 * siegelz(g10) > 0
True
>>> (-1)**11 * siegelz(g11) > 0
True
>>> findroot(siegelz, [g10, g11], solver='bisect')
56.44624769706339480436776
>>> g10, g11
(54.67523744685325626632663, 57.54516517954725443703014)
"""
riemannr = r"""
Evaluates the Riemann R function, a smooth approximation of the
prime counting function `\pi(x)` (see :func:`~mpmath.primepi`). The Riemann
R function gives a fast numerical approximation useful e.g. to
roughly estimate the number of primes in a given interval.
The Riemann R function is computed using the rapidly convergent Gram
series,
.. math ::
R(x) = 1 + \sum_{k=1}^{\infty}
\frac{\log^k x}{k k! \zeta(k+1)}.
From the Gram series, one sees that the Riemann R function is a
well-defined analytic function (except for a branch cut along
the negative real half-axis); it can be evaluated for arbitrary
real or complex arguments.
The Riemann R function gives a very accurate approximation
of the prime counting function. For example, it is wrong by at
most 2 for `x < 1000`, and for `x = 10^9` differs from the exact
value of `\pi(x)` by 79, or less than two parts in a million.
It is about 10 times more accurate than the logarithmic integral
estimate (see :func:`~mpmath.li`), which however is even faster to evaluate.
It is orders of magnitude more accurate than the extremely
fast `x/\log x` estimate.
**Examples**
For small arguments, the Riemann R function almost exactly
gives the prime counting function if rounded to the nearest
integer::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> primepi(50), riemannr(50)
(15, 14.9757023241462)
>>> max(abs(primepi(n)-int(round(riemannr(n)))) for n in range(100))
1
>>> max(abs(primepi(n)-int(round(riemannr(n)))) for n in range(300))
2
The Riemann R function can be evaluated for arguments far too large
for exact determination of `\pi(x)` to be computationally
feasible with any presently known algorithm::
>>> riemannr(10**30)
1.46923988977204e+28
>>> riemannr(10**100)
4.3619719871407e+97
>>> riemannr(10**1000)
4.3448325764012e+996
A comparison of the Riemann R function and logarithmic integral estimates
for `\pi(x)` using exact values of `\pi(10^n)` up to `n = 9`.
The fractional error is shown in parentheses::
>>> exact = [4,25,168,1229,9592,78498,664579,5761455,50847534]
>>> for n, p in enumerate(exact):
... n += 1
... r, l = riemannr(10**n), li(10**n)
... rerr, lerr = nstr((r-p)/p,3), nstr((l-p)/p,3)
... print("%i %i %s(%s) %s(%s)" % (n, p, r, rerr, l, lerr))
...
1 4 4.56458314100509(0.141) 6.1655995047873(0.541)
2 25 25.6616332669242(0.0265) 30.1261415840796(0.205)
3 168 168.359446281167(0.00214) 177.609657990152(0.0572)
4 1229 1226.93121834343(-0.00168) 1246.13721589939(0.0139)
5 9592 9587.43173884197(-0.000476) 9629.8090010508(0.00394)
6 78498 78527.3994291277(0.000375) 78627.5491594622(0.00165)
7 664579 664667.447564748(0.000133) 664918.405048569(0.000511)
8 5761455 5761551.86732017(1.68e-5) 5762209.37544803(0.000131)
9 50847534 50847455.4277214(-1.55e-6) 50849234.9570018(3.35e-5)
The derivative of the Riemann R function gives the approximate
probability for a number of magnitude `x` to be prime::
>>> diff(riemannr, 1000)
0.141903028110784
>>> mpf(primepi(1050) - primepi(950)) / 100
0.15
Evaluation is supported for arbitrary arguments and at arbitrary
precision::
>>> mp.dps = 30
>>> riemannr(7.5)
3.72934743264966261918857135136
>>> riemannr(-4+2j)
(-0.551002208155486427591793957644 + 2.16966398138119450043195899746j)
"""
primepi = r"""
Evaluates the prime counting function, `\pi(x)`, which gives
the number of primes less than or equal to `x`. The argument
`x` may be fractional.
The prime counting function is very expensive to evaluate
precisely for large `x`, and the present implementation is
not optimized in any way. For numerical approximation of the
prime counting function, it is better to use :func:`~mpmath.primepi2`
or :func:`~mpmath.riemannr`.
Some values of the prime counting function::
>>> from mpmath import *
>>> [primepi(k) for k in range(20)]
[0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 8]
>>> primepi(3.5)
2
>>> primepi(100000)
9592
"""
primepi2 = r"""
Returns an interval (as an ``mpi`` instance) providing bounds
for the value of the prime counting function `\pi(x)`. For small
`x`, :func:`~mpmath.primepi2` returns an exact interval based on
the output of :func:`~mpmath.primepi`. For `x > 2656`, a loose interval
based on Schoenfeld's inequality
.. math ::
|\pi(x) - \mathrm{li}(x)| < \frac{\sqrt x \log x}{8 \pi}
is returned. This estimate is rigorous assuming the truth of
the Riemann hypothesis, and can be computed very quickly.
**Examples**
Exact values of the prime counting function for small `x`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> iv.dps = 15; iv.pretty = True
>>> primepi2(10)
[4.0, 4.0]
>>> primepi2(100)
[25.0, 25.0]
>>> primepi2(1000)
[168.0, 168.0]
Loose intervals are generated for moderately large `x`::
>>> primepi2(10000), primepi(10000)
([1209.0, 1283.0], 1229)
>>> primepi2(50000), primepi(50000)
([5070.0, 5263.0], 5133)
As `x` increases, the absolute error gets worse while the relative
error improves. The exact value of `\pi(10^{23})` is
1925320391606803968923, and :func:`~mpmath.primepi2` gives 9 significant
digits::
>>> p = primepi2(10**23)
>>> p
[1.9253203909477020467e+21, 1.925320392280406229e+21]
>>> mpf(p.delta) / mpf(p.a)
6.9219865355293e-10
A more precise, nonrigorous estimate for `\pi(x)` can be
obtained using the Riemann R function (:func:`~mpmath.riemannr`).
For large enough `x`, the value returned by :func:`~mpmath.primepi2`
essentially amounts to a small perturbation of the value returned by
:func:`~mpmath.riemannr`::
>>> primepi2(10**100)
[4.3619719871407024816e+97, 4.3619719871407032404e+97]
>>> riemannr(10**100)
4.3619719871407e+97
"""
primezeta = r"""
Computes the prime zeta function, which is defined
in analogy with the Riemann zeta function (:func:`~mpmath.zeta`)
as
.. math ::
P(s) = \sum_p \frac{1}{p^s}
where the sum is taken over all prime numbers `p`. Although
this sum only converges for `\mathrm{Re}(s) > 1`, the
function is defined by analytic continuation in the
half-plane `\mathrm{Re}(s) > 0`.
**Examples**
Arbitrary-precision evaluation for real and complex arguments is
supported::
>>> from mpmath import *
>>> mp.dps = 30; mp.pretty = True
>>> primezeta(2)
0.452247420041065498506543364832
>>> primezeta(pi)
0.15483752698840284272036497397
>>> mp.dps = 50
>>> primezeta(3)
0.17476263929944353642311331466570670097541212192615
>>> mp.dps = 20
>>> primezeta(3+4j)
(-0.12085382601645763295 - 0.013370403397787023602j)
The prime zeta function has a logarithmic pole at `s = 1`,
with residue equal to the difference of the Mertens and
Euler constants::
>>> primezeta(1)
+inf
>>> extradps(25)(lambda x: primezeta(1+x)+log(x))(+eps)
-0.31571845205389007685
>>> mertens-euler
-0.31571845205389007685
The analytic continuation to `0 < \mathrm{Re}(s) \le 1`
is implemented. In this strip the function exhibits
very complex behavior; on the unit interval, it has poles at
`1/n` for every squarefree integer `n`::
>>> primezeta(0.5) # Pole at s = 1/2
(-inf + 3.1415926535897932385j)
>>> primezeta(0.25)
(-1.0416106801757269036 + 0.52359877559829887308j)
>>> primezeta(0.5+10j)
(0.54892423556409790529 + 0.45626803423487934264j)
Although evaluation works in principle for any `\mathrm{Re}(s) > 0`,
it should be noted that the evaluation time increases exponentially
as `s` approaches the imaginary axis.
For large `\mathrm{Re}(s)`, `P(s)` is asymptotic to `2^{-s}`::
>>> primezeta(inf)
0.0
>>> primezeta(10), mpf(2)**-10
(0.00099360357443698021786, 0.0009765625)
>>> primezeta(1000)
9.3326361850321887899e-302
>>> primezeta(1000+1000j)
(-3.8565440833654995949e-302 - 8.4985390447553234305e-302j)
**References**
Carl-Erik Froberg, "On the prime zeta function",
BIT 8 (1968), pp. 187-202.
"""
bernpoly = r"""
Evaluates the Bernoulli polynomial `B_n(z)`.
The first few Bernoulli polynomials are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(6):
... nprint(chop(taylor(lambda x: bernpoly(n,x), 0, n)))
...
[1.0]
[-0.5, 1.0]
[0.166667, -1.0, 1.0]
[0.0, 0.5, -1.5, 1.0]
[-0.0333333, 0.0, 1.0, -2.0, 1.0]
[0.0, -0.166667, 0.0, 1.66667, -2.5, 1.0]
At `z = 0`, the Bernoulli polynomial evaluates to a
Bernoulli number (see :func:`~mpmath.bernoulli`)::
>>> bernpoly(12, 0), bernoulli(12)
(-0.253113553113553, -0.253113553113553)
>>> bernpoly(13, 0), bernoulli(13)
(0.0, 0.0)
Evaluation is accurate for large `n` and small `z`::
>>> mp.dps = 25
>>> bernpoly(100, 0.5)
2.838224957069370695926416e+78
>>> bernpoly(1000, 10.5)
5.318704469415522036482914e+1769
"""
polylog = r"""
Computes the polylogarithm, defined by the sum
.. math ::
\mathrm{Li}_s(z) = \sum_{k=1}^{\infty} \frac{z^k}{k^s}.
This series is convergent only for `|z| < 1`, so elsewhere
the analytic continuation is implied.
The polylogarithm should not be confused with the logarithmic
integral (also denoted by Li or li), which is implemented
as :func:`~mpmath.li`.
**Examples**
The polylogarithm satisfies a huge number of functional identities.
A sample of polylogarithm evaluations is shown below::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> polylog(1,0.5), log(2)
(0.693147180559945, 0.693147180559945)
>>> polylog(2,0.5), (pi**2-6*log(2)**2)/12
(0.582240526465012, 0.582240526465012)
>>> polylog(2,-phi), -log(phi)**2-pi**2/10
(-1.21852526068613, -1.21852526068613)
>>> polylog(3,0.5), 7*zeta(3)/8-pi**2*log(2)/12+log(2)**3/6
(0.53721319360804, 0.53721319360804)
:func:`~mpmath.polylog` can evaluate the analytic continuation of the
polylogarithm when `s` is an integer::
>>> polylog(2, 10)
(0.536301287357863 - 7.23378441241546j)
>>> polylog(2, -10)
-4.1982778868581
>>> polylog(2, 10j)
(-3.05968879432873 + 3.71678149306807j)
>>> polylog(-2, 10)
-0.150891632373114
>>> polylog(-2, -10)
0.067618332081142
>>> polylog(-2, 10j)
(0.0384353698579347 + 0.0912451798066779j)
Some more examples, with arguments on the unit circle (note that
the series definition cannot be used for computation here)::
>>> polylog(2,j)
(-0.205616758356028 + 0.915965594177219j)
>>> j*catalan-pi**2/48
(-0.205616758356028 + 0.915965594177219j)
>>> polylog(3,exp(2*pi*j/3))
(-0.534247512515375 + 0.765587078525922j)
>>> -4*zeta(3)/9 + 2*j*pi**3/81
(-0.534247512515375 + 0.765587078525921j)
Polylogarithms of different order are related by integration
and differentiation::
>>> s, z = 3, 0.5
>>> polylog(s+1, z)
0.517479061673899
>>> quad(lambda t: polylog(s,t)/t, [0, z])
0.517479061673899
>>> z*diff(lambda t: polylog(s+2,t), z)
0.517479061673899
Taylor series expansions around `z = 0` are::
>>> for n in range(-3, 4):
... nprint(taylor(lambda x: polylog(n,x), 0, 5))
...
[0.0, 1.0, 8.0, 27.0, 64.0, 125.0]
[0.0, 1.0, 4.0, 9.0, 16.0, 25.0]
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
[0.0, 1.0, 1.0, 1.0, 1.0, 1.0]
[0.0, 1.0, 0.5, 0.333333, 0.25, 0.2]
[0.0, 1.0, 0.25, 0.111111, 0.0625, 0.04]
[0.0, 1.0, 0.125, 0.037037, 0.015625, 0.008]
The series defining the polylogarithm is simultaneously
a Taylor series and an L-series. For certain values of `z`, the
polylogarithm reduces to a pure zeta function::
>>> polylog(pi, 1), zeta(pi)
(1.17624173838258, 1.17624173838258)
>>> polylog(pi, -1), -altzeta(pi)
(-0.909670702980385, -0.909670702980385)
Evaluation for arbitrary, nonintegral `s` is supported
for `z` within the unit circle::
>>> polylog(3+4j, 0.25)
(0.24258605789446 - 0.00222938275488344j)
>>> nsum(lambda k: 0.25**k / k**(3+4j), [1,inf])
(0.24258605789446 - 0.00222938275488344j)
It is also currently supported outside of the unit circle for `z`
not too large in magnitude::
>>> polylog(1+j, 20+40j)
(-7.1421172179728 - 3.92726697721369j)
>>> polylog(1+j, 200+400j)
Traceback (most recent call last):
...
NotImplementedError: polylog for arbitrary s and z
**References**
1. Richard Crandall, "Note on fast polylogarithm computation"
http://people.reed.edu/~crandall/papers/Polylog.pdf
2. http://en.wikipedia.org/wiki/Polylogarithm
3. http://mathworld.wolfram.com/Polylogarithm.html
"""
bell = r"""
For `n` a nonnegative integer, ``bell(n,x)`` evaluates the Bell
polynomial `B_n(x)`, the first few of which are
.. math ::
B_0(x) = 1
B_1(x) = x
B_2(x) = x^2+x
B_3(x) = x^3+3x^2+x
If `x = 1` or :func:`~mpmath.bell` is called with only one argument, it
gives the `n`-th Bell number `B_n`, which is the number of
partitions of a set with `n` elements. By setting the precision to
at least `\log_{10} B_n` digits, :func:`~mpmath.bell` provides fast
calculation of exact Bell numbers.
In general, :func:`~mpmath.bell` computes
.. math ::
B_n(x) = e^{-x} \left(\mathrm{sinc}(\pi n) + E_n(x)\right)
where `E_n(x)` is the generalized exponential function implemented
by :func:`~mpmath.polyexp`. This is an extension of Dobinski's formula [1],
where the modification is the sinc term ensuring that `B_n(x)` is
continuous in `n`; :func:`~mpmath.bell` can thus be evaluated,
differentiated, etc for arbitrary complex arguments.
**Examples**
Simple evaluations::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> bell(0, 2.5)
1.0
>>> bell(1, 2.5)
2.5
>>> bell(2, 2.5)
8.75
Evaluation for arbitrary complex arguments::
>>> bell(5.75+1j, 2-3j)
(-10767.71345136587098445143 - 15449.55065599872579097221j)
The first few Bell polynomials::
>>> for k in range(7):
... nprint(taylor(lambda x: bell(k,x), 0, k))
...
[1.0]
[0.0, 1.0]
[0.0, 1.0, 1.0]
[0.0, 1.0, 3.0, 1.0]
[0.0, 1.0, 7.0, 6.0, 1.0]
[0.0, 1.0, 15.0, 25.0, 10.0, 1.0]
[0.0, 1.0, 31.0, 90.0, 65.0, 15.0, 1.0]
The first few Bell numbers and complementary Bell numbers::
>>> [int(bell(k)) for k in range(10)]
[1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147]
>>> [int(bell(k,-1)) for k in range(10)]
[1, -1, 0, 1, 1, -2, -9, -9, 50, 267]
Large Bell numbers::
>>> mp.dps = 50
>>> bell(50)
185724268771078270438257767181908917499221852770.0
>>> bell(50,-1)
-29113173035759403920216141265491160286912.0
Some even larger values::
>>> mp.dps = 25
>>> bell(1000,-1)
-1.237132026969293954162816e+1869
>>> bell(1000)
2.989901335682408421480422e+1927
>>> bell(1000,2)
6.591553486811969380442171e+1987
>>> bell(1000,100.5)
9.101014101401543575679639e+2529
A determinant identity satisfied by Bell numbers::
>>> mp.dps = 15
>>> N = 8
>>> det([[bell(k+j) for j in range(N)] for k in range(N)])
125411328000.0
>>> superfac(N-1)
125411328000.0
**References**
1. http://mathworld.wolfram.com/DobinskisFormula.html
"""
# reST documentation text for mpmath.polyexp (assigned as a runtime docstring;
# the doctest outputs below are executed by the test suite — keep them exact).
polyexp = r"""
Evaluates the polyexponential function, defined for arbitrary
complex `s`, `z` by the series
.. math ::
E_s(z) = \sum_{k=1}^{\infty} \frac{k^s}{k!} z^k.
`E_s(z)` is constructed from the exponential function analogously
to how the polylogarithm is constructed from the ordinary
logarithm; as a function of `s` (with `z` fixed), `E_s` is an L-series.
It is an entire function of both `s` and `z`.
The polyexponential function provides a generalization of the
Bell polynomials `B_n(x)` (see :func:`~mpmath.bell`) to noninteger orders `n`.
In terms of the Bell polynomials,
.. math ::
E_s(z) = e^z B_s(z) - \mathrm{sinc}(\pi s).
Note that `B_n(x)` and `e^{-x} E_n(x)` are identical if `n`
is a nonzero integer, but not otherwise. In particular, they differ
at `n = 0`.
**Examples**
Evaluating a series::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> nsum(lambda k: sqrt(k)/fac(k), [1,inf])
2.101755547733791780315904
>>> polyexp(0.5,1)
2.101755547733791780315904
Evaluation for arbitrary arguments::
>>> polyexp(-3-4j, 2.5+2j)
(2.351660261190434618268706 + 1.202966666673054671364215j)
Evaluation is accurate for tiny function values::
>>> polyexp(4, -100)
3.499471750566824369520223e-36
If `n` is a nonpositive integer, `E_n` reduces to a special
instance of the hypergeometric function `\,_pF_q`::
>>> n = 3
>>> x = pi
>>> polyexp(-n,x)
4.042192318847986561771779
>>> x*hyper([1]*(n+1), [2]*(n+1), x)
4.042192318847986561771779
"""
# reST documentation text for mpmath.cyclotomic (assigned as a runtime docstring).
# Fixes: the Moebius-product formula was circular ("... = \Phi_n(x)") and the
# explicit polynomial for Phi_3 was wrong; Phi_3(x) = x^2 + x + 1, which the
# doctest output "3 [1.0, 1.0, 1.0]" below already confirms.
cyclotomic = r"""
Evaluates the cyclotomic polynomial `\Phi_n(x)`, defined by
.. math ::
\Phi_n(x) = \prod_{\zeta} (x - \zeta)
where `\zeta` ranges over all primitive `n`-th roots of unity
(see :func:`~mpmath.unitroots`). An equivalent representation, used
for computation, is
.. math ::
\Phi_n(x) = \prod_{d\mid n}(x^d-1)^{\mu(n/d)}
where `\mu(m)` denotes the Moebius function. The cyclotomic
polynomials are integer polynomials, the first of which can be
written explicitly as
.. math ::
\Phi_0(x) = 1
\Phi_1(x) = x - 1
\Phi_2(x) = x + 1
\Phi_3(x) = x^2 + x + 1
\Phi_4(x) = x^2 + 1
\Phi_5(x) = x^4 + x^3 + x^2 + x + 1
\Phi_6(x) = x^2 - x + 1
**Examples**
The coefficients of low-order cyclotomic polynomials can be recovered
using Taylor expansion::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(9):
... p = chop(taylor(lambda x: cyclotomic(n,x), 0, 10))
... print("%s %s" % (n, nstr(p[:10+1-p[::-1].index(1)])))
...
0 [1.0]
1 [-1.0, 1.0]
2 [1.0, 1.0]
3 [1.0, 1.0, 1.0]
4 [1.0, 0.0, 1.0]
5 [1.0, 1.0, 1.0, 1.0, 1.0]
6 [1.0, -1.0, 1.0]
7 [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
8 [1.0, 0.0, 0.0, 0.0, 1.0]
The definition as a product over primitive roots may be checked
by computing the product explicitly (for a real argument, this
method will generally introduce numerical noise in the imaginary
part)::
>>> mp.dps = 25
>>> z = 3+4j
>>> cyclotomic(10, z)
(-419.0 - 360.0j)
>>> fprod(z-r for r in unitroots(10, primitive=True))
(-419.0 - 360.0j)
>>> z = 3
>>> cyclotomic(10, z)
61.0
>>> fprod(z-r for r in unitroots(10, primitive=True))
(61.0 - 3.146045605088568607055454e-25j)
Up to permutation, the roots of a given cyclotomic polynomial
can be checked to agree with the list of primitive roots::
>>> p = taylor(lambda x: cyclotomic(6,x), 0, 6)[:3]
>>> for r in polyroots(p[::-1]):
... print(r)
...
(0.5 - 0.8660254037844386467637232j)
(0.5 + 0.8660254037844386467637232j)
>>>
>>> for r in unitroots(6, primitive=True):
... print(r)
...
(0.5 + 0.8660254037844386467637232j)
(0.5 - 0.8660254037844386467637232j)
"""
meijerg = r"""
Evaluates the Meijer G-function, defined as
.. math ::
G^{m,n}_{p,q} \left( \left. \begin{matrix}
a_1, \dots, a_n ; a_{n+1} \dots a_p \\
b_1, \dots, b_m ; b_{m+1} \dots b_q
\end{matrix}\; \right| \; z ; r \right) =
\frac{1}{2 \pi i} \int_L
\frac{\prod_{j=1}^m \Gamma(b_j+s) \prod_{j=1}^n\Gamma(1-a_j-s)}
{\prod_{j=n+1}^{p}\Gamma(a_j+s) \prod_{j=m+1}^q \Gamma(1-b_j-s)}
z^{-s/r} ds
for an appropriate choice of the contour `L` (see references).
There are `p` elements `a_j`.
The argument *a_s* should be a pair of lists, the first containing the
`n` elements `a_1, \ldots, a_n` and the second containing
the `p-n` elements `a_{n+1}, \ldots a_p`.
There are `q` elements `b_j`.
The argument *b_s* should be a pair of lists, the first containing the
`m` elements `b_1, \ldots, b_m` and the second containing
the `q-m` elements `b_{m+1}, \ldots b_q`.
The implicit tuple `(m, n, p, q)` constitutes the order or degree of the
Meijer G-function, and is determined by the lengths of the coefficient
vectors. Confusingly, the indices in this tuple appear in a different order
from the coefficients, but this notation is standard. The many examples
given below should hopefully clear up any potential confusion.
**Algorithm**
The Meijer G-function is evaluated as a combination of hypergeometric series.
There are two versions of the function, which can be selected with
the optional *series* argument.
*series=1* uses a sum of `m` `\,_pF_{q-1}` functions of `z`
*series=2* uses a sum of `n` `\,_qF_{p-1}` functions of `1/z`
The default series is chosen based on the degree and `|z|` in order
to be consistent with Mathematica's. This definition of the Meijer G-function
has a discontinuity at `|z| = 1` for some orders, which can
be avoided by explicitly specifying a series.
Keyword arguments are forwarded to :func:`~mpmath.hypercomb`.
**Examples**
Many standard functions are special cases of the Meijer G-function
(possibly rescaled and/or with branch cut corrections). We define
some test parameters::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a = mpf(0.75)
>>> b = mpf(1.5)
>>> z = mpf(2.25)
The exponential function:
`e^z = G^{1,0}_{0,1} \left( \left. \begin{matrix} - \\ 0 \end{matrix} \;
\right| \; -z \right)`
>>> meijerg([[],[]], [[0],[]], -z)
9.487735836358525720550369
>>> exp(z)
9.487735836358525720550369
The natural logarithm:
`\log(1+z) = G^{1,2}_{2,2} \left( \left. \begin{matrix} 1, 1 \\ 1, 0
\end{matrix} \; \right| \; -z \right)`
>>> meijerg([[1,1],[]], [[1],[0]], z)
1.178654996341646117219023
>>> log(1+z)
1.178654996341646117219023
A rational function:
`\frac{z}{z+1} = G^{1,2}_{2,2} \left( \left. \begin{matrix} 1, 1 \\ 1, 1
\end{matrix} \; \right| \; z \right)`
>>> meijerg([[1,1],[]], [[1],[1]], z)
0.6923076923076923076923077
>>> z/(z+1)
0.6923076923076923076923077
The sine and cosine functions:
`\frac{1}{\sqrt \pi} \sin(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. \begin{matrix}
- \\ \frac{1}{2}, 0 \end{matrix} \; \right| \; z \right)`
`\frac{1}{\sqrt \pi} \cos(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. \begin{matrix}
- \\ 0, \frac{1}{2} \end{matrix} \; \right| \; z \right)`
>>> meijerg([[],[]], [[0.5],[0]], (z/2)**2)
0.4389807929218676682296453
>>> sin(z)/sqrt(pi)
0.4389807929218676682296453
>>> meijerg([[],[]], [[0],[0.5]], (z/2)**2)
-0.3544090145996275423331762
>>> cos(z)/sqrt(pi)
-0.3544090145996275423331762
Bessel functions:
`J_a(2 \sqrt z) = G^{1,0}_{0,2} \left( \left.
\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2}
\end{matrix} \; \right| \; z \right)`
`Y_a(2 \sqrt z) = G^{2,0}_{1,3} \left( \left.
\begin{matrix} \frac{-a-1}{2} \\ \frac{a}{2}, -\frac{a}{2}, \frac{-a-1}{2}
\end{matrix} \; \right| \; z \right)`
`(-z)^{a/2} z^{-a/2} I_a(2 \sqrt z) = G^{1,0}_{0,2} \left( \left.
\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2}
\end{matrix} \; \right| \; -z \right)`
`2 K_a(2 \sqrt z) = G^{2,0}_{0,2} \left( \left.
\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2}
\end{matrix} \; \right| \; z \right)`
As the example with the Bessel *I* function shows, a branch
factor is required for some arguments when inverting the square root.
>>> meijerg([[],[]], [[a/2],[-a/2]], (z/2)**2)
0.5059425789597154858527264
>>> besselj(a,z)
0.5059425789597154858527264
>>> meijerg([[],[(-a-1)/2]], [[a/2,-a/2],[(-a-1)/2]], (z/2)**2)
0.1853868950066556941442559
>>> bessely(a, z)
0.1853868950066556941442559
>>> meijerg([[],[]], [[a/2],[-a/2]], -(z/2)**2)
(0.8685913322427653875717476 + 2.096964974460199200551738j)
>>> (-z)**(a/2) / z**(a/2) * besseli(a, z)
(0.8685913322427653875717476 + 2.096964974460199200551738j)
>>> 0.5*meijerg([[],[]], [[a/2,-a/2],[]], (z/2)**2)
0.09334163695597828403796071
>>> besselk(a,z)
0.09334163695597828403796071
Error functions:
`\sqrt{\pi} z^{2(a-1)} \mathrm{erfc}(z) = G^{2,0}_{1,2} \left( \left.
\begin{matrix} a \\ a-1, a-\frac{1}{2}
\end{matrix} \; \right| \; z, \frac{1}{2} \right)`
>>> meijerg([[],[a]], [[a-1,a-0.5],[]], z, 0.5)
0.00172839843123091957468712
>>> sqrt(pi) * z**(2*a-2) * erfc(z)
0.00172839843123091957468712
A Meijer G-function of higher degree, (1,1,2,3):
>>> meijerg([[a],[b]], [[a],[b,a-1]], z)
1.55984467443050210115617
>>> sin((b-a)*pi)/pi*(exp(z)-1)*z**(a-1)
1.55984467443050210115617
A Meijer G-function of still higher degree, (4,1,2,4), that can
be expanded as a messy combination of exponential integrals:
>>> meijerg([[a],[2*b-a]], [[b,a,b-0.5,-1-a+2*b],[]], z)
0.3323667133658557271898061
>>> chop(4**(a-b+1)*sqrt(pi)*gamma(2*b-2*a)*z**a*\
... expint(2*b-2*a, -2*sqrt(-z))*expint(2*b-2*a, 2*sqrt(-z)))
0.3323667133658557271898061
In the following case, different series give different values::
>>> chop(meijerg([[1],[0.25]],[[3],[0.5]],-2))
-0.06417628097442437076207337
>>> meijerg([[1],[0.25]],[[3],[0.5]],-2,series=1)
0.1428699426155117511873047
>>> chop(meijerg([[1],[0.25]],[[3],[0.5]],-2,series=2))
-0.06417628097442437076207337
**References**
1. http://en.wikipedia.org/wiki/Meijer_G-function
2. http://mathworld.wolfram.com/MeijerG-Function.html
3. http://functions.wolfram.com/HypergeometricFunctions/MeijerG/
4. http://functions.wolfram.com/HypergeometricFunctions/MeijerG1/
"""
clsin = r"""
Computes the Clausen sine function, defined formally by the series
.. math ::
\mathrm{Cl}_s(z) = \sum_{k=1}^{\infty} \frac{\sin(kz)}{k^s}.
The special case `\mathrm{Cl}_2(z)` (i.e. ``clsin(2,z)``) is the classical
"Clausen function". More generally, the Clausen function is defined for
complex `s` and `z`, even when the series does not converge. The
Clausen function is related to the polylogarithm (:func:`~mpmath.polylog`) as
.. math ::
\mathrm{Cl}_s(z) = \frac{1}{2i}\left(\mathrm{Li}_s\left(e^{iz}\right) -
\mathrm{Li}_s\left(e^{-iz}\right)\right)
= \mathrm{Im}\left[\mathrm{Li}_s(e^{iz})\right] \quad (s, z \in \mathbb{R}),
and this representation can be taken to provide the analytic continuation of the
series. The complementary function :func:`~mpmath.clcos` gives the corresponding
cosine sum.
**Examples**
Evaluation for arbitrarily chosen `s` and `z`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> s, z = 3, 4
>>> clsin(s, z); nsum(lambda k: sin(z*k)/k**s, [1,inf])
-0.6533010136329338746275795
-0.6533010136329338746275795
Using `z + \pi` instead of `z` gives an alternating series::
>>> clsin(s, z+pi)
0.8860032351260589402871624
>>> nsum(lambda k: (-1)**k*sin(z*k)/k**s, [1,inf])
0.8860032351260589402871624
With `s = 1`, the sum can be expressed in closed form
using elementary functions::
>>> z = 1 + sqrt(3)
>>> clsin(1, z)
0.2047709230104579724675985
>>> chop((log(1-exp(-j*z)) - log(1-exp(j*z)))/(2*j))
0.2047709230104579724675985
>>> nsum(lambda k: sin(k*z)/k, [1,inf])
0.2047709230104579724675985
The classical Clausen function `\mathrm{Cl}_2(\theta)` gives the
value of the integral `\int_0^{\theta} -\ln(2\sin(x/2)) dx` for
`0 < \theta < 2 \pi`::
>>> cl2 = lambda t: clsin(2, t)
>>> cl2(3.5)
-0.2465045302347694216534255
>>> -quad(lambda x: ln(2*sin(0.5*x)), [0, 3.5])
-0.2465045302347694216534255
This function is symmetric about `\theta = \pi` with zeros and extreme
points::
>>> cl2(0); cl2(pi/3); chop(cl2(pi)); cl2(5*pi/3); chop(cl2(2*pi))
0.0
1.014941606409653625021203
0.0
-1.014941606409653625021203
0.0
Catalan's constant is a special value::
>>> cl2(pi/2)
0.9159655941772190150546035
>>> +catalan
0.9159655941772190150546035
The Clausen sine function can be expressed in closed form when
`s` is an odd integer (becoming zero when `s < 0`)::
>>> z = 1 + sqrt(2)
>>> clsin(1, z); (pi-z)/2
0.3636895456083490948304773
0.3636895456083490948304773
>>> clsin(3, z); pi**2/6*z - pi*z**2/4 + z**3/12
0.5661751584451144991707161
0.5661751584451144991707161
>>> clsin(-1, z)
0.0
>>> clsin(-3, z)
0.0
It can also be expressed in closed form for even integer `s \le 0`,
providing a finite sum for series such as
`\sin(z) + \sin(2z) + \sin(3z) + \ldots`::
>>> z = 1 + sqrt(2)
>>> clsin(0, z)
0.1903105029507513881275865
>>> cot(z/2)/2
0.1903105029507513881275865
>>> clsin(-2, z)
-0.1089406163841548817581392
>>> -cot(z/2)*csc(z/2)**2/4
-0.1089406163841548817581392
Call with ``pi=True`` to multiply `z` by `\pi` exactly::
>>> clsin(3, 3*pi)
-8.892316224968072424732898e-26
>>> clsin(3, 3, pi=True)
0.0
Evaluation for complex `s`, `z` in a nonconvergent case::
>>> s, z = -1-j, 1+2j
>>> clsin(s, z)
(-0.593079480117379002516034 + 0.9038644233367868273362446j)
>>> extraprec(20)(nsum)(lambda k: sin(k*z)/k**s, [1,inf])
(-0.593079480117379002516034 + 0.9038644233367868273362446j)
"""
clcos = r"""
Computes the Clausen cosine function, defined formally by the series
.. math ::
\mathrm{\widetilde{Cl}}_s(z) = \sum_{k=1}^{\infty} \frac{\cos(kz)}{k^s}.
This function is complementary to the Clausen sine function
:func:`~mpmath.clsin`. In terms of the polylogarithm,
.. math ::
\mathrm{\widetilde{Cl}}_s(z) =
\frac{1}{2}\left(\mathrm{Li}_s\left(e^{iz}\right) +
\mathrm{Li}_s\left(e^{-iz}\right)\right)
= \mathrm{Re}\left[\mathrm{Li}_s(e^{iz})\right] \quad (s, z \in \mathbb{R}).
**Examples**
Evaluation for arbitrarily chosen `s` and `z`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> s, z = 3, 4
>>> clcos(s, z); nsum(lambda k: cos(z*k)/k**s, [1,inf])
-0.6518926267198991308332759
-0.6518926267198991308332759
Using `z + \pi` instead of `z` gives an alternating series::
>>> s, z = 3, 0.5
>>> clcos(s, z+pi)
-0.8155530586502260817855618
>>> nsum(lambda k: (-1)**k*cos(z*k)/k**s, [1,inf])
-0.8155530586502260817855618
With `s = 1`, the sum can be expressed in closed form
using elementary functions::
>>> z = 1 + sqrt(3)
>>> clcos(1, z)
-0.6720334373369714849797918
>>> chop(-0.5*(log(1-exp(j*z))+log(1-exp(-j*z))))
-0.6720334373369714849797918
>>> -log(abs(2*sin(0.5*z))) # Equivalent to above when z is real
-0.6720334373369714849797918
>>> nsum(lambda k: cos(k*z)/k, [1,inf])
-0.6720334373369714849797918
It can also be expressed in closed form when `s` is an even integer.
For example,
>>> clcos(2,z)
-0.7805359025135583118863007
>>> pi**2/6 - pi*z/2 + z**2/4
-0.7805359025135583118863007
The case `s = 0` gives the renormalized sum of
`\cos(z) + \cos(2z) + \cos(3z) + \ldots` (which happens to be the same for
any value of `z`)::
>>> clcos(0, z)
-0.5
>>> nsum(lambda k: cos(k*z), [1,inf])
-0.5
Also the sums
.. math ::
\cos(z) + 2\cos(2z) + 3\cos(3z) + \ldots
and
.. math ::
\cos(z) + 2^n \cos(2z) + 3^n \cos(3z) + \ldots
for higher integer powers `n = -s` can be done in closed form. They are zero
when `n` is positive and even (`s` negative and even)::
>>> clcos(-1, z); 1/(2*cos(z)-2)
-0.2607829375240542480694126
-0.2607829375240542480694126
>>> clcos(-3, z); (2+cos(z))*csc(z/2)**4/8
0.1472635054979944390848006
0.1472635054979944390848006
>>> clcos(-2, z); clcos(-4, z); clcos(-6, z)
0.0
0.0
0.0
With `z = \pi`, the series reduces to that of the Riemann zeta function
(more generally, if `z = p \pi/q`, it is a finite sum over Hurwitz zeta
function values)::
>>> clcos(2.5, 0); zeta(2.5)
1.34148725725091717975677
1.34148725725091717975677
>>> clcos(2.5, pi); -altzeta(2.5)
-0.8671998890121841381913472
-0.8671998890121841381913472
Call with ``pi=True`` to multiply `z` by `\pi` exactly::
>>> clcos(-3, 2*pi)
2.997921055881167659267063e+102
>>> clcos(-3, 2, pi=True)
0.008333333333333333333333333
Evaluation for complex `s`, `z` in a nonconvergent case::
>>> s, z = -1-j, 1+2j
>>> clcos(s, z)
(0.9407430121562251476136807 + 0.715826296033590204557054j)
>>> extraprec(20)(nsum)(lambda k: cos(k*z)/k**s, [1,inf])
(0.9407430121562251476136807 + 0.715826296033590204557054j)
"""
whitm = r"""
Evaluates the Whittaker function `M(k,m,z)`, which gives a solution
to the Whittaker differential equation
.. math ::
\frac{d^2f}{dz^2} + \left(-\frac{1}{4}+\frac{k}{z}+
\frac{(\frac{1}{4}-m^2)}{z^2}\right) f = 0.
A second solution is given by :func:`~mpmath.whitw`.
The Whittaker functions are defined in Abramowitz & Stegun, section 13.1.
They are alternate forms of the confluent hypergeometric functions
`\,_1F_1` and `U`:
.. math ::
M(k,m,z) = e^{-\frac{1}{2}z} z^{\frac{1}{2}+m}
\,_1F_1(\tfrac{1}{2}+m-k, 1+2m, z)
W(k,m,z) = e^{-\frac{1}{2}z} z^{\frac{1}{2}+m}
U(\tfrac{1}{2}+m-k, 1+2m, z).
**Examples**
Evaluation for arbitrary real and complex arguments is supported::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> whitm(1, 1, 1)
0.7302596799460411820509668
>>> whitm(1, 1, -1)
(0.0 - 1.417977827655098025684246j)
>>> whitm(j, j/2, 2+3j)
(3.245477713363581112736478 - 0.822879187542699127327782j)
>>> whitm(2, 3, 100000)
4.303985255686378497193063e+21707
Evaluation at zero::
>>> whitm(1,-1,0); whitm(1,-0.5,0); whitm(1,0,0)
+inf
nan
0.0
We can verify that :func:`~mpmath.whitm` numerically satisfies the
differential equation for arbitrarily chosen values::
>>> k = mpf(0.25)
>>> m = mpf(1.5)
>>> f = lambda z: whitm(k,m,z)
>>> for z in [-1, 2.5, 3, 1+2j]:
... chop(diff(f,z,2) + (-0.25 + k/z + (0.25-m**2)/z**2)*f(z))
...
0.0
0.0
0.0
0.0
An integral involving both :func:`~mpmath.whitm` and :func:`~mpmath.whitw`,
verifying evaluation along the real axis::
>>> quad(lambda x: exp(-x)*whitm(3,2,x)*whitw(1,-2,x), [0,inf])
3.438869842576800225207341
>>> 128/(21*sqrt(pi))
3.438869842576800225207341
"""
whitw = r"""
Evaluates the Whittaker function `W(k,m,z)`, which gives a second
solution to the Whittaker differential equation. (See :func:`~mpmath.whitm`.)
**Examples**
Evaluation for arbitrary real and complex arguments is supported::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> whitw(1, 1, 1)
1.19532063107581155661012
>>> whitw(1, 1, -1)
(-0.9424875979222187313924639 - 0.2607738054097702293308689j)
>>> whitw(j, j/2, 2+3j)
(0.1782899315111033879430369 - 0.01609578360403649340169406j)
>>> whitw(2, 3, 100000)
1.887705114889527446891274e-21705
>>> whitw(-1, -1, 100)
1.905250692824046162462058e-24
Evaluation at zero::
>>> for m in [-1, -0.5, 0, 0.5, 1]:
... whitw(1, m, 0)
...
+inf
nan
0.0
nan
+inf
We can verify that :func:`~mpmath.whitw` numerically satisfies the
differential equation for arbitrarily chosen values::
>>> k = mpf(0.25)
>>> m = mpf(1.5)
>>> f = lambda z: whitw(k,m,z)
>>> for z in [-1, 2.5, 3, 1+2j]:
... chop(diff(f,z,2) + (-0.25 + k/z + (0.25-m**2)/z**2)*f(z))
...
0.0
0.0
0.0
0.0
"""
ber = r"""
Computes the Kelvin function ber, which for real arguments gives the real part
of the Bessel J function of a rotated argument
.. math ::
J_n\left(x e^{3\pi i/4}\right) = \mathrm{ber}_n(x) + i \mathrm{bei}_n(x).
The imaginary part is given by :func:`~mpmath.bei`.
**Plots**
.. literalinclude :: /plots/ber.py
.. image :: /plots/ber.png
**Examples**
Verifying the defining relation::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> n, x = 2, 3.5
>>> ber(n,x)
1.442338852571888752631129
>>> bei(n,x)
-0.948359035324558320217678
>>> besselj(n, x*root(1,8,3))
(1.442338852571888752631129 - 0.948359035324558320217678j)
The ber and bei functions are also defined by analytic continuation
for complex arguments::
>>> ber(1+j, 2+3j)
(4.675445984756614424069563 - 15.84901771719130765656316j)
>>> bei(1+j, 2+3j)
(15.83886679193707699364398 + 4.684053288183046528703611j)
"""
# Documentation text for the Kelvin function ``bei``.  This module stores
# docstrings as module-level raw strings whose names match the functions
# they document; presumably they are attached to the corresponding
# function objects elsewhere in mpmath -- TODO confirm.
bei = r"""
Computes the Kelvin function bei, which for real arguments gives the
imaginary part of the Bessel J function of a rotated argument.
See :func:`~mpmath.ber`.
"""
ker = r"""
Computes the Kelvin function ker, which for real arguments gives the real part
of the (rescaled) Bessel K function of a rotated argument
.. math ::
e^{-\pi i/2} K_n\left(x e^{3\pi i/4}\right) = \mathrm{ker}_n(x) + i \mathrm{kei}_n(x).
The imaginary part is given by :func:`~mpmath.kei`.
**Plots**
.. literalinclude :: /plots/ker.py
.. image :: /plots/ker.png
**Examples**
Verifying the defining relation::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> n, x = 2, 4.5
>>> ker(n,x)
0.02542895201906369640249801
>>> kei(n,x)
-0.02074960467222823237055351
>>> exp(-n*pi*j/2) * besselk(n, x*root(1,8,1))
(0.02542895201906369640249801 - 0.02074960467222823237055351j)
The ker and kei functions are also defined by analytic continuation
for complex arguments::
>>> ker(1+j, 3+4j)
(1.586084268115490421090533 - 2.939717517906339193598719j)
>>> kei(1+j, 3+4j)
(-2.940403256319453402690132 - 1.585621643835618941044855j)
"""
# Documentation text for the Kelvin function ``kei``.  Like the other
# names in this module, this is a raw documentation string; presumably it
# is attached to the corresponding function object elsewhere in
# mpmath -- TODO confirm.
kei = r"""
Computes the Kelvin function kei, which for real arguments gives the
imaginary part of the (rescaled) Bessel K function of a rotated argument.
See :func:`~mpmath.ker`.
"""
struveh = r"""
Gives the Struve function
.. math ::
\,\mathbf{H}_n(z) =
\sum_{k=0}^\infty \frac{(-1)^k}{\Gamma(k+\frac{3}{2})
\Gamma(k+n+\frac{3}{2})} {\left({\frac{z}{2}}\right)}^{2k+n+1}
which is a solution to the Struve differential equation
.. math ::
z^2 f''(z) + z f'(z) + (z^2-n^2) f(z) = \frac{2 z^{n+1}}{\pi (2n-1)!!}.
**Examples**
Evaluation for arbitrary real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> struveh(0, 3.5)
0.3608207733778295024977797
>>> struveh(-1, 10)
-0.255212719726956768034732
>>> struveh(1, -100.5)
0.5819566816797362287502246
>>> struveh(2.5, 10000000000000)
3153915652525200060.308937
>>> struveh(2.5, -10000000000000)
(0.0 - 3153915652525200060.308937j)
>>> struveh(1+j, 1000000+4000000j)
(-3.066421087689197632388731e+1737173 - 1.596619701076529803290973e+1737173j)
A Struve function of half-integer order is elementary; for example:
>>> z = 3
>>> struveh(0.5, 3)
0.9167076867564138178671595
>>> sqrt(2/(pi*z))*(1-cos(z))
0.9167076867564138178671595
Numerically verifying the differential equation::
>>> z = mpf(4.5)
>>> n = 3
>>> f = lambda z: struveh(n,z)
>>> lhs = z**2*diff(f,z,2) + z*diff(f,z) + (z**2-n**2)*f(z)
>>> rhs = 2*z**(n+1)/fac2(2*n-1)/pi
>>> lhs
17.40359302709875496632744
>>> rhs
17.40359302709875496632744
"""
struvel = r"""
Gives the modified Struve function
.. math ::
\,\mathbf{L}_n(z) = -i e^{-n\pi i/2} \mathbf{H}_n(i z)
which solves the modified Struve differential equation
.. math ::
z^2 f''(z) + z f'(z) - (z^2+n^2) f(z) = \frac{2 z^{n+1}}{\pi (2n-1)!!}.
**Examples**
Evaluation for arbitrary real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> struvel(0, 3.5)
7.180846515103737996249972
>>> struvel(-1, 10)
2670.994904980850550721511
>>> struvel(1, -100.5)
1.757089288053346261497686e+42
>>> struvel(2.5, 10000000000000)
4.160893281017115450519948e+4342944819025
>>> struvel(2.5, -10000000000000)
(0.0 - 4.160893281017115450519948e+4342944819025j)
>>> struvel(1+j, 700j)
(-0.1721150049480079451246076 + 0.1240770953126831093464055j)
>>> struvel(1+j, 1000000+4000000j)
(-2.973341637511505389128708e+434290 - 5.164633059729968297147448e+434290j)
Numerically verifying the differential equation::
>>> z = mpf(3.5)
>>> n = 3
>>> f = lambda z: struvel(n,z)
>>> lhs = z**2*diff(f,z,2) + z*diff(f,z) - (z**2+n**2)*f(z)
>>> rhs = 2*z**(n+1)/fac2(2*n-1)/pi
>>> lhs
6.368850306060678353018165
>>> rhs
6.368850306060678353018165
"""
appellf1 = r"""
Gives the Appell F1 hypergeometric function of two variables,
.. math ::
F_1(a,b_1,b_2,c,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
\frac{(a)_{m+n} (b_1)_m (b_2)_n}{(c)_{m+n}}
\frac{x^m y^n}{m! n!}.
This series is only generally convergent when `|x| < 1` and `|y| < 1`,
although :func:`~mpmath.appellf1` can evaluate an analytic continuation
with respect to either variable, and sometimes both.
**Examples**
Evaluation is supported for real and complex parameters::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> appellf1(1,0,0.5,1,0.5,0.25)
1.154700538379251529018298
>>> appellf1(1,1+j,0.5,1,0.5,0.5j)
(1.138403860350148085179415 + 1.510544741058517621110615j)
For some integer parameters, the F1 series reduces to a polynomial::
>>> appellf1(2,-4,-3,1,2,5)
-816.0
>>> appellf1(-5,1,2,1,4,5)
-20528.0
The analytic continuation with respect to either `x` or `y`,
and sometimes with respect to both, can be evaluated::
>>> appellf1(2,3,4,5,100,0.5)
(0.0006231042714165329279738662 + 0.0000005769149277148425774499857j)
>>> appellf1('1.1', '0.3', '0.2+2j', '0.4', '0.2', 1.5+3j)
(-0.1782604566893954897128702 + 0.002472407104546216117161499j)
>>> appellf1(1,2,3,4,10,12)
-0.07122993830066776374929313
For certain arguments, F1 reduces to an ordinary hypergeometric function::
>>> appellf1(1,2,3,5,0.5,0.25)
1.547902270302684019335555
>>> 4*hyp2f1(1,2,5,'1/3')/3
1.547902270302684019335555
>>> appellf1(1,2,3,4,0,1.5)
(-1.717202506168937502740238 - 2.792526803190927323077905j)
>>> hyp2f1(1,3,4,1.5)
(-1.717202506168937502740238 - 2.792526803190927323077905j)
The F1 function satisfies a system of partial differential equations::
>>> a,b1,b2,c,x,y = map(mpf, [1,0.5,0.25,1.125,0.25,-0.25])
>>> F = lambda x,y: appellf1(a,b1,b2,c,x,y)
>>> chop(x*(1-x)*diff(F,(x,y),(2,0)) +
... y*(1-x)*diff(F,(x,y),(1,1)) +
... (c-(a+b1+1)*x)*diff(F,(x,y),(1,0)) -
... b1*y*diff(F,(x,y),(0,1)) -
... a*b1*F(x,y))
0.0
>>>
>>> chop(y*(1-y)*diff(F,(x,y),(0,2)) +
... x*(1-y)*diff(F,(x,y),(1,1)) +
... (c-(a+b2+1)*y)*diff(F,(x,y),(0,1)) -
... b2*x*diff(F,(x,y),(1,0)) -
... a*b2*F(x,y))
0.0
The Appell F1 function allows for closed-form evaluation of various
integrals, such as any integral of the form
`\int x^r (x+a)^p (x+b)^q dx`::
>>> def integral(a,b,p,q,r,x1,x2):
... a,b,p,q,r,x1,x2 = map(mpmathify, [a,b,p,q,r,x1,x2])
... f = lambda x: x**r * (x+a)**p * (x+b)**q
... def F(x):
... v = x**(r+1)/(r+1) * (a+x)**p * (b+x)**q
... v *= (1+x/a)**(-p)
... v *= (1+x/b)**(-q)
... v *= appellf1(r+1,-p,-q,2+r,-x/a,-x/b)
... return v
... print("Num. quad: %s" % quad(f, [x1,x2]))
... print("Appell F1: %s" % (F(x2)-F(x1)))
...
>>> integral('1/5','4/3','-2','3','1/2',0,1)
Num. quad: 9.073335358785776206576981
Appell F1: 9.073335358785776206576981
>>> integral('3/2','4/3','-2','3','1/2',0,1)
Num. quad: 1.092829171999626454344678
Appell F1: 1.092829171999626454344678
>>> integral('3/2','4/3','-2','3','1/2',12,25)
Num. quad: 1106.323225040235116498927
Appell F1: 1106.323225040235116498927
Also incomplete elliptic integrals fall into this category [1]::
>>> def E(z, m):
... if (pi/2).ae(z):
... return ellipe(m)
... return 2*round(re(z)/pi)*ellipe(m) + mpf(-1)**round(re(z)/pi)*\
... sin(z)*appellf1(0.5,0.5,-0.5,1.5,sin(z)**2,m*sin(z)**2)
...
>>> z, m = 1, 0.5
>>> E(z,m); quad(lambda t: sqrt(1-m*sin(t)**2), [0,pi/4,3*pi/4,z])
0.9273298836244400669659042
0.9273298836244400669659042
>>> z, m = 3, 2
>>> E(z,m); quad(lambda t: sqrt(1-m*sin(t)**2), [0,pi/4,3*pi/4,z])
(1.057495752337234229715836 + 1.198140234735592207439922j)
(1.057495752337234229715836 + 1.198140234735592207439922j)
**References**
1. [WolframFunctions]_ http://functions.wolfram.com/EllipticIntegrals/EllipticE2/26/01/
2. [SrivastavaKarlsson]_
3. [CabralRosetti]_
4. [Vidunas]_
5. [Slater]_
"""
angerj = r"""
Gives the Anger function
.. math ::
\mathbf{J}_{\nu}(z) = \frac{1}{\pi}
\int_0^{\pi} \cos(\nu t - z \sin t) dt
which is an entire function of both the parameter `\nu` and
the argument `z`. It solves the inhomogeneous Bessel differential
equation
.. math ::
f''(z) + \frac{1}{z}f'(z) + \left(1-\frac{\nu^2}{z^2}\right) f(z)
= \frac{(z-\nu)}{\pi z^2} \sin(\pi \nu).
**Examples**
Evaluation for real and complex parameter and argument::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> angerj(2,3)
0.4860912605858910769078311
>>> angerj(-3+4j, 2+5j)
(-5033.358320403384472395612 + 585.8011892476145118551756j)
>>> angerj(3.25, 1e6j)
(4.630743639715893346570743e+434290 - 1.117960409887505906848456e+434291j)
>>> angerj(-1.5, 1e6)
0.0002795719747073879393087011
The Anger function coincides with the Bessel J-function when `\nu`
is an integer::
>>> angerj(1,3); besselj(1,3)
0.3390589585259364589255146
0.3390589585259364589255146
>>> angerj(1.5,3); besselj(1.5,3)
0.4088969848691080859328847
0.4777182150870917715515015
Verifying the differential equation::
>>> v,z = mpf(2.25), 0.75
>>> f = lambda z: angerj(v,z)
>>> diff(f,z,2) + diff(f,z)/z + (1-(v/z)**2)*f(z)
-0.6002108774380707130367995
>>> (z-v)/(pi*z**2) * sinpi(v)
-0.6002108774380707130367995
Verifying the integral representation::
>>> angerj(v,z)
0.1145380759919333180900501
>>> quad(lambda t: cos(v*t-z*sin(t))/pi, [0,pi])
0.1145380759919333180900501
**References**
1. [DLMF]_ section 11.10: Anger-Weber Functions
"""
webere = r"""
Gives the Weber function
.. math ::
\mathbf{E}_{\nu}(z) = \frac{1}{\pi}
\int_0^{\pi} \sin(\nu t - z \sin t) dt
which is an entire function of both the parameter `\nu` and
the argument `z`. It solves the inhomogeneous Bessel differential
equation
.. math ::
f''(z) + \frac{1}{z}f'(z) + \left(1-\frac{\nu^2}{z^2}\right) f(z)
= -\frac{1}{\pi z^2} (z+\nu+(z-\nu)\cos(\pi \nu)).
**Examples**
Evaluation for real and complex parameter and argument::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> webere(2,3)
-0.1057668973099018425662646
>>> webere(-3+4j, 2+5j)
(-585.8081418209852019290498 - 5033.314488899926921597203j)
>>> webere(3.25, 1e6j)
(-1.117960409887505906848456e+434291 - 4.630743639715893346570743e+434290j)
>>> webere(3.25, 1e6)
-0.00002812518265894315604914453
Up to addition of a rational function of `z`, the Weber function coincides
with the Struve H-function when `\nu` is an integer::
>>> webere(1,3); 2/pi-struveh(1,3)
-0.3834897968188690177372881
-0.3834897968188690177372881
>>> webere(5,3); 26/(35*pi)-struveh(5,3)
0.2009680659308154011878075
0.2009680659308154011878075
Verifying the differential equation::
>>> v,z = mpf(2.25), 0.75
>>> f = lambda z: webere(v,z)
>>> diff(f,z,2) + diff(f,z)/z + (1-(v/z)**2)*f(z)
-1.097441848875479535164627
>>> -(z+v+(z-v)*cospi(v))/(pi*z**2)
-1.097441848875479535164627
Verifying the integral representation::
>>> webere(v,z)
0.1486507351534283744485421
>>> quad(lambda t: sin(v*t-z*sin(t))/pi, [0,pi])
0.1486507351534283744485421
**References**
1. [DLMF]_ section 11.10: Anger-Weber Functions
"""
lommels1 = r"""
Gives the Lommel function `s_{\mu,\nu}` or `s^{(1)}_{\mu,\nu}`
.. math ::
s_{\mu,\nu}(z) = \frac{z^{\mu+1}}{(\mu-\nu+1)(\mu+\nu+1)}
\,_1F_2\left(1; \frac{\mu-\nu+3}{2}, \frac{\mu+\nu+3}{2};
-\frac{z^2}{4} \right)
which solves the inhomogeneous Bessel equation
.. math ::
z^2 f''(z) + z f'(z) + (z^2-\nu^2) f(z) = z^{\mu+1}.
A second solution is given by :func:`~mpmath.lommels2`.
**Plots**
.. literalinclude :: /plots/lommels1.py
.. image :: /plots/lommels1.png
**Examples**
An integral representation::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> u,v,z = 0.25, 0.125, mpf(0.75)
>>> lommels1(u,v,z)
0.4276243877565150372999126
>>> (bessely(v,z)*quad(lambda t: t**u*besselj(v,t), [0,z]) - \
... besselj(v,z)*quad(lambda t: t**u*bessely(v,t), [0,z]))*(pi/2)
0.4276243877565150372999126
A special value::
>>> lommels1(v,v,z)
0.5461221367746048054932553
>>> gamma(v+0.5)*sqrt(pi)*power(2,v-1)*struveh(v,z)
0.5461221367746048054932553
Verifying the differential equation::
>>> f = lambda z: lommels1(u,v,z)
>>> z**2*diff(f,z,2) + z*diff(f,z) + (z**2-v**2)*f(z)
0.6979536443265746992059141
>>> z**(u+1)
0.6979536443265746992059141
**References**
1. [GradshteynRyzhik]_
2. [Weisstein]_ http://mathworld.wolfram.com/LommelFunction.html
"""
lommels2 = r"""
Gives the second Lommel function `S_{\mu,\nu}` or `s^{(2)}_{\mu,\nu}`
.. math ::
S_{\mu,\nu}(z) = s_{\mu,\nu}(z) + 2^{\mu-1}
\Gamma\left(\tfrac{1}{2}(\mu-\nu+1)\right)
\Gamma\left(\tfrac{1}{2}(\mu+\nu+1)\right) \times
\left[\sin(\tfrac{1}{2}(\mu-\nu)\pi) J_{\nu}(z) -
\cos(\tfrac{1}{2}(\mu-\nu)\pi) Y_{\nu}(z)
\right]
which solves the same differential equation as
:func:`~mpmath.lommels1`.
**Plots**
.. literalinclude :: /plots/lommels2.py
.. image :: /plots/lommels2.png
**Examples**
For large `|z|`, `S_{\mu,\nu} \sim z^{\mu-1}`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> lommels2(10,2,30000)
1.968299831601008419949804e+40
>>> power(30000,9)
1.9683e+40
A special value::
>>> u,v,z = 0.5, 0.125, mpf(0.75)
>>> lommels2(v,v,z)
0.9589683199624672099969765
>>> (struveh(v,z)-bessely(v,z))*power(2,v-1)*sqrt(pi)*gamma(v+0.5)
0.9589683199624672099969765
Verifying the differential equation::
>>> f = lambda z: lommels2(u,v,z)
>>> z**2*diff(f,z,2) + z*diff(f,z) + (z**2-v**2)*f(z)
0.6495190528383289850727924
>>> z**(u+1)
0.6495190528383289850727924
**References**
1. [GradshteynRyzhik]_
2. [Weisstein]_ http://mathworld.wolfram.com/LommelFunction.html
"""
appellf2 = r"""
Gives the Appell F2 hypergeometric function of two variables
.. math ::
F_2(a,b_1,b_2,c_1,c_2,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
\frac{(a)_{m+n} (b_1)_m (b_2)_n}{(c_1)_m (c_2)_n}
\frac{x^m y^n}{m! n!}.
The series is generally absolutely convergent for `|x| + |y| < 1`.
**Examples**
Evaluation for real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> appellf2(1,2,3,4,5,0.25,0.125)
1.257417193533135344785602
>>> appellf2(1,-3,-4,2,3,2,3)
-42.8
>>> appellf2(0.5,0.25,-0.25,2,3,0.25j,0.25)
(0.9880539519421899867041719 + 0.01497616165031102661476978j)
>>> chop(appellf2(1,1+j,1-j,3j,-3j,0.25,0.25))
1.201311219287411337955192
>>> appellf2(1,1,1,4,6,0.125,16)
(-0.09455532250274744282125152 - 0.7647282253046207836769297j)
A transformation formula::
>>> a,b1,b2,c1,c2,x,y = map(mpf, [1,2,0.5,0.25,1.625,-0.125,0.125])
>>> appellf2(a,b1,b2,c1,c2,x,y)
0.2299211717841180783309688
>>> (1-x)**(-a)*appellf2(a,c1-b1,b2,c1,c2,x/(x-1),y/(1-x))
0.2299211717841180783309688
A system of partial differential equations satisfied by F2::
>>> a,b1,b2,c1,c2,x,y = map(mpf, [1,0.5,0.25,1.125,1.5,0.0625,-0.0625])
>>> F = lambda x,y: appellf2(a,b1,b2,c1,c2,x,y)
>>> chop(x*(1-x)*diff(F,(x,y),(2,0)) -
... x*y*diff(F,(x,y),(1,1)) +
... (c1-(a+b1+1)*x)*diff(F,(x,y),(1,0)) -
... b1*y*diff(F,(x,y),(0,1)) -
... a*b1*F(x,y))
0.0
>>> chop(y*(1-y)*diff(F,(x,y),(0,2)) -
... x*y*diff(F,(x,y),(1,1)) +
... (c2-(a+b2+1)*y)*diff(F,(x,y),(0,1)) -
... b2*x*diff(F,(x,y),(1,0)) -
... a*b2*F(x,y))
0.0
**References**
See references for :func:`~mpmath.appellf1`.
"""
appellf3 = r"""
Gives the Appell F3 hypergeometric function of two variables
.. math ::
F_3(a_1,a_2,b_1,b_2,c,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
\frac{(a_1)_m (a_2)_n (b_1)_m (b_2)_n}{(c)_{m+n}}
\frac{x^m y^n}{m! n!}.
The series is generally absolutely convergent for `|x| < 1, |y| < 1`.
**Examples**
Evaluation for various parameters and variables::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> appellf3(1,2,3,4,5,0.5,0.25)
2.221557778107438938158705
>>> appellf3(1,2,3,4,5,6,0); hyp2f1(1,3,5,6)
(-0.5189554589089861284537389 - 0.1454441043328607980769742j)
(-0.5189554589089861284537389 - 0.1454441043328607980769742j)
>>> appellf3(1,-2,-3,1,1,4,6)
-17.4
>>> appellf3(1,2,-3,1,1,4,6)
(17.7876136773677356641825 + 19.54768762233649126154534j)
>>> appellf3(1,2,-3,1,1,6,4)
(85.02054175067929402953645 + 148.4402528821177305173599j)
>>> chop(appellf3(1+j,2,1-j,2,3,0.25,0.25))
1.719992169545200286696007
Many transformations and evaluations for special combinations
of the parameters are possible, e.g.:
>>> a,b,c,x,y = map(mpf, [0.5,0.25,0.125,0.125,-0.125])
>>> appellf3(a,c-a,b,c-b,c,x,y)
1.093432340896087107444363
>>> (1-y)**(a+b-c)*hyp2f1(a,b,c,x+y-x*y)
1.093432340896087107444363
>>> x**2*appellf3(1,1,1,1,3,x,-x)
0.01568646277445385390945083
>>> polylog(2,x**2)
0.01568646277445385390945083
>>> a1,a2,b1,b2,c,x = map(mpf, [0.5,0.25,0.125,0.5,4.25,0.125])
>>> appellf3(a1,a2,b1,b2,c,x,1)
1.03947361709111140096947
>>> gammaprod([c,c-a2-b2],[c-a2,c-b2])*hyp3f2(a1,b1,c-a2-b2,c-a2,c-b2,x)
1.03947361709111140096947
The Appell F3 function satisfies a pair of partial
differential equations::
>>> a1,a2,b1,b2,c,x,y = map(mpf, [0.5,0.25,0.125,0.5,0.625,0.0625,-0.0625])
>>> F = lambda x,y: appellf3(a1,a2,b1,b2,c,x,y)
>>> chop(x*(1-x)*diff(F,(x,y),(2,0)) +
... y*diff(F,(x,y),(1,1)) +
... (c-(a1+b1+1)*x)*diff(F,(x,y),(1,0)) -
... a1*b1*F(x,y))
0.0
>>> chop(y*(1-y)*diff(F,(x,y),(0,2)) +
... x*diff(F,(x,y),(1,1)) +
... (c-(a2+b2+1)*y)*diff(F,(x,y),(0,1)) -
... a2*b2*F(x,y))
0.0
**References**
See references for :func:`~mpmath.appellf1`.
"""
appellf4 = r"""
Gives the Appell F4 hypergeometric function of two variables
.. math ::
F_4(a,b,c_1,c_2,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
\frac{(a)_{m+n} (b)_{m+n}}{(c_1)_m (c_2)_n}
\frac{x^m y^n}{m! n!}.
The series is generally absolutely convergent for
`\sqrt{|x|} + \sqrt{|y|} < 1`.
**Examples**
Evaluation for various parameters and arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> appellf4(1,1,2,2,0.25,0.125)
1.286182069079718313546608
>>> appellf4(-2,-3,4,5,4,5)
34.8
>>> appellf4(5,4,2,3,0.25j,-0.125j)
(-0.2585967215437846642163352 + 2.436102233553582711818743j)
Reduction to `\,_2F_1` in a special case::
>>> a,b,c,x,y = map(mpf, [0.5,0.25,0.125,0.125,-0.125])
>>> appellf4(a,b,c,a+b-c+1,x*(1-y),y*(1-x))
1.129143488466850868248364
>>> hyp2f1(a,b,c,x)*hyp2f1(a,b,a+b-c+1,y)
1.129143488466850868248364
A system of partial differential equations satisfied by F4::
>>> a,b,c1,c2,x,y = map(mpf, [1,0.5,0.25,1.125,0.0625,-0.0625])
>>> F = lambda x,y: appellf4(a,b,c1,c2,x,y)
>>> chop(x*(1-x)*diff(F,(x,y),(2,0)) -
... y**2*diff(F,(x,y),(0,2)) -
... 2*x*y*diff(F,(x,y),(1,1)) +
... (c1-(a+b+1)*x)*diff(F,(x,y),(1,0)) -
... ((a+b+1)*y)*diff(F,(x,y),(0,1)) -
... a*b*F(x,y))
0.0
>>> chop(y*(1-y)*diff(F,(x,y),(0,2)) -
... x**2*diff(F,(x,y),(2,0)) -
... 2*x*y*diff(F,(x,y),(1,1)) +
... (c2-(a+b+1)*y)*diff(F,(x,y),(0,1)) -
... ((a+b+1)*x)*diff(F,(x,y),(1,0)) -
... a*b*F(x,y))
0.0
**References**
See references for :func:`~mpmath.appellf1`.
"""
zeta = r"""
Computes the Riemann zeta function
.. math ::
\zeta(s) = 1+\frac{1}{2^s}+\frac{1}{3^s}+\frac{1}{4^s}+\ldots
or, with `a \ne 1`, the more general Hurwitz zeta function
.. math ::
\zeta(s,a) = \sum_{k=0}^\infty \frac{1}{(a+k)^s}.
Optionally, ``zeta(s, a, n)`` computes the `n`-th derivative with
respect to `s`,
.. math ::
\zeta^{(n)}(s,a) = (-1)^n \sum_{k=0}^\infty \frac{\log^n(a+k)}{(a+k)^s}.
Although these series only converge for `\Re(s) > 1`, the Riemann and Hurwitz
zeta functions are defined through analytic continuation for arbitrary
complex `s \ne 1` (`s = 1` is a pole).
The implementation uses three algorithms: the Borwein algorithm for
the Riemann zeta function when `s` is close to the real line;
the Riemann-Siegel formula for the Riemann zeta function when `s` is
large imaginary, and Euler-Maclaurin summation in all other cases.
The reflection formula for `\Re(s) < 0` is implemented in some cases.
The algorithm can be chosen with ``method = 'borwein'``,
``method='riemann-siegel'`` or ``method = 'euler-maclaurin'``.
The parameter `a` is usually a rational number `a = p/q`, and may be specified
as such by passing an integer tuple `(p, q)`. Evaluation is supported for
arbitrary complex `a`, but may be slow and/or inaccurate when `\Re(s) < 0` for
nonrational `a` or when computing derivatives.
**Examples**
Some values of the Riemann zeta function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> zeta(2); pi**2 / 6
1.644934066848226436472415
1.644934066848226436472415
>>> zeta(0)
-0.5
>>> zeta(-1)
-0.08333333333333333333333333
>>> zeta(-2)
0.0
For large positive `s`, `\zeta(s)` rapidly approaches 1::
>>> zeta(50)
1.000000000000000888178421
>>> zeta(100)
1.0
>>> zeta(inf)
1.0
>>> 1-sum((zeta(k)-1)/k for k in range(2,85)); +euler
0.5772156649015328606065121
0.5772156649015328606065121
>>> nsum(lambda k: zeta(k)-1, [2, inf])
1.0
Evaluation is supported for complex `s` and `a`:
>>> zeta(-3+4j)
(-0.03373057338827757067584698 + 0.2774499251557093745297677j)
>>> zeta(2+3j, -1+j)
(389.6841230140842816370741 + 295.2674610150305334025962j)
The Riemann zeta function has so-called nontrivial zeros on
the critical line `s = 1/2 + it`::
>>> findroot(zeta, 0.5+14j); zetazero(1)
(0.5 + 14.13472514173469379045725j)
(0.5 + 14.13472514173469379045725j)
>>> findroot(zeta, 0.5+21j); zetazero(2)
(0.5 + 21.02203963877155499262848j)
(0.5 + 21.02203963877155499262848j)
>>> findroot(zeta, 0.5+25j); zetazero(3)
(0.5 + 25.01085758014568876321379j)
(0.5 + 25.01085758014568876321379j)
>>> chop(zeta(zetazero(10)))
0.0
Evaluation on and near the critical line is supported for large
heights `t` by means of the Riemann-Siegel formula (currently
for `a = 1`, `n \le 4`)::
>>> zeta(0.5+100000j)
(1.073032014857753132114076 + 5.780848544363503984261041j)
>>> zeta(0.75+1000000j)
(0.9535316058375145020351559 + 0.9525945894834273060175651j)
>>> zeta(0.5+10000000j)
(11.45804061057709254500227 - 8.643437226836021723818215j)
>>> zeta(0.5+100000000j, derivative=1)
(51.12433106710194942681869 + 43.87221167872304520599418j)
>>> zeta(0.5+100000000j, derivative=2)
(-444.2760822795430400549229 - 896.3789978119185981665403j)
>>> zeta(0.5+100000000j, derivative=3)
(3230.72682687670422215339 + 14374.36950073615897616781j)
>>> zeta(0.5+100000000j, derivative=4)
(-11967.35573095046402130602 - 218945.7817789262839266148j)
>>> zeta(1+10000000j) # off the line
(2.859846483332530337008882 + 0.491808047480981808903986j)
>>> zeta(1+10000000j, derivative=1)
(-4.333835494679647915673205 - 0.08405337962602933636096103j)
>>> zeta(1+10000000j, derivative=4)
(453.2764822702057701894278 - 581.963625832768189140995j)
For investigation of the zeta function zeros, the Riemann-Siegel
Z-function is often more convenient than working with the Riemann
zeta function directly (see :func:`~mpmath.siegelz`).
Some values of the Hurwitz zeta function::
>>> zeta(2, 3); -5./4 + pi**2/6
0.3949340668482264364724152
0.3949340668482264364724152
>>> zeta(2, (3,4)); pi**2 - 8*catalan
2.541879647671606498397663
2.541879647671606498397663
For positive integer values of `s`, the Hurwitz zeta function is
equivalent to a polygamma function (except for a normalizing factor)::
>>> zeta(4, (1,5)); psi(3, '1/5')/6
625.5408324774542966919938
625.5408324774542966919938
Evaluation of derivatives::
>>> zeta(0, 3+4j, 1); loggamma(3+4j) - ln(2*pi)/2
(-2.675565317808456852310934 + 4.742664438034657928194889j)
(-2.675565317808456852310934 + 4.742664438034657928194889j)
>>> zeta(2, 1, 20)
2432902008176640000.000242
>>> zeta(3+4j, 5.5+2j, 4)
(-0.140075548947797130681075 - 0.3109263360275413251313634j)
>>> zeta(0.5+100000j, 1, 4)
(-10407.16081931495861539236 + 13777.78669862804508537384j)
>>> zeta(-100+0.5j, (1,3), derivative=4)
(4.007180821099823942702249e+79 + 4.916117957092593868321778e+78j)
Generating a Taylor series at `s = 2` using derivatives::
>>> for k in range(11): print("%s * (s-2)^%i" % (zeta(2,1,k)/fac(k), k))
...
1.644934066848226436472415 * (s-2)^0
-0.9375482543158437537025741 * (s-2)^1
0.9946401171494505117104293 * (s-2)^2
-1.000024300473840810940657 * (s-2)^3
1.000061933072352565457512 * (s-2)^4
-1.000006869443931806408941 * (s-2)^5
1.000000173233769531820592 * (s-2)^6
-0.9999999569989868493432399 * (s-2)^7
0.9999999937218844508684206 * (s-2)^8
-0.9999999996355013916608284 * (s-2)^9
1.000000000004610645020747 * (s-2)^10
Evaluation at zero and for negative integer `s`::
>>> zeta(0, 10)
-9.5
>>> zeta(-2, (2,3)); mpf(1)/81
0.01234567901234567901234568
0.01234567901234567901234568
>>> zeta(-3+4j, (5,4))
(0.2899236037682695182085988 + 0.06561206166091757973112783j)
>>> zeta(-3.25, 1/pi)
-0.0005117269627574430494396877
>>> zeta(-3.5, pi, 1)
11.156360390440003294709
>>> zeta(-100.5, (8,3))
-4.68162300487989766727122e+77
>>> zeta(-10.5, (-8,3))
(-0.01521913704446246609237979 + 29907.72510874248161608216j)
>>> zeta(-1000.5, (-8,3))
(1.031911949062334538202567e+1770 + 1.519555750556794218804724e+426j)
>>> zeta(-1+j, 3+4j)
(-16.32988355630802510888631 - 22.17706465801374033261383j)
>>> zeta(-1+j, 3+4j, 2)
(32.48985276392056641594055 - 51.11604466157397267043655j)
>>> diff(lambda s: zeta(s, 3+4j), -1+j, 2)
(32.48985276392056641594055 - 51.11604466157397267043655j)
**References**
1. http://mathworld.wolfram.com/RiemannZetaFunction.html
2. http://mathworld.wolfram.com/HurwitzZetaFunction.html
3. http://www.cecm.sfu.ca/personal/pborwein/PAPERS/P155.pdf
"""
dirichlet = r"""
Evaluates the Dirichlet L-function
.. math ::
L(s,\chi) = \sum_{k=1}^\infty \frac{\chi(k)}{k^s}.
where `\chi` is a periodic sequence of length `q` which should be supplied
in the form of a list `[\chi(0), \chi(1), \ldots, \chi(q-1)]`.
Strictly, `\chi` should be a Dirichlet character, but any periodic
sequence will work.
For example, ``dirichlet(s, [1])`` gives the ordinary
Riemann zeta function and ``dirichlet(s, [-1,1])`` gives
the alternating zeta function (Dirichlet eta function).
Also the derivative with respect to `s` (currently only a first
derivative) can be evaluated.
**Examples**
The ordinary Riemann zeta function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> dirichlet(3, [1]); zeta(3)
1.202056903159594285399738
1.202056903159594285399738
>>> dirichlet(1, [1])
+inf
The alternating zeta function::
>>> dirichlet(1, [-1,1]); ln(2)
0.6931471805599453094172321
0.6931471805599453094172321
The following defines the Dirichlet beta function
`\beta(s) = \sum_{k=0}^\infty \frac{(-1)^k}{(2k+1)^s}` and verifies
several values of this function::
>>> B = lambda s, d=0: dirichlet(s, [0, 1, 0, -1], d)
>>> B(0); 1./2
0.5
0.5
>>> B(1); pi/4
0.7853981633974483096156609
0.7853981633974483096156609
>>> B(2); +catalan
0.9159655941772190150546035
0.9159655941772190150546035
>>> B(2,1); diff(B, 2)
0.08158073611659279510291217
0.08158073611659279510291217
>>> B(-1,1); 2*catalan/pi
0.5831218080616375602767689
0.5831218080616375602767689
>>> B(0,1); log(gamma(0.25)**2/(2*pi*sqrt(2)))
0.3915943927068367764719453
0.3915943927068367764719454
>>> B(1,1); 0.25*pi*(euler+2*ln2+3*ln(pi)-4*ln(gamma(0.25)))
0.1929013167969124293631898
0.1929013167969124293631898
A custom L-series of period 3::
>>> dirichlet(2, [2,0,1])
0.7059715047839078092146831
>>> 2*nsum(lambda k: (3*k)**-2, [1,inf]) + \
... nsum(lambda k: (3*k+2)**-2, [0,inf])
0.7059715047839078092146831
"""
coulombf = r"""
Calculates the regular Coulomb wave function
.. math ::
F_l(\eta,z) = C_l(\eta) z^{l+1} e^{-iz} \,_1F_1(l+1-i\eta, 2l+2, 2iz)
where the normalization constant `C_l(\eta)` is as calculated by
:func:`~mpmath.coulombc`. This function solves the differential equation
.. math ::
f''(z) + \left(1-\frac{2\eta}{z}-\frac{l(l+1)}{z^2}\right) f(z) = 0.
A second linearly independent solution is given by the irregular
Coulomb wave function `G_l(\eta,z)` (see :func:`~mpmath.coulombg`)
and thus the general solution is
`f(z) = C_1 F_l(\eta,z) + C_2 G_l(\eta,z)` for arbitrary
constants `C_1`, `C_2`.
Physically, the Coulomb wave functions give the radial solution
to the Schrodinger equation for a point particle in a `1/z` potential; `z` is
then the radius and `l`, `\eta` are quantum numbers.
The Coulomb wave functions with real parameters are defined
in Abramowitz & Stegun, section 14. However, all parameters are permitted
to be complex in this implementation (see references).
**Plots**
.. literalinclude :: /plots/coulombf.py
.. image :: /plots/coulombf.png
.. literalinclude :: /plots/coulombf_c.py
.. image :: /plots/coulombf_c.png
**Examples**
Evaluation is supported for arbitrary magnitudes of `z`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> coulombf(2, 1.5, 3.5)
0.4080998961088761187426445
>>> coulombf(-2, 1.5, 3.5)
0.7103040849492536747533465
>>> coulombf(2, 1.5, '1e-10')
4.143324917492256448770769e-33
>>> coulombf(2, 1.5, 1000)
0.4482623140325567050716179
>>> coulombf(2, 1.5, 10**10)
-0.066804196437694360046619
Verifying the differential equation::
>>> l, eta, z = 2, 3, mpf(2.75)
>>> A, B = 1, 2
>>> f = lambda z: A*coulombf(l,eta,z) + B*coulombg(l,eta,z)
>>> chop(diff(f,z,2) + (1-2*eta/z - l*(l+1)/z**2)*f(z))
0.0
A Wronskian relation satisfied by the Coulomb wave functions::
>>> l = 2
>>> eta = 1.5
>>> F = lambda z: coulombf(l,eta,z)
>>> G = lambda z: coulombg(l,eta,z)
>>> for z in [3.5, -1, 2+3j]:
... chop(diff(F,z)*G(z) - F(z)*diff(G,z))
...
1.0
1.0
1.0
Another Wronskian relation::
>>> F = coulombf
>>> G = coulombg
>>> for z in [3.5, -1, 2+3j]:
... chop(F(l-1,eta,z)*G(l,eta,z)-F(l,eta,z)*G(l-1,eta,z) - l/sqrt(l**2+eta**2))
...
0.0
0.0
0.0
An integral identity connecting the regular and irregular wave functions::
>>> l, eta, z = 4+j, 2-j, 5+2j
>>> coulombf(l,eta,z) + j*coulombg(l,eta,z)
(0.7997977752284033239714479 + 0.9294486669502295512503127j)
>>> g = lambda t: exp(-t)*t**(l-j*eta)*(t+2*j*z)**(l+j*eta)
>>> j*exp(-j*z)*z**(-l)/fac(2*l+1)/coulombc(l,eta)*quad(g, [0,inf])
(0.7997977752284033239714479 + 0.9294486669502295512503127j)
Some test case with complex parameters, taken from Michel [2]::
>>> mp.dps = 15
>>> coulombf(1+0.1j, 50+50j, 100.156)
(-1.02107292320897e+15 - 2.83675545731519e+15j)
>>> coulombg(1+0.1j, 50+50j, 100.156)
(2.83675545731519e+15 - 1.02107292320897e+15j)
>>> coulombf(1e-5j, 10+1e-5j, 0.1+1e-6j)
(4.30566371247811e-14 - 9.03347835361657e-19j)
>>> coulombg(1e-5j, 10+1e-5j, 0.1+1e-6j)
(778709182061.134 + 18418936.2660553j)
The following reproduces a table in Abramowitz & Stegun, at twice
the precision::
>>> mp.dps = 10
>>> eta = 2; z = 5
>>> for l in [5, 4, 3, 2, 1, 0]:
... print("%s %s %s" % (l, coulombf(l,eta,z),
... diff(lambda z: coulombf(l,eta,z), z)))
...
5 0.09079533488 0.1042553261
4 0.2148205331 0.2029591779
3 0.4313159311 0.320534053
2 0.7212774133 0.3952408216
1 0.9935056752 0.3708676452
0 1.143337392 0.2937960375
**References**
1. I.J. Thompson & A.R. Barnett, "Coulomb and Bessel Functions of Complex
Arguments and Order", J. Comp. Phys., vol 64, no. 2, June 1986.
2. N. Michel, "Precise Coulomb wave functions for a wide range of
complex `l`, `\eta` and `z`", http://arxiv.org/abs/physics/0702051v1
"""
coulombg = r"""
Calculates the irregular Coulomb wave function
.. math ::
G_l(\eta,z) = \frac{F_l(\eta,z) \cos(\chi) - F_{-l-1}(\eta,z)}{\sin(\chi)}
where `\chi = \sigma_l - \sigma_{-l-1} - (l+1/2) \pi`
and `\sigma_l(\eta) = (\ln \Gamma(1+l+i\eta)-\ln \Gamma(1+l-i\eta))/(2i)`.
See :func:`~mpmath.coulombf` for additional information.
**Plots**
.. literalinclude :: /plots/coulombg.py
.. image :: /plots/coulombg.png
.. literalinclude :: /plots/coulombg_c.py
.. image :: /plots/coulombg_c.png
**Examples**
Evaluation is supported for arbitrary magnitudes of `z`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> coulombg(-2, 1.5, 3.5)
1.380011900612186346255524
>>> coulombg(2, 1.5, 3.5)
1.919153700722748795245926
>>> coulombg(-2, 1.5, '1e-10')
201126715824.7329115106793
>>> coulombg(-2, 1.5, 1000)
0.1802071520691149410425512
>>> coulombg(-2, 1.5, 10**10)
0.652103020061678070929794
The following reproduces a table in Abramowitz & Stegun,
at twice the precision::
>>> mp.dps = 10
>>> eta = 2; z = 5
>>> for l in [1, 2, 3, 4, 5]:
... print("%s %s %s" % (l, coulombg(l,eta,z),
... -diff(lambda z: coulombg(l,eta,z), z)))
...
1 1.08148276 0.6028279961
2 1.496877075 0.5661803178
3 2.048694714 0.7959909551
4 3.09408669 1.731802374
5 5.629840456 4.549343289
Evaluation close to the singularity at `z = 0`::
>>> mp.dps = 15
>>> coulombg(0,10,1)
3088184933.67358
>>> coulombg(0,10,'1e-10')
5554866000719.8
>>> coulombg(0,10,'1e-100')
5554866221524.1
Evaluation with a half-integer value for `l`::
>>> coulombg(1.5, 1, 10)
0.852320038297334
"""
coulombc = r"""
Gives the normalizing Gamow constant for Coulomb wave functions,
.. math ::
C_l(\eta) = 2^l \exp\left(-\pi \eta/2 + [\ln \Gamma(1+l+i\eta) +
\ln \Gamma(1+l-i\eta)]/2 - \ln \Gamma(2l+2)\right),
where the log gamma function with continuous imaginary part
away from the negative half axis (see :func:`~mpmath.loggamma`) is implied.
This function is used internally for the calculation of
Coulomb wave functions, and automatically cached to make multiple
evaluations with fixed `l`, `\eta` fast.
"""
ellipfun = r"""
Computes any of the Jacobi elliptic functions, defined
in terms of Jacobi theta functions as
.. math ::
\mathrm{sn}(u,m) = \frac{\vartheta_3(0,q)}{\vartheta_2(0,q)}
\frac{\vartheta_1(t,q)}{\vartheta_4(t,q)}
\mathrm{cn}(u,m) = \frac{\vartheta_4(0,q)}{\vartheta_2(0,q)}
\frac{\vartheta_2(t,q)}{\vartheta_4(t,q)}
\mathrm{dn}(u,m) = \frac{\vartheta_4(0,q)}{\vartheta_3(0,q)}
\frac{\vartheta_3(t,q)}{\vartheta_4(t,q)},
or more generally computes a ratio of two such functions. Here
`t = u/\vartheta_3(0,q)^2`, and `q = q(m)` denotes the nome (see
:func:`~mpmath.nome`). Optionally, you can specify the nome directly
instead of `m` by passing ``q=<value>``, or you can directly
specify the elliptic parameter `k` with ``k=<value>``.
The first argument should be a two-character string specifying the
function using any combination of ``'s'``, ``'c'``, ``'d'``, ``'n'``. These
letters respectively denote the basic functions
`\mathrm{sn}(u,m)`, `\mathrm{cn}(u,m)`, `\mathrm{dn}(u,m)`, and `1`.
The identifier specifies the ratio of two such functions.
For example, ``'ns'`` identifies the function
.. math ::
\mathrm{ns}(u,m) = \frac{1}{\mathrm{sn}(u,m)}
and ``'cd'`` identifies the function
.. math ::
\mathrm{cd}(u,m) = \frac{\mathrm{cn}(u,m)}{\mathrm{dn}(u,m)}.
If called with only the first argument, a function object
evaluating the chosen function for given arguments is returned.
**Examples**
Basic evaluation::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> ellipfun('cd', 3.5, 0.5)
-0.9891101840595543931308394
>>> ellipfun('cd', 3.5, q=0.25)
0.07111979240214668158441418
The sn-function is doubly periodic in the complex plane with periods
`4 K(m)` and `2 i K(1-m)` (see :func:`~mpmath.ellipk`)::
>>> sn = ellipfun('sn')
>>> sn(2, 0.25)
0.9628981775982774425751399
>>> sn(2+4*ellipk(0.25), 0.25)
0.9628981775982774425751399
>>> chop(sn(2+2*j*ellipk(1-0.25), 0.25))
0.9628981775982774425751399
The cn-function is doubly periodic with periods `4 K(m)` and `4 i K(1-m)`::
>>> cn = ellipfun('cn')
>>> cn(2, 0.25)
-0.2698649654510865792581416
>>> cn(2+4*ellipk(0.25), 0.25)
-0.2698649654510865792581416
>>> chop(cn(2+4*j*ellipk(1-0.25), 0.25))
-0.2698649654510865792581416
The dn-function is doubly periodic with periods `2 K(m)` and `4 i K(1-m)`::
>>> dn = ellipfun('dn')
>>> dn(2, 0.25)
0.8764740583123262286931578
>>> dn(2+2*ellipk(0.25), 0.25)
0.8764740583123262286931578
>>> chop(dn(2+4*j*ellipk(1-0.25), 0.25))
0.8764740583123262286931578
"""
jtheta = r"""
Computes the Jacobi theta function `\vartheta_n(z, q)`, where
`n = 1, 2, 3, 4`, defined by the infinite series:
.. math ::
\vartheta_1(z,q) = 2 q^{1/4} \sum_{n=0}^{\infty}
(-1)^n q^{n^2+n\,} \sin((2n+1)z)
\vartheta_2(z,q) = 2 q^{1/4} \sum_{n=0}^{\infty}
q^{n^{2\,} + n} \cos((2n+1)z)
\vartheta_3(z,q) = 1 + 2 \sum_{n=1}^{\infty}
q^{n^2\,} \cos(2 n z)
\vartheta_4(z,q) = 1 + 2 \sum_{n=1}^{\infty}
(-q)^{n^2\,} \cos(2 n z)
The theta functions are functions of two variables:
* `z` is the *argument*, an arbitrary real or complex number
* `q` is the *nome*, which must be a real or complex number
in the unit disk (i.e. `|q| < 1`). For `|q| \ll 1`, the
series converge very quickly, so the Jacobi theta functions
can efficiently be evaluated to high precision.
The compact notations `\vartheta_n(q) = \vartheta_n(0,q)`
and `\vartheta_n = \vartheta_n(0,q)` are also frequently
encountered. Finally, Jacobi theta functions are frequently
considered as functions of the half-period ratio `\tau`
and then usually denoted by `\vartheta_n(z|\tau)`.
Optionally, ``jtheta(n, z, q, derivative=d)`` with `d > 0` computes
a `d`-th derivative with respect to `z`.
**Examples and basic properties**
Considered as functions of `z`, the Jacobi theta functions may be
viewed as generalizations of the ordinary trigonometric functions
cos and sin. They are periodic functions::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> jtheta(1, 0.25, '0.2')
0.2945120798627300045053104
>>> jtheta(1, 0.25 + 2*pi, '0.2')
0.2945120798627300045053104
Indeed, the series defining the theta functions are essentially
trigonometric Fourier series. The coefficients can be retrieved
using :func:`~mpmath.fourier`::
>>> mp.dps = 10
>>> nprint(fourier(lambda x: jtheta(2, x, 0.5), [-pi, pi], 4))
([0.0, 1.68179, 0.0, 0.420448, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0])
The Jacobi theta functions are also so-called quasiperiodic
functions of `z` and `\tau`, meaning that for fixed `\tau`,
`\vartheta_n(z, q)` and `\vartheta_n(z+\pi \tau, q)` are the same
except for an exponential factor::
>>> mp.dps = 25
>>> tau = 3*j/10
>>> q = exp(pi*j*tau)
>>> z = 10
>>> jtheta(4, z+tau*pi, q)
(-0.682420280786034687520568 + 1.526683999721399103332021j)
>>> -exp(-2*j*z)/q * jtheta(4, z, q)
(-0.682420280786034687520568 + 1.526683999721399103332021j)
The Jacobi theta functions satisfy a huge number of other
functional equations, such as the following identity (valid for
any `q`)::
>>> q = mpf(3)/10
>>> jtheta(3,0,q)**4
6.823744089352763305137427
>>> jtheta(2,0,q)**4 + jtheta(4,0,q)**4
6.823744089352763305137427
Extensive listings of identities satisfied by the Jacobi theta
functions can be found in standard reference works.
The Jacobi theta functions are related to the gamma function
for special arguments::
>>> jtheta(3, 0, exp(-pi))
1.086434811213308014575316
>>> pi**(1/4.) / gamma(3/4.)
1.086434811213308014575316
:func:`~mpmath.jtheta` supports arbitrary precision evaluation and complex
arguments::
>>> mp.dps = 50
>>> jtheta(4, sqrt(2), 0.5)
2.0549510717571539127004115835148878097035750653737
>>> mp.dps = 25
>>> jtheta(4, 1+2j, (1+j)/5)
(7.180331760146805926356634 - 1.634292858119162417301683j)
Evaluation of derivatives::
>>> mp.dps = 25
>>> jtheta(1, 7, 0.25, 1); diff(lambda z: jtheta(1, z, 0.25), 7)
1.209857192844475388637236
1.209857192844475388637236
>>> jtheta(1, 7, 0.25, 2); diff(lambda z: jtheta(1, z, 0.25), 7, 2)
-0.2598718791650217206533052
-0.2598718791650217206533052
>>> jtheta(2, 7, 0.25, 1); diff(lambda z: jtheta(2, z, 0.25), 7)
-1.150231437070259644461474
-1.150231437070259644461474
>>> jtheta(2, 7, 0.25, 2); diff(lambda z: jtheta(2, z, 0.25), 7, 2)
-0.6226636990043777445898114
-0.6226636990043777445898114
>>> jtheta(3, 7, 0.25, 1); diff(lambda z: jtheta(3, z, 0.25), 7)
-0.9990312046096634316587882
-0.9990312046096634316587882
>>> jtheta(3, 7, 0.25, 2); diff(lambda z: jtheta(3, z, 0.25), 7, 2)
-0.1530388693066334936151174
-0.1530388693066334936151174
>>> jtheta(4, 7, 0.25, 1); diff(lambda z: jtheta(4, z, 0.25), 7)
0.9820995967262793943571139
0.9820995967262793943571139
>>> jtheta(4, 7, 0.25, 2); diff(lambda z: jtheta(4, z, 0.25), 7, 2)
0.3936902850291437081667755
0.3936902850291437081667755
**Possible issues**
For `|q| \ge 1` or `\Im(\tau) \le 0`, :func:`~mpmath.jtheta` raises
``ValueError``. This exception is also raised for `|q|` extremely
close to 1 (or equivalently `\tau` very close to 0), since the
series would converge too slowly::
>>> jtheta(1, 10, 0.99999999 * exp(0.5*j))
Traceback (most recent call last):
...
ValueError: abs(q) > THETA_Q_LIM = 1.000000
"""
eulernum = r"""
Gives the `n`-th Euler number, defined as the `n`-th derivative of
`\mathrm{sech}(t) = 1/\cosh(t)` evaluated at `t = 0`. Equivalently, the
Euler numbers give the coefficients of the Taylor series
.. math ::
\mathrm{sech}(t) = \sum_{n=0}^{\infty} \frac{E_n}{n!} t^n.
The Euler numbers are closely related to Bernoulli numbers
and Bernoulli polynomials. They can also be evaluated in terms of
Euler polynomials (see :func:`~mpmath.eulerpoly`) as `E_n = 2^n E_n(1/2)`.
**Examples**
Computing the first few Euler numbers and verifying that they
agree with the Taylor series::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> [eulernum(n) for n in range(11)]
[1.0, 0.0, -1.0, 0.0, 5.0, 0.0, -61.0, 0.0, 1385.0, 0.0, -50521.0]
>>> chop(diffs(sech, 0, 10))
[1.0, 0.0, -1.0, 0.0, 5.0, 0.0, -61.0, 0.0, 1385.0, 0.0, -50521.0]
Euler numbers grow very rapidly. :func:`~mpmath.eulernum` efficiently
computes numerical approximations for large indices::
>>> eulernum(50)
-6.053285248188621896314384e+54
>>> eulernum(1000)
3.887561841253070615257336e+2371
>>> eulernum(10**20)
4.346791453661149089338186e+1936958564106659551331
Comparing with an asymptotic formula for the Euler numbers::
>>> n = 10**5
>>> (-1)**(n//2) * 8 * sqrt(n/(2*pi)) * (2*n/(pi*e))**n
3.69919063017432362805663e+436961
>>> eulernum(n)
3.699193712834466537941283e+436961
Pass ``exact=True`` to obtain exact values of Euler numbers as integers::
>>> print(eulernum(50, exact=True))
-6053285248188621896314383785111649088103498225146815121
>>> print(eulernum(200, exact=True) % 10**10)
1925859625
>>> eulernum(1001, exact=True)
0
"""
eulerpoly = r"""
Evaluates the Euler polynomial `E_n(z)`, defined by the generating function
representation
.. math ::
\frac{2e^{zt}}{e^t+1} = \sum_{n=0}^\infty E_n(z) \frac{t^n}{n!}.
The Euler polynomials may also be represented in terms of
Bernoulli polynomials (see :func:`~mpmath.bernpoly`) using various formulas, for
example
.. math ::
E_n(z) = \frac{2}{n+1} \left(
B_n(z)-2^{n+1}B_n\left(\frac{z}{2}\right)
\right).
Special values include the Euler numbers `E_n = 2^n E_n(1/2)` (see
:func:`~mpmath.eulernum`).
**Examples**
Computing the coefficients of the first few Euler polynomials::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> for n in range(6):
... chop(taylor(lambda z: eulerpoly(n,z), 0, n))
...
[1.0]
[-0.5, 1.0]
[0.0, -1.0, 1.0]
[0.25, 0.0, -1.5, 1.0]
[0.0, 1.0, 0.0, -2.0, 1.0]
[-0.5, 0.0, 2.5, 0.0, -2.5, 1.0]
Evaluation for arbitrary `z`::
>>> eulerpoly(2,3)
6.0
>>> eulerpoly(5,4)
423.5
>>> eulerpoly(35, 11111111112)
3.994957561486776072734601e+351
>>> eulerpoly(4, 10+20j)
(-47990.0 - 235980.0j)
>>> eulerpoly(2, '-3.5e-5')
0.000035001225
>>> eulerpoly(3, 0.5)
0.0
>>> eulerpoly(55, -10**80)
-1.0e+4400
>>> eulerpoly(5, -inf)
-inf
>>> eulerpoly(6, -inf)
+inf
Computing Euler numbers::
>>> 2**26 * eulerpoly(26,0.5)
-4087072509293123892361.0
>>> eulernum(26)
-4087072509293123892361.0
Evaluation is accurate for large `n` and small `z`::
>>> eulerpoly(100, 0.5)
2.29047999988194114177943e+108
>>> eulerpoly(1000, 10.5)
3.628120031122876847764566e+2070
>>> eulerpoly(10000, 10.5)
1.149364285543783412210773e+30688
"""
spherharm = r"""
Evaluates the spherical harmonic `Y_l^m(\theta,\phi)`,
.. math ::
Y_l^m(\theta,\phi) = \sqrt{\frac{2l+1}{4\pi}\frac{(l-m)!}{(l+m)!}}
P_l^m(\cos \theta) e^{i m \phi}
where `P_l^m` is an associated Legendre function (see :func:`~mpmath.legenp`).
Here `\theta \in [0, \pi]` denotes the polar coordinate (ranging
from the north pole to the south pole) and `\phi \in [0, 2 \pi]` denotes the
azimuthal coordinate on a sphere. Care should be used since many different
conventions for spherical coordinate variables are used.
Usually spherical harmonics are considered for `l \in \mathbb{N}`,
`m \in \mathbb{Z}`, `|m| \le l`. More generally, `l,m,\theta,\phi`
are permitted to be complex numbers.
.. note ::
:func:`~mpmath.spherharm` returns a complex number, even if the value is
purely real.
**Plots**
.. literalinclude :: /plots/spherharm40.py
`Y_{4,0}`:
.. image :: /plots/spherharm40.png
`Y_{4,1}`:
.. image :: /plots/spherharm41.png
`Y_{4,2}`:
.. image :: /plots/spherharm42.png
`Y_{4,3}`:
.. image :: /plots/spherharm43.png
`Y_{4,4}`:
.. image :: /plots/spherharm44.png
**Examples**
Some low-order spherical harmonics with reference values::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> theta = pi/4
>>> phi = pi/3
>>> spherharm(0,0,theta,phi); 0.5*sqrt(1/pi)*expj(0)
(0.2820947917738781434740397 + 0.0j)
(0.2820947917738781434740397 + 0.0j)
>>> spherharm(1,-1,theta,phi); 0.5*sqrt(3/(2*pi))*expj(-phi)*sin(theta)
(0.1221506279757299803965962 - 0.2115710938304086076055298j)
(0.1221506279757299803965962 - 0.2115710938304086076055298j)
>>> spherharm(1,0,theta,phi); 0.5*sqrt(3/pi)*cos(theta)*expj(0)
(0.3454941494713354792652446 + 0.0j)
(0.3454941494713354792652446 + 0.0j)
>>> spherharm(1,1,theta,phi); -0.5*sqrt(3/(2*pi))*expj(phi)*sin(theta)
(-0.1221506279757299803965962 - 0.2115710938304086076055298j)
(-0.1221506279757299803965962 - 0.2115710938304086076055298j)
With the normalization convention used, the spherical harmonics are orthonormal
on the unit sphere::
>>> sphere = [0,pi], [0,2*pi]
>>> dS = lambda t,p: fp.sin(t) # differential element
>>> Y1 = lambda t,p: fp.spherharm(l1,m1,t,p)
>>> Y2 = lambda t,p: fp.conj(fp.spherharm(l2,m2,t,p))
>>> l1 = l2 = 3; m1 = m2 = 2
>>> print(fp.quad(lambda t,p: Y1(t,p)*Y2(t,p)*dS(t,p), *sphere))
(1+0j)
>>> m2 = 1 # m1 != m2
>>> print(fp.chop(fp.quad(lambda t,p: Y1(t,p)*Y2(t,p)*dS(t,p), *sphere)))
0.0
Evaluation is accurate for large orders::
>>> spherharm(1000,750,0.5,0.25)
(3.776445785304252879026585e-102 - 5.82441278771834794493484e-102j)
Evaluation works with complex parameter values::
>>> spherharm(1+j, 2j, 2+3j, -0.5j)
(64.44922331113759992154992 + 1981.693919841408089681743j)
"""
scorergi = r"""
Evaluates the Scorer function
.. math ::
\operatorname{Gi}(z) =
\operatorname{Ai}(z) \int_0^z \operatorname{Bi}(t) dt +
\operatorname{Bi}(z) \int_z^{\infty} \operatorname{Ai}(t) dt
which gives a particular solution to the inhomogeneous Airy
differential equation `f''(z) - z f(z) = 1/\pi`. Another
particular solution is given by the Scorer Hi-function
(:func:`~mpmath.scorerhi`). The two functions are related as
`\operatorname{Gi}(z) + \operatorname{Hi}(z) = \operatorname{Bi}(z)`.
**Plots**
.. literalinclude :: /plots/gi.py
.. image :: /plots/gi.png
.. literalinclude :: /plots/gi_c.py
.. image :: /plots/gi_c.png
**Examples**
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> scorergi(0); 1/(power(3,'7/6')*gamma('2/3'))
0.2049755424820002450503075
0.2049755424820002450503075
>>> diff(scorergi, 0); 1/(power(3,'5/6')*gamma('1/3'))
0.1494294524512754526382746
0.1494294524512754526382746
>>> scorergi(+inf); scorergi(-inf)
0.0
0.0
>>> scorergi(1)
0.2352184398104379375986902
>>> scorergi(-1)
-0.1166722172960152826494198
Evaluation for large arguments::
>>> scorergi(10)
0.03189600510067958798062034
>>> scorergi(100)
0.003183105228162961476590531
>>> scorergi(1000000)
0.0000003183098861837906721743873
>>> 1/(pi*1000000)
0.0000003183098861837906715377675
>>> scorergi(-1000)
-0.08358288400262780392338014
>>> scorergi(-100000)
0.02886866118619660226809581
>>> scorergi(50+10j)
(0.0061214102799778578790984 - 0.001224335676457532180747917j)
>>> scorergi(-50-10j)
(5.236047850352252236372551e+29 - 3.08254224233701381482228e+29j)
>>> scorergi(100000j)
(-8.806659285336231052679025e+6474077 + 8.684731303500835514850962e+6474077j)
Verifying the connection between Gi and Hi::
>>> z = 0.25
>>> scorergi(z) + scorerhi(z)
0.7287469039362150078694543
>>> airybi(z)
0.7287469039362150078694543
Verifying the differential equation::
>>> for z in [-3.4, 0, 2.5, 1+2j]:
... chop(diff(scorergi,z,2) - z*scorergi(z))
...
-0.3183098861837906715377675
-0.3183098861837906715377675
-0.3183098861837906715377675
-0.3183098861837906715377675
Verifying the integral representation::
>>> z = 0.5
>>> scorergi(z)
0.2447210432765581976910539
>>> Ai,Bi = airyai,airybi
>>> Bi(z)*(Ai(inf,-1)-Ai(z,-1)) + Ai(z)*(Bi(z,-1)-Bi(0,-1))
0.2447210432765581976910539
**References**
1. [DLMF]_ section 9.12: Scorer Functions
"""
scorerhi = r"""
Evaluates the second Scorer function
.. math ::
\operatorname{Hi}(z) =
\operatorname{Bi}(z) \int_{-\infty}^z \operatorname{Ai}(t) dt -
\operatorname{Ai}(z) \int_{-\infty}^z \operatorname{Bi}(t) dt
which gives a particular solution to the inhomogeneous Airy
differential equation `f''(z) - z f(z) = 1/\pi`. See also
:func:`~mpmath.scorergi`.
**Plots**
.. literalinclude :: /plots/hi.py
.. image :: /plots/hi.png
.. literalinclude :: /plots/hi_c.py
.. image :: /plots/hi_c.png
**Examples**
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> scorerhi(0); 2/(power(3,'7/6')*gamma('2/3'))
0.4099510849640004901006149
0.4099510849640004901006149
>>> diff(scorerhi,0); 2/(power(3,'5/6')*gamma('1/3'))
0.2988589049025509052765491
0.2988589049025509052765491
>>> scorerhi(+inf); scorerhi(-inf)
+inf
0.0
>>> scorerhi(1)
0.9722051551424333218376886
>>> scorerhi(-1)
0.2206696067929598945381098
Evaluation for large arguments::
>>> scorerhi(10)
455641153.5163291358991077
>>> scorerhi(100)
6.041223996670201399005265e+288
>>> scorerhi(1000000)
7.138269638197858094311122e+289529652
>>> scorerhi(-10)
0.0317685352825022727415011
>>> scorerhi(-100)
0.003183092495767499864680483
>>> scorerhi(100j)
(-6.366197716545672122983857e-9 + 0.003183098861710582761688475j)
>>> scorerhi(50+50j)
(-5.322076267321435669290334e+63 + 1.478450291165243789749427e+65j)
>>> scorerhi(-1000-1000j)
(0.0001591549432510502796565538 - 0.000159154943091895334973109j)
Verifying the differential equation::
>>> for z in [-3.4, 0, 2, 1+2j]:
... chop(diff(scorerhi,z,2) - z*scorerhi(z))
...
0.3183098861837906715377675
0.3183098861837906715377675
0.3183098861837906715377675
0.3183098861837906715377675
Verifying the integral representation::
>>> z = 0.5
>>> scorerhi(z)
0.6095559998265972956089949
>>> Ai,Bi = airyai,airybi
>>> Bi(z)*(Ai(z,-1)-Ai(-inf,-1)) - Ai(z)*(Bi(z,-1)-Bi(-inf,-1))
0.6095559998265972956089949
"""
# Documentation string for mpmath's stirling1. Doctest updated to use the
# Python 3 print() function, consistent with every other doctest in this
# file (a bare `print x` statement is a SyntaxError under doctest on Py3).
stirling1 = r"""
Gives the Stirling number of the first kind `s(n,k)`, defined by
.. math ::
x(x-1)(x-2)\cdots(x-n+1) = \sum_{k=0}^n s(n,k) x^k.
The value is computed using an integer recurrence. The implementation
is not optimized for approximating large values quickly.
**Examples**
Comparing with the generating function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> taylor(lambda x: ff(x, 5), 0, 5)
[0.0, 24.0, -50.0, 35.0, -10.0, 1.0]
>>> [stirling1(5, k) for k in range(6)]
[0.0, 24.0, -50.0, 35.0, -10.0, 1.0]
Recurrence relation::
>>> n, k = 5, 3
>>> stirling1(n+1,k) + n*stirling1(n,k) - stirling1(n,k-1)
0.0
The matrices of Stirling numbers of first and second kind are inverses
of each other::
>>> A = matrix(5, 5); B = matrix(5, 5)
>>> for n in range(5):
...     for k in range(5):
...         A[n,k] = stirling1(n,k)
...         B[n,k] = stirling2(n,k)
...
>>> A * B
[1.0  0.0  0.0  0.0  0.0]
[0.0  1.0  0.0  0.0  0.0]
[0.0  0.0  1.0  0.0  0.0]
[0.0  0.0  0.0  1.0  0.0]
[0.0  0.0  0.0  0.0  1.0]
Pass ``exact=True`` to obtain exact values of Stirling numbers as integers::
>>> stirling1(42, 5)
-2.864498971768501633736628e+50
>>> print(stirling1(42, 5, exact=True))
-286449897176850163373662803014001546235808317440000
"""
# Documentation string for mpmath's stirling2. Doctest updated to use the
# Python 3 print() function, consistent with every other doctest in this
# file (a bare `print x` statement is a SyntaxError under doctest on Py3).
stirling2 = r"""
Gives the Stirling number of the second kind `S(n,k)`, defined by
.. math ::
x^n = \sum_{k=0}^n S(n,k) x(x-1)(x-2)\cdots(x-k+1)
The value is computed using integer arithmetic to evaluate a power sum.
The implementation is not optimized for approximating large values quickly.
**Examples**
Comparing with the generating function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> taylor(lambda x: sum(stirling2(5,k) * ff(x,k) for k in range(6)), 0, 5)
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
Recurrence relation::
>>> n, k = 5, 3
>>> stirling2(n+1,k) - k*stirling2(n,k) - stirling2(n,k-1)
0.0
Pass ``exact=True`` to obtain exact values of Stirling numbers as integers::
>>> stirling2(52, 10)
2.641822121003543906807485e+45
>>> print(stirling2(52, 10, exact=True))
2641822121003543906807485307053638921722527655
"""
|
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/mpmath/function_docs.py
|
Python
|
mit
| 277,901
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author Mark McClain (DreamHost)
import sys
import mock
from neutron.db import migration
from neutron.db.migration import cli
from neutron.tests import base
class TestDbMigration(base.BaseTestCase):
    """Tests for migration.should_run plugin filtering."""

    def test_should_run_plugin_in_list(self):
        # True when the queried plugin appears in the second list,
        # False when it does not.
        self.assertTrue(migration.should_run(['foo'], ['foo', 'bar']))
        self.assertFalse(migration.should_run(['foo'], ['bar']))

    def test_should_run_plugin_wildcard(self):
        # A '*' entry in the list matches any plugin.
        self.assertTrue(migration.should_run(['foo'], ['*']))
class TestCli(base.BaseTestCase):
    """Tests for the neutron DB migration CLI.

    cli.do_alembic_command is mocked out in setUp, so each test only
    verifies how a given command line is translated into an alembic
    invocation — no real alembic machinery or database is touched.
    """

    def setUp(self):
        super(TestCli, self).setUp()
        # Patch out the actual alembic call; the mock records how the CLI
        # invoked it.
        self.do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command')
        self.do_alembic_cmd = self.do_alembic_cmd_p.start()
        self.addCleanup(self.do_alembic_cmd_p.stop)
        self.addCleanup(cli.CONF.reset)

    def _main_test_helper(self, argv, func_name, exp_args=(), exp_kwargs=None):
        """Run cli.main() with *argv* and assert the resulting alembic call.

        :param argv: simulated sys.argv (argv[0] is the program name).
        :param func_name: name of the alembic command expected to be invoked.
        :param exp_args: expected positional arguments of that call.
        :param exp_kwargs: expected keyword arguments of that call; defaults
            to an empty dict. A ``None`` sentinel is used instead of a
            mutable ``{}`` default to avoid the shared-mutable-default
            pitfall.
        """
        if exp_kwargs is None:
            exp_kwargs = {}
        with mock.patch.object(sys, 'argv', argv):
            cli.main()
            self.do_alembic_cmd.assert_has_calls(
                [mock.call(mock.ANY, func_name, *exp_args, **exp_kwargs)]
            )

    def test_stamp(self):
        # 'stamp' with and without --sql.
        self._main_test_helper(
            ['prog', 'stamp', 'foo'],
            'stamp',
            ('foo',),
            {'sql': False}
        )
        self._main_test_helper(
            ['prog', 'stamp', 'foo', '--sql'],
            'stamp',
            ('foo',),
            {'sql': True}
        )

    def test_current(self):
        self._main_test_helper(['prog', 'current'], 'current')

    def test_history(self):
        self._main_test_helper(['prog', 'history'], 'history')

    def test_check_migration(self):
        # 'check_migration' maps onto alembic's 'branches' command.
        self._main_test_helper(['prog', 'check_migration'], 'branches')

    def test_database_sync_revision(self):
        # --autogenerate and --sql are mutually reflected in the kwargs.
        self._main_test_helper(
            ['prog', 'revision', '--autogenerate', '-m', 'message'],
            'revision',
            (),
            {'message': 'message', 'sql': False, 'autogenerate': True}
        )
        self._main_test_helper(
            ['prog', 'revision', '--sql', '-m', 'message'],
            'revision',
            (),
            {'message': 'message', 'sql': True, 'autogenerate': False}
        )

    def test_upgrade(self):
        # Explicit revision vs. relative '--delta' upgrade ('+3').
        self._main_test_helper(
            ['prog', 'upgrade', '--sql', 'head'],
            'upgrade',
            ('head',),
            {'sql': True}
        )
        self._main_test_helper(
            ['prog', 'upgrade', '--delta', '3'],
            'upgrade',
            ('+3',),
            {'sql': False}
        )

    def test_downgrade(self):
        # Explicit revision vs. relative '--delta' downgrade ('-2').
        self._main_test_helper(
            ['prog', 'downgrade', '--sql', 'folsom'],
            'downgrade',
            ('folsom',),
            {'sql': True}
        )
        self._main_test_helper(
            ['prog', 'downgrade', '--delta', '2'],
            'downgrade',
            ('-2',),
            {'sql': False}
        )
|
ntt-sic/neutron
|
neutron/tests/unit/test_db_migration.py
|
Python
|
apache-2.0
| 3,643
|
import asyncio
import functools
import os
import shutil
import tempfile
from unittest import mock
from unittest.mock import MagicMock
import pytest
from aiohttp import web
from aiohttp.web_urldispatcher import SystemRoute
@pytest.fixture(scope='function')
def tmp_dir_path(request):
    """Return the path of a fresh temporary directory.

    The directory and everything inside it is deleted when the
    requesting test finishes.
    """
    path = tempfile.mkdtemp()
    # Remove the whole tree at teardown.
    request.addfinalizer(functools.partial(shutil.rmtree, path))
    return path
@pytest.mark.parametrize("show_index,status,data",
                         [(False, 403, None),
                          (True, 200,
                           b'<html>\n<head>\n<title>Index of /</title>\n'
                           b'</head>\n<body>\n<h1>Index of /</h1>\n<ul>\n'
                           b'<li><a href="/my_dir">my_dir/</a></li>\n'
                           b'<li><a href="/my_file">my_file</a></li>\n'
                           b'</ul>\n</body>\n</html>')])
@asyncio.coroutine
def test_access_root_of_static_handler(tmp_dir_path, loop, test_client,
                                       show_index, status, data):
    """
    Tests the operation of the static file server.

    Try to access the root of the static file server and make sure the
    correct HTTP status is returned depending on whether the directory
    index should be shown (200 with the HTML listing) or not (403).
    """
    # Put a file inside tmp_dir_path:
    my_file_path = os.path.join(tmp_dir_path, 'my_file')
    with open(my_file_path, 'w') as fw:
        fw.write('hello')
    # ...and a subdirectory with a second file, so the generated index
    # contains both a directory entry and a file entry.
    my_dir_path = os.path.join(tmp_dir_path, 'my_dir')
    os.mkdir(my_dir_path)
    my_file_path = os.path.join(my_dir_path, 'my_file_in_dir')
    with open(my_file_path, 'w') as fw:
        fw.write('world')
    app = web.Application(loop=loop)
    # Register global static route:
    app.router.add_static('/', tmp_dir_path, show_index=show_index)
    client = yield from test_client(app)
    # Request the root of the static directory.
    r = yield from client.get('/')
    assert r.status == status
    if data:
        # Index shown: the body must match the parametrized HTML exactly.
        assert r.headers['Content-Type'] == "text/html; charset=utf-8"
        read_ = (yield from r.read())
        assert read_ == data
    yield from r.release()
@pytest.mark.parametrize('dir_name,filename,data', [
    ('', 'test file.txt', 'test text'),
    ('test dir name', 'test dir file .txt', 'test text file folder')
])
@asyncio.coroutine
def test_access_to_the_file_with_spaces(tmp_dir_path, loop, test_client,
                                        dir_name, filename, data):
    """
    Checks that static files whose names (or parent directory names)
    contain spaces are served correctly.
    """
    my_dir_path = os.path.join(tmp_dir_path, dir_name)
    # Only the second parametrized case uses a subdirectory.
    if dir_name:
        os.mkdir(my_dir_path)
    my_file_path = os.path.join(my_dir_path, filename)
    with open(my_file_path, 'w') as fw:
        fw.write(data)
    app = web.Application(loop=loop)
    # NOTE(review): the request URL is built with os.path.join, which
    # assumes '/'-separated paths -- confirm on non-POSIX platforms.
    url = os.path.join('/', dir_name, filename)
    app.router.add_static('/', tmp_dir_path)
    client = yield from test_client(app)
    r = yield from client.get(url)
    assert r.status == 200
    assert (yield from r.text()) == data
    yield from r.release()
@asyncio.coroutine
def test_access_non_existing_resource(tmp_dir_path, loop, test_client):
    """
    Request a resource that does not exist under the static root.

    The static file handler must answer with HTTP 404.
    """
    app = web.Application(loop=loop)
    # Serve the (empty) temporary directory at the root URL.
    app.router.add_static('/', tmp_dir_path, show_index=True)
    client = yield from test_client(app)

    resp = yield from client.get('/non_existing_resource')
    assert resp.status == 404
    yield from resp.release()
@asyncio.coroutine
def test_unauthorized_folder_access(tmp_dir_path, loop, test_client):
    """
    Tests unauthorized access to a folder of the static file server.

    Try to list a folder's content when the server does not have
    permission to do so; the handler must answer 403.
    """
    my_dir_path = os.path.join(tmp_dir_path, 'my_dir')
    os.mkdir(my_dir_path)
    app = web.Application(loop=loop)
    with mock.patch('pathlib.Path.__new__') as path_constructor:
        # Fake Path object whose iteration raises PermissionError,
        # simulating a directory the process may not list.
        path = MagicMock()
        path.joinpath.return_value = path
        path.resolve.return_value = path
        path.iterdir.return_value.__iter__.side_effect = PermissionError()
        path_constructor.return_value = path
        # Register global static route:
        app.router.add_static('/', tmp_dir_path, show_index=True)
        client = yield from test_client(app)
        # Request the directory that "cannot" be listed.
        r = yield from client.get('/my_dir')
        assert r.status == 403
        yield from r.release()
@asyncio.coroutine
def test_access_symlink_loop(tmp_dir_path, loop, test_client):
    """
    Request a symlink that points at itself.

    Such a link can never be resolved, so the handler must reply 404.
    """
    link_path = os.path.join(tmp_dir_path, 'my_symlink')
    # Create a self-referencing (and therefore unresolvable) symlink.
    os.symlink(link_path, link_path)

    app = web.Application(loop=loop)
    app.router.add_static('/', tmp_dir_path, show_index=True)
    client = yield from test_client(app)

    resp = yield from client.get('/my_symlink')
    assert resp.status == 404
    yield from resp.release()
@asyncio.coroutine
def test_access_special_resource(tmp_dir_path, loop, test_client):
    """
    Tests access to a resource that is neither a file nor a directory.

    Checks that if a special resource is accessed (e.g. a named pipe or
    UNIX domain socket) then a 404 HTTP status is returned.
    """
    app = web.Application(loop=loop)
    with mock.patch('pathlib.Path.__new__') as path_constructor:
        # 'special' reports being neither a file nor a directory ...
        special = MagicMock()
        special.is_dir.return_value = False
        special.is_file.return_value = False
        # ... and is only produced when the 'special' path segment is
        # joined; every other join yields the ordinary mocked path.
        path = MagicMock()
        path.joinpath.side_effect = lambda p: (special if p == 'special'
                                               else path)
        path.resolve.return_value = path
        special.resolve.return_value = special
        path_constructor.return_value = path
        # Register global static route:
        app.router.add_static('/', tmp_dir_path, show_index=True)
        client = yield from test_client(app)
        # Request the special (non-file, non-directory) resource.
        r = yield from client.get('/special')
        assert r.status == 404
        yield from r.release()
@asyncio.coroutine
def test_partialy_applied_handler(loop, test_client):
    """A handler pre-bound with functools.partial is dispatched normally."""
    app = web.Application(loop=loop)

    @asyncio.coroutine
    def handler(data, request):
        return web.Response(body=data)

    # Bind the payload ahead of time; the router only supplies `request`.
    app.router.add_route('GET', '/', functools.partial(handler, b'hello'))
    client = yield from test_client(app)

    resp = yield from client.get('/')
    body = (yield from resp.read())
    assert body == b'hello'
    yield from resp.release()
def test_system_route():
    """SystemRoute exposes status/reason but refuses to build URLs."""
    route = SystemRoute(web.HTTPCreated(reason='test'))

    # Neither URL-building API is meaningful for a system route.
    for build_url in (route.url, route.url_for):
        with pytest.raises(RuntimeError):
            build_url()

    assert route.name is None
    assert route.resource is None
    assert repr(route) == "<SystemRoute 201: test>"
    assert route.status == 201
    assert route.reason == 'test'
|
panda73111/aiohttp
|
tests/test_web_urldispatcher.py
|
Python
|
apache-2.0
| 7,407
|
#!/usr/bin/python
from tabulate import tabulate
class State:
    """One node of the tokenizer DFA.

    Attributes:
        transition   -- characters (or nested lists of characters) that
                        lead out of this state; stored by reference, so
                        states built from the same list share it.
        next_state   -- states reachable after consuming a matching char.
        accept_state -- True if reaching this state emits a token.
        accept_num   -- token id emitted on acceptance (see DFA.addtoken).
        name         -- debug label such as "Q42".
    """

    def __init__(self, transition, name):
        # Identification and wiring first, then the (initially
        # non-accepting) acceptance flags.
        self.name = name
        self.transition = transition
        self.next_state = []
        self.accept_state = False
        self.accept_num = 0
class DFA:
    """Hand-wired DFA tokenizer.

    The whole state machine (q0 .. q102) is built as a graph of State
    objects at class-definition time.  `testinput` walks a string through
    the graph; whenever an accepting state is reached, `addtoken` records
    a token (a DFA instance doubling as a token record) on the
    class-level `tokenlist`.
    """

    # Shared token accumulator; grows across every call to testinput.
    tokenlist = []

    def __init__(self, token, value):
        # An instance is a (token-kind, lexeme) record, not a machine.
        self.token = token
        self.value = value

    #transition (general)
    number = ['1','2','3','4','5','6','7','8','9','0']
    letter = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
    character = ['!','@','$','%','^','&','*','(',')','-','_','=','+','{','}','|',':',';','<','>','.',',','?','/','\'','\\']
    space = "\s"
    ident = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']

    #in
    q102 = State(space,"Q102")
    q102.accept_state = True
    q102.accept_num = 28
    #identifier
    q101 = State(space,"Q101")
    q101.accept_state = True
    q101.accept_num = 1
    q100 = State(letter,"Q100")
    # NOTE(review): this .append mutates the shared `letter` list itself,
    # so every state built from `letter` (e.g. q2 below) co-mutates.
    q100.transition.append(number)
    q100.next_state.append(q100)
    q100.next_state.append(q101)
    q99 = State(ident,"Q99")
    q99.next_state.append(q100)
    q99.next_state.append(q101)
    #comparison operation
    q98 = State(space,"Q98")
    q98.accept_state = True
    q98.accept_num = 27
    q97 = State(space,"Q97")
    q97.accept_state = True
    q97.accept_num = 26
    q96 = State(['='],"Q96")
    q96.next_state.append(q97)
    q95 = State(['<'],"Q95")
    q95.next_state.append(q96)
    q95.next_state.append(q97)
    q94 = State(['>'],"Q94")
    q94.next_state.append(q96)
    q94.next_state.append(q97)
    q93 = State(['!'],"Q93")
    q93.next_state.append(q96)
    q92 = State(['='],"Q92")
    q92.next_state.append(q96)
    q92.next_state.append(q98)
    #math operation
    q91 = State(space,"Q91")
    q91.accept_state = True
    q91.accept_num = 25
    q90 = State(['%'],"Q90")
    q90.next_state.append(q91)
    q89 = State(['/'],"Q89")
    q89.next_state.append(q91)
    q88 = State(['-'],"Q88")
    q88.next_state.append(q91)
    q87 = State(['*'],"Q87")
    q87.next_state.append(q91)
    q86 = State(['+'],"Q86")
    q86.next_state.append(q91)
    #close braces
    q85 = State(['}'],"Q85")
    q85.accept_state = True
    q85.accept_num = 24
    #open braces
    q84 = State(['{'],"Q84")
    q84.accept_state = True
    q84.accept_num = 23
    #close_parenthesis
    q83 = State([')'],"Q83")
    q83.accept_state = True
    q83.accept_num = 22
    #open_parenthesis
    q82 = State(['('],"Q82")
    q82.accept_state = True
    q82.accept_num = 21
    #function
    q81 = State(space,"Q81")
    q81.accept_state = True
    q81.accept_num = 20
    q80 = State(['n'],"Q80")
    q80.next_state.append(q81)
    q79 = State(['o'],"Q79")
    q79.next_state.append(q80)
    q78 = State(['i'],"Q78")
    q78.next_state.append(q79)
    q77 = State(['t'],"Q77")
    q77.next_state.append(q78)
    q76 = State(['c'],"Q76")
    q76.next_state.append(q77)
    q75 = State(['n'],"Q75")
    q75.next_state.append(q76)
    q74 = State(['u'],"Q74")
    q74.next_state.append(q75)
    #double
    q73 = State(space,"Q73")
    q73.accept_state = True
    q73.accept_num = 19
    q72 = State(number,"Q72")
    q72.next_state.append(q73)
    q72.next_state.append(q72)
    q71 = State(['.'],"Q71")
    q71.next_state.append(q72)
    #integer
    q70 = State(space,"Q70")
    q70.accept_state = True
    q70.accept_num = 18
    q69 = State(number,"Q69")
    q69.next_state.append(q70)
    q69.next_state.append(q69)
    q69.next_state.append(q71)
    #NOT
    q68 = State(space,"Q68")
    q68.accept_state = True
    q68.accept_num = 17
    q67 = State(['t'],"Q67")
    q67.next_state.append(q68)
    q66 = State(['o'],"Q66")
    q66.next_state.append(q67)
    q65 = State(['n'],"Q65")
    q65.next_state.append(q66)
    #AND
    q64 = State(space,"Q64")
    q64.accept_state = True
    q64.accept_num = 16
    q63 = State(['d'],"Q63")
    q63.next_state.append(q64)
    q62 = State(['n'],"Q62")
    q62.next_state.append(q63)
    q61 = State(['a'],"Q61")
    q61.next_state.append(q62)
    #OR
    q60 = State(space,"Q60")
    q60.accept_state = True
    q60.accept_num = 15
    q59 = State(['r'],"Q59")
    q59.next_state.append(q60)
    q58 = State(['o'],"Q58")
    q58.next_state.append(q59)
    #BOOL
    q57 = State(space,"Q57")
    q57.accept_state = True
    q57.accept_num = 14
    q56 = State(['l'],"Q56")
    q56.next_state.append(q57)
    q55 = State(['o'],"Q55")
    q55.next_state.append(q56)
    q54 = State(['o'],"Q54")
    q54.next_state.append(q55)
    q53 = State(['b'],"Q53")
    q53.next_state.append(q54)
    #STR
    q52 = State(space,"Q52")
    q52.accept_state = True
    q52.accept_num = 13
    q51 = State(['r'],"Q51")
    q51.next_state.append(q52)
    q50 = State(['t'],"Q50")
    q50.next_state.append(q51)
    # NOTE(review): labeled "Q47" -- likely a typo for "Q49" (clashes with
    # q47's label below).
    q49 = State(['s'],"Q47")
    q49.next_state.append(q50)
    #DBL
    q48 = State(space,"Q48")
    q48.accept_state = True
    q48.accept_num = 12
    q47 = State(['l'],"Q47")
    q47.next_state.append(q48)
    q46 = State(['b'],"Q46")
    q46.next_state.append(q47)
    q45 = State(['d'],"Q45")
    q45.next_state.append(q46)
    #INT
    q44 = State(space,"Q44")
    q44.accept_state = True
    q44.accept_num = 11
    q43 = State(['t'],"Q43")
    q43.next_state.append(q44)
    q42 = State(['n'],"Q42")
    q42.next_state.append(q43)
    q42.next_state.append(q102)
    #FALSE
    q41 = State(space,"Q41")
    q41.accept_state = True
    q41.accept_num = 10
    q40 = State(['e'],"Q40")
    q40.next_state.append(q41)
    q39 = State(['s'],"Q39")
    q39.next_state.append(q40)
    # NOTE(review): labeled "Q37" -- likely a typo for "Q38" (clashes with
    # q37's label below).
    q38 = State(['l'],"Q37")
    q38.next_state.append(q39)
    q37 = State(['a'],"Q37")
    q37.next_state.append(q38)
    #TRUE
    q36 = State(space,"Q36")
    q36.accept_state = True
    q36.accept_num = 9
    q35 = State(['e'],"Q35")
    q35.next_state.append(q36)
    q34 = State(['u'],"Q34")
    q34.next_state.append(q35)
    q33 = State(['r'],"Q33")
    q33.next_state.append(q34)
    q32 = State(['t'],"Q32")
    q32.next_state.append(q33)
    #WHILE
    q31 = State(space,"Q31")
    q31.accept_state = True
    q31.accept_num = 8
    q30 = State(['e'],"Q30")
    q30.next_state.append(q31)
    q29 = State(['l'],"Q29")
    q29.next_state.append(q30)
    q28 = State(['i'],"Q28")
    q28.next_state.append(q29)
    q27 = State(['h'],"Q27")
    q27.next_state.append(q28)
    q26 = State(['w'],"Q26")
    q26.next_state.append(q27)
    #GET
    q25 = State(space,"Q25")
    q25.accept_state = True
    q25.accept_num = 7
    q24 = State(['t'],"Q24")
    q24.next_state.append(q25)
    q23 = State(['e'],"Q23")
    q23.next_state.append(q24)
    q22 = State(['g'],"Q22")
    q22.next_state.append(q23)
    #PRINT
    q21 = State(space,"Q21")
    q21.accept_state = True
    q21.accept_num = 6
    q20 = State(['t'],"Q20")
    q20.next_state.append(q21)
    q19 = State(['n'],"Q19")
    q19.next_state.append(q20)
    q18 = State(['i'],"Q18")
    q18.next_state.append(q19)
    q17 = State(['r'],"Q17")
    q17.next_state.append(q18)
    q16 = State(['p'],"Q16")
    q16.next_state.append(q17)
    #ELSE IDENTIFIER
    q15 = State(space,"Q15")
    q15.accept_state = True
    q15.accept_num = 5
    q14 = State(['e'],"Q14")
    q14.next_state.append(q15)
    q13 = State(['s'],"Q13")
    q13.next_state.append(q14)
    q12 = State(['l'],"Q12")
    q12.next_state.append(q13)
    q11 = State(['e'],"Q11")
    q11.next_state.append(q12)
    #FOR IDENTIFIER
    q10 = State(space,"Q10")
    q10.accept_state = True
    q10.accept_num = 4
    q9 = State(['r'],"Q9")
    q9.next_state.append(q10)
    q8 = State(['o'],"Q8")
    q8.next_state.append(q9)
    q7 = State(['f'],"Q7")
    q7.next_state.append(q8)
    # 'f' can also continue into "false" or "function".
    q7.next_state.append(q37)
    q7.next_state.append(q74)
    #IF IDENTIFIER
    #Q6
    q6 = State(space,"Q6")
    q6.accept_state = True
    q6.accept_num = 3
    #Q5
    q5 = State(['f'], "Q5")
    q5.next_state.append(q6)
    #Q4
    q4 = State(['i'], "Q4")
    q4.next_state.append(q5)
    # 'i' can also continue into "int"/"in".
    q4.next_state.append(q42)
    #STRING IDENTIFIER
    #Q3
    q3 = State(['"'],"Q3")
    q3.accept_state = True
    q3.accept_num = 2
    #Q2
    q2 = State(letter,"Q2")
    q2.transition.append(number)
    q2.transition.append(character)
    q2.next_state.append(q2)
    q2.next_state.append(q3)
    #Q1
    q1 = State(['"'], "Q1")
    q1.next_state.append(q2)
    #QNEG (WHITESPACE IDENTIFIER)
    qneg = State([' '], "QNEG")
    qneg.accept_state = True
    qneg.accept_num = -1
    #START STATE
    q0 = State(None, "Q0")
    q0.next_state.append(qneg)
    q0.next_state.append(q1)
    q0.next_state.append(q7)
    q0.next_state.append(q4)
    # NOTE(review): q7 is appended a second time here (already added above).
    q0.next_state.append(q7)
    q0.next_state.append(q11)
    q0.next_state.append(q16)
    q0.next_state.append(q22)
    q0.next_state.append(q26)
    q0.next_state.append(q32)
    q0.next_state.append(q45)
    q0.next_state.append(q49)
    q0.next_state.append(q53)
    q0.next_state.append(q58)
    q0.next_state.append(q61)
    q0.next_state.append(q65)
    q0.next_state.append(q69)
    q0.next_state.append(q71)
    q0.next_state.append(q74)
    q0.next_state.append(q82)
    q0.next_state.append(q83)
    q0.next_state.append(q84)
    q0.next_state.append(q85)
    q0.next_state.append(q86)
    q0.next_state.append(q87)
    q0.next_state.append(q88)
    q0.next_state.append(q89)
    q0.next_state.append(q90)
    q0.next_state.append(q92)
    q0.next_state.append(q93)
    q0.next_state.append(q94)
    q0.next_state.append(q95)
    q0.next_state.append(q99)

    @staticmethod
    def checknext(state, input_letter):
        """Return the successor of `state` for `input_letter`, or None.

        Whitespace is matched by name: the hard-coded list below names
        every state whose transition is the `space` placeholder.
        """
        foundstate = False
        for states in state.next_state:
            #SPACE/WHITESPACE TESTING
            if states.name == "Q2" or states.name == "Q6" or states.name == "Q10" or states.name == "Q15" or states.name == "Q21" or states.name == "Q25" or states.name == "Q31" or states.name == "QNEG" or states.name == "Q36" or states.name == "Q41" or states.name == "Q44" or states.name == "Q48" or states.name == "Q52" or states.name == "Q57" or states.name == "Q60" or states.name == "Q64" or states.name == "Q68" or states.name == "Q70" or states.name == "Q73" or states.name == "Q81" or states.name == "Q91" or states.name == "Q97" or states.name == "Q98" or states.name == "Q101" or states.name == "Q102":
                if input_letter.isspace():
                    print("FOUND NEXT STATE BLANK")
                    foundstate = True
                    state = states
            #CHARACTER TESTING
            # transition may hold single chars and/or nested char lists;
            # both iterate down to single characters here.
            for transitions in states.transition:
                for letters in transitions:
                    # print (letters)
                    if(letters == input_letter):
                        foundstate = True
                        print("FOUND NEXT STATE")
                        state = states
        if foundstate == False:
            state = None
        return state

    @staticmethod
    def testinput(inputstr):
        """Tokenize `inputstr`, printing progress and the final token table.

        Resets to q0 after every accepted token; aborts on the first
        character with no outgoing transition.
        """
        print("INPUT TO TEST", inputstr)
        state = DFA.q0
        startindex = 0
        currindex = 0
        for letter in inputstr:
            print("Current State:",state.name)
            print("Letter to test:",letter)
            state = DFA.checknext(state, letter)
            if state == None:
                print("Unrecognized Character. Exiting")
                return
            if state.accept_state == True:
                print("END STATE", state.accept_num)
                # Lexeme spans from the last accept up to this character.
                tempstr = inputstr[startindex:currindex+1]
                DFA.addtoken(state.accept_num, tempstr)
                startindex = currindex+1
                state = DFA.q0
            currindex = currindex + 1
        DFA.printtokenlist()

    @staticmethod
    def addtoken(accept_num, tokenval):
        """Append the token for `accept_num` to DFA.tokenlist.

        accept_num -1 (whitespace) records nothing; literal-bearing
        tokens keep `tokenval`, keyword tokens store None.
        """
        if(accept_num != -1):
            if(accept_num == 1):
                DFA.tokenlist.append(DFA("identifier", tokenval))
            if(accept_num == 2):
                DFA.tokenlist.append(DFA("string", tokenval))
            if(accept_num == 3):
                DFA.tokenlist.append(DFA("if", None))
            if(accept_num == 4):
                DFA.tokenlist.append(DFA("for", None))
            if(accept_num == 5):
                DFA.tokenlist.append(DFA("else", None))
            if(accept_num == 6):
                DFA.tokenlist.append(DFA("print", None))
            if(accept_num == 7):
                DFA.tokenlist.append(DFA("get", None))
            if(accept_num == 8):
                DFA.tokenlist.append(DFA("while", None))
            if(accept_num == 9):
                DFA.tokenlist.append(DFA("true", None))
            if(accept_num == 10):
                DFA.tokenlist.append(DFA("false", None))
            if(accept_num == 11):
                DFA.tokenlist.append(DFA("int", None))
            if(accept_num == 12):
                DFA.tokenlist.append(DFA("dbl", None))
            if(accept_num == 13):
                DFA.tokenlist.append(DFA("str", None))
            if(accept_num == 14):
                DFA.tokenlist.append(DFA("bool", None))
            if(accept_num == 15):
                DFA.tokenlist.append(DFA("or", None))
            if(accept_num == 16):
                DFA.tokenlist.append(DFA("and", None))
            if(accept_num == 17):
                DFA.tokenlist.append(DFA("not", None))
            if(accept_num == 18):
                DFA.tokenlist.append(DFA("integer literal", tokenval))
            if(accept_num == 19):
                DFA.tokenlist.append(DFA("double literal", tokenval))
            if(accept_num == 20):
                DFA.tokenlist.append(DFA("function", None))
            if(accept_num == 21):
                DFA.tokenlist.append(DFA("(", None))
            if(accept_num == 22):
                DFA.tokenlist.append(DFA(")", None))
            if(accept_num == 23):
                DFA.tokenlist.append(DFA("{", None))
            if(accept_num == 24):
                DFA.tokenlist.append(DFA("}", None))
            if(accept_num == 25):
                DFA.tokenlist.append(DFA("Mathematical Operation", tokenval))
            if(accept_num == 26):
                DFA.tokenlist.append(DFA("Comparison Operation", tokenval))
            if(accept_num == 27):
                DFA.tokenlist.append(DFA("=", None))
            # NOTE(review): 28 is reached via the "in" path (q102) but is
            # recorded as "int" -- confirm whether "in" was intended.
            if(accept_num == 28):
                DFA.tokenlist.append(DFA("int", None))

    @staticmethod
    def printtokenlist():
        """Print the accumulated token list as a two-column table."""
        table=[]
        for tokens in DFA.tokenlist:
            table.append([tokens.token, tokens.value])
        print (tabulate(table, headers=["Token","Value"]))
def loadfile(filename):
    """Return the entire contents of *filename* as a single string.

    Uses a context manager so the file handle is closed even if the
    read raises (the original leaked the handle on error).
    """
    with open(filename, "r") as obj:
        return obj.read()
def token_analyze(fa):
    """Run the source text `fa` through the DFA tokenizer.

    Tokens accumulate on the class-level DFA.tokenlist; testinput also
    prints the resulting token table.
    """
    DFA.testinput(fa)
    return
def tokengenerator(filename):
    """Generate a token list for the source code in `filename`,
    based on the grammar at grammar.md."""
    fa = loadfile(filename)
    token_analyze(fa)
    return
#MAIN FUNCTION
# NOTE(review): runs at import time; consider guarding with
# `if __name__ == "__main__":` so importing this module has no side effects.
tokengenerator("sourcecode.ds")
|
tjmonsi/cmsc129-2016-repo
|
submissions/exercise2/doron/tokengenerator.py
|
Python
|
mit
| 16,095
|
def grade(tid, answer):
    """Grade a submission for this recon problem.

    tid: team/submission id (unused by this grader).
    answer: the submitted answer string; matched by substring.
    Returns a dict with 'correct' (bool) and a feedback 'message'.
    """
    # Reject last year's flag explicitly before checking the real one.
    # (Idiomatic `in` replaces the original `.find(...) != -1` checks.)
    if "failed_up_is_the_best_fail_you_are_ctf_champion" in answer:
        return { "correct": False, "message": "It's not going to be the same as last year's...." }
    if "yeee3ee3ew_sha44aal11l1l1l_bE#eeee_azzzzzsimmileitted!!" in answer:
        return { "correct": True, "message": "Now send the writeup to <code>failed.down@gmail.com</code>" }
    return { "correct": False, "message": "Keep... looking........ harder............." }
|
EasyCTF/easyctf-2015
|
api/problems/recon/ioexception/ioexception_grader.py
|
Python
|
mit
| 461
|
import os
import sqlite3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.distributions.empirical_distribution import ECDF
from matplotlib.ticker import FuncFormatter
def thousands(x, pos):
    """Tick formatter: render a value with a B/M/K suffix.

    `pos` (tick position) is required by FuncFormatter but unused.
    Values below 1000 are returned unchanged (not as a string).
    """
    scales = ((1e9, 1e-9, 'B'), (1e6, 1e-6, 'M'), (1e3, 1e-3, 'K'))
    for threshold, factor, suffix in scales:
        if x >= threshold:
            return '%.1f%s' % (x * factor, suffix)
    return x
# Axis formatter that renders large tick values as e.g. 1.2K / 3.4M / 5.6B.
formatter = FuncFormatter(thousands)
# NOTE(review): Python 2 print statements below -- this script targets py2.
def ecdf_for_plot(sample):
    """Return (x, y) arrays for a step-plot of the sample's ECDF.

    x is the sample sorted descending; y the ECDF evaluated at x.
    """
    #x = np.linspace(min(sample), max(sample))
    print "sample: ",type(sample)
    x = sample.sort_values(ascending = False)
    ecdf = ECDF(x)
    # print ecdf
    print "ecdf: ",type(ecdf)
    y = ecdf(x)
    #print y
    print "y: ", type(y)
    return (x,y)
# --- Load crawl results and denormalize domain/company information ---
res_dir = '/home/nsarafij/project/OpenWPM/analysis/results/'
db = res_dir + 'images.sqlite'
conn = sqlite3.connect(db)
query = 'SELECT * FROM Images'
df = pd.read_sql_query(query,conn)
# Rename resp_domain -> respDom_id for consistency with the id columns.
df.columns = ['respDom_id' if x=='resp_domain' else x for x in df.columns]
# Map site ids and response-domain ids to their two-part-domain ids.
query = 'SELECT * FROM Domain_DomainTwoPart'
df_domdom2 = pd.read_sql_query(query,conn)
df=df.merge(df_domdom2,left_on='site_id',right_on='domain_id',how='left')
df.drop('domain_id',axis=1,inplace=True)
df.columns = ['site_id2' if x=='domainTwoPart_id' else x for x in df.columns]
df=df.merge(df_domdom2,left_on='respDom_id',right_on='domain_id',how='left')
df.drop('domain_id',axis=1,inplace=True)
df.columns = ['respDom_id2' if x=='domainTwoPart_id' else x for x in df.columns]
# Attach the readable two-part domain names for site and response domain.
query = 'SELECT * FROM DomainsTwoPart'
df_dom2 = pd.read_sql_query(query,conn)
df=df.merge(df_dom2, left_on = 'site_id2', right_on = 'id', how = 'left')
df.drop('id',inplace=True,axis=1)
df.columns = ['site_domain2' if x=='domainTwoPart' else x for x in df.columns]
df=df.merge(df_dom2, left_on = 'respDom_id2', right_on = 'id', how = 'left')
df.drop('id',inplace=True,axis=1)
df.columns = ['respDom_domain2' if x=='domainTwoPart' else x for x in df.columns]
# Attach the owning company of each response domain.
query = 'SELECT * FROM Domain2Company'
df_dom2com = pd.read_sql_query(query,conn)
df=df.merge(df_dom2com,left_on='respDom_id2',right_on='domainTwoPart_id',how='left')
df.drop('domainTwoPart_id',axis=1,inplace=True)
query = 'SELECT * FROM Companies'
df_com = pd.read_sql_query(query,conn)
df=df.merge(df_com,left_on='company_id',right_on='id',how='left')
df.drop('id',axis=1,inplace=True)
#conn.close()
# Split into first-party (site serves its own image) and third-party rows.
df1=df.loc[df['site_id2']==df['respDom_id2']]
df2=df.loc[df['site_id2']!=df['respDom_id2']]
df2.shape[0]/float(df.shape[0]) #0.6757349672921374
# how many sites and links have third-party images
sites = []
links = 0
for site_id in range(1,10001):
if site_id % 100 == 0: print site_id
df3=df2.loc[df2['site_id']==site_id]
df3_size = df3['link_id'].unique().shape[0]
links += df3_size
if df3_size: sites.append(site_id)
len(sites) #8343
8343/8965 = 0.9306190741773563
links #912363
912363/964315.
# distinct response domains
df['respDom_id2'].unique().size #29009
df1['respDom_id2'].unique().size #7863
df2['respDom_id2'].unique().size #23235
domains2 = df2[['respDom_id2','respDom_domain2']].groupby(['respDom_id2','respDom_domain2']).size().sort_values(ascending = False).reset_index()
domains2.to_csv('/home/nsarafij/project/OpenWPM/analysis/results/third-domains2_owners',index=False,encoding='utf-8')
# companies
############## considering third-party domains only
# all images: counts per each response domain
domains = df2['respDom_domain2'].value_counts()
total = df2.shape[0]
domains_cum = domains.cumsum()
dom_perc = domains/float(total)
dom_perc_cum = dom_perc.cumsum()
# all images: counts per each company
com = df2['company'].value_counts()
com_cum = com.cumsum()
com_perc = com/df2.shape[0]
com_perc_cum = com_perc.cumsum()
# all images - response domains
fig_dir = '/home/nsarafij/project/OpenWPM/analysis/figs_10k_domains/'
# cdf of number of third-party images per third-party domains
(x,y) = ecdf_for_plot(domains)
plt.figure()
plt.step(x,y)
plt.ylabel('cdf')
# NOTE(review): label says "zero images" -- probably meant
# "no of images per domain"; confirm intended wording.
plt.xlabel('no of zero images per domain')
plt.grid(True)
plt.xscale('symlog')
plt.savefig(os.path.join(fig_dir,'third-domains2_cdf.png'))
plt.show()
# counts
fig, ax = plt.subplots()
plt.plot(range(1,domains.shape[0]+1),domains,marker='.')
plt.xscale('log')
plt.xlabel('domain rank')
plt.ylabel('count of images')
plt.xlim([1,domains.size])
ax.yaxis.set_major_formatter(formatter)
plt.grid(True)
fig.savefig(fig_dir + 'third-domain2_count.png',format='png')
# percentages
fig = plt.figure()
plt.plot(range(1,domains.shape[0]+1),dom_perc*100,marker='.')
plt.xscale('log')
plt.xlabel('domain rank')
plt.ylabel('percentage of total number of images')
plt.xlim([1,domains.size])
plt.grid(True)
fig.savefig(fig_dir + 'third-domain2_perc.png',format='png')
# cumulative counts
fig, ax = plt.subplots()
plt.plot(range(1,domains.shape[0]+1),domains_cum,marker='.')
plt.xscale('log')
plt.title('Cumulative Counts')
plt.xlabel('domain rank')
plt.ylabel('count of all images')
ax.yaxis.set_major_formatter(formatter)
plt.grid(True)
#fig.tight_layout()
fig.savefig(fig_dir + 'third-domain2_count_cum.png',format='png')
# cumulative percentages
fig = plt.figure()
plt.plot(range(1,domains.shape[0]+1),dom_perc_cum*100,marker='.')
plt.xscale('log')
plt.ylim([0,100])
plt.title('Cumulative Percentage Counts')
plt.xlabel('domain rank')
plt.ylabel('percentage of total number of images')
plt.grid(True)
#fig.tight_layout()
fig.savefig(fig_dir + 'third-domain2_perc_cum.png',format='png')
# top 30 domains - counts
n=30
x=np.arange(0.5,n)
fig, ax = plt.subplots()
plt.bar(x,domains[0:n],align='center')
plt.xlabel('domains')
plt.ylabel('count of images')
labels = list(domains.index[0:n])
plt.xticks(x, labels, rotation=80)
ax.yaxis.set_major_formatter(formatter)
fig.tight_layout()
plt.grid(True)
fig.savefig(fig_dir + 'third-domain2_count_top30.png',format='png')
# top 30 domains - percentages
fig = plt.figure()
plt.bar(x,dom_perc[0:n]*100,align='center')
plt.xlabel('domains')
plt.ylabel('percentage of total number of images')
labels = list(domains.index[0:n])
plt.xticks(x, labels, rotation=80)
fig.tight_layout()
plt.grid(True)
fig.savefig(fig_dir + 'third-domain2_perc_top30.png',format='png')
# Per-(domain, company) image percentages, written as LaTeX table rows.
domcom = df2[['respDom_domain2','company']].groupby(['respDom_domain2','company']).size().reset_index(name='img_perc').sort_values('img_perc',ascending=False)
domcom['img_perc']=domcom['img_perc']/float(df2.shape[0])*100
table_dir = '/home/nsarafij/project/OpenWPM/analysis/tables_10k'
fhand = open(os.path.join(table_dir,'third-domain2company_perc_top30.txt'),'w+')
### table domains - companies
for i in range(0,n):
    dom = domcom.iloc[i,0]
    comp = domcom.iloc[i,1]
    perc = domcom.iloc[i,2]
    s = str(i+1) + ' & ' + dom + ' & ' + comp + ' & ' + '%.2f' % perc + '\\\\ \\hline'
    print s
    s = s.encode('UTF-8')
    print s
    fhand.write(s + '\n')
fhand.close()
### companies
# Same four figures as for domains, but ranked by owning company.
# counts
fig_dir = '/home/nsarafij/project/OpenWPM/analysis/figs_10k_domains/'
fig, ax = plt.subplots()
plt.plot(range(1,com.shape[0]+1),com,marker='.')
plt.xscale('log')
plt.xlabel('company rank')
plt.ylabel('count of third-party images')
plt.xlim([1,com.size])
ax.yaxis.set_major_formatter(formatter)
plt.grid(True)
fig.savefig(fig_dir + 'third-company_count.png',format='png')
# percentages
fig = plt.figure()
plt.plot(range(1,com.shape[0]+1),com_perc*100,marker='.')
plt.xscale('log')
plt.xlabel('company rank')
plt.ylabel('percentage of third-party images')
plt.xlim([1,com.size])
plt.grid(True)
fig.savefig(fig_dir + 'third-company_perc.png',format='png')
# cumulative counts
fig, ax = plt.subplots()
plt.plot(range(1,com.shape[0]+1),com_cum,marker='.')
plt.xscale('log')
plt.title('Cumulative Counts')
plt.xlabel('company rank')
plt.ylabel('count of third-party images')
ax.yaxis.set_major_formatter(formatter)
plt.grid(True)
#fig.tight_layout()
fig.savefig(fig_dir + 'third-company_count_cum.png',format='png')
# cumulative percentages
fig = plt.figure()
plt.plot(range(1,com.shape[0]+1),com_perc_cum*100,marker='.')
plt.xscale('log')
plt.ylim([0,100])
plt.title('Cumulative Percentage Counts')
plt.xlabel('company rank')
plt.ylabel('percentage of third-party images')
plt.grid(True)
#fig.tight_layout()
fig.savefig(fig_dir + 'third-company_perc_cum.png',format='png')
# top 30 companies - counts
n=30
x=np.arange(0.5,n)
fig, ax = plt.subplots()
plt.bar(x,com[0:n],align='center')
plt.xlabel('company')
plt.ylabel('count of third-party images')
labels = list(com.index[0:n])
plt.xticks(x, labels, rotation=90)
ax.yaxis.set_major_formatter(formatter)
fig.tight_layout()
plt.grid(True)
fig.savefig(fig_dir + 'third-company_count_top30.png',format='png')
# top 30 companies - percentages
fig = plt.figure()
plt.bar(x,com_perc[0:n]*100,align='center')
plt.xlabel('company')
plt.ylabel('percentage of third-party images')
labels = list(com.index[0:n])
plt.xticks(x, labels, rotation=90)
fig.tight_layout()
plt.grid(True)
fig.savefig(fig_dir + 'third-company_perc_top30.png',format='png')
############################## 1-pixel images
# NOTE(review): .ix is long-deprecated in pandas; .loc is used elsewhere.
df3all=df.ix[df['pixels']==1]
df3all.shape[0] #9906784
df3all.shape[0]/float(df.shape[0]) #0.31093023806156583
df3=df2.ix[df2['pixels']==1]
df3.shape[0] #9662147
df3.shape[0]/float(df3all.shape[0]) #0.9753061134672968
# 1-pixel images: counts per each response domain
dom_pix1 = df3['respDom_domain2'].value_counts()
dom_pix1_cum = dom_pix1.cumsum()
dom_pix1_perc = dom_pix1/float(df3.shape[0])
dom_pix1_perc_ = dom_pix1/float(dom_pix1_cum[dom_pix1_cum.size-1:dom_pix1_cum.size])
dom_pix1_perc_cum = dom_pix1_perc_.cumsum()
#dom_pix1_=pd.merge(pd.DataFrame(dom_pix1), df_dom, left_index=True, right_on='id')
# 1-pixel images: counts per each company
com_pix1 = df3['company'].value_counts()
com_pix1_cum = com_pix1.cumsum()
com_pix1_perc = com_pix1/float(df3.shape[0])
com_pix1_perc_ = com_pix1/float(com_pix1_cum[com_pix1_cum.size-1:com_pix1_cum.size])
com_pix1_perc_cum = com_pix1_perc_.cumsum()
### figures
fig_dir = '/home/nsarafij/project/OpenWPM/analysis/figs_10k_domains/'
# cdf of no of
# NOTE(review): this overwrites the earlier all-images
# 'third-domains2_cdf.png' figure -- confirm the filename is intended.
(x,y) = ecdf_for_plot(dom_pix1)
plt.figure()
plt.step(x,y)
plt.ylabel('cdf')
plt.xlabel('no of 1-pixel third-party images per domain')
plt.grid(True)
plt.xscale('symlog')
plt.savefig(os.path.join(fig_dir,'third-domains2_cdf.png'))
# counts
fig, ax = plt.subplots()
plt.plot(dom_pix1,marker='.')
plt.xscale('symlog')
ax.yaxis.set_major_formatter(formatter)
plt.xlabel('domain rank')
plt.ylabel('count of images')
plt.title('1-pixel Images')
plt.grid(True)
fig.savefig(fig_dir + 'third-domain2_pix1_count.png',format='png')
# percentages
fig = plt.figure()
plt.plot(range(1,dom_pix1_perc.shape[0]+1),dom_pix1_perc*100,marker='.')
plt.xscale('symlog')
plt.xlabel('domain rank')
plt.ylabel('percentage of total number of images')
plt.title('1-pixel Images')
plt.grid(True)
fig.savefig(fig_dir + 'third-domain2_pix1_perc.png',format='png')
# cumulative counts
fig, ax = plt.subplots()
plt.plot(range(1,dom_pix1_perc.shape[0]+1),dom_pix1_cum,marker='.')
ax.yaxis.set_major_formatter(formatter)
plt.xscale('log')
plt.title('Cumulative Counts for 1-pixel Images')
plt.xlabel('domain rank')
plt.ylabel('count')
plt.grid(True)
fig.savefig(fig_dir + 'third-domain2_pix1_count_cum.png',format='png')
# cumulative percentages
fig = plt.figure()
plt.plot(range(1,dom_pix1_perc.shape[0]+1),dom_pix1_perc_cum*100,marker='.')
plt.xscale('log')
plt.title('Cumulative Percentage Counts for 1-pixel Images')
plt.xlabel('domain rank')
plt.ylabel('percentage of 1-pixel images')
plt.grid(True)
fig.savefig(fig_dir + 'third-domain2_pix1_perc_cum.png',format='png')
# top 30 domains - counts
n=30
x=np.arange(0.5,n)
fig, ax = plt.subplots()
plt.bar(x,dom_pix1[0:n],align='center')
ax.yaxis.set_major_formatter(formatter)
plt.xlabel('domains')
plt.ylabel('count of images')
labels = list(dom_pix1.index[0:n])
plt.xticks(x, labels, rotation=80)
plt.title('1-pixel Images')
plt.grid(True)
fig.tight_layout()
fig.savefig(fig_dir + 'third-domain2_pix1_count_top30.png',format='png')
# top 20 domains - percentages
fig = plt.figure()
plt.bar(x,dom_pix1_perc[0:n]*100,align='center')
plt.xlabel('domains')
plt.ylabel('percentage of 1-pixel images')
labels = list(dom_pix1.index[0:n])
plt.xticks(x, labels, rotation=80)
plt.title('1-pixel Images')
plt.grid(True)
fig.tight_layout()
fig.savefig(fig_dir + 'third-domain2_pix1_perc_top30.png',format='png')
plt.show()
### table domains - companies
domcom = df3[['respDom_domain2','company']].groupby(['respDom_domain2','company']).size().reset_index(name='img_perc').sort_values('img_perc',ascending=False)
domcom['img_perc']=domcom['img_perc']/float(df3.shape[0])*100
table_dir = '/home/nsarafij/project/OpenWPM/analysis/tables_10k'
fhand = open(os.path.join(table_dir,'third-domain2company_pix1_perc_top30.txt'),'w+')
for i in range(0,n):
    dom = domcom.iloc[i,0]
    # NOTE(review): `com` here shadows the company-counts Series defined
    # earlier; harmless only because `com` is not reused afterwards.
    com = domcom.iloc[i,1]
    perc = domcom.iloc[i,2]
    s = str(i+1) + ' & ' + dom + ' & ' + com + ' & ' + '%.2f' % perc + '\\\\ \\hline'
    print s
    s = s.encode('UTF-8')
    print s
    fhand.write(s + '\n')
fhand.close()
### companies
# Same company-ranked figures, restricted to 1-pixel third-party images.
# counts
fig_dir = '/home/nsarafij/project/OpenWPM/analysis/figs_10k_domains/'
fig, ax = plt.subplots()
plt.plot(range(1,com_pix1.shape[0]+1),com_pix1,marker='.')
plt.xscale('log')
plt.xlabel('company rank')
plt.ylabel('count of third-party images')
plt.xlim([1,com_pix1.size])
ax.yaxis.set_major_formatter(formatter)
plt.grid(True)
fig.savefig(fig_dir + 'third-company_pix1_count.png',format='png')
# percentages
fig = plt.figure()
plt.plot(range(1,com_pix1.shape[0]+1),com_pix1_perc*100,marker='.')
plt.xscale('log')
plt.xlabel('company rank')
plt.ylabel('percentage of third-party images')
plt.xlim([1,com_pix1.size])
plt.grid(True)
fig.savefig(fig_dir + 'third-company_pix1_perc.png',format='png')
# cumulative counts
fig, ax = plt.subplots()
plt.plot(range(1,com_pix1.shape[0]+1),com_pix1_cum,marker='.')
plt.xscale('log')
plt.title('Cumulative Counts')
plt.xlabel('company rank')
plt.ylabel('count of third-party images')
ax.yaxis.set_major_formatter(formatter)
plt.grid(True)
#fig.tight_layout()
fig.savefig(fig_dir + 'third-company_pix1_count_cum.png',format='png')
# cumulative percentages
fig = plt.figure()
plt.plot(range(1,com_pix1.shape[0]+1),com_pix1_perc_cum*100,marker='.')
plt.xscale('log')
plt.ylim([0,100])
plt.title('Cumulative Percentage Counts')
plt.xlabel('company rank')
plt.ylabel('percentage of third-party images')
plt.grid(True)
#fig.tight_layout()
fig.savefig(fig_dir + 'third-company_pix1_perc_cum.png',format='png')
# top 30 companies - counts
n=30
x=np.arange(0.5,n)
fig, ax = plt.subplots()
plt.bar(x,com_pix1[0:n],align='center')
plt.xlabel('company')
plt.ylabel('count of third-party images')
labels = list(com_pix1.index[0:n])
plt.xticks(x, labels, rotation=90)
ax.yaxis.set_major_formatter(formatter)
fig.tight_layout()
plt.grid(True)
fig.savefig(fig_dir + 'third-company_pix1_count_top30.png',format='png')
# top 30 companies - percentages
fig = plt.figure()
plt.bar(x,com_pix1_perc[0:n]*100,align='center')
plt.xlabel('company')
plt.ylabel('percentage of third-party images')
labels = list(com_pix1.index[0:n])
plt.xticks(x, labels, rotation=90)
fig.tight_layout()
plt.grid(True)
fig.savefig(fig_dir + 'third-company_pix1_perc_top30.png',format='png')
plt.show()
### table companies
# LaTeX rows: rank & company & percentage of 1-pixel images.
table_dir = '/home/nsarafij/project/OpenWPM/analysis/tables_10k'
fhand = open(os.path.join(table_dir,'third-company_pix1_perc_top30.txt'),'w+')
for i in range(0,n):
    com = com_pix1_perc.index[i]
    perc = com_pix1_perc[i]*100
    s = str(i+1) + ' & ' + com + ' & ' + '%.3f' % perc + '\\\\ \\hline'
    print s
    s = s.encode('UTF-8')
    print s
    fhand.write(s + '\n')
fhand.close()
conn.close()
'''
fig1 = plt.figure(1)
plt.hist(df['size'], bins=100, color='lightblue',label ='all')
plt.title('Histogram of Images Sizes, no of sites = 100, max no of links = 300')
plt.xlabel('size [bytes]')
plt.ylabel('no of images')
fig2 = plt.figure(2)
plt.hist(df['size'], bins=100, range=(0,50000), color='lightblue',label ='all')
plt.title('Histogram of Images Sizes, no of sites = 100, max no of links = 300')
plt.xlabel('size [bytes]')
plt.ylabel('no of images')
fig3 = plt.figure(3)
plt.hist(df['size'], bins=100, range=(0,100), color='lightblue',label='all')
plt.title('Histogram of Images Sizes, no of sites = 100, max no of links = 300')
plt.xlabel('size [bytes]')
plt.ylabel('no of images')
fig = plt.figure(1)
plt.hist(l, bins=100, color='red',label =r'size $\neq$ content-length')
#plt.title('Histogram of Images Sizes (size != content-length), no of sites = 100, max no of links = 300')
#plt.xlabel('size [bytes]')
#plt.ylabel('no of images')
#fig.savefig('img_sizes_hist4.eps',format='eps')
plt.legend()
fig = plt.figure(2)
plt.hist(l, bins=100, range=(0,50000), color='red',label =r'size $\neq$ content-length')
#plt.title('Histogram of Images Sizes (size != content-length), no of sites = 100, max no of links = 300')
#plt.xlabel('size [bytes]')
#plt.ylabel('no of images')
#fig.savefig('img_sizes_hist5.eps',format='eps')
plt.legend()
fig = plt.figure(3)
plt.hist(l, bins=100, range=(0,100), color='red',label =r'size $\neq$ content-length')
#plt.title('Histogram of Images Sizes (size != content-length), no of sites = 100, max no of links = 300')
#plt.xlabel('size [bytes]')
#plt.ylabel('no of images')
#fig.savefig('img_sizes_hist6.eps',format='eps')
plt.legend()
#plt.show()
fig1.savefig('figs/img_sizes_hist1.eps',format='eps')
fig2.savefig('figs/img_sizes_hist2.eps',format='eps')
fig3.savefig('figs/img_sizes_hist3.eps',format='eps')
fig1 = plt.figure(1)
plt.plot(dom_perc,marker='o')
plt.title('Image Counts for Response Domains, no of sites = 100, max no of links = 300')
plt.xlabel('domains')
plt.ylabel('percentage of images')
fig2 = plt.figure(2)
plt.plot(dom_perc,marker='o')
plt.title('Image Counts for Response Domains, no of sites = 100, max no of links = 300')
plt.xlabel('domains')
plt.ylabel('percentage of images')
#plt.ylim([10,30000])
plt.xlim([0,100])
fig3 = plt.figure(3)
plt.plot(domains,marker='o')
plt.title('Image Counts for Response Domains, no of sites = 100, max no of links = 300')
plt.xlabel('domains')
plt.ylabel('percentage of images')
#plt.ylim([10,30000])
plt.xlim([0,10])
#plt.ylim([10,30000])
#plt.xlim([0,10])
fig5 = plt.figure(5)
plt.plot(dom_perc_cum,marker='o')
plt.title('Image Cumulative Percentage Counts for Response Domains \n no of sites = 100, max no of links = 300')
plt.xlabel('domains')
plt.ylabel('percentage of images')
#plt.ylim([10,30000])
plt.xlim([0,100])
fig6 = plt.figure(6)
plt.plot(dom_perc_cum,marker='o')
plt.title('Image Cumulative Percentage Counts for Response Domains \n no of sites = 100, max no of links = 300')
plt.xlabel('domains')
plt.ylabel('percentage of images')
#plt.ylim([10,30000])
plt.xlim([0,10])
#plt.show()
for i in range(1,7):
fig_file = 'figs/img_domains_' + str(i) +'.eps'
s = "fig{}.savefig('".format(i) + fig_file + "',format='eps')"
print s
exec s
'''
|
natasasdj/OpenWPM
|
analysis/12_images_third-domains2.py
|
Python
|
gpl-3.0
| 19,012
|
__version_info__ = {
'major': 2,
'minor': 2,
'micro': 0,
'releaselevel': 'final',
'serial': 0
}
def get_version(short=False):
assert __version_info__['releaselevel'] in ('alpha', 'beta', 'final')
vers = ["%(major)i.%(minor)i" % __version_info__, ]
if __version_info__['micro']:
vers.append(".%(micro)i" % __version_info__)
if __version_info__['releaselevel'] != 'final' and not short:
vers.append(
'%s%i' % (
__version_info__['releaselevel'][0],
__version_info__['serial'])
)
return ''.join(vers)
__version__ = get_version()
default_app_config = 'explorer.apps.ExplorerAppConfig'
|
epantry/django-sql-explorer
|
explorer/__init__.py
|
Python
|
mit
| 693
|
"""
Virtualization installation functions for image based deployment
Copyright 2008 Red Hat, Inc.
Bryan Kearney <bkearney@redhat.com>
Original version based on virt-image
David Lutterkort <dlutter@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import os, sys, time, stat
import shutil
import random
import exceptions
import errno
import virtinst
try:
from virtinst import ImageParser, Guest, CapabilitiesParser, VirtualNetworkInterface
except:
# if this fails, this is ok, the user just won't be able to use image objects...
# keeping this dynamic allows this to work on older EL.
pass
import libvirt
import app as koan
#FIXME this was copied
def random_mac():
    """Generate a random MAC address inside the Xensource, Inc. OUI.

    Taken from xend/server/netif.py: the first three octets are the OUI
    00-16-3E (allocated to Xensource, Inc.); the last three octets are
    random, with the first of them capped at 0x7f.

    return: MAC address string of the form "00:16:3e:xx:xx:xx"
    """
    octets = [
        0x00, 0x16, 0x3e,
        random.randint(0x00, 0x7f),
        random.randint(0x00, 0xff),
        random.randint(0x00, 0xff),
    ]
    return ':'.join("%02x" % octet for octet in octets)
def transform_arch(arch):
    """Map a distro architecture name to the one virtinst expects.

    "i386" is reported to virtinst as "i686"; any other value passes
    through unchanged.
    """
    return "i686" if arch == "i386" else arch
def copy_image(original_file, new_location):
    """Copy the disk image at *original_file* to *new_location*.

    Returns *new_location* so callers can chain on the copied path.
    """
    destination = new_location
    shutil.copyfile(original_file, destination)
    return destination
def process_disk(image, boot, file, location, target):
    """Copy *file* to *location* and wire it into *image*/*boot*.

    Registers the copied image as a raw Disk in the image's storage map
    and attaches it to the boot descriptor as a Drive on *target*
    (e.g. "hda").
    """
    copied_path = copy_image(file, location)
    # Describe the backing storage for the copied image.
    disk = ImageParser.Disk()
    disk.format = "raw"
    disk.file = copied_path
    disk.use = "user"
    disk.id = copied_path
    image.storage[disk.id] = disk
    # Expose the storage to the guest as a drive on the boot descriptor.
    drive = ImageParser.Drive()
    drive.id = copied_path
    drive.target = target
    drive.disk = disk
    boot.disks.append(drive)
    #dev api
    #boot.drives.append(drive)
def process_networks(domain, guest, profile_data, bridge):
    """Attach one NIC per requested network interface to *guest*.

    A bridge (or the libvirt default network) is used for every requested
    nic; if there are more bridges than nics the extras are discarded.
    *bridge* may be a single name or a comma separated list; when None,
    the profile's "virt_bridge" value is used instead.
    """
    domain.interface = int(profile_data["network_count"])
    bridges = []
    #use the provided bridge first
    guest_bridge = bridge
    if guest_bridge is None:
        guest_bridge = profile_data["virt_bridge"]
    # Look for commas: a comma separated value means one bridge per NIC.
    if (guest_bridge is not None) and (len(guest_bridge.strip()) > 0):
        if guest_bridge.find(",") == -1:
            bridges.append(guest_bridge)
        else:
            # BUG FIX: this line used to read
            #     bridges == guest_bridge.split(",")
            # i.e. an equality comparison whose result was discarded, so a
            # comma separated bridge list was silently ignored and every
            # NIC fell through to the default network.
            bridges = guest_bridge.split(",")
    for cnt in range(0, domain.interface):
        if cnt < len(bridges):
            nic = VirtualNetworkInterface(random_mac(), type="bridge", bridge=bridges[cnt])
            #dev api
            #nic = VirtualNetworkInterface(random_mac(), type="bridge", bridge = bridge, conn=guest.conn)
        else:
            default_network = virtinst.util.default_network()
            #dev api
            #default_network = virtinst.util.default_network(guest.conn)
            nic = VirtualNetworkInterface(random_mac(), type=default_network[0], network=default_network[1])
        guest.nics.append(nic)
def start_install(name=None, ram=None, disks=None,
                  uuid=None,
                  extra=None,
                  vcpus=None,
                  profile_data=None, arch=None, no_gfx=False, fullvirt=False, bridge=None, virt_type=None):
    """Define and start an image-based guest through virtinst/libvirt.

    name/ram/vcpus/uuid -- basic VM parameters
    disks               -- disk specs; only disks[0][0] (the path) is used
    extra               -- extra kernel command-line arguments (may be None)
    profile_data        -- profile dict ("file", "virt_bridge", "network_count", ...)
    arch                -- guest architecture ("i386" is normalized to "i686")
    no_gfx              -- if True, run without VNC graphics
    bridge              -- bridge name(s) for the guest NICs, comma separated
    virt_type           -- "xen", "qemu", ... or None/"auto" to autodetect

    Returns a short usage-hint string once the install has been started.
    """
    #FIXME how to do a non-default connection
    #Can we drive off of virt-type?
    connection = None
    if (virt_type is None ) or (virt_type == "auto"):
        connection = virtinst.util.default_connection()
    elif virt_type.lower()[0:3] == "xen":
        connection = "xen"
    else:
        connection = "qemu:///system"
    connection = libvirt.open(connection)
    capabilities = virtinst.CapabilitiesParser.parse(connection.getCapabilities())
    image_arch = transform_arch(arch)
    image = ImageParser.Image()
    #dev api
    #image = ImageParser.Image(filename="") #FIXME, ImageParser should take in None
    image.name = name
    domain = ImageParser.Domain()
    domain.vcpu = vcpus
    domain.memory = ram
    image.domain = domain
    boot = ImageParser.Boot()
    boot.type = "hvm" #FIXME HARDCODED
    boot.loader = "hd" #FIXME HARDCODED
    boot.arch = image_arch
    domain.boots.append(boot)
    #FIXME Several issues. Single Disk, type is hardcoded
    #And there is no way to provision with access to "file"
    process_disk(image, boot, profile_data["file"], disks[0][0], "hda")
    #FIXME boot_index??
    installer = virtinst.ImageInstaller(boot_index = 0, image=image, capabilities=capabilities)
    guest = virtinst.FullVirtGuest(connection = connection, installer=installer, arch=image_arch)
    if extra:
        # BUG FIX: the extra args end up embedded in libvirt's domain XML,
        # so "&" must be XML-escaped.  The previous code did
        # extra.replace("&", "&") -- a no-op -- and raised AttributeError
        # whenever extra was left at its default of None.
        extra = extra.replace("&", "&amp;")
    guest.extraargs = extra
    guest.set_name(name)
    guest.set_memory(ram)
    guest.set_vcpus(vcpus)
    if not no_gfx:
        guest.set_graphics("vnc")
    else:
        guest.set_graphics(False)
    if uuid is not None:
        guest.set_uuid(uuid)
    process_networks(domain, guest, profile_data, bridge)
    guest.start_install()
    return "use virt-manager or reconnect with virsh console %s" % name
|
remotesyssupport/koan
|
koan/imagecreate.py
|
Python
|
gpl-2.0
| 5,995
|
import json
from functools import partial
import factory
from factory.django import DjangoModelFactory
# Imported to re-export
# pylint: disable=unused-import
from student.tests.factories import UserFactory # Imported to re-export
from student.tests.factories import GroupFactory # Imported to re-export
from student.tests.factories import CourseEnrollmentAllowedFactory # Imported to re-export
from student.tests.factories import RegistrationFactory # Imported to re-export
# pylint: enable=unused-import
from student.tests.factories import UserProfileFactory as StudentUserProfileFactory
from courseware.models import StudentModule, XModuleUserStateSummaryField
from courseware.models import XModuleStudentInfoField, XModuleStudentPrefsField
from courseware.roles import (
CourseInstructorRole,
CourseStaffRole,
CourseBetaTesterRole,
GlobalStaff,
OrgStaffRole,
OrgInstructorRole,
)
from xmodule.modulestore import Location
# Shortcut for building xmodule Locations in the i4x://edX/test_course
# 'problem' namespace; call with the final name, e.g. location('usage_id').
location = partial(Location, 'i4x', 'edX', 'test_course', 'problem')
class UserProfileFactory(StudentUserProfileFactory):
    # Pin the profile's courseware file for these courseware tests.
    courseware = 'course.xml'
class InstructorFactory(UserFactory):
    """
    Given a course Location, returns a User object with instructor
    permissions for `course`.
    """
    last_name = "Instructor"
    @factory.post_generation
    def course(self, create, extracted, **kwargs):
        # `extracted` is the value passed as InstructorFactory(course=<Location>);
        # there is no sensible default course, so require it explicitly.
        if extracted is None:
            raise ValueError("Must specify a course location for a course instructor user")
        CourseInstructorRole(extracted).add_users(self)
class StaffFactory(UserFactory):
    """
    Given a course Location, returns a User object with staff
    permissions for `course`.
    """
    last_name = "Staff"
    @factory.post_generation
    def course(self, create, extracted, **kwargs):
        # Same pattern as above: the course= keyword argument is mandatory.
        if extracted is None:
            raise ValueError("Must specify a course location for a course staff user")
        CourseStaffRole(extracted).add_users(self)
class BetaTesterFactory(UserFactory):
    """
    Given a course Location, returns a User object with beta-tester
    permissions for `course`.
    """
    last_name = "Beta-Tester"
    @factory.post_generation
    def course(self, create, extracted, **kwargs):
        # Same pattern as above: the course= keyword argument is mandatory.
        if extracted is None:
            raise ValueError("Must specify a course location for a beta-tester user")
        CourseBetaTesterRole(extracted).add_users(self)
class OrgStaffFactory(UserFactory):
    """
    Given a course Location, returns a User object with org-staff
    permissions for `course`.
    """
    last_name = "Org-Staff"
    @factory.post_generation
    def course(self, create, extracted, **kwargs):
        # Same pattern as above: the course= keyword argument is mandatory.
        if extracted is None:
            raise ValueError("Must specify a course location for an org-staff user")
        OrgStaffRole(extracted).add_users(self)
class OrgInstructorFactory(UserFactory):
    """
    Given a course Location, returns a User object with org-instructor
    permissions for `course`.
    """
    last_name = "Org-Instructor"
    @factory.post_generation
    def course(self, create, extracted, **kwargs):
        # Same pattern as above: the course= keyword argument is mandatory.
        if extracted is None:
            raise ValueError("Must specify a course location for an org-instructor user")
        OrgInstructorRole(extracted).add_users(self)
class GlobalStaffFactory(UserFactory):
    """
    Returns a User object with global staff access
    """
    last_name = "GlobalStaff"
    @factory.post_generation
    def set_staff(self, create, extracted, **kwargs):
        # Global staff applies platform-wide, so no course argument is needed.
        GlobalStaff().add_users(self)
class StudentModuleFactory(DjangoModelFactory):
    """Build courseware StudentModule rows with sensible test defaults."""
    # NOTE(review): FACTORY_FOR is the legacy factory_boy (< 2.4) way of
    # declaring the target model; newer versions use `class Meta: model = ...`.
    FACTORY_FOR = StudentModule
    module_type = "problem"
    student = factory.SubFactory(UserFactory)
    course_id = "MITx/999/Robot_Super_Course"
    state = None
    grade = None
    max_grade = None
    done = 'na'
class UserStateSummaryFactory(DjangoModelFactory):
    """Build XModuleUserStateSummaryField rows for a fixed test usage id."""
    FACTORY_FOR = XModuleUserStateSummaryField
    field_name = 'existing_field'
    value = json.dumps('old_value')
    usage_id = location('usage_id').url()
class StudentPrefsFactory(DjangoModelFactory):
    """Build XModuleStudentPrefsField rows tied to a fresh test user."""
    FACTORY_FOR = XModuleStudentPrefsField
    field_name = 'existing_field'
    value = json.dumps('old_value')
    student = factory.SubFactory(UserFactory)
    module_type = 'mock_problem'
class StudentInfoFactory(DjangoModelFactory):
    """Build XModuleStudentInfoField rows tied to a fresh test user."""
    FACTORY_FOR = XModuleStudentInfoField
    field_name = 'existing_field'
    value = json.dumps('old_value')
    student = factory.SubFactory(UserFactory)
|
mjg2203/edx-platform-seas
|
lms/djangoapps/courseware/tests/factories.py
|
Python
|
agpl-3.0
| 4,471
|
import sys
import threading
import traceback
import warnings
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
from _pydev_bundle.pydev_imports import xmlrpclib, _queue
from _pydevd_bundle.pydevd_constants import Null, IS_PY3K
# Module-level alias: ServerFacade/ServerComm below are fed from a Queue().
Queue = _queue.Queue
#This may happen in IronPython (in Python it shouldn't happen as there are
#'fast' replacements that are used in xmlrpclib.py)
warnings.filterwarnings(
    'ignore', 'The xmllib module is obsolete.*', DeprecationWarning)
# Encoding used to handle byte-string file names before sending them over XML-RPC.
file_system_encoding = getfilesystemencoding()
#=======================================================================================================================
# _ServerHolder
#=======================================================================================================================
class _ServerHolder:
    """Namespace holding the single server proxy (avoids a bare module global)."""
    SERVER = None


#=======================================================================================================================
# set_server
#=======================================================================================================================
def set_server(server):
    """Install *server* as the process-wide notification receiver."""
    _ServerHolder.SERVER = server
#=======================================================================================================================
# ParallelNotification
#=======================================================================================================================
class ParallelNotification(object):
    """One queued XML-RPC notification: a method name plus its arguments."""

    def __init__(self, method, args):
        self.method = method
        self.args = args

    def to_tuple(self):
        """Return the (method, args) pair to be sent over the wire."""
        return self.method, self.args


#=======================================================================================================================
# KillServer
#=======================================================================================================================
class KillServer(object):
    """Sentinel posted to the notifications queue to make ServerComm exit."""
    pass


#=======================================================================================================================
# ServerFacade
#=======================================================================================================================
class ServerFacade(object):
    """Queue-backed stand-in for the remote server: every notify* call is
    enqueued as a ParallelNotification instead of being sent synchronously."""

    def __init__(self, notifications_queue):
        self.notifications_queue = notifications_queue

    def _enqueue(self, method, args):
        # Single funnel through which all notifications reach the queue.
        self.notifications_queue.put_nowait(ParallelNotification(method, args))

    def notifyTestsCollected(self, *args):
        self._enqueue('notifyTestsCollected', args)

    def notifyConnected(self, *args):
        self._enqueue('notifyConnected', args)

    def notifyTestRunFinished(self, *args):
        self._enqueue('notifyTestRunFinished', args)

    def notifyStartTest(self, *args):
        self._enqueue('notifyStartTest', args)

    def notifyTest(self, *args):
        # File names / captured output may be byte strings: normalize them
        # before the XML-RPC layer sees them.
        encoded = tuple(_encode_if_needed(arg) for arg in args)
        self._enqueue('notifyTest', encoded)
#=======================================================================================================================
# ServerComm
#=======================================================================================================================
class ServerComm(threading.Thread):
    """Background thread that drains the notifications queue and forwards
    batches of notifications to the Java side over XML-RPC.

    The thread runs until a KillServer sentinel is read from the queue;
    `finished` is set to True just before it returns.
    """
    def __init__(self, notifications_queue, port, daemon=False):
        threading.Thread.__init__(self)
        self.setDaemon(daemon) # If False, wait for all the notifications to be passed before exiting!
        self.finished = False
        self.notifications_queue = notifications_queue
        from _pydev_bundle import pydev_localhost
        # It is necessary to specify an encoding, that matches
        # the encoding of all bytes-strings passed into an
        # XMLRPC call: "All 8-bit strings in the data structure are assumed to use the
        # packet encoding. Unicode strings are automatically converted,
        # where necessary."
        # Byte strings most likely come from file names.
        encoding = file_system_encoding
        if encoding == "mbcs":
            # Windos symbolic name for the system encoding CP_ACP.
            # We need to convert it into a encoding that is recognized by Java.
            # Unfortunately this is not always possible. You could use
            # GetCPInfoEx and get a name similar to "windows-1251". Then
            # you need a table to translate on a best effort basis. Much to complicated.
            # ISO-8859-1 is good enough.
            encoding = "ISO-8859-1"
        self.server = xmlrpclib.Server('http://%s:%s' % (pydev_localhost.get_localhost(), port),
                                       encoding=encoding)
    def run(self):
        # Block for the first item, then drain whatever else is queued so
        # multiple notifications can be sent in a single notifyCommands call.
        while True:
            kill_found = False
            commands = []
            command = self.notifications_queue.get(block=True)
            if isinstance(command, KillServer):
                kill_found = True
            else:
                assert isinstance(command, ParallelNotification)
                commands.append(command.to_tuple())
            try:
                while True:
                    command = self.notifications_queue.get(block=False) #No block to create a batch.
                    if isinstance(command, KillServer):
                        kill_found = True
                    else:
                        assert isinstance(command, ParallelNotification)
                        commands.append(command.to_tuple())
            except:
                pass #That's OK, we're getting it until it becomes empty so that we notify multiple at once.
            if commands:
                try:
                    self.server.notifyCommands(commands)
                except:
                    # Connection problems must not kill the drain loop.
                    traceback.print_exc()
            if kill_found:
                self.finished = True
                return
#=======================================================================================================================
# initialize_server
#=======================================================================================================================
def initialize_server(port, daemon=False):
    """Create the global notification server exactly once.

    With a valid *port*, a queue-backed ServerFacade is installed and a
    ServerComm thread is started to drain that queue over XML-RPC.  With
    port=None, Null objects are installed so that all notify* calls keep
    working as harmless no-ops.
    """
    if _ServerHolder.SERVER is None:
        if port is not None:
            notifications_queue = Queue()
            _ServerHolder.SERVER = ServerFacade(notifications_queue)
            _ServerHolder.SERVER_COMM = ServerComm(notifications_queue, port, daemon)
            _ServerHolder.SERVER_COMM.start()
        else:
            #Create a null server, so that we keep the interface even without any connection.
            _ServerHolder.SERVER = Null()
            _ServerHolder.SERVER_COMM = Null()
    try:
        _ServerHolder.SERVER.notifyConnected()
    except:
        traceback.print_exc()
#=======================================================================================================================
# notifyTestsCollected
#=======================================================================================================================
def notifyTestsCollected(tests_count):
    """Queue a 'tests collected' notification carrying the total test count."""
    assert tests_count is not None
    try:
        _ServerHolder.SERVER.notifyTestsCollected(tests_count)
    except:
        traceback.print_exc()
#=======================================================================================================================
# notifyStartTest
#=======================================================================================================================
def notifyStartTest(file, test):
    '''
    Queue a notification that a single test is about to run.

    @param file: the tests file (c:/temp/test.py)
    @param test: the test ran (i.e.: TestCase.test1)
    '''
    assert file is not None
    if test is None:
        test = '' #Could happen if we have an import error importing module.
    try:
        _ServerHolder.SERVER.notifyStartTest(file, test)
    except:
        traceback.print_exc()
def _encode_if_needed(obj):
    """Wrap strings in an xmlrpclib.Binary encoded as ISO-8859-1 so the Java
    side can decode them; non-string objects are returned unchanged."""
    # In the java side we expect strings to be ISO-8859-1 (org.python.pydev.debug.pyunit.PyUnitServer.initializeDispatches().new Dispatch() {...}.getAsStr(Object))
    if not IS_PY3K:
        if isinstance(obj, str):
            # Python 2 byte string: decode with the console encoding first,
            # falling back to passing the raw bytes through untouched.
            try:
                return xmlrpclib.Binary(obj.decode(sys.stdin.encoding).encode('ISO-8859-1', 'xmlcharrefreplace'))
            except:
                return xmlrpclib.Binary(obj)
        elif isinstance(obj, unicode):
            return xmlrpclib.Binary(obj.encode('ISO-8859-1', 'xmlcharrefreplace'))
    else:
        if isinstance(obj, str): # Unicode in py3
            return xmlrpclib.Binary(obj.encode('ISO-8859-1', 'xmlcharrefreplace'))
        elif isinstance(obj, bytes):
            try:
                return xmlrpclib.Binary(obj.decode(sys.stdin.encoding).encode('ISO-8859-1', 'xmlcharrefreplace'))
            except:
                return xmlrpclib.Binary(obj) #bytes already
    return obj
#=======================================================================================================================
# notifyTest
#=======================================================================================================================
def notifyTest(cond, captured_output, error_contents, file, test, time):
    '''
    Queue a notification with the result of a single finished test.

    @param cond: ok, fail, error
    @param captured_output: output captured from stdout
    @param error_contents: output captured from stderr
    @param file: the tests file (c:/temp/test.py)
    @param test: the test ran (i.e.: TestCase.test1)
    @param time: float with the number of seconds elapsed
    '''
    assert cond is not None
    assert captured_output is not None
    assert error_contents is not None
    assert file is not None
    if test is None:
        test = '' #Could happen if we have an import error importing module.
    assert time is not None
    try:
        # Normalize possibly-bytes output before it crosses the XML-RPC layer.
        captured_output = _encode_if_needed(captured_output)
        error_contents = _encode_if_needed(error_contents)
        _ServerHolder.SERVER.notifyTest(cond, captured_output, error_contents, file, test, time)
    except:
        traceback.print_exc()
#=======================================================================================================================
# notifyTestRunFinished
#=======================================================================================================================
def notifyTestRunFinished(total_time):
    """Queue a notification that the whole test run finished in *total_time* seconds."""
    assert total_time is not None
    try:
        _ServerHolder.SERVER.notifyTestRunFinished(total_time)
    except:
        traceback.print_exc()
#=======================================================================================================================
# force_server_kill
#=======================================================================================================================
def force_server_kill():
    """Post the KillServer sentinel so the ServerComm thread shuts down."""
    _ServerHolder.SERVER_COMM.notifications_queue.put_nowait(KillServer())
|
fabioz/Pydev
|
plugins/org.python.pydev.core/pysrc/_pydev_runfiles/pydev_runfiles_xml_rpc.py
|
Python
|
epl-1.0
| 11,034
|
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test hookup
$Id$
"""
import unittest
def test_suite():
    """Assemble the doctest suite for this package."""
    # zope.testing bundles its own doctest wrapper; the actual tests live
    # in the property.txt doctest file next to this module.
    from zope.testing import doctest
    return doctest.DocFileSuite('property.txt')
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
|
Donkyhotay/MoonPy
|
zope/cachedescriptors/tests.py
|
Python
|
gpl-3.0
| 856
|
#!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Prefer setuptools; fall back to plain distutils when it is unavailable.
# NOTE(review): the distutils fallback does not honor install_requires or
# entry_points below -- presumably setuptools is expected in practice; verify.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
# Package metadata for the Closure Linter command line tools
# (installs the gjslint and fixjsstyle console scripts).
setup(name='closure_linter',
      version='2.2.6',
      description='Closure Linter',
      license='Apache',
      author='The Closure Linter Authors',
      author_email='opensource@google.com',
      url='http://code.google.com/p/closure-linter',
      install_requires=['python-gflags'],
      package_dir={'closure_linter': 'closure_linter'},
      packages=['closure_linter', 'closure_linter.common'],
      entry_points = {
          'console_scripts': [
              'gjslint = closure_linter.gjslint:main',
              'fixjsstyle = closure_linter.fixjsstyle:main'
          ]
      }
)
|
teeple/pns_server
|
work/install/node-v0.10.25/tools/closure_linter/setup.py
|
Python
|
gpl-2.0
| 1,323
|
# -*- coding: utf-8 -*-
import copy
import datetime
import mock
import pytest
import elastalert.alerts
import elastalert.ruletypes
from elastalert.config import get_file_paths
from elastalert.config import load_configuration
from elastalert.config import load_modules
from elastalert.config import load_options
from elastalert.config import load_rules
from elastalert.util import EAException
# Minimal global elastalert config; used as the base config in every test.
test_config = {'rules_folder': 'test_folder',
               'run_every': {'minutes': 10},
               'buffer_time': {'minutes': 10},
               'es_host': 'elasticsearch.test',
               'es_port': 12345,
               'writeback_index': 'test_index'}
# A fully populated 'spike' rule; tests deepcopy this and tweak single keys.
test_rule = {'es_host': 'test_host',
             'es_port': 12345,
             'name': 'testrule',
             'type': 'spike',
             'spike_height': 2,
             'spike_type': 'up',
             'timeframe': {'minutes': 10},
             'index': 'test_index',
             'query_key': 'testkey',
             'compare_key': 'comparekey',
             'filter': [{'term': {'key': 'value'}}],
             'alert': 'email',
             'use_count_query': True,
             'doc_type': 'blsh',
             'email': 'test@test.test',
             'aggregation': {'hours': 2},
             'include': ['comparekey', '@timestamp']}
# Stand-in for parsed command-line arguments passed to load_rules().
test_args = mock.Mock()
test_args.config = 'test_config'
test_args.rule = None
def test_import_rules():
    """Dotted-path rule types and alerters must be imported by module path."""
    test_rule_copy = copy.deepcopy(test_rule)
    test_rule_copy['type'] = 'testing.test.RuleType'
    with mock.patch('elastalert.config.yaml_loader') as mock_open:
        mock_open.return_value = test_rule_copy
        # Test that type is imported
        with mock.patch('__builtin__.__import__') as mock_import:
            mock_import.return_value = elastalert.ruletypes
            load_configuration('test_config', test_config)
        assert mock_import.call_args_list[0][0][0] == 'testing.test'
        assert mock_import.call_args_list[0][0][3] == ['RuleType']
        # Test that alerts are imported
        test_rule_copy = copy.deepcopy(test_rule)
        mock_open.return_value = test_rule_copy
        test_rule_copy['alert'] = 'testing2.test2.Alerter'
        with mock.patch('__builtin__.__import__') as mock_import:
            mock_import.return_value = elastalert.alerts
            load_configuration('test_config', test_config)
        assert mock_import.call_args_list[0][0][0] == 'testing2.test2'
        assert mock_import.call_args_list[0][0][3] == ['Alerter']
def test_load_inline_alert_rule():
    """A list of alert dicts must be instantiated into one Alerter per entry."""
    test_rule_copy = copy.deepcopy(test_rule)
    test_rule_copy['alert'] = [
        {
            'email': {
                'email': 'foo@bar.baz'
            }
        },
        {
            'email': {
                'email': 'baz@foo.bar'
            }
        }
    ]
    test_config_copy = copy.deepcopy(test_config)
    with mock.patch('elastalert.config.yaml_loader') as mock_open:
        mock_open.side_effect = [test_config_copy, test_rule_copy]
        load_modules(test_rule_copy)
    assert isinstance(test_rule_copy['alert'][0], elastalert.alerts.EmailAlerter)
    assert isinstance(test_rule_copy['alert'][1], elastalert.alerts.EmailAlerter)
    assert 'foo@bar.baz' in test_rule_copy['alert'][0].rule['email']
    assert 'baz@foo.bar' in test_rule_copy['alert'][1].rule['email']
def test_load_rules():
    """load_rules() must materialize types/alerters/timedeltas and dedupe includes."""
    test_rule_copy = copy.deepcopy(test_rule)
    test_config_copy = copy.deepcopy(test_config)
    with mock.patch('elastalert.config.yaml_loader') as mock_open:
        mock_open.side_effect = [test_config_copy, test_rule_copy]
        with mock.patch('os.listdir') as mock_ls:
            mock_ls.return_value = ['testrule.yaml']
            rules = load_rules(test_args)
            assert isinstance(rules['rules'][0]['type'], elastalert.ruletypes.RuleType)
            assert isinstance(rules['rules'][0]['alert'][0], elastalert.alerts.Alerter)
            assert isinstance(rules['rules'][0]['timeframe'], datetime.timedelta)
            assert isinstance(rules['run_every'], datetime.timedelta)
            for included_key in ['comparekey', 'testkey', '@timestamp']:
                assert included_key in rules['rules'][0]['include']
            # Assert include doesn't contain duplicates
            assert rules['rules'][0]['include'].count('@timestamp') == 1
            assert rules['rules'][0]['include'].count('comparekey') == 1
def test_load_default_host_port():
    """Rules missing es_host/es_port must inherit them from the global config."""
    test_rule_copy = copy.deepcopy(test_rule)
    test_rule_copy.pop('es_host')
    test_rule_copy.pop('es_port')
    test_config_copy = copy.deepcopy(test_config)
    with mock.patch('elastalert.config.yaml_loader') as mock_open:
        mock_open.side_effect = [test_config_copy, test_rule_copy]
        with mock.patch('os.listdir') as mock_ls:
            mock_ls.return_value = ['testrule.yaml']
            rules = load_rules(test_args)
            # Defaults come from test_config above.
            assert rules['es_port'] == 12345
            assert rules['es_host'] == 'elasticsearch.test'
def test_compound_query_key():
    """A list-valued query_key must be flattened and mirrored in include."""
    test_rule_copy = copy.deepcopy(test_rule)
    test_rule_copy.pop('use_count_query')
    test_rule_copy['query_key'] = ['field1', 'field2']
    load_options(test_rule_copy, test_config)
    assert 'field1' in test_rule_copy['include']
    assert 'field2' in test_rule_copy['include']
    assert test_rule_copy['query_key'] == 'field1,field2'
    assert test_rule_copy['compound_query_key'] == ['field1', 'field2']
def test_raises_on_missing_config():
    """Removing any required rule key must make load_rules raise EAException."""
    optional_keys = ('aggregation', 'use_count_query', 'query_key', 'compare_key', 'filter', 'include', 'es_host', 'es_port')
    test_rule_copy = copy.deepcopy(test_rule)
    # Iterate the keys of the first copy; each pass pops one key from a
    # fresh copy so the other keys remain intact.
    for key in test_rule_copy.keys():
        test_rule_copy = copy.deepcopy(test_rule)
        test_config_copy = copy.deepcopy(test_config)
        test_rule_copy.pop(key)
        # Non required keys
        if key in optional_keys:
            continue
        with mock.patch('elastalert.config.yaml_loader') as mock_open:
            mock_open.side_effect = [test_config_copy, test_rule_copy]
            with mock.patch('os.listdir') as mock_ls:
                mock_ls.return_value = ['testrule.yaml']
                with pytest.raises(EAException):
                    load_rules(test_args)
def test_raises_on_bad_generate_kibana_filters():
    """Kibana link generation only supports a subset of filter types.

    Each "good" filter must load cleanly on its own, and loading must fail
    with EAException once any unsupported "bad" filter is appended.
    """
    bad_filters = [[{'not': {'terms': {'blah': 'blah'}}}],
                   [{'terms': {'blah': 'blah'}}],
                   [{'query': {'not_querystring': 'this:that'}}],
                   [{'query': {'wildcard': 'this*that'}}],
                   [{'blah': 'blah'}]]
    good_filters = [[{'term': {'field': 'value'}}],
                    [{'not': {'term': {'this': 'that'}}}],
                    [{'not': {'query': {'query_string': {'query': 'this:that'}}}}],
                    [{'query': {'query_string': {'query': 'this:that'}}}],
                    [{'range': {'blah': {'from': 'a', 'to': 'b'}}}],
                    [{'not': {'range': {'blah': {'from': 'a', 'to': 'b'}}}}]]
    # Test that all the good filters work, but fail with a bad filter added
    for good in good_filters:
        test_rule_copy = copy.deepcopy(test_rule)
        # BUG FIX: previously the shared module-level test_rule fixture was
        # mutated (test_rule['generate_kibana_link'] = True), leaking the
        # flag into every other test. Set it on the per-test copy instead.
        test_rule_copy['generate_kibana_link'] = True
        test_rule_copy['filter'] = good
        with mock.patch('elastalert.config.yaml_loader') as mock_open:
            mock_open.return_value = test_rule_copy
            load_configuration('blah', test_config)
            for bad in bad_filters:
                test_rule_copy['filter'] = good + bad
                with pytest.raises(EAException):
                    load_configuration('blah', test_config)
def test_get_file_paths():
    """get_file_paths walks rules_folder recursively when
    scan_subdirectories is enabled, returning every yaml path found."""
    conf = {'scan_subdirectories': True, 'rules_folder': 'root'}
    walk_paths = (('root', ('folder_a', 'folder_b'), ('rule.yaml',)),
                  ('root/folder_a', (), ('a.yaml', 'ab.yaml')),
                  ('root/folder_b', (), ('b.yaml',)))
    with mock.patch('os.walk') as mock_walk:
        mock_walk.return_value = walk_paths
        paths = get_file_paths(conf)
    expected = ['root/rule.yaml',
                'root/folder_a/a.yaml',
                'root/folder_a/ab.yaml',
                'root/folder_b/b.yaml']
    for path in expected:
        assert path in paths
    assert len(paths) == len(expected)
|
amoennin/elastalert
|
tests/config_test.py
|
Python
|
apache-2.0
| 8,299
|
"""
WSGI config for superlists project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "superlists.settings")
application = get_wsgi_application()
|
xueyaodeai/DjangoWebsite
|
superlists/wsgi.py
|
Python
|
mit
| 398
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from distutils.command.build import build
from setuptools import setup, find_packages
# Directory containing this setup.py; used to locate files in the source tree.
base_dir = os.path.dirname(__file__)
# Execute cryptography/__about__.py to collect project metadata (__title__,
# __version__, ...) without importing the package itself, which would
# require its dependencies to already be installed.
about = {}
with open(os.path.join(base_dir, "cryptography", "__about__.py")) as f:
    exec(f.read(), about)
# Runtime dependencies; cffi is also needed at build time to compile the
# OpenSSL bindings (see cffi_build below).
CFFI_DEPENDENCY = "cffi>=0.6"
SIX_DEPENDENCY = "six>=1.4.1"
requirements = [
    CFFI_DEPENDENCY,
    SIX_DEPENDENCY
]
class cffi_build(build):
    """
    This class exists, instead of just providing ``ext_modules=[...]`` directly
    in ``setup()`` because importing cryptography requires we have several
    packages installed first.
    By doing the imports here we ensure that packages listed in
    ``setup_requires`` are already installed.
    """
    def finalize_options(self):
        # Imported lazily: these modules need cffi/six, which setuptools
        # installs (via setup_requires) before the "build" command runs.
        from cryptography.hazmat.bindings.openssl.binding import Binding
        from cryptography.hazmat.primitives import constant_time, padding
        # Register the cffi-generated C extensions so the standard build
        # machinery compiles them alongside the pure-Python package.
        self.distribution.ext_modules = [
            Binding().ffi.verifier.get_extension(),
            constant_time._ffi.verifier.get_extension(),
            padding._ffi.verifier.get_extension()
        ]
        build.finalize_options(self)
# The long description shown on PyPI comes straight from the README.
with open(os.path.join(base_dir, "README.rst")) as f:
    long_description = f.read()
setup(
    name=about["__title__"],
    version=about["__version__"],
    description=about["__summary__"],
    long_description=long_description,
    license=about["__license__"],
    url=about["__uri__"],
    author=about["__author__"],
    author_email=about["__email__"],
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX",
        "Operating System :: POSIX :: BSD",
        "Operating System :: POSIX :: Linux",
        "Operating System :: Microsoft :: Windows",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Security :: Cryptography",
    ],
    packages=find_packages(exclude=["tests", "tests.*"]),
    install_requires=requirements,
    # cffi/six must also be available while building (see cffi_build).
    setup_requires=requirements,
    # for cffi
    zip_safe=False,
    ext_package="cryptography",
    # Hook the custom build step that registers the cffi extensions.
    cmdclass={
        "build": cffi_build,
    }
)
|
glyph/cryptography
|
setup.py
|
Python
|
apache-2.0
| 3,258
|
import wx
import wx.html
from ..utils.generic_class import GenericClass
from ..utils.constants import control, dtype
from ..utils.validator import CharValidator
import pkg_resources as p
class NonLinearTimeSeriesAnalysis(wx.html.HtmlWindow):
    """HTML help-page pane for the non-linear time series analysis tab.

    Currently created empty: the online/offline page loading below is
    commented out.
    """

    def __init__(self, parent, counter=0):
        # BUG FIX: removed the runtime `from urllib2 import urlopen`, which
        # was Python-2-only and only referenced by the commented-out loading
        # code below; it served no purpose at runtime.
        wx.html.HtmlWindow.__init__(self, parent, style=wx.html.HW_SCROLLBAR_AUTO)
        self.SetStandardFonts()
        # Page index used by the surrounding notebook/wizard.
        self.counter = counter
        # NOTE(review): intended behavior (disabled) — load the online docs
        # page, falling back to a bundled local copy:
        # try:
        #     code = urlopen("http://fcp-indi.github.io/docs/user/nuisance.html").code
        #     if (code / 100 < 4):
        #         self.LoadPage('http://fcp-indi.github.io/docs/user/nuisance.html')
        #     else:
        #         self.LoadFile('html/nuisance.html')
        # except:
        #     self.LoadFile('html/nuisance.html')

    def get_counter(self):
        """Return this page's index in the parent container."""
        return self.counter
class Preanalysis(wx.ScrolledWindow):
    """Configuration page for the non-linear time series analysis options.

    Builds a GenericClass form with four checklist/choice controls whose
    `name=` keys (run_nltsa, measures_pre, measures_IT, measures_SFD,
    output_options_pre) are consumed by the pipeline configuration.
    """

    def __init__(self, parent, counter=0):
        wx.ScrolledWindow.__init__(self, parent)
        # Page index used by the surrounding notebook/wizard.
        self.counter = counter
        self.page = GenericClass(self, "NonLinearTimeSeriesAnalysis")
        self.page.add(label="Run NonLinearTimeSeriesAnalysis",
                      control=control.CHOICE_BOX,
                      name='run_nltsa',
                      type=dtype.LSTR,
                      comment="Run NonLinearTimeSeriesAnalysis",
                      values=["Off", "On"],
                      wkf_switch=True)
        # NOTE(review): the option value 'Phase Syncrhonization Index' keeps
        # its existing (misspelled) form because these strings may be matched
        # verbatim elsewhere in the pipeline — confirm before correcting.
        self.page.add(label="Preanalysis Measures",
                      control=control.CHECKLIST_BOX,
                      name="measures_pre",
                      type=dtype.LBOOL,
                      values=['Correlation', 'Partial Correlation', 'Phase Syncrhonization Index', 'Phase Locking Value'],
                      # BUG FIX: the legend previously described corr/pcorr
                      # as Entropy/Conditional Entropy (copy-paste from the
                      # IT section below).
                      comment="Select which preanalysis measures to apply:\n"
                              "corr = Correlation\n"
                              "pcorr = Partial Correlation\n"
                              "PSI = Phase Syncrhonization Index\n"
                              "PLV = Phase Locking Value\n",
                      size=(300, 120),
                      combo_type=1)
        self.page.add(label="IT Measures",
                      control=control.CHECKLIST_BOX,
                      name="measures_IT",
                      type=dtype.LBOOL,
                      values=['Entropy', 'Conditional Entropy', 'Mutual Information', 'Transfer Entropy', 'Entropy Correlation Coefficient'],
                      comment="Select which IT measures to apply:\n"
                              "ent = Entropy\n"
                              "condent = Conditional Entropy\n"
                              "mi = Mutual Information\n"
                              "te = Transfer Entropy\n"
                              "ecc = Entropy Correlation Coefficient\n",
                      size=(300, 120),
                      combo_type=1)
        self.page.add(label="SFD Measures:",
                      control=control.CHECKLIST_BOX,
                      name="measures_SFD",
                      type=dtype.LBOOL,
                      values=['DFA', 'Fractality', 'Avalanches'],
                      # BUG FIX: legend header previously said "IT measures".
                      comment="Select which SFD measures to apply:\n"
                              "dfa = Detrended Fluctuation Analysis\n"
                              "fractal = Fractality\n"
                              "aval = Avalanches\n",
                      size=(300, 120),
                      combo_type=1)
        self.page.add(label="Output Options ",
                      control=control.CHECKLIST_BOX,
                      name="output_options_pre",
                      type=dtype.LBOOL,
                      values=['CSV', 'NUMPY'],
                      comment="By default, results are written as NIFTI files. Additional output formats are as a .csv spreadsheet or a Numpy array.")
        self.page.set_sizer()
        parent.get_page_list().append(self)

    def get_counter(self):
        """Return this page's index in the parent container."""
        return self.counter
|
roijo/C-PAC_complexitytools
|
CPAC/GUI/interface/pages/nonlinearts.py
|
Python
|
bsd-3-clause
| 5,523
|
__author__ = 'antonioirizar'
from boto3 import resource
from boto3.exceptions import ResourceNotExistsError
from boto3.session import Session
from botonio.create_service_aws import Instance
class User:
    """Conversational state machine for one Telegram user configuring AWS.

    `_state` tracks the dialogue step:
      0 greeting, 1 awaiting account id, 2 awaiting access key,
      3 awaiting secret key, 4 awaiting service name,
      5 awaiting instance size, 6 awaiting instance count.
    """

    def __init__(self, name, user_id):
        self.name = name
        self.user_id = user_id
        self._state = 0
        self.account = None        # AWS account id (12-digit int) once provided
        self._credentials = {}     # aws_access_key_id / aws_secret_access_key
        self._client = None        # boto3 ec2 resource once credentials are set
        self._instance = None      # pending Instance description

    def process_message(self, message):
        """Advance the dialogue with the user's next message; return the reply."""
        if self._state == 0:
            self._state = 1
            return 'Hola %s soy botonio, tu asistente para AWS o tu peor pesadilla!\n Antes de empezar tenemos que configurar algunas cositas, cual es el id de tu cuenta de AWS.' % self.name
        if self._state == 1:
            try:
                # AWS account ids are exactly 12 digits.
                if len(str(message)) != 12:
                    raise ValueError
                self.account = int(message)
                self._state = 2
                return 'Bien ya tenemos tu numero de cuenta, prosigamos, dame tu access key'
            except ValueError:
                return 'Eso no es un numero de cuenta valido!! Vuelve a intentarlo, paquete.'
        if self._state == 2:
            self._credentials['aws_access_key_id'] = message
            self._state = 3
            return 'Y por ultimo dame tu secret key'
        if self._state == 3:
            self._credentials['aws_secret_access_key'] = message
            self._state = 4
            return 'Bien ya tengo todo para minear bitcoins gratis.\n Bueno que servicio quieres usar de AWS?'
        if self._state == 4:
            try:
                service = message.lower()
                # Probing boto3 validates the service name for us.
                resource(service)
            except ResourceNotExistsError as e:
                str_excp = str(e)
                services = str_excp.split('The available resources are:')[1]
                return 'Vamos a calmarnos. Eso no existe en AWS que yo sepa. Servicios Existentes:%s ' % services
            if service != 'ec2':
                return 'Bueno vale ... Soy un poco inutil y no se usar ese servicios. Hazlo tu con boto, Vago! Hasta nunca.'
            session_boto3 = Session(**self._credentials)
            self._client = session_boto3.resource('ec2', region_name='eu-west-1')
            self._state = 5
            return 'Muy bien quieres una maquina de amazon, pero antes de continuar, necesito saber el tamaño'
        if self._state == 5:
            str_lower = message.lower()
            # BUG FIX: str.find() returns -1 (truthy) when absent and 0
            # (falsy) at position 0, so `if str_lower.find('grande'):` was
            # effectively inverted/broken; use substring membership instead.
            # NOTE(review): 'grande' maps to t2.micro and 'pequeña' to
            # t2.nano — sizes look swapped relative to the replies; confirm
            # intent before changing.
            if 'grande' in str_lower:
                self._instance = Instance('t2.micro')
                self._state = 6
                return 'Cutre! No conozco a nadie tan rata como tu pero bueno.\n ¿Cuantas maquinas quieres?', 1
            elif 'pequeña' in str_lower:
                self._instance = Instance('t2.nano')
                self._state = 6
                return 'Animal! Estas seguro de que necesitas esa maquina. Aunque amazon estara encantado.\n¿Cuantas maquinas quieres?', 1
            else:
                return 'Pero que quieres decir aprende hablar!'
        if self._state == 6:
            try:
                if int(message) > 5:
                    return "Eso son demsaidas maquinas para arrancar!!!!!! pone menos."
                self._instance.number_instances = int(message)
                self._client.create_instances(**self._instance.configure())
                self._state = 4
                return 'Arrancando!!!!!\n ¿Que servicio quieres usar de AWS?'
            except ValueError:
                # BUG FIX: int() raises ValueError, not KeyError, on
                # non-numeric input; the original `except KeyError` never
                # fired, so bad input crashed the bot.
                return 'Pon numeros... cansino!'
|
antonioIrizar/telegram-bot-AWS
|
botonio/user.py
|
Python
|
gpl-3.0
| 3,555
|
class GameStatus:
    # Return codes the game loop hands back to its launcher.
    Error = -1
    Quit = 0
    LaunchMapbuilder = 1
    LoadMap = 2
def main(world):
player = world.player
while(True):
print
print
print player.currentArea.entranceText
print
print 'The area contains:'
for feature in player.currentArea.features:
print feature.description
print 'What do you want to do?'
actionlist = []
for feature in player.currentArea.features:
for action in feature.actions:
print '{0}. {1}'.format(len(actionlist)+1, action.description)
actionlist.append(action)
try:
choice = ''
while choice == '':
choice = raw_input('> ')
except KeyboardInterrupt, EOFError:
return GameStatus.Quit
print
if choice.lower() in ['quit', 'q']:
return GameStatus.Quit
else:
status = actionlist[int(choice)-1].trigger(player)
if status is not None:
return status
if __name__ == '__main__':
    # NOTE(review): main() requires a `world` argument but is called with
    # none here, so running this module directly raises TypeError. The real
    # entry point presumably builds the world first — confirm and fix.
    main()
|
stevenvergenz/pyVenture
|
pyVenture/game/main.py
|
Python
|
gpl-3.0
| 903
|
import seaborn as sns
import matplotlib.pyplot as plt
def neg_plot(data):
    """Bubble scatter of reach vs. influence, bubble area scaled by negativity.

    Assumes ``data`` is a pandas DataFrame with columns ``neg``,
    ``egg_account``, ``influence_score`` and ``reach_score`` (confirm with
    callers).
    """
    sns.set(style="whitegrid")
    sns.set_context("notebook", font_scale=3, rc={"lines.linewidth": 0.3})
    sns.set_color_codes("bright")
    # Bubble area grows super-linearly with negativity so differences stand
    # out. BUG FIX: removed a dead linear scaling (`temp`/first `size`
    # assignment) that was immediately overwritten by this expression.
    size = 10000 * (data.neg ** 1.9)
    g = sns.PairGrid(data, hue="egg_account", palette="Reds", y_vars="influence_score", x_vars="reach_score", size=12, aspect=3)
    g.map(plt.scatter, s=size)
    # symlog keeps the zero boundary visible while compressing the long tail.
    g.set(xscale="symlog")
    g.add_legend(title="Egg Account", label_order=['True', 'False'], bbox_to_anchor=(0.9, 0.55), fontsize=38,
                 prop={'weight': 'roman', 'size': 'small'})
    plt.title('Are negative tweets amplified by influence and reach?', fontsize=48, y=1.12, color="gray")
    plt.suptitle('bigger bubble = more negative tweet', verticalalignment='top', fontsize=38, y=1.01, color="gray")
    plt.xlabel('more reach - >', fontsize=38, labelpad=30, color="gray")
    plt.ylabel('higher influence - >', fontsize=38, labelpad=30, color="gray")
    plt.axhline(linewidth=2.5, color="black")
    plt.axvline(linewidth=2.5, color="black")
    plt.ylim(0,)
    plt.xlim(0,)
    plt.tick_params(axis='both', which='major', pad=25)
|
S0MEC0DE/v1
|
somecode/neg_plot.py
|
Python
|
mit
| 1,247
|
# -*- coding: utf-8 -*-
import wx
import random
from pagina import pagina
class pagina12 ( pagina ) :
    # Card-guessing game page. The user touches up to four tabs ("linguette"),
    # each contributing one bit; after a timeout the card image whose id is
    # that 4-bit pattern is shown, and the user answers yes/no to whether the
    # guess was right. Timer/state ordering is delicate — do not reorder.
    def __init__ ( self , parent , grandParent , index ) :
        pagina.__init__ ( self, parent , grandParent , index )
        self.caricaImmagineDiSfondo ("12_Gioco_carte/Forma schermo.png")
        # One card image per 4-bit touch combination; the image id equals the
        # binary pattern of touched tabs.
        self.caricaImmagine ("12_Gioco_carte/carte_prosp/0001.png",0b0001,1150,800)
        self.caricaImmagine ("12_Gioco_carte/carte_prosp/0010.png",0b0010,1150,800)
        self.caricaImmagine ("12_Gioco_carte/carte_prosp/0011.png",0b0011,1150,800)
        self.caricaImmagine ("12_Gioco_carte/carte_prosp/0100.png",0b0100,1150,800)
        self.caricaImmagine ("12_Gioco_carte/carte_prosp/0101.png",0b0101,1150,800)
        self.caricaImmagine ("12_Gioco_carte/carte_prosp/0110.png",0b0110,1150,800)
        self.caricaImmagine ("12_Gioco_carte/carte_prosp/0111.png",0b0111,1150,800)
        self.caricaImmagine ("12_Gioco_carte/carte_prosp/1000.png",0b1000,1150,800)
        self.caricaImmagine ("12_Gioco_carte/carte_prosp/1001.png",0b1001,1150,800)
        self.caricaImmagine ("12_Gioco_carte/carte_prosp/1010.png",0b1010,1150,800)
        self.caricaImmagine ("12_Gioco_carte/carte_prosp/1011.png",0b1011,1150,800)
        self.caricaImmagine ("12_Gioco_carte/carte_prosp/1100.png",0b1100,1150,800)
        self.caricaImmagine ("12_Gioco_carte/carte_prosp/1101.png",0b1101,1150,800)
        self.caricaImmagine ("12_Gioco_carte/carte_prosp/1110.png",0b1110,1150,800)
        self.caricaImmagine ("12_Gioco_carte/carte_prosp/1111.png",0b1111,1150,800)
        # Overlay images: 20 = "your card", 21 = "you don't have one".
        self.caricaImmagine ("12_Gioco_carte/carte_prosp/la_tua_carta.png",20,1080,700)
        self.caricaImmagine ("12_Gioco_carte/carte_prosp/non_hai.png",21,950,750)
        # Bits collected from the touched tabs (index 0 is the high bit).
        self.puntiToccatiSalva = [0,0,0,0]
        # Delay in ms before showing the guessed card / auto-resetting.
        self.time = 5000;
        self.bPuntiToccati = False
        # 0 = collecting touches, 1 = card shown, awaiting yes/no answer.
        self.stato = 0
        # Touch-point indices of the four tabs and of the yes/no answers.
        self.nliguetta1 = 17
        self.nliguetta2 = 14
        self.nliguetta3 = 11
        self.nliguetta4 = 8
        self.indovinatoSi = 3
        self.indovinatoNo = 5
    def controllo (self, event) :
        # Timer callback: assemble the 4-bit pattern from the saved touches
        # and display the corresponding card plus the "your card" overlay.
        n= self.puntiToccatiSalva[3]+ (self.puntiToccatiSalva[2]<<1) + (self.puntiToccatiSalva[1]<<2) + (self.puntiToccatiSalva[0]<<3)
        self.mostraImmagine(20,10000)
        self.mostraImmagine(n,10000)
        self.puntiToccatiSalva = [0,0,0,0]
        self.stato = 1
        # One-shot timer: auto-reset the page if the user does not answer.
        self.temporizzatore = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.resetta, self.temporizzatore)
        self.temporizzatore.Start( self.time, wx.TIMER_ONE_SHOT)
    def routineVisibile (self, puntiToccati) :
        # Called with the current touch-point states while the page is shown.
        if self.stato == 0 :
            # Collecting phase: record which tab (if any) is being touched.
            self.bPuntiToccati = False
            if puntiToccati[self.nliguetta1] == 1 :
                self.puntiToccatiSalva[0] = 1
                self.bPuntiToccati = True
            elif puntiToccati[self.nliguetta2] == 1 :
                self.puntiToccatiSalva[1] = 1
                self.bPuntiToccati = True
            elif puntiToccati[self.nliguetta3] == 1 :
                self.puntiToccatiSalva[2] = 1
                self.bPuntiToccati = True
            elif puntiToccati[self.nliguetta4] == 1 :
                self.puntiToccatiSalva[3] = 1
                self.bPuntiToccati = True
            if self.bPuntiToccati :
                # A tab was touched: hide the "no card" overlay and (re)arm
                # the one-shot timer that will reveal the guessed card.
                self.nascondiImmagine(21)
                self.temporizzatore = wx.Timer(self)
                self.Bind(wx.EVT_TIMER, self.controllo, self.temporizzatore)
                self.temporizzatore.Start( self.time, wx.TIMER_ONE_SHOT)
        elif self.stato == 1 :
            # Answer phase: either answer resets the page; a "no" also shows
            # the "you don't have one" overlay afterwards.
            if puntiToccati[self.indovinatoSi] == 1 :
                self.resetta()
            elif puntiToccati[self.indovinatoNo] == 1 :
                self.resetta()
                self.mostraImmagine(21,10000)
    def resetta (self, event=None) :
        # Return to the collecting phase: stop any pending timer, hide all
        # images. Also used directly (event=None) from routineVisibile.
        self.stato = 0
        self.temporizzatore.Stop()
        self.nascondiTutte()
    def routineInvisibile (self) :
        # Called while the page is not visible; nothing to do beyond tracing.
        print ("Routine invisibile -- 0")
    def hide (self):
        # Hide every image when the page is dismissed.
        self.nascondiTutte()
|
melfnt/librotecnologico
|
pagina/pagina12.py
|
Python
|
gpl-3.0
| 3,551
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides sx typing classes.
"""
from logging import getLogger
from suds import *
from suds.mx import *
from suds.sax import Namespace as NS
from suds.sax.text import Text
log = getLogger(__name__)
class Typer:
    """
    Provides XML node typing as either automatic or manual.
    @cvar types: A dict of class to xs type mapping.
    @type types: dict
    """
    # NOTE: after the Python 3 port the original literal contained duplicate
    # int and str keys (formerly int/long and str/unicode); only the last
    # occurrence of each was effective. The mapping below collapses the
    # duplicates to those effective values, preserving observed behavior
    # (int -> xs:long, str -> xs:string).
    types = {
        int : ('long', NS.xsdns),
        float : ('float', NS.xsdns),
        str : ('string', NS.xsdns),
        Text : ('string', NS.xsdns),
        bool : ('boolean', NS.xsdns),
    }
    @classmethod
    def auto(cls, node, value=None):
        """
        Automatically set the node's xsi:type attribute based on either I{value}'s
        class or the class of the node's text. When I{value} is an unmapped class,
        the default type (xs:any) is set.
        @param node: An XML node
        @type node: L{sax.element.Element}
        @param value: An object that is or would be the node's text.
        @type value: I{any}
        @return: The specified node.
        @rtype: L{sax.element.Element}
        """
        if value is None:
            value = node.getText()
        if isinstance(value, Object):
            known = cls.known(value)
            if known.name is None:
                return node
            tm = (known.name, known.namespace())
        else:
            # Unmapped classes fall back to the xs:string entry.
            tm = cls.types.get(value.__class__, cls.types.get(str))
        cls.manual(node, *tm)
        return node
    @classmethod
    def manual(cls, node, tval, ns=None):
        """
        Set the node's xsi:type attribute based on either I{value}'s
        class or the class of the node's text. Then adds the referenced
        prefix(s) to the node's prefix mapping.
        @param node: An XML node
        @type node: L{sax.element.Element}
        @param tval: The name of the schema type.
        @type tval: str
        @param ns: The XML namespace of I{tval}.
        @type ns: (prefix, uri)
        @return: The specified node.
        @rtype: L{sax.element.Element}
        """
        xta = ':'.join((NS.xsins[0], 'type'))
        node.addPrefix(NS.xsins[0], NS.xsins[1])
        if ns is None:
            node.set(xta, tval)
        else:
            ns = cls.genprefix(node, ns)
            qname = ':'.join((ns[0], tval))
            node.set(xta, qname)
            node.addPrefix(ns[0], ns[1])
        return node
    @classmethod
    def genprefix(cls, node, ns):
        """
        Generate a prefix.
        @param node: An XML node on which the prefix will be used.
        @type node: L{sax.element.Element}
        @param ns: A namespace needing an unique prefix.
        @type ns: (prefix, uri)
        @return: The I{ns} with a new prefix.
        """
        for n in range(1, 1024):
            p = 'ns%d' % n
            u = node.resolvePrefix(p, default=None)
            if u is None or u == ns[1]:
                return (p, ns[1])
        raise Exception('auto prefix, exhausted')
    @classmethod
    def known(cls, object):
        """
        Return the sx type metadata attached to I{object}, or None when the
        object carries no such metadata.
        """
        try:
            md = object.__metadata__
            return md.sxtype
        except AttributeError:
            # BUG FIX: was a bare `except: pass`, which silently swallowed
            # every exception type; only the missing-attribute case is the
            # expected "no metadata" signal. Return None explicitly.
            return None
|
obsoleter/suds
|
suds/mx/typer.py
|
Python
|
lgpl-3.0
| 4,234
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_core_dependencies.py.template`!!!
CORE_SOURCE_FILES = [
'src/core/ext/filters/census/grpc_context.cc',
'src/core/ext/filters/client_channel/backend_metric.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_channelz.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
'src/core/ext/filters/client_channel/client_channel_plugin.cc',
'src/core/ext/filters/client_channel/config_selector.cc',
'src/core/ext/filters/client_channel/dynamic_filters.cc',
'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
'src/core/ext/filters/client_channel/health/health_check_client.cc',
'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
'src/core/ext/filters/client_channel/http_proxy.cc',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc',
'src/core/ext/filters/client_channel/lb_policy/rls/rls.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver/binder/binder_resolver.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_event_engine.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_event_engine.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.cc',
'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc',
'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
'src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/retry_filter.cc',
'src/core/ext/filters/client_channel/retry_service_config.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/server_address.cc',
'src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc',
'src/core/ext/filters/client_channel/subchannel.cc',
'src/core/ext/filters/client_channel/subchannel_pool_interface.cc',
'src/core/ext/filters/client_idle/client_idle_filter.cc',
'src/core/ext/filters/client_idle/idle_filter_state.cc',
'src/core/ext/filters/deadline/deadline_filter.cc',
'src/core/ext/filters/fault_injection/fault_injection_filter.cc',
'src/core/ext/filters/fault_injection/service_config_parser.cc',
'src/core/ext/filters/http/client/http_client_filter.cc',
'src/core/ext/filters/http/client_authority_filter.cc',
'src/core/ext/filters/http/http_filters_plugin.cc',
'src/core/ext/filters/http/message_compress/message_compress_filter.cc',
'src/core/ext/filters/http/message_compress/message_decompress_filter.cc',
'src/core/ext/filters/http/server/http_server_filter.cc',
'src/core/ext/filters/max_age/max_age_filter.cc',
'src/core/ext/filters/message_size/message_size_filter.cc',
'src/core/ext/service_config/service_config.cc',
'src/core/ext/service_config/service_config_parser.cc',
'src/core/ext/transport/chttp2/alpn/alpn.cc',
'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
'src/core/ext/transport/chttp2/client/insecure/channel_create.cc',
'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc',
'src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc',
'src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc',
'src/core/ext/transport/chttp2/transport/bin_decoder.cc',
'src/core/ext/transport/chttp2/transport/bin_encoder.cc',
'src/core/ext/transport/chttp2/transport/chttp2_plugin.cc',
'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
'src/core/ext/transport/chttp2/transport/context_list.cc',
'src/core/ext/transport/chttp2/transport/flow_control.cc',
'src/core/ext/transport/chttp2/transport/frame_data.cc',
'src/core/ext/transport/chttp2/transport/frame_goaway.cc',
'src/core/ext/transport/chttp2/transport/frame_ping.cc',
'src/core/ext/transport/chttp2/transport/frame_rst_stream.cc',
'src/core/ext/transport/chttp2/transport/frame_settings.cc',
'src/core/ext/transport/chttp2/transport/frame_window_update.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder_table.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser_table.cc',
'src/core/ext/transport/chttp2/transport/hpack_utils.cc',
'src/core/ext/transport/chttp2/transport/http2_settings.cc',
'src/core/ext/transport/chttp2/transport/huffsyms.cc',
'src/core/ext/transport/chttp2/transport/parsing.cc',
'src/core/ext/transport/chttp2/transport/stream_lists.cc',
'src/core/ext/transport/chttp2/transport/stream_map.cc',
'src/core/ext/transport/chttp2/transport/varint.cc',
'src/core/ext/transport/chttp2/transport/writing.cc',
'src/core/ext/transport/inproc/inproc_plugin.cc',
'src/core/ext/transport/inproc/inproc_transport.cc',
'src/core/ext/upb-generated/envoy/admin/v3/config_dump.upb.c',
'src/core/ext/upb-generated/envoy/annotations/deprecation.upb.c',
'src/core/ext/upb-generated/envoy/annotations/resource.upb.c',
'src/core/ext/upb-generated/envoy/config/accesslog/v3/accesslog.upb.c',
'src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/circuit_breaker.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/filter.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/outlier_detection.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/address.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/backoff.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/base.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/config_source.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/event_service_config.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/extension.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/grpc_service.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/health_check.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/http_uri.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/proxy_protocol.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/socket_option.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/substitution_format_string.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint_components.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/load_report.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/api_listener.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.c',
'src/core/ext/upb-generated/envoy/config/metrics/v3/stats.upb.c',
'src/core/ext/upb-generated/envoy/config/overload/v3/overload.upb.c',
'src/core/ext/upb-generated/envoy/config/rbac/v3/rbac.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/route.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/scoped_route.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/http_tracer.upb.c',
'src/core/ext/upb-generated/envoy/extensions/clusters/aggregate/v3/cluster.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/common/fault/v3/fault.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/cert.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/secret.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls.upb.c',
'src/core/ext/upb-generated/envoy/service/cluster/v3/cds.upb.c',
'src/core/ext/upb-generated/envoy/service/discovery/v3/ads.upb.c',
'src/core/ext/upb-generated/envoy/service/discovery/v3/discovery.upb.c',
'src/core/ext/upb-generated/envoy/service/endpoint/v3/eds.upb.c',
'src/core/ext/upb-generated/envoy/service/listener/v3/lds.upb.c',
'src/core/ext/upb-generated/envoy/service/load_stats/v3/lrs.upb.c',
'src/core/ext/upb-generated/envoy/service/route/v3/rds.upb.c',
'src/core/ext/upb-generated/envoy/service/route/v3/srds.upb.c',
'src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.c',
'src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/metadata.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/node.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/number.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/path.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/regex.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/string.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/struct.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/value.upb.c',
'src/core/ext/upb-generated/envoy/type/metadata/v3/metadata.upb.c',
'src/core/ext/upb-generated/envoy/type/tracing/v3/custom_tag.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/http.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/percent.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/range.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/semantic_version.upb.c',
'src/core/ext/upb-generated/google/api/annotations.upb.c',
'src/core/ext/upb-generated/google/api/expr/v1alpha1/checked.upb.c',
'src/core/ext/upb-generated/google/api/expr/v1alpha1/syntax.upb.c',
'src/core/ext/upb-generated/google/api/http.upb.c',
'src/core/ext/upb-generated/google/protobuf/any.upb.c',
'src/core/ext/upb-generated/google/protobuf/descriptor.upb.c',
'src/core/ext/upb-generated/google/protobuf/duration.upb.c',
'src/core/ext/upb-generated/google/protobuf/empty.upb.c',
'src/core/ext/upb-generated/google/protobuf/struct.upb.c',
'src/core/ext/upb-generated/google/protobuf/timestamp.upb.c',
'src/core/ext/upb-generated/google/protobuf/wrappers.upb.c',
'src/core/ext/upb-generated/google/rpc/status.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/altscontext.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/handshaker.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls.upb.c',
'src/core/ext/upb-generated/udpa/annotations/migrate.upb.c',
'src/core/ext/upb-generated/udpa/annotations/security.upb.c',
'src/core/ext/upb-generated/udpa/annotations/sensitive.upb.c',
'src/core/ext/upb-generated/udpa/annotations/status.upb.c',
'src/core/ext/upb-generated/udpa/annotations/versioning.upb.c',
'src/core/ext/upb-generated/validate/validate.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/status.upb.c',
'src/core/ext/upb-generated/xds/core/v3/authority.upb.c',
'src/core/ext/upb-generated/xds/core/v3/collection_entry.upb.c',
'src/core/ext/upb-generated/xds/core/v3/context_params.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource_locator.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource_name.upb.c',
'src/core/ext/upb-generated/xds/data/orca/v3/orca_load_report.upb.c',
'src/core/ext/upb-generated/xds/type/v3/typed_struct.upb.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/config_dump.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/annotations/deprecation.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/annotations/resource.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/accesslog/v3/accesslog.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/bootstrap/v3/bootstrap.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/circuit_breaker.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/cluster.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/filter.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/outlier_detection.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/address.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/backoff.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/config_source.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/event_service_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/extension.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/grpc_service.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/health_check.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/http_uri.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/proxy_protocol.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/resolver.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/socket_option.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/substitution_format_string.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/udp_socket_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/load_report.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/api_listener.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/quic_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/udp_listener_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/metrics/v3/stats.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/overload/v3/overload.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/scoped_route.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/http_tracer.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/clusters/aggregate/v3/cluster.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/common/fault/v3/fault.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/fault/v3/fault.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/router/v3/router.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/cert.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/common.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/secret.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/cluster/v3/cds.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/discovery/v3/ads.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/discovery/v3/discovery.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/endpoint/v3/eds.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/listener/v3/lds.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/load_stats/v3/lrs.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/route/v3/rds.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/route/v3/srds.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/http/v3/path_transformation.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/metadata.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/node.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/number.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/path.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/regex.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/string.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/struct.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/value.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/metadata/v3/metadata.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/tracing/v3/custom_tag.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/http.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/percent.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/range.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/semantic_version.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/annotations.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/http.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/any.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/descriptor.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/duration.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/empty.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/struct.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/timestamp.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/wrappers.upbdefs.c',
'src/core/ext/upbdefs-generated/google/rpc/status.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/migrate.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/security.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/sensitive.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/status.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/versioning.upbdefs.c',
'src/core/ext/upbdefs-generated/validate/validate.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/status.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/authority.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/collection_entry.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/context_params.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource_locator.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource_name.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/v3/typed_struct.upbdefs.c',
'src/core/ext/xds/certificate_provider_registry.cc',
'src/core/ext/xds/certificate_provider_store.cc',
'src/core/ext/xds/file_watcher_certificate_provider_factory.cc',
'src/core/ext/xds/xds_api.cc',
'src/core/ext/xds/xds_bootstrap.cc',
'src/core/ext/xds/xds_certificate_provider.cc',
'src/core/ext/xds/xds_channel_stack_modifier.cc',
'src/core/ext/xds/xds_client.cc',
'src/core/ext/xds/xds_client_stats.cc',
'src/core/ext/xds/xds_http_fault_filter.cc',
'src/core/ext/xds/xds_http_filters.cc',
'src/core/ext/xds/xds_server_config_fetcher.cc',
'src/core/lib/address_utils/parse_address.cc',
'src/core/lib/address_utils/sockaddr_utils.cc',
'src/core/lib/avl/avl.cc',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
'src/core/lib/channel/channel_trace.cc',
'src/core/lib/channel/channelz.cc',
'src/core/lib/channel/channelz_registry.cc',
'src/core/lib/channel/connected_channel.cc',
'src/core/lib/channel/handshaker.cc',
'src/core/lib/channel/handshaker_registry.cc',
'src/core/lib/channel/status_util.cc',
'src/core/lib/compression/compression.cc',
'src/core/lib/compression/compression_args.cc',
'src/core/lib/compression/compression_internal.cc',
'src/core/lib/compression/message_compress.cc',
'src/core/lib/compression/stream_compression.cc',
'src/core/lib/compression/stream_compression_gzip.cc',
'src/core/lib/compression/stream_compression_identity.cc',
'src/core/lib/config/core_configuration.cc',
'src/core/lib/debug/stats.cc',
'src/core/lib/debug/stats_data.cc',
'src/core/lib/debug/trace.cc',
'src/core/lib/event_engine/endpoint_config.cc',
'src/core/lib/event_engine/event_engine.cc',
'src/core/lib/event_engine/sockaddr.cc',
'src/core/lib/gpr/alloc.cc',
'src/core/lib/gpr/atm.cc',
'src/core/lib/gpr/cpu_iphone.cc',
'src/core/lib/gpr/cpu_linux.cc',
'src/core/lib/gpr/cpu_posix.cc',
'src/core/lib/gpr/cpu_windows.cc',
'src/core/lib/gpr/env_linux.cc',
'src/core/lib/gpr/env_posix.cc',
'src/core/lib/gpr/env_windows.cc',
'src/core/lib/gpr/log.cc',
'src/core/lib/gpr/log_android.cc',
'src/core/lib/gpr/log_linux.cc',
'src/core/lib/gpr/log_posix.cc',
'src/core/lib/gpr/log_windows.cc',
'src/core/lib/gpr/murmur_hash.cc',
'src/core/lib/gpr/string.cc',
'src/core/lib/gpr/string_posix.cc',
'src/core/lib/gpr/string_util_windows.cc',
'src/core/lib/gpr/string_windows.cc',
'src/core/lib/gpr/sync.cc',
'src/core/lib/gpr/sync_abseil.cc',
'src/core/lib/gpr/sync_posix.cc',
'src/core/lib/gpr/sync_windows.cc',
'src/core/lib/gpr/time.cc',
'src/core/lib/gpr/time_posix.cc',
'src/core/lib/gpr/time_precise.cc',
'src/core/lib/gpr/time_windows.cc',
'src/core/lib/gpr/tmpfile_msys.cc',
'src/core/lib/gpr/tmpfile_posix.cc',
'src/core/lib/gpr/tmpfile_windows.cc',
'src/core/lib/gpr/wrap_memcpy.cc',
'src/core/lib/gprpp/arena.cc',
'src/core/lib/gprpp/examine_stack.cc',
'src/core/lib/gprpp/fork.cc',
'src/core/lib/gprpp/global_config_env.cc',
'src/core/lib/gprpp/host_port.cc',
'src/core/lib/gprpp/mpscq.cc',
'src/core/lib/gprpp/stat_posix.cc',
'src/core/lib/gprpp/stat_windows.cc',
'src/core/lib/gprpp/status_helper.cc',
'src/core/lib/gprpp/thd_posix.cc',
'src/core/lib/gprpp/thd_windows.cc',
'src/core/lib/gprpp/time_util.cc',
'src/core/lib/http/format_request.cc',
'src/core/lib/http/httpcli.cc',
'src/core/lib/http/httpcli_security_connector.cc',
'src/core/lib/http/parser.cc',
'src/core/lib/iomgr/buffer_list.cc',
'src/core/lib/iomgr/call_combiner.cc',
'src/core/lib/iomgr/cfstream_handle.cc',
'src/core/lib/iomgr/combiner.cc',
'src/core/lib/iomgr/dualstack_socket_posix.cc',
'src/core/lib/iomgr/endpoint.cc',
'src/core/lib/iomgr/endpoint_cfstream.cc',
'src/core/lib/iomgr/endpoint_pair_event_engine.cc',
'src/core/lib/iomgr/endpoint_pair_posix.cc',
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/ev_apple.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_epollex_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
'src/core/lib/iomgr/ev_posix.cc',
'src/core/lib/iomgr/ev_windows.cc',
'src/core/lib/iomgr/event_engine/closure.cc',
'src/core/lib/iomgr/event_engine/endpoint.cc',
'src/core/lib/iomgr/event_engine/iomgr.cc',
'src/core/lib/iomgr/event_engine/pollset.cc',
'src/core/lib/iomgr/event_engine/resolved_address_internal.cc',
'src/core/lib/iomgr/event_engine/resolver.cc',
'src/core/lib/iomgr/event_engine/tcp.cc',
'src/core/lib/iomgr/event_engine/timer.cc',
'src/core/lib/iomgr/exec_ctx.cc',
'src/core/lib/iomgr/executor.cc',
'src/core/lib/iomgr/executor/mpmcqueue.cc',
'src/core/lib/iomgr/executor/threadpool.cc',
'src/core/lib/iomgr/fork_posix.cc',
'src/core/lib/iomgr/fork_windows.cc',
'src/core/lib/iomgr/gethostname_fallback.cc',
'src/core/lib/iomgr/gethostname_host_name_max.cc',
'src/core/lib/iomgr/gethostname_sysconf.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_posix.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc',
'src/core/lib/iomgr/internal_errqueue.cc',
'src/core/lib/iomgr/iocp_windows.cc',
'src/core/lib/iomgr/iomgr.cc',
'src/core/lib/iomgr/iomgr_custom.cc',
'src/core/lib/iomgr/iomgr_internal.cc',
'src/core/lib/iomgr/iomgr_posix.cc',
'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
'src/core/lib/iomgr/iomgr_windows.cc',
'src/core/lib/iomgr/is_epollexclusive_available.cc',
'src/core/lib/iomgr/load_file.cc',
'src/core/lib/iomgr/lockfree_event.cc',
'src/core/lib/iomgr/polling_entity.cc',
'src/core/lib/iomgr/pollset.cc',
'src/core/lib/iomgr/pollset_custom.cc',
'src/core/lib/iomgr/pollset_set.cc',
'src/core/lib/iomgr/pollset_set_custom.cc',
'src/core/lib/iomgr/pollset_set_windows.cc',
'src/core/lib/iomgr/pollset_windows.cc',
'src/core/lib/iomgr/resolve_address.cc',
'src/core/lib/iomgr/resolve_address_custom.cc',
'src/core/lib/iomgr/resolve_address_posix.cc',
'src/core/lib/iomgr/resolve_address_windows.cc',
'src/core/lib/iomgr/resource_quota.cc',
'src/core/lib/iomgr/socket_factory_posix.cc',
'src/core/lib/iomgr/socket_mutator.cc',
'src/core/lib/iomgr/socket_utils_common_posix.cc',
'src/core/lib/iomgr/socket_utils_linux.cc',
'src/core/lib/iomgr/socket_utils_posix.cc',
'src/core/lib/iomgr/socket_utils_windows.cc',
'src/core/lib/iomgr/socket_windows.cc',
'src/core/lib/iomgr/tcp_client.cc',
'src/core/lib/iomgr/tcp_client_cfstream.cc',
'src/core/lib/iomgr/tcp_client_custom.cc',
'src/core/lib/iomgr/tcp_client_posix.cc',
'src/core/lib/iomgr/tcp_client_windows.cc',
'src/core/lib/iomgr/tcp_custom.cc',
'src/core/lib/iomgr/tcp_posix.cc',
'src/core/lib/iomgr/tcp_server.cc',
'src/core/lib/iomgr/tcp_server_custom.cc',
'src/core/lib/iomgr/tcp_server_posix.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_common.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc',
'src/core/lib/iomgr/tcp_server_windows.cc',
'src/core/lib/iomgr/tcp_windows.cc',
'src/core/lib/iomgr/time_averaged_stats.cc',
'src/core/lib/iomgr/timer.cc',
'src/core/lib/iomgr/timer_custom.cc',
'src/core/lib/iomgr/timer_generic.cc',
'src/core/lib/iomgr/timer_heap.cc',
'src/core/lib/iomgr/timer_manager.cc',
'src/core/lib/iomgr/unix_sockets_posix.cc',
'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
'src/core/lib/iomgr/wakeup_fd_pipe.cc',
'src/core/lib/iomgr/wakeup_fd_posix.cc',
'src/core/lib/iomgr/work_serializer.cc',
'src/core/lib/json/json_reader.cc',
'src/core/lib/json/json_util.cc',
'src/core/lib/json/json_writer.cc',
'src/core/lib/matchers/matchers.cc',
'src/core/lib/profiling/basic_timers.cc',
'src/core/lib/profiling/stap_timers.cc',
'src/core/lib/security/authorization/authorization_policy_provider_vtable.cc',
'src/core/lib/security/authorization/evaluate_args.cc',
'src/core/lib/security/authorization/sdk_server_authz_filter.cc',
'src/core/lib/security/context/security_context.cc',
'src/core/lib/security/credentials/alts/alts_credentials.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc',
'src/core/lib/security/credentials/composite/composite_credentials.cc',
'src/core/lib/security/credentials/credentials.cc',
'src/core/lib/security/credentials/credentials_metadata.cc',
'src/core/lib/security/credentials/external/aws_external_account_credentials.cc',
'src/core/lib/security/credentials/external/aws_request_signer.cc',
'src/core/lib/security/credentials/external/external_account_credentials.cc',
'src/core/lib/security/credentials/external/file_external_account_credentials.cc',
'src/core/lib/security/credentials/external/url_external_account_credentials.cc',
'src/core/lib/security/credentials/fake/fake_credentials.cc',
'src/core/lib/security/credentials/google_default/credentials_generic.cc',
'src/core/lib/security/credentials/google_default/google_default_credentials.cc',
'src/core/lib/security/credentials/iam/iam_credentials.cc',
'src/core/lib/security/credentials/insecure/insecure_credentials.cc',
'src/core/lib/security/credentials/jwt/json_token.cc',
'src/core/lib/security/credentials/jwt/jwt_credentials.cc',
'src/core/lib/security/credentials/jwt/jwt_verifier.cc',
'src/core/lib/security/credentials/local/local_credentials.cc',
'src/core/lib/security/credentials/oauth2/oauth2_credentials.cc',
'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
'src/core/lib/security/credentials/ssl/ssl_credentials.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc',
'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc',
'src/core/lib/security/credentials/tls/tls_credentials.cc',
'src/core/lib/security/credentials/tls/tls_utils.cc',
'src/core/lib/security/credentials/xds/xds_credentials.cc',
'src/core/lib/security/security_connector/alts/alts_security_connector.cc',
'src/core/lib/security/security_connector/fake/fake_security_connector.cc',
'src/core/lib/security/security_connector/insecure/insecure_security_connector.cc',
'src/core/lib/security/security_connector/load_system_roots_fallback.cc',
'src/core/lib/security/security_connector/load_system_roots_linux.cc',
'src/core/lib/security/security_connector/local/local_security_connector.cc',
'src/core/lib/security/security_connector/security_connector.cc',
'src/core/lib/security/security_connector/ssl/ssl_security_connector.cc',
'src/core/lib/security/security_connector/ssl_utils.cc',
'src/core/lib/security/security_connector/ssl_utils_config.cc',
'src/core/lib/security/security_connector/tls/tls_security_connector.cc',
'src/core/lib/security/transport/client_auth_filter.cc',
'src/core/lib/security/transport/secure_endpoint.cc',
'src/core/lib/security/transport/security_handshaker.cc',
'src/core/lib/security/transport/server_auth_filter.cc',
'src/core/lib/security/transport/tsi_error.cc',
'src/core/lib/security/util/json_util.cc',
'src/core/lib/slice/b64.cc',
'src/core/lib/slice/percent_encoding.cc',
'src/core/lib/slice/slice.cc',
'src/core/lib/slice/slice_api.cc',
'src/core/lib/slice/slice_buffer.cc',
'src/core/lib/slice/slice_intern.cc',
'src/core/lib/slice/slice_refcount.cc',
'src/core/lib/slice/slice_split.cc',
'src/core/lib/slice/slice_string_helpers.cc',
'src/core/lib/slice/static_slice.cc',
'src/core/lib/surface/api_trace.cc',
'src/core/lib/surface/builtins.cc',
'src/core/lib/surface/byte_buffer.cc',
'src/core/lib/surface/byte_buffer_reader.cc',
'src/core/lib/surface/call.cc',
'src/core/lib/surface/call_details.cc',
'src/core/lib/surface/call_log_batch.cc',
'src/core/lib/surface/channel.cc',
'src/core/lib/surface/channel_init.cc',
'src/core/lib/surface/channel_ping.cc',
'src/core/lib/surface/channel_stack_type.cc',
'src/core/lib/surface/completion_queue.cc',
'src/core/lib/surface/completion_queue_factory.cc',
'src/core/lib/surface/event_string.cc',
'src/core/lib/surface/init.cc',
'src/core/lib/surface/init_secure.cc',
'src/core/lib/surface/lame_client.cc',
'src/core/lib/surface/metadata_array.cc',
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/byte_stream.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
'src/core/lib/transport/metadata.cc',
'src/core/lib/transport/metadata_batch.cc',
'src/core/lib/transport/pid_controller.cc',
'src/core/lib/transport/static_metadata.cc',
'src/core/lib/transport/status_conversion.cc',
'src/core/lib/transport/status_metadata.cc',
'src/core/lib/transport/timeout_encoding.cc',
'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc',
'src/core/lib/uri/uri_parser.cc',
'src/core/plugin_registry/grpc_plugin_registry.cc',
'src/core/tsi/alts/crypt/aes_gcm.cc',
'src/core/tsi/alts/crypt/gsec.cc',
'src/core/tsi/alts/frame_protector/alts_counter.cc',
'src/core/tsi/alts/frame_protector/alts_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_frame_protector.cc',
'src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc',
'src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/frame_handler.cc',
'src/core/tsi/alts/handshaker/alts_handshaker_client.cc',
'src/core/tsi/alts/handshaker/alts_shared_resource.cc',
'src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc',
'src/core/tsi/alts/handshaker/alts_tsi_utils.cc',
'src/core/tsi/alts/handshaker/transport_security_common_api.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc',
'src/core/tsi/fake_transport_security.cc',
'src/core/tsi/local_transport_security.cc',
'src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc',
'src/core/tsi/ssl/session_cache/ssl_session_cache.cc',
'src/core/tsi/ssl/session_cache/ssl_session_openssl.cc',
'src/core/tsi/ssl_transport_security.cc',
'src/core/tsi/transport_security.cc',
'src/core/tsi/transport_security_grpc.cc',
'third_party/abseil-cpp/absl/base/internal/cycleclock.cc',
'third_party/abseil-cpp/absl/base/internal/exponential_biased.cc',
'third_party/abseil-cpp/absl/base/internal/low_level_alloc.cc',
'third_party/abseil-cpp/absl/base/internal/raw_logging.cc',
'third_party/abseil-cpp/absl/base/internal/spinlock.cc',
'third_party/abseil-cpp/absl/base/internal/spinlock_wait.cc',
'third_party/abseil-cpp/absl/base/internal/sysinfo.cc',
'third_party/abseil-cpp/absl/base/internal/thread_identity.cc',
'third_party/abseil-cpp/absl/base/internal/throw_delegate.cc',
'third_party/abseil-cpp/absl/base/internal/unscaledcycleclock.cc',
'third_party/abseil-cpp/absl/base/log_severity.cc',
'third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc',
'third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc',
'third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc',
'third_party/abseil-cpp/absl/debugging/internal/address_is_readable.cc',
'third_party/abseil-cpp/absl/debugging/internal/demangle.cc',
'third_party/abseil-cpp/absl/debugging/internal/elf_mem_image.cc',
'third_party/abseil-cpp/absl/debugging/internal/vdso_support.cc',
'third_party/abseil-cpp/absl/debugging/stacktrace.cc',
'third_party/abseil-cpp/absl/debugging/symbolize.cc',
'third_party/abseil-cpp/absl/hash/internal/city.cc',
'third_party/abseil-cpp/absl/hash/internal/hash.cc',
'third_party/abseil-cpp/absl/hash/internal/wyhash.cc',
'third_party/abseil-cpp/absl/numeric/int128.cc',
'third_party/abseil-cpp/absl/status/status.cc',
'third_party/abseil-cpp/absl/status/status_payload_printer.cc',
'third_party/abseil-cpp/absl/status/statusor.cc',
'third_party/abseil-cpp/absl/strings/ascii.cc',
'third_party/abseil-cpp/absl/strings/charconv.cc',
'third_party/abseil-cpp/absl/strings/cord.cc',
'third_party/abseil-cpp/absl/strings/escaping.cc',
'third_party/abseil-cpp/absl/strings/internal/charconv_bigint.cc',
'third_party/abseil-cpp/absl/strings/internal/charconv_parse.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_internal.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_rep_ring.cc',
'third_party/abseil-cpp/absl/strings/internal/escaping.cc',
'third_party/abseil-cpp/absl/strings/internal/memutil.cc',
'third_party/abseil-cpp/absl/strings/internal/ostringstream.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/arg.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/bind.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/extension.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/output.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/parser.cc',
'third_party/abseil-cpp/absl/strings/internal/utf8.cc',
'third_party/abseil-cpp/absl/strings/match.cc',
'third_party/abseil-cpp/absl/strings/numbers.cc',
'third_party/abseil-cpp/absl/strings/str_cat.cc',
'third_party/abseil-cpp/absl/strings/str_replace.cc',
'third_party/abseil-cpp/absl/strings/str_split.cc',
'third_party/abseil-cpp/absl/strings/string_view.cc',
'third_party/abseil-cpp/absl/strings/substitute.cc',
'third_party/abseil-cpp/absl/synchronization/barrier.cc',
'third_party/abseil-cpp/absl/synchronization/blocking_counter.cc',
'third_party/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc',
'third_party/abseil-cpp/absl/synchronization/internal/graphcycles.cc',
'third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc',
'third_party/abseil-cpp/absl/synchronization/internal/waiter.cc',
'third_party/abseil-cpp/absl/synchronization/mutex.cc',
'third_party/abseil-cpp/absl/synchronization/notification.cc',
'third_party/abseil-cpp/absl/time/civil_time.cc',
'third_party/abseil-cpp/absl/time/clock.cc',
'third_party/abseil-cpp/absl/time/duration.cc',
'third_party/abseil-cpp/absl/time/format.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/civil_time_detail.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_fixed.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_format.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_impl.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_posix.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/zone_info_source.cc',
'third_party/abseil-cpp/absl/time/time.cc',
'third_party/abseil-cpp/absl/types/bad_optional_access.cc',
'third_party/abseil-cpp/absl/types/bad_variant_access.cc',
'third_party/address_sorting/address_sorting.c',
'third_party/address_sorting/address_sorting_posix.c',
'third_party/address_sorting/address_sorting_windows.c',
'third_party/boringssl-with-bazel/err_data.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bitstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bool.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_d2i_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_dup.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_enum.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_gentm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_i2d_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_mbstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_object.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_octet.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_print.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_strex.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_strnid.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_time.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_type.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utctm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utf8.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_lib.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_par.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn_pack.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_enum.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_string.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_dec.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_enc.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_fre.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_new.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_typ.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_utl.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/time_support.c',
'third_party/boringssl-with-bazel/src/crypto/base64/base64.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio_mem.c',
'third_party/boringssl-with-bazel/src/crypto/bio/connect.c',
'third_party/boringssl-with-bazel/src/crypto/bio/fd.c',
'third_party/boringssl-with-bazel/src/crypto/bio/file.c',
'third_party/boringssl-with-bazel/src/crypto/bio/hexdump.c',
'third_party/boringssl-with-bazel/src/crypto/bio/pair.c',
'third_party/boringssl-with-bazel/src/crypto/bio/printf.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket_helper.c',
'third_party/boringssl-with-bazel/src/crypto/blake2/blake2.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/bn_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/convert.c',
'third_party/boringssl-with-bazel/src/crypto/buf/buf.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/asn1_compat.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/ber.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbb.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbs.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/unicode.c',
'third_party/boringssl-with-bazel/src/crypto/chacha/chacha.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/cipher_extra.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/derive_key.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesccm.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesctrhmac.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesgcmsiv.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_chacha20poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_null.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc2.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc4.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_tls.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/tls_cbc.c',
'third_party/boringssl-with-bazel/src/crypto/cmac/cmac.c',
'third_party/boringssl-with-bazel/src/crypto/conf/conf.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-win.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-intel.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-ppc64le.c',
'third_party/boringssl-with-bazel/src/crypto/crypto.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/curve25519.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/spake25519.c',
'third_party/boringssl-with-bazel/src/crypto/dh_extra/dh_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/dh_extra/params.c',
'third_party/boringssl-with-bazel/src/crypto/digest_extra/digest_extra.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_derive.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/hash_to_curve.c',
'third_party/boringssl-with-bazel/src/crypto/ecdh_extra/ecdh_extra.c',
'third_party/boringssl-with-bazel/src/crypto/ecdsa_extra/ecdsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/engine/engine.c',
'third_party/boringssl-with-bazel/src/crypto/err/err.c',
'third_party/boringssl-with-bazel/src/crypto/evp/digestsign.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_ctx.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/pbkdf.c',
'third_party/boringssl-with-bazel/src/crypto/evp/print.c',
'third_party/boringssl-with-bazel/src/crypto/evp/scrypt.c',
'third_party/boringssl-with-bazel/src/crypto/evp/sign.c',
'third_party/boringssl-with-bazel/src/crypto/ex_data.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/bcm.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/fips_shared_support.c',
'third_party/boringssl-with-bazel/src/crypto/hkdf/hkdf.c',
'third_party/boringssl-with-bazel/src/crypto/hpke/hpke.c',
'third_party/boringssl-with-bazel/src/crypto/hrss/hrss.c',
'third_party/boringssl-with-bazel/src/crypto/lhash/lhash.c',
'third_party/boringssl-with-bazel/src/crypto/mem.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj_xref.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_all.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_info.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_lib.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_oth.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pk8.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_xaux.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/p5_pbev2.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8_x509.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_arm.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_vec.c',
'third_party/boringssl-with-bazel/src/crypto/pool/pool.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/deterministic.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/forkunsafe.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/passive.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/rand_extra.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/windows.c',
'third_party/boringssl-with-bazel/src/crypto/rc4/rc4.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_c11.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_lock.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_print.c',
'third_party/boringssl-with-bazel/src/crypto/siphash/siphash.c',
'third_party/boringssl-with-bazel/src/crypto/stack/stack.c',
'third_party/boringssl-with-bazel/src/crypto/thread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_none.c',
'third_party/boringssl-with-bazel/src/crypto/thread_pthread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_win.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/pmbtoken.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/trust_token.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/voprf.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_digest.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_sign.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_verify.c',
'third_party/boringssl-with-bazel/src/crypto/x509/algorithm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/asn1_gen.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_dir.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_file.c',
'third_party/boringssl-with-bazel/src/crypto/x509/i2d_pr.c',
'third_party/boringssl-with-bazel/src/crypto/x509/name_print.c',
'third_party/boringssl-with-bazel/src/crypto/x509/rsa_pss.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_att.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_cmp.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_d2.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_def.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_ext.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_lu.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_obj.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_set.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_trs.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_txt.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_v3.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vfy.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vpm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509cset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509rset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_algor.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_all.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_attrib.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_exten.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pubkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_sig.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_val.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_cache.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_data.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_map.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_node.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_tree.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akeya.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_alt.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bitst.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_conf.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_cpols.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_crld.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_enum.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_extku.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_genn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ia5.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_int.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ncons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ocsp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pci.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcia.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pmaps.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_prn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_purp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_skey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_utl.c',
'third_party/boringssl-with-bazel/src/ssl/bio_ssl.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_both.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_srtp.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_record.cc',
'third_party/boringssl-with-bazel/src/ssl/encrypted_client_hello.cc',
'third_party/boringssl-with-bazel/src/ssl/extensions.cc',
'third_party/boringssl-with-bazel/src/ssl/handoff.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_client.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_server.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_both.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_aead_ctx.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_asn1.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_buffer.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cert.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cipher.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_file.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_key_share.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_privkey.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_session.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_stat.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_transcript.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_versions.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_x509.cc',
'third_party/boringssl-with-bazel/src/ssl/t1_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_both.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_client.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_server.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_record.cc',
'third_party/cares/cares/ares__close_sockets.c',
'third_party/cares/cares/ares__get_hostent.c',
'third_party/cares/cares/ares__read_line.c',
'third_party/cares/cares/ares__timeval.c',
'third_party/cares/cares/ares_cancel.c',
'third_party/cares/cares/ares_create_query.c',
'third_party/cares/cares/ares_data.c',
'third_party/cares/cares/ares_destroy.c',
'third_party/cares/cares/ares_expand_name.c',
'third_party/cares/cares/ares_expand_string.c',
'third_party/cares/cares/ares_fds.c',
'third_party/cares/cares/ares_free_hostent.c',
'third_party/cares/cares/ares_free_string.c',
'third_party/cares/cares/ares_getenv.c',
'third_party/cares/cares/ares_gethostbyaddr.c',
'third_party/cares/cares/ares_gethostbyname.c',
'third_party/cares/cares/ares_getnameinfo.c',
'third_party/cares/cares/ares_getopt.c',
'third_party/cares/cares/ares_getsock.c',
'third_party/cares/cares/ares_init.c',
'third_party/cares/cares/ares_library_init.c',
'third_party/cares/cares/ares_llist.c',
'third_party/cares/cares/ares_mkquery.c',
'third_party/cares/cares/ares_nowarn.c',
'third_party/cares/cares/ares_options.c',
'third_party/cares/cares/ares_parse_a_reply.c',
'third_party/cares/cares/ares_parse_aaaa_reply.c',
'third_party/cares/cares/ares_parse_mx_reply.c',
'third_party/cares/cares/ares_parse_naptr_reply.c',
'third_party/cares/cares/ares_parse_ns_reply.c',
'third_party/cares/cares/ares_parse_ptr_reply.c',
'third_party/cares/cares/ares_parse_soa_reply.c',
'third_party/cares/cares/ares_parse_srv_reply.c',
'third_party/cares/cares/ares_parse_txt_reply.c',
'third_party/cares/cares/ares_platform.c',
'third_party/cares/cares/ares_process.c',
'third_party/cares/cares/ares_query.c',
'third_party/cares/cares/ares_search.c',
'third_party/cares/cares/ares_send.c',
'third_party/cares/cares/ares_strcasecmp.c',
'third_party/cares/cares/ares_strdup.c',
'third_party/cares/cares/ares_strerror.c',
'third_party/cares/cares/ares_strsplit.c',
'third_party/cares/cares/ares_timeout.c',
'third_party/cares/cares/ares_version.c',
'third_party/cares/cares/ares_writev.c',
'third_party/cares/cares/bitncmp.c',
'third_party/cares/cares/inet_net_pton.c',
'third_party/cares/cares/inet_ntop.c',
'third_party/cares/cares/windows_port.c',
'third_party/re2/re2/bitstate.cc',
'third_party/re2/re2/compile.cc',
'third_party/re2/re2/dfa.cc',
'third_party/re2/re2/filtered_re2.cc',
'third_party/re2/re2/mimics_pcre.cc',
'third_party/re2/re2/nfa.cc',
'third_party/re2/re2/onepass.cc',
'third_party/re2/re2/parse.cc',
'third_party/re2/re2/perl_groups.cc',
'third_party/re2/re2/prefilter.cc',
'third_party/re2/re2/prefilter_tree.cc',
'third_party/re2/re2/prog.cc',
'third_party/re2/re2/re2.cc',
'third_party/re2/re2/regexp.cc',
'third_party/re2/re2/set.cc',
'third_party/re2/re2/simplify.cc',
'third_party/re2/re2/stringpiece.cc',
'third_party/re2/re2/tostring.cc',
'third_party/re2/re2/unicode_casefold.cc',
'third_party/re2/re2/unicode_groups.cc',
'third_party/re2/util/pcre.cc',
'third_party/re2/util/rune.cc',
'third_party/re2/util/strutil.cc',
'third_party/upb/upb/decode.c',
'third_party/upb/upb/decode_fast.c',
'third_party/upb/upb/def.c',
'third_party/upb/upb/encode.c',
'third_party/upb/upb/msg.c',
'third_party/upb/upb/reflection.c',
'third_party/upb/upb/table.c',
'third_party/upb/upb/text_encode.c',
'third_party/upb/upb/upb.c',
'third_party/zlib/adler32.c',
'third_party/zlib/compress.c',
'third_party/zlib/crc32.c',
'third_party/zlib/deflate.c',
'third_party/zlib/gzclose.c',
'third_party/zlib/gzlib.c',
'third_party/zlib/gzread.c',
'third_party/zlib/gzwrite.c',
'third_party/zlib/infback.c',
'third_party/zlib/inffast.c',
'third_party/zlib/inflate.c',
'third_party/zlib/inftrees.c',
'third_party/zlib/trees.c',
'third_party/zlib/uncompr.c',
'third_party/zlib/zutil.c',
]
# Pre-generated, per-platform assembly implementations of BoringSSL's
# crypto primitives, keyed by 'crypto_<os>_<arch>'.  Exactly one entry is
# selected at build time to match the target platform.  The ios/linux/mac
# and win-aarch64 variants are gas-style .S files; the win-x86* variants
# are nasm/yasm-style .asm files.
ASM_SOURCE_FILES = {
    'crypto_ios_aarch64': [
        'third_party/boringssl-with-bazel/ios-aarch64/crypto/chacha/chacha-armv8.S',
        'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/aesv8-armx64.S',
        'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/armv8-mont.S',
        'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/ghash-neon-armv8.S',
        'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/ghashv8-armx64.S',
        'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/sha1-armv8.S',
        'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/sha256-armv8.S',
        'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/sha512-armv8.S',
        'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/vpaes-armv8.S',
        'third_party/boringssl-with-bazel/ios-aarch64/crypto/test/trampoline-armv8.S',
    ],
    'crypto_ios_arm': [
        'third_party/boringssl-with-bazel/ios-arm/crypto/chacha/chacha-armv4.S',
        'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/aesv8-armx32.S',
        'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/armv4-mont.S',
        'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/bsaes-armv7.S',
        'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/ghash-armv4.S',
        'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/ghashv8-armx32.S',
        'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/sha1-armv4-large.S',
        'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/sha256-armv4.S',
        'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/sha512-armv4.S',
        'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/vpaes-armv7.S',
        'third_party/boringssl-with-bazel/ios-arm/crypto/test/trampoline-armv4.S',
    ],
    'crypto_linux_aarch64': [
        'third_party/boringssl-with-bazel/linux-aarch64/crypto/chacha/chacha-armv8.S',
        'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/aesv8-armx64.S',
        'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/armv8-mont.S',
        'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/ghash-neon-armv8.S',
        'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/ghashv8-armx64.S',
        'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/sha1-armv8.S',
        'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/sha256-armv8.S',
        'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/sha512-armv8.S',
        'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/vpaes-armv8.S',
        'third_party/boringssl-with-bazel/linux-aarch64/crypto/test/trampoline-armv8.S',
    ],
    # linux-arm additionally carries hand-written curve25519/poly1305
    # assembly that lives under src/ rather than the generated tree.
    'crypto_linux_arm': [
        'third_party/boringssl-with-bazel/linux-arm/crypto/chacha/chacha-armv4.S',
        'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/aesv8-armx32.S',
        'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/armv4-mont.S',
        'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/bsaes-armv7.S',
        'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/ghash-armv4.S',
        'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/ghashv8-armx32.S',
        'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/sha1-armv4-large.S',
        'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/sha256-armv4.S',
        'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/sha512-armv4.S',
        'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/vpaes-armv7.S',
        'third_party/boringssl-with-bazel/linux-arm/crypto/test/trampoline-armv4.S',
        'third_party/boringssl-with-bazel/src/crypto/curve25519/asm/x25519-asm-arm.S',
        'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_arm_asm.S',
    ],
    'crypto_linux_ppc64le': [
        'third_party/boringssl-with-bazel/linux-ppc64le/crypto/fipsmodule/aesp8-ppc.S',
        'third_party/boringssl-with-bazel/linux-ppc64le/crypto/fipsmodule/ghashp8-ppc.S',
        'third_party/boringssl-with-bazel/linux-ppc64le/crypto/test/trampoline-ppc.S',
    ],
    'crypto_linux_x86': [
        'third_party/boringssl-with-bazel/linux-x86/crypto/chacha/chacha-x86.S',
        'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/aesni-x86.S',
        'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/bn-586.S',
        'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/co-586.S',
        'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/ghash-ssse3-x86.S',
        'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/ghash-x86.S',
        'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/md5-586.S',
        'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/sha1-586.S',
        'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/sha256-586.S',
        'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/sha512-586.S',
        'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/vpaes-x86.S',
        'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/x86-mont.S',
        'third_party/boringssl-with-bazel/linux-x86/crypto/test/trampoline-x86.S',
    ],
    'crypto_linux_x86_64': [
        'third_party/boringssl-with-bazel/linux-x86_64/crypto/chacha/chacha-x86_64.S',
        'third_party/boringssl-with-bazel/linux-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.S',
        'third_party/boringssl-with-bazel/linux-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.S',
        'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.S',
        'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/aesni-x86_64.S',
        'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.S',
        'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/ghash-x86_64.S',
        'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/md5-x86_64.S',
        'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/p256-x86_64-asm.S',
        'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.S',
        'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/rdrand-x86_64.S',
        'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/rsaz-avx2.S',
        'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/sha1-x86_64.S',
        'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/sha256-x86_64.S',
        'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/sha512-x86_64.S',
        'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/vpaes-x86_64.S',
        'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/x86_64-mont.S',
        'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/x86_64-mont5.S',
        'third_party/boringssl-with-bazel/linux-x86_64/crypto/test/trampoline-x86_64.S',
        'third_party/boringssl-with-bazel/src/crypto/hrss/asm/poly_rq_mul.S',
    ],
    'crypto_mac_x86': [
        'third_party/boringssl-with-bazel/mac-x86/crypto/chacha/chacha-x86.S',
        'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/aesni-x86.S',
        'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/bn-586.S',
        'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/co-586.S',
        'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/ghash-ssse3-x86.S',
        'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/ghash-x86.S',
        'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/md5-586.S',
        'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/sha1-586.S',
        'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/sha256-586.S',
        'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/sha512-586.S',
        'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/vpaes-x86.S',
        'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/x86-mont.S',
        'third_party/boringssl-with-bazel/mac-x86/crypto/test/trampoline-x86.S',
    ],
    'crypto_mac_x86_64': [
        'third_party/boringssl-with-bazel/mac-x86_64/crypto/chacha/chacha-x86_64.S',
        'third_party/boringssl-with-bazel/mac-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.S',
        'third_party/boringssl-with-bazel/mac-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.S',
        'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.S',
        'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/aesni-x86_64.S',
        'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.S',
        'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/ghash-x86_64.S',
        'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/md5-x86_64.S',
        'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/p256-x86_64-asm.S',
        'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.S',
        'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/rdrand-x86_64.S',
        'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/rsaz-avx2.S',
        'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/sha1-x86_64.S',
        'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/sha256-x86_64.S',
        'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/sha512-x86_64.S',
        'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/vpaes-x86_64.S',
        'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/x86_64-mont.S',
        'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/x86_64-mont5.S',
        'third_party/boringssl-with-bazel/mac-x86_64/crypto/test/trampoline-x86_64.S',
    ],
    'crypto_win_aarch64': [
        'third_party/boringssl-with-bazel/win-aarch64/crypto/chacha/chacha-armv8.S',
        'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/aesv8-armx64.S',
        'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/armv8-mont.S',
        'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/ghash-neon-armv8.S',
        'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/ghashv8-armx64.S',
        'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/sha1-armv8.S',
        'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/sha256-armv8.S',
        'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/sha512-armv8.S',
        'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/vpaes-armv8.S',
        'third_party/boringssl-with-bazel/win-aarch64/crypto/test/trampoline-armv8.S',
    ],
    # Windows x86/x86_64 use nasm-syntax .asm sources instead of gas .S.
    'crypto_win_x86': [
        'third_party/boringssl-with-bazel/win-x86/crypto/chacha/chacha-x86.asm',
        'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/aesni-x86.asm',
        'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/bn-586.asm',
        'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/co-586.asm',
        'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/ghash-ssse3-x86.asm',
        'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/ghash-x86.asm',
        'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/md5-586.asm',
        'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/sha1-586.asm',
        'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/sha256-586.asm',
        'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/sha512-586.asm',
        'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/vpaes-x86.asm',
        'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/x86-mont.asm',
        'third_party/boringssl-with-bazel/win-x86/crypto/test/trampoline-x86.asm',
    ],
    'crypto_win_x86_64': [
        'third_party/boringssl-with-bazel/win-x86_64/crypto/chacha/chacha-x86_64.asm',
        'third_party/boringssl-with-bazel/win-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.asm',
        'third_party/boringssl-with-bazel/win-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.asm',
        'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.asm',
        'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/aesni-x86_64.asm',
        'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.asm',
        'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/ghash-x86_64.asm',
        'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/md5-x86_64.asm',
        'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/p256-x86_64-asm.asm',
        'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.asm',
        'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/rdrand-x86_64.asm',
        'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/rsaz-avx2.asm',
        'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/sha1-x86_64.asm',
        'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/sha256-x86_64.asm',
        'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/sha512-x86_64.asm',
        'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/vpaes-x86_64.asm',
        'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/x86_64-mont.asm',
        'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/x86_64-mont5.asm',
        'third_party/boringssl-with-bazel/win-x86_64/crypto/test/trampoline-x86_64.asm',
    ],
}
|
ejona86/grpc
|
src/python/grpcio/grpc_core_dependencies.py
|
Python
|
apache-2.0
| 80,840
|
#!/usr/bin/env python
import re
import sys
import requests
import json
import bs4
# base64 is used to embed downloaded bill files in the extracted output.
import base64
# Base URL of the Malaysian Parliament website.
TARGET_URL = 'http://www.parlimen.gov.my'
# Dewan Rakyat (lower house) bills listing page.
MRBILLS = TARGET_URL + '/bills-dewan-rakyat.html'
# Matches the onclick="loadResult('<filepath>','<filename>')" handlers on
# each bill link.  A raw string is used so "\(" is a regex escape rather
# than an invalid string-literal escape (a SyntaxWarning on modern Python).
file_rule = re.compile(r"loadResult\('(.*)','(.*)'\)")
def doReq():
    """Fetch the Dewan Rakyat bills page, parse it, and return the
    extracted bill records (see ``MyTable.extract``).

    Exits the process via ``sys.exit`` on a non-200 HTTP response.
    """
    params = {'uweb': 'dr'}
    # Single-argument parenthesized print works on both Python 2 and 3.
    print("Requesting latest bills...")
    r = requests.get(MRBILLS, params=params)
    if r.status_code != 200:
        sys.exit('Connection error, HTTP %d' % r.status_code)
    print('Got it, you lucky brat!')
    mytable = MyTable(r.text)
    print('Extracting...')
    data = mytable.extract()
    # Bug fix: the original computed `data` and silently discarded it;
    # return it so callers can actually use the scraped records.
    return data
class MyTable(object):
    """Parses the bills table (id="mytable") out of the Dewan Rakyat page.

    Each data row becomes a dict describing the bill (code, year, title),
    its status history, and the base64-encoded content of the bill PDF.
    """
    def __init__(self, html):
        # Parse the raw page once; all lookups go through this soup.
        self.html = bs4.BeautifulSoup(html, 'lxml')
        self.total_row = 0
        # Number of rows processed so far (progress reporting only).
        self.count = 0
    @property
    def table(self):
        # The bills listing lives in a single table with id="mytable".
        return self.html.find("table", {"id": "mytable"})
    def _extract_row(self, row):
        """Turn one <tr> of the bills table into a dict."""
        self.count += 1
        print 'Processing %d out of %d files' % (self.count, self.total_row)
        cols = row.findChildren('td')
        kod = cols[0].find('a')
        kod_name = kod.text.strip()
        # onclick carries loadResult('<path>','<filename>') pointing at the PDF.
        kod_fileinfo = kod['onclick'].strip()
        tahun = cols[1].text.strip()
        tajuk = cols[2].text.strip()
        status_col = cols[3]
        status_code = status_col.find('div', {'class':'parent'}).text.strip()
        status_table = status_col.find('table')
        status_rows = status_table.find_all('tr')
        # "Dibentang Oleh" sometimes sits on status row 2 and sometimes on
        # row 4; fall back to row 4 when row 2 is empty (assumed site quirk).
        bendang = status_rows[2].find_all('td')[2].text.strip()
        if not bendang:
            bendang = status_rows[4].find_all('td')[2].text.strip()
        status = {'status_code': status_code,
                  'Bacaan Pertama Pada': status_rows[0].find_all('td')[2].text.strip(),
                  'Bacaan Kedua Pada': status_rows[1].find_all('td')[2].text.strip(),
                  'Dibentang Oleh': bendang,
                  # NOTE(review): this reads status_rows[2] again -- the same
                  # cell used for 'Dibentang Oleh' above. Confirm the index;
                  # row 3 may have been intended.
                  'Diluluskan Pada': status_rows[2].find_all('td')[2].text.strip()}
        kod_filepath, kod_filename = file_rule.findall(kod_fileinfo)[0]
        kod_file = self._download_file(kod_filepath)
        extracted = {'kod': {'kod_name': kod_name,
                             'kod_path': TARGET_URL + kod_filepath,
                             'kod_file': {'kod_filename': kod_filename,
                                          'kod_content': kod_file}},
                     'tahun': tahun,
                     'tajuk': tajuk,
                     'status': status}
        return extracted
    def _download_file(self, filepath):
        """Download a bill document; return base64 content or None on failure."""
        url = TARGET_URL + filepath
        r = requests.get(url)
        if r.status_code != 200:
            # Best effort: skip this file on any HTTP error.
            return None
        # base64 keeps the binary payload JSON-serialisable.
        return base64.b64encode(r.content)
    def extract(self):
        """Return a list of dicts, one per data row of the bills table."""
        # Drop the whitespace-only children of the <table> tag.
        rows = filter(lambda x: x != '\n', self.table)
        # headrow = rows.pop(0)
        # header = map(lambda x: x.text, headrow.find_all('th'))
        # Discard the header row; only data rows remain.
        rows.pop(0)
        self.total_row = len(rows)
        print 'Yet another %s rows!' % self.total_row
        data = map(self._extract_row, rows)
        print 'Habis!'
        return data
    def extract_to_json(self):
        """Serialise the extracted rows as a JSON document."""
        data = {'bill_list': self.extract()}
        return json.dumps(data)
if __name__ == '__main__':
if len(sys.argv) > 2:
sys.exit('Usage: python gov.py [HTMLFILE]')
if len(sys.argv) == 1:
doReq()
elif len(sys.argv) == 2:
htmlfile = sys.argv[1]
html = open(htmlfile, 'rb').read()
mytable = MyTable(html)
# Good for the eyes
print mytable.extract_to_json()
|
sweemeng/Malaysian-Bill-Watcher
|
billwatcher/scrapers/parliament.py
|
Python
|
gpl-3.0
| 3,779
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    """Add the Contributor model and give Article.category a default.

    Auto-generated schema migration; field definitions should not be edited
    after it has been applied to a database.
    """

    dependencies = [
        # Contributor.user points at the (swappable) auth user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('blog', '0005_auto_20151215_1433'),
    ]

    operations = [
        migrations.CreateModel(
            name='Contributor',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
                ('display_name', models.CharField(max_length=50)),
                ('slug', models.SlugField()),
                # Optional link to a real user account.
                ('user', models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL, blank=True)),
            ],
        ),
        migrations.AlterField(
            model_name='article',
            name='category',
            # Existing rows fall back to the category with pk=1.
            field=models.ForeignKey(to='blog.Category', default=1),
        ),
    ]
|
FarmCodeGary/InspirationForge
|
blog/migrations/0006_auto_20151216_1349.py
|
Python
|
mit
| 955
|
import nengo
import numpy as np
import nengo.spa as spa
# Two independent 32-dimensional semantic pointer vocabularies: one for the
# perceived animal, one for the motor response.
vocab = spa.Vocabulary(32)
vocab2 = spa.Vocabulary(32)
model = nengo.Network()
with model:
    # Holds the current input semantic pointer (DOG/CAT/RAT/COW).
    state = spa.State(32, vocab=vocab)
    # Four competing actions, one per animal.
    bg = nengo.networks.actionselection.BasalGanglia(4)
    # Each basal-ganglia input is the dot product of the state with one
    # animal's vocabulary vector.
    nengo.Connection(state.output, bg.input,
                     transform=[vocab.parse('DOG').v,
                                vocab.parse('CAT').v,
                                vocab.parse('RAT').v,
                                vocab.parse('COW').v,
                                ])
    # Thalamus converts the winning BG output into a clean selection signal.
    thal = nengo.networks.actionselection.Thalamus(4)
    nengo.Connection(bg.output, thal.input)
    # Motor state receives the response vector for the selected action
    # (transposed so each thalamus channel injects one response vector).
    motor = spa.State(32, vocab=vocab2)
    nengo.Connection(thal.output, motor.input,
                     transform=np.array([vocab2.parse('BARK').v,
                                         vocab2.parse('MEOW').v,
                                         vocab2.parse('SQUEAK').v,
                                         vocab2.parse('MOO').v,
                                         ]).T)
|
tcstewar/nengo_assignments
|
groningen_2018/day4-action/action5.py
|
Python
|
gpl-3.0
| 1,029
|
"""
Definition of forms.
"""
from django.shortcuts import render, redirect
from django import forms
from app import models
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import authenticate, login, get_user_model,logout
from django.contrib import auth
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
#class BootstrapAuthenticationForm(AuthenticationForm):
# """Authentication form which uses boostrap CSS."""
# username = forms.CharField(max_length=254,
# widget=forms.TextInput({
# 'class': 'form-control',
# 'placeholder': 'User name'}))
# password = forms.CharField(label=_("Password"),
# widget=forms.PasswordInput({
# 'class': 'form-control',
# 'placeholder':'Password'}))
# Resolve the active user model (honours a swapped AUTH_USER_MODEL).
User = get_user_model()
class UserForm(forms.Form):
    """Login form validating a username/password pair against the auth backend."""

    username = forms.CharField()
    password = forms.CharField(widget=forms.PasswordInput)

    def clean(self, *args, **kwargs):
        """Reject the form when the supplied credentials do not authenticate."""
        data = self.cleaned_data
        name, secret = data.get("username"), data.get("password")
        if name and secret:
            if auth.authenticate(username=name, password=secret) is None:
                raise forms.ValidationError("Wrong username/password combination")
        return super(UserForm, self).clean(*args, **kwargs)
class RegisterForm(forms.ModelForm):
    """Sign-up form backed by the project's user model."""

    password = forms.CharField(widget=forms.PasswordInput)

    class Meta:
        model = User
        fields = ['username', 'email', 'password']
|
0909023/Dev6B_English_Website
|
DjangoWebProject1/DjangoWebProject1/app/forms.py
|
Python
|
mit
| 2,175
|
#legacy
from HTMLParser import HTMLParser
# Module-level result slot: last image-viewer link found by the parser.
tehlink='nuffin'
class NoisyParser(HTMLParser):
    """HTMLParser that logs every tag and records imagecabin viewer links.

    Keeps a per-tag open/close balance in ``tehtree`` and stores the last
    <a href="index.php?view=..."> link, absolutised, in the module-level
    ``tehlink`` variable.
    """
    def __init__(self):
        HTMLParser.__init__(self)
        # tag name -> (open tags seen) - (close tags seen)
        self.tehtree={}
    def handle_starttag(self, tag, attrs):
        print("Starting tag ({0}), Value ({1})".format(tag,attrs))
        global tehlink
        try:
            # NOTE(review): on the *first* occurrence of any tag this lookup
            # raises KeyError before the <a href> scan below runs, so the
            # very first anchor's link is never inspected -- likely a bug.
            self.tehtree[tag]=self.tehtree[tag]+1
            if tag=='a':
                for attr,value in attrs:
                    if attr=='href':
                        print ">>>>>>>>>>>>> A AHEAD:" + attr,value
                        if value.find('index.php?view=')==0:
                            # Remember the most recent viewer link, absolutised.
                            tehlink='http://imagecabin.com/'+value
                            print(tehlink)
        except KeyError:
            # First time this tag is seen: start its counter at 1.
            self.tehtree[tag]=1
        print(self.tehtree)
    def handle_endtag(self, tag):
        try:
            self.tehtree[tag]=self.tehtree[tag]-1
            print("Ending tag ({0})".format(tag))
            print(self.tehtree)
        except KeyError:
            # Closing tag with no recorded opener.
            self.tehtree[tag]=-1
# Drive the parser over the sample page and report the link that was found.
pa=NoisyParser()
# Fix: close the sample file deterministically instead of relying on GC.
with open('sample_xml_imagecabin.htm') as f:
    pa.feed(f.read())
print(tehlink)
|
BackupTheBerlios/hiss
|
hostmodule/core/grep2.py
|
Python
|
gpl-3.0
| 959
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import psutil
import unittest
import logging
from selenium import webdriver
class PhantomJSLauncherTests(unittest.TestCase):
    def testLaunchAndCloseBrowserWithoutLeakingCookieTempFileDescriptor(self):
        """Launch and quit PhantomJS repeatedly; the process fd count must stay flat.

        A leaked cookie temp-file descriptor would make the count grow with
        every launch/quit cycle.
        """
        # psutil module is used to get num open file descriptors across platforms
        self.p = psutil.Process()
        self.num_fds_samples = []
        # Same launch/quit cycle as before, deduplicated into a loop.
        for _ in range(3):
            self.driver = webdriver.PhantomJS()
            self.driver.quit()
            self.num_fds_samples.append(self.p.num_fds())
        # assertEqual (not a bare assert) survives python -O and gives a
        # readable failure message with both sample extremes.
        self.assertEqual(max(self.num_fds_samples), min(self.num_fds_samples))
if __name__ == "__main__":
    # Allow running this test module directly with verbose logging.
    logging.basicConfig(level=logging.INFO)
    unittest.main()
|
HtmlUnit/selenium
|
py/test/selenium/webdriver/phantomjs/phantomjs_launcher_tests.py
|
Python
|
apache-2.0
| 1,739
|
# -*- coding: utf-8 -*-
from plivo.utils import to_param_dict
from plivo.utils.validators import *
from ..base import ListResponseObject, PlivoResource, PlivoResourceInterface
class Endpoint(PlivoResource):
    """A single Plivo SIP endpoint resource instance."""
    _name = 'Endpoint'
    _identifier_string = 'endpoint_id'

    @validate_args(
        password=[of_type(six.text_type)],
        alias=[of_type(six.text_type)],
        app_id=[optional(of_type(six.text_type))])
    def update(self, password=None, alias=None, app_id=None):
        """Update this endpoint and mirror the new values on the local object."""
        params = to_param_dict(self.update, locals())
        # Keep the in-memory resource in sync with what is being sent.
        self.__dict__.update(params)
        return self.client.endpoints.update(self.id, **params)

    def delete(self):
        """Delete this endpoint via the API."""
        return self.client.endpoints.delete(self.id)
class Endpoints(PlivoResourceInterface):
    """Collection interface: create/get/list/update/delete endpoints."""
    _resource_type = Endpoint

    @validate_args(
        username=[of_type(six.text_type)],
        password=[of_type(six.text_type)],
        alias=[of_type(six.text_type)],
        app_id=[optional(of_type(six.text_type))])
    def create(self, username, password, alias, app_id=None):
        """Create a new endpoint."""
        return self.client.request('POST', ('Endpoint', ),
                                   to_param_dict(self.create, locals()), is_voice_request=True)

    @validate_args(endpoint_id=[of_type(six.text_type)])
    def get(self, endpoint_id):
        """Fetch a single endpoint by id."""
        return self.client.request('GET', ('Endpoint', endpoint_id), is_voice_request=True)

    @validate_args(
        limit=[
            optional(
                all_of(
                    of_type(*six.integer_types),
                    check(lambda limit: 0 < limit <= 20, '0 < limit <= 20')))
        ],
        offset=[
            optional(
                all_of(
                    of_type(*six.integer_types),
                    check(lambda offset: 0 <= offset, '0 <= offset')))
        ])
    def list(self, limit=20, offset=0):
        """List endpoints; limit is capped at 20 per the validator above."""
        return self.client.request(
            'GET',
            ('Endpoint', ),
            to_param_dict(self.list, locals()),
            objects_type=Endpoint,
            response_type=ListResponseObject, is_voice_request=True)

    @validate_args(
        endpoint_id=[of_type(six.text_type)],
        password=[optional(of_type(six.text_type))],
        alias=[optional(of_type(six.text_type))],
        app_id=[optional(of_type(six.text_type))])
    def update(self, endpoint_id, password=None, alias=None, app_id=None):
        """Update an endpoint by id."""
        # not using locals() because we need to neglect endpoint_id
        temp = {
            'self': self,
            'password': password,
            'alias': alias,
            'app_id': app_id
        }
        return self.client.request('POST', ('Endpoint', endpoint_id),
                                   to_param_dict(self.update, temp), is_voice_request=True)

    @validate_args(endpoint_id=[of_type(six.text_type)])
    def delete(self, endpoint_id):
        """Delete an endpoint by id."""
        return self.client.request('DELETE', ('Endpoint', endpoint_id), is_voice_request=True)
|
plivo/plivo-python
|
plivo/resources/endpoints.py
|
Python
|
mit
| 2,946
|
'''
Kivy Imports
'''
from kivy.config import Config
Config.set('input', 'mouse', 'mouse,disable_multitouch')
Config.set('graphics', 'minimum_width', '620')
Config.set('graphics', 'minimum_height', '440')
Config.set('kivy', 'exit_on_escape', '0')
Config.set('graphics', 'multisamples', '0')
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.anchorlayout import AnchorLayout
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.clock import Clock
from kivy.uix.popup import Popup
import math
import global_variables
import sys
import re
import json
'''
Internal Module Imports
'''
from UIElements.frontPage import FrontPage
from UIElements.screenControls import ScreenControls
from UIElements.gcodeCanvas import GcodeCanvas
from UIElements.otherFeatures import OtherFeatures
from UIElements.softwareSettings import SoftwareSettings
from UIElements.viewMenu import ViewMenu
from UIElements.runMenu import RunMenu
from UIElements.connectMenu import ConnectMenu
from UIElements.diagnosticsMenu import Diagnostics
from UIElements.manualControls import ManualControl
from DataStructures.data import Data
from Connection.nonVisibleWidgets import NonVisibleWidgets
from UIElements.notificationPopup import NotificationPopup
from Settings import maslowSettings
from UIElements.backgroundMenu import BackgroundMenu
'''
Main UI Program
'''
class GroundControlApp(App):
    """Kivy application object for Maslow CNC Ground Control.

    Owns the shared Data object, builds the front page UI and settings
    panels, migrates old configuration values, and runs the periodic loop
    that services messages coming back from the machine.
    """
    def get_application_config(self):
        # Store the .ini in the user's home directory.
        return super(GroundControlApp, self).get_application_config(
            '~/%(appname)s.ini')
    def build(self):
        """Construct the UI, load/migrate settings, and schedule the main loop."""
        interface = FloatLayout()
        self.data = Data()
        # Color scheme selects the icon set, font markup color and canvas colors.
        if self.config.get('Maslow Settings', 'colorScheme') == 'Light':
            self.data.iconPath = './Images/Icons/normal/'
            self.data.fontColor = '[color=7a7a7a]'
            self.data.drawingColor = [.47,.47,.47]
            Window.clearcolor = (1, 1, 1, 1)
            self.data.posIndicatorColor = [0,0,0]
            self.data.targetInicatorColor = [1,0,0]
        elif self.config.get('Maslow Settings', 'colorScheme') == 'Dark':
            self.data.iconPath = './Images/Icons/highvis/'
            self.data.fontColor = '[color=000000]'
            self.data.drawingColor = [1,1,1]
            Window.clearcolor = (0, 0, 0, 1)
            self.data.posIndicatorColor = [1,1,1]
            self.data.targetInicatorColor = [1,0,0]
        elif self.config.get('Maslow Settings', 'colorScheme') == 'DarkGreyBlue':
            self.data.iconPath = './Images/Icons/darkgreyblue/'
            self.data.fontColor = '[color=000000]'
            self.data.drawingColor = [1,1,1]
            Window.clearcolor = (0.06, 0.10, 0.2, 1)
            self.data.posIndicatorColor = [0.51,0.93,0.97]
            self.data.targetInicatorColor = [1,0,0]
        Window.maximize()
        self.frontpage = FrontPage(self.data, name='FrontPage')
        interface.add_widget(self.frontpage)
        self.nonVisibleWidgets = NonVisibleWidgets()
        '''
        Load User Settings
        '''
        # force create an ini no matter what.
        self.config.write()
        # One-time settings migrations carried over from older releases.
        if self.config.get('Advanced Settings', 'encoderSteps') == '8148.0':
            self.data.message_queue.put("Message: This update will adjust the the number of encoder pulses per rotation from 8,148 to 8,113 in your settings which improves the positional accuracy.\n\nPerforming a calibration will help you get the most out of this update.")
            self.config.set('Advanced Settings', 'encoderSteps', '8113.73')
        #up the maximum feedrate
        if self.config.get('Advanced Settings', 'maxFeedrate') == '700':
            self.data.message_queue.put("Message: This update will increase the maximum feedrate of your machine. You can adjust this value under the Advanced settings.")
            self.config.set('Advanced Settings', 'maxFeedrate', '800')
            self.config.write()
        self.data.comport = self.config.get('Maslow Settings', 'COMport')
        self.data.gcodeFile = self.config.get('Maslow Settings', 'openFile')
        offsetX = float(self.config.get('Advanced Settings', 'homeX'))
        offsetY = float(self.config.get('Advanced Settings', 'homeY'))
        self.data.gcodeShift = [offsetX,offsetY]
        self.data.config = self.config
        # All subsequent config changes funnel through configSettingChange.
        self.config.add_callback(self.configSettingChange)
        # Background image setup
        self.data.backgroundFile = self.config.get('Background Settings',
                                                   'backgroundFile')
        self.data.backgroundManualReg = json.loads(
            self.config.get('Background Settings', 'manualReg'))
        if self.data.backgroundFile != "":
            BackgroundMenu(self.data).processBackground()
        '''
        Initializations
        '''
        self.frontpage.setUpData(self.data)
        self.nonVisibleWidgets.setUpData(self.data)
        self.frontpage.gcodecanvas.initialize()
        '''
        Scheduling
        '''
        # Poll the machine message queue every 10 ms.
        Clock.schedule_interval(self.runPeriodically, .01)
        '''
        Push settings to machine
        '''
        self.data.bind(connectionStatus = self.requestMachineSettings)
        self.data.pushSettings = self.requestMachineSettings
        return interface
    def build_config(self, config):
        """
        Set the default values for the config sections.
        """
        # Calculate computed settings on load
        config.add_callback(self.computeSettings)
        config.setdefaults('Computed Settings', maslowSettings.getDefaultValueSection('Computed Settings'))
        config.setdefaults('Maslow Settings', maslowSettings.getDefaultValueSection('Maslow Settings'))
        config.setdefaults('Advanced Settings', maslowSettings.getDefaultValueSection('Advanced Settings'))
        config.setdefaults('Ground Control Settings', maslowSettings.getDefaultValueSection('Ground Control Settings'))
        config.setdefaults('Background Settings', maslowSettings.getDefaultValueSection('Background Settings'))
        config.remove_callback(self.computeSettings)
    def build_settings(self, settings):
        """
        Add custom section to the default configuration object.
        """
        settings.add_json_panel('Maslow Settings', self.config, data=maslowSettings.getJSONSettingSection('Maslow Settings'))
        settings.add_json_panel('Advanced Settings', self.config, data=maslowSettings.getJSONSettingSection('Advanced Settings'))
        settings.add_json_panel('Ground Control Settings', self.config, data=maslowSettings.getJSONSettingSection("Ground Control Settings"))
    def computeSettings(self, section, key, value):
        """Derive the 'Computed Settings' entries from a changed raw setting."""
        # Update Computed settings
        if key == 'kinematicsType':
            if value == 'Quadrilateral':
                self.config.set('Computed Settings', 'kinematicsTypeComputed', "1")
            else:
                self.config.set('Computed Settings', 'kinematicsTypeComputed', "2")
        elif (key == 'gearTeeth' or key == 'chainPitch') and self.config.has_option('Advanced Settings', 'gearTeeth') and self.config.has_option('Advanced Settings', 'chainPitch'):
            # distance per sprocket rotation = teeth * chain pitch
            distPerRot = float(self.config.get('Advanced Settings', 'gearTeeth')) * float(self.config.get('Advanced Settings', 'chainPitch'))
            self.config.set('Computed Settings', "distPerRot", str(distPerRot))
            if self.config.has_option('Advanced Settings', 'leftChainTolerance'):
                distPerRotLeftChainTolerance = (1 + (float(self.config.get('Advanced Settings', 'leftChainTolerance')) / 100)) * float(self.config.get('Advanced Settings', 'gearTeeth')) * float(self.config.get('Advanced Settings', 'chainPitch'))
                self.config.set('Computed Settings', "distPerRotLeftChainTolerance", str("{0:.5f}".format(distPerRotLeftChainTolerance)))
            if self.config.has_option('Advanced Settings', 'rightChainTolerance'):
                distPerRotRightChainTolerance = (1 + (float(self.config.get('Advanced Settings', 'rightChainTolerance')) / 100)) * float(self.config.get('Advanced Settings', 'gearTeeth')) * float(self.config.get('Advanced Settings', 'chainPitch'))
                self.config.set('Computed Settings', "distPerRotRightChainTolerance", str("{0:.5f}".format(distPerRotRightChainTolerance)))
        elif key == 'leftChainTolerance' and self.config.has_option('Advanced Settings', 'leftChainTolerance') and self.config.has_option('Computed Settings', 'distPerRot'):
            distPerRotLeftChainTolerance = (1 + (float(self.config.get('Advanced Settings', 'leftChainTolerance')) / 100)) * float(self.config.get('Computed Settings', 'distPerRot'))
            self.config.set('Computed Settings', "distPerRotLeftChainTolerance", str("{0:.5f}".format(distPerRotLeftChainTolerance)))
        elif key == 'rightChainTolerance' and self.config.has_option('Advanced Settings', 'rightChainTolerance') and self.config.has_option('Computed Settings', 'distPerRot'):
            distPerRotRightChainTolerance = (1 + (float(self.config.get('Advanced Settings', 'rightChainTolerance')) / 100)) * float(self.config.get('Computed Settings', 'distPerRot'))
            self.config.set('Computed Settings', "distPerRotRightChainTolerance", str("{0:.5f}".format(distPerRotRightChainTolerance)))
        elif key == 'enablePosPIDValues':
            # Copy either the user PID values or the defaults into the
            # computed section, for the main axes and then the z-axis.
            for key in ('KpPos', 'KiPos', 'KdPos', 'propWeight'):
                if int(self.config.get('Advanced Settings', 'enablePosPIDValues')) == 1:
                    value = float(self.config.get('Advanced Settings', key))
                else:
                    value = maslowSettings.getDefaultValue('Advanced Settings', key)
                self.config.set('Computed Settings', key + "Main", value)
            #updated computed values for z-axis
            for key in ('KpPosZ', 'KiPosZ', 'KdPosZ', 'propWeightZ'):
                if int(self.config.get('Advanced Settings', 'enablePosPIDValues')) == 1:
                    value = float(self.config.get('Advanced Settings', key))
                else:
                    value = maslowSettings.getDefaultValue('Advanced Settings', key)
                self.config.set('Computed Settings', key, value)
        elif key == 'enableVPIDValues':
            # NOTE(review): these branches test 'enablePosPIDValues', not
            # 'enableVPIDValues' -- confirm whether that is intentional.
            for key in ('KpV', 'KiV', 'KdV'):
                if int(self.config.get('Advanced Settings', 'enablePosPIDValues')) == 1:
                    value = float(self.config.get('Advanced Settings', key))
                else:
                    value = maslowSettings.getDefaultValue('Advanced Settings', key)
                self.config.set('Computed Settings', key + "Main", value)
            #updated computed values for z-axis
            for key in ('KpVZ', 'KiVZ', 'KdVZ'):
                if int(self.config.get('Advanced Settings', 'enablePosPIDValues')) == 1:
                    value = float(self.config.get('Advanced Settings', key))
                else:
                    value = maslowSettings.getDefaultValue('Advanced Settings', key)
                self.config.set('Computed Settings', key, value)
        elif key == 'chainOverSprocket':
            if value == 'Top':
                self.config.set('Computed Settings', 'chainOverSprocketComputed', 1)
            else:
                self.config.set('Computed Settings', 'chainOverSprocketComputed', 2)
        elif key == 'fPWM':
            if value == '31,000Hz':
                self.config.set('Computed Settings', 'fPWMComputed', 1)
            elif value == '4,100Hz':
                self.config.set('Computed Settings', 'fPWMComputed', 2)
            else:
                self.config.set('Computed Settings', 'fPWMComputed', 3)
    def configSettingChange(self, section, key, value):
        """
        Respond to changes in the configuration.
        """
        # Update GC things
        if section == "Maslow Settings":
            if key == "COMport":
                self.data.comport = value
            if (key == "bedHeight" or key == "bedWidth"):
                self.frontpage.gcodecanvas.drawWorkspace()
            if (key == "macro1_title") or (key == "macro2_title"):
                self.frontpage.update_macro_titles()
        if section == "Advanced Settings":
            if (key == "truncate") or (key == "digits"):
                self.frontpage.gcodecanvas.reloadGcode()
            if (key == "spindleAutomate"):
                # Map the human-readable choice to the numeric firmware value.
                if (value == "Servo"):
                    value = 1
                elif (value == "Relay_High"):
                    value = 2
                elif (value == "Relay_Low"):
                    value = 3
                else:
                    value = 0
        # Update Computed Settings
        self.computeSettings(section, key, value)
        # Write the settings change to the Disk
        self.data.config.write()
        # only run on live connection
        if self.data.connectionStatus != 1:
            return
        # Push settings that can be directly written to machine
        firmwareKey = maslowSettings.getFirmwareKey(section, key)
        if firmwareKey is not None:
            self.data.gcode_queue.put("$" + str(firmwareKey) + "=" + str(value))
    def requestMachineSettings(self, *args):
        '''
        Requests the machine to report all settings. This will implicitly
        cause a sync of the machine settings because if GroundControl sees a
        reported setting which does match its expected value, GC will push the
        correct setting to the machine.
        '''
        if self.data.connectionStatus == 1:
            self.data.gcode_queue.put("$$")
    def receivedSetting(self, message):
        '''
        This parses a settings report from the machine, usually received in
        response to a $$ request. If the value received does not match the
        expected value.
        '''
        parameter, position = self.parseFloat(message, 0)
        value, position = self.parseFloat(message, position)
        if (parameter is not None and value is not None):
            maslowSettings.syncFirmwareKey(int(parameter), value, self.data)
    def parseFloat(self, text, position=0):
        '''
        Takes a string and parses out the float found at position default to 0
        returning a list of the matched float and the ending
        position of the float
        '''
        # This regex comes from a python docs recommended
        regex = re.compile("[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?")
        match = regex.search(text[position:])
        if match:
            return (float(match.group(0)), match.end(0))
        else:
            return (None, position)
    '''
    Update Functions
    '''
    def writeToTextConsole(self, message):
        """Append a message to the on-screen console, keeping only recent text."""
        try:
            # Cap the console buffer at roughly the last 2000 characters.
            newText = self.frontpage.consoleText[-2000:] + message
            self.frontpage.consoleText = newText
            self.frontpage.textconsole.gotToBottom()
        except:
            self.frontpage.consoleText = "text not displayed correctly"
    def runPeriodically(self, *args):
        '''
        this block should be handled within the appropriate widget
        '''
        # Drain and dispatch every queued message from the machine.
        while not self.data.message_queue.empty(): #if there is new data to be read
            message = self.data.message_queue.get()
            if message[0] == "<":
                # Position report
                self.setPosOnScreen(message)
            elif message[0] == "$":
                # Settings report line
                self.receivedSetting(message)
            elif message[0] == "[":
                if message[1:4] == "PE:":
                    self.setErrorOnScreen(message)
                elif message[1:8] == "Measure":
                    measuredDist = float(message[9:len(message)-3])
                    try:
                        self.data.measureRequest(measuredDist)
                    except:
                        print "No function has requested a measurement"
            elif message[0:13] == "Maslow Paused":
                self.data.uploadFlag = 0
                self.writeToTextConsole(message)
            elif message[0:8] == "Message:":
                if self.data.calibrationInProcess and message[0:15] == "Message: Unable": #this suppresses the annoying messages about invalid chain lengths during the calibration process
                    break
                # Pause the upload while the notification popup is shown.
                self.previousUploadStatus = self.data.uploadFlag
                self.data.uploadFlag = 0
                try:
                    self._popup.dismiss() #close any open popup
                except:
                    pass #there wasn't a popup to close
                content = NotificationPopup(continueOn = self.dismiss_popup_continue, text = message[9:])
                if sys.platform.startswith('darwin'):
                    self._popup = Popup(title="Notification: ", content=content,
                                        auto_dismiss=False, size=(360,240), size_hint=(.3, .3))
                else:
                    self._popup = Popup(title="Notification: ", content=content,
                                        auto_dismiss=False, size=(360,240), size_hint=(None, None))
                self._popup.open()
                if global_variables._keyboard:
                    global_variables._keyboard.bind(on_key_down=self.keydown_popup)
                    self._popup.bind(on_dismiss=self.ondismiss_popup)
            elif message[0:6] == "ALARM:":
                # Same popup flow as "Message:", but with an alarm title.
                self.previousUploadStatus = self.data.uploadFlag
                self.data.uploadFlag = 0
                try:
                    self._popup.dismiss() #close any open popup
                except:
                    pass #there wasn't a popup to close
                content = NotificationPopup(continueOn = self.dismiss_popup_continue, text = message[7:])
                if sys.platform.startswith('darwin'):
                    self._popup = Popup(title="Alarm Notification: ", content=content,
                                        auto_dismiss=False, size=(360,240), size_hint=(.3, .3))
                else:
                    self._popup = Popup(title="Alarm Notification: ", content=content,
                                        auto_dismiss=False, size=(360,240), size_hint=(None, None))
                self._popup.open()
                if global_variables._keyboard:
                    global_variables._keyboard.bind(on_key_down=self.keydown_popup)
                    self._popup.bind(on_dismiss=self.ondismiss_popup)
            elif message[0:8] == "Firmware":
                self.data.logger.writeToLog("Ground Control Version " + str(self.data.version) + "\n")
                self.writeToTextConsole("Ground Control " + str(self.data.version) + "\r\n" + message + "\r\n")
                #Check that version numbers match
                if float(message[-7:]) < float(self.data.version):
                    self.data.message_queue.put("Message: Warning, your firmware is out of date and may not work correctly with this version of Ground Control\n\n" + "Ground Control Version " + str(self.data.version) + "\r\n" + message)
                if float(message[-7:]) > float(self.data.version):
                    self.data.message_queue.put("Message: Warning, your version of Ground Control is out of date and may not work with this firmware version\n\n" + "Ground Control Version " + str(self.data.version) + "\r\n" + message)
            elif message == "ok\r\n":
                pass #displaying all the 'ok' messages clutters up the display
            else:
                self.writeToTextConsole(message)
    def ondismiss_popup(self, event):
        # Release the keyboard hook installed when the popup opened.
        if global_variables._keyboard:
            global_variables._keyboard.unbind(on_key_down=self.keydown_popup)
    def keydown_popup(self, keyboard, keycode, text, modifiers):
        # Enter/escape dismiss the modal popup and resume.
        if (keycode[1] == 'enter') or (keycode[1] =='numpadenter') or (keycode[1] == 'escape'):
            self.dismiss_popup_continue()
        return True # always swallow keypresses since this is a modal dialog
    def dismiss_popup_continue(self):
        '''
        Close The Pop-up and continue cut
        '''
        self._popup.dismiss()
        self.data.quick_queue.put("~") #send cycle resume command to unpause the machine
        self.data.uploadFlag = self.previousUploadStatus #resume cutting if the machine was cutting before
    def dismiss_popup_hold(self):
        '''
        Close The Pop-up and hold (stop) the cut
        '''
        self._popup.dismiss()
        self.data.uploadFlag = 0 #stop cutting
    def setPosOnScreen(self, message):
        '''
        This should be moved into the appropriate widget
        '''
        try:
            # Message format: <...MPos:x,y,z,WPos:...>
            startpt = message.find('MPos:') + 5
            endpt = message.find('WPos:')
            numz = message[startpt:endpt]
            units = "mm" #message[endpt+1:endpt+3]
            valz = numz.split(",")
            self.xval = float(valz[0])
            self.yval = float(valz[1])
            self.zval = float(valz[2])
            # NaN means the firmware could not solve the kinematics.
            if math.isnan(self.xval):
                self.writeToTextConsole("Unable to resolve x Kinematics.")
                self.xval = 0
            if math.isnan(self.yval):
                self.writeToTextConsole("Unable to resolve y Kinematics.")
                self.yval = 0
            if math.isnan(self.zval):
                self.writeToTextConsole("Unable to resolve z Kinematics.")
                self.zval = 0
        except:
            print "One Machine Position Report Command Misread"
            return
        self.frontpage.setPosReadout(self.xval, self.yval, self.zval)
        self.frontpage.gcodecanvas.positionIndicator.setPos(self.xval,self.yval,self.data.units)
    def setErrorOnScreen(self, message):
        """Parse a [PE:...] positional-error report and update the target indicator."""
        try:
            # Left and right chain error values are comma separated after ':'.
            startpt = message.find(':')+1
            endpt = message.find(',', startpt)
            leftErrorValueAsString = message[startpt:endpt]
            leftErrorValueAsFloat = float(leftErrorValueAsString)
            startpt = endpt + 1
            endpt = message.find(',', startpt)
            rightErrorValueAsString = message[startpt:endpt]
            rightErrorValueAsFloat = float(rightErrorValueAsString)
            if self.data.units == "INCHES":
                rightErrorValueAsFloat = rightErrorValueAsFloat/25.4
                leftErrorValueAsFloat = leftErrorValueAsFloat/25.4
            avgError = (abs(leftErrorValueAsFloat) + abs(rightErrorValueAsFloat))/2
            self.frontpage.gcodecanvas.positionIndicator.setError(0, self.data.units)
            self.data.logger.writeErrorValueToLog(avgError)
            self.frontpage.gcodecanvas.targetIndicator.setPos(self.xval - .5*rightErrorValueAsFloat + .5*leftErrorValueAsFloat, self.yval - .5*rightErrorValueAsFloat - .5*leftErrorValueAsFloat,self.data.units)
        except:
            print "Machine Position Report Command Misread Happened Once"
if __name__ == '__main__':
    # Launch the Kivy application.
    GroundControlApp().run()
|
abetusk/dev
|
projects/maslowcnc/software/GroundControl-master/main.py
|
Python
|
agpl-3.0
| 24,128
|
from django.utils.translation import ugettext_noop as _
from custom.reports.mc.models import WEEKLY_SUMMARY_XMLNS
# Report layout definitions.  Each report is a list of sections; a section
# has a translatable title ('section'), optionally a total column, and a
# list of data columns.  Columns given as dicts are ratio columns with a
# 'slug' label and a (numerator, denominator) column pair; sections of
# type 'form_lookup' pull their columns straight from a form by xmlns.
HF_MONTHLY_REPORT = [
    {
        'section': _('mc_section_home_visits'),
        'total_column': _('home_visits_total'),
        'columns': [
            _('home_visits_pregnant'),
            _('home_visits_postpartem'),
            _('home_visits_newborn'),
            _('home_visits_children'),
            _('home_visits_other'),
        ]
    },
    {
        'section': _('mc_section_rdt'),
        'total_column': _('rdt_total'),
        'columns': [
            _('rdt_positive_children'),
            _('rdt_positive_adults'),
            _('rdt_others'),
        ]
    },
    {
        'section': _('mc_section_diagnosed_cases'),
        'total_column': _('diagnosed_total'),
        'columns': [
            _('diagnosed_malaria_child'),
            _('diagnosed_malaria_adult'),
            _('diagnosed_diarrhea'),
            _('diagnosed_ari'),
        ]
    },
    {
        'section': _('mc_section_treated_cases'),
        'total_column': _('treated_total'),
        'columns': [
            _('treated_malaria'),
            _('treated_diarrhea'),
            _('treated_ari'),
        ]
    },
    {
        'section': _('mc_section_transfers'),
        'total_column': _('transfer_total'),
        'columns': [
            _('transfer_malnutrition'),
            _('transfer_incomplete_vaccination'),
            _('transfer_danger_signs'),
            _('transfer_prenatal_consult'),
            _('transfer_missing_malaria_meds'),
            _('transfer_other'),
        ]
    },
    {
        'section': _('mc_section_deaths'),
        'total_column': _('deaths_total'),
        'columns': [
            _('deaths_newborn'),
            _('deaths_children'),
            _('deaths_mothers'),
            _('deaths_other'),
        ]
    },
    {
        'section': _('mc_section_health_ed'),
        'columns': [
            _('heath_ed_talks'),
            _('heath_ed_participants'),
        ]
    },
]
# District monthly report = health facility report plus a stock-balance
# section looked up from the weekly summary form.
DISTRICT_MONTHLY_REPORT = HF_MONTHLY_REPORT + [
    {
        'section': _('mc_section_stock_balance'),
        'type': 'form_lookup',
        'xmlns': WEEKLY_SUMMARY_XMLNS,
        'columns': [
            _('form/stock/stock_amox_pink'),
            _('form/stock/stock_amox_green'),
            _('form/stock/stock_ors'),
            _('form/stock/stock_ra_50'),
            _('form/stock/stock_ra_200'),
            _('form/stock/stock_zinc'),
            _('form/stock/stock_coartem_yellow'),
            _('form/stock/stock_coartem_blue'),
            _('form/stock/stock_coartem_green'),
            _('form/stock/stock_coartem_brown'),
            _('form/stock/stock_paracetamol_250'),
            _('form/stock/stock_paracetamol_500'),
            _('form/stock/stock_rdt'),
            _('form/stock/stock_gloves'),
        ]
    },
]
DISTRICT_WEEKLY_REPORT = [
    {
        'section': _('mc_section_home_visits'),
        'total_column': _('home_visits_total'),
        'columns': [
            _('home_visits_newborn'),
            _('home_visits_children'),
            _('home_visits_pregnant'),
            _('home_visits_non_pregnant'),
            _('home_visits_followup'),
        ]
    },
    {
        'section': _('mc_section_deaths_in_community'),
        'columns': [
            _('deaths_children'),
        ]
    },
    {
        'section': _('mc_section_stock_balance'),
        'type': 'form_lookup',
        'xmlns': WEEKLY_SUMMARY_XMLNS,
        'columns': [
            _('form/stock/stock_coartem_yellow'),
            _('form/stock/stock_coartem_blue'),
        ]
    },
    {
        'section': _('mc_section_validation'),
        'columns': [
            {
                'slug': _('patients_given_pneumonia_meds'),
                'columns': ('patients_given_pneumonia_meds_num', 'patients_given_pneumonia_meds_denom'),
            },
            {
                'slug': _('patients_given_diarrhoea_meds'),
                'columns': ('patients_given_diarrhoea_meds_num', 'patients_given_diarrhoea_meds_denom'),
            },
            {
                'slug': _('patients_given_malaria_meds'),
                'columns': ('patients_given_malaria_meds_num', 'patients_given_malaria_meds_denom'),
            },
            {
                'slug': _('patients_correctly_referred'),
                'columns': ('patients_correctly_referred_num', 'patients_correctly_referred_denom'),
            },
            _('cases_rdt_not_done'),
            _('cases_danger_signs_not_referred'),
            _('cases_no_malaria_meds'),
        ]
    },
]
HF_WEEKLY_REPORT = [
    {
        'section': _('mc_section_home_visits'),
        'total_column': _('home_visits_total'),
        'columns': [
            _('home_visits_newborn'),
            _('home_visits_children'),
            _('home_visits_adult'),
        ]
    },
    {
        'section': _('mc_section_transfers'),
        'columns': [
            _('cases_transferred'),
            _('home_visits_followup'),
            {
                'slug': _('patients_given_pneumonia_meds'),
                'columns': ('patients_given_pneumonia_meds_num', 'patients_given_pneumonia_meds_denom'),
            },
            {
                'slug': _('patients_given_diarrhoea_meds'),
                'columns': ('patients_given_diarrhoea_meds_num', 'patients_given_diarrhoea_meds_denom'),
            },
            {
                'slug': _('patients_given_malaria_meds'),
                'columns': ('patients_given_malaria_meds_num', 'patients_given_malaria_meds_denom'),
            },
            {
                'slug': _('patients_correctly_referred'),
                'columns': ('patients_correctly_referred_num', 'patients_correctly_referred_denom'),
            },
            _('cases_rdt_not_done'),
        ]
    },
]
# for now this is just a lookup for translations
HF_WEEKLY_MESSAGES = {
    'msg_children': _('Congratulations! This APE has visited {number} children this week. Call and congratulate them! Please help other supervisors learn from your success.'),
    'msg_pneumonia': _('This APE has treated {number} of patients with the incorrect medicine for pneumonia. Please contact him/her and find out why and provide supportive supervision on use of amoxicillin.'),
    'msg_diarrhoea': _('This APE has treated {number} of patients with the incorrect medicine for diarrhoea. Please contact them and find out why and provide supportive supervision on use of zinc and ORS.'),
    'msg_malaria': _('This APE has treated {number} of patients with the incorrect medicine for malaria. Please contact them and find out why and provide supportive supervision on use of Coartem and Paracetamol.'),
    'msg_good_referrals': _('Congratulations! This APE has correctly referred all children they visited this week. Call those APEs to congratulate them! Please help other supervisors learn from your success.'),
    'msg_bad_referrals': _('This APE incorrectly referred {number} patients they visited this week. Please contact them and find out why and provide supportive supervision on correct referral.'),
    'msg_rdt': _('This APE did not perform a RDT on {number} patients with fever this week. Please contact them and find out why and provide supportive supervision on when to perform a RDT.'),
}
|
puttarajubr/commcare-hq
|
custom/reports/mc/reports/definitions.py
|
Python
|
bsd-3-clause
| 7,380
|
total = 0
with open('day2.txt') as f:
for line in f:
l, w, h = map(int, line.split('x'))
total += l * w * h
total += 2 * (l + w + h - max(l, w, h))
print total
|
BethyDiakabananas/Advent-of-Code
|
Day 2/day2_part2.py
|
Python
|
mit
| 170
|
#sem:name: PDF LaTeX
#sem:tip: Generates a LaTeX project for obtaining PDF files
# Thomas Nagy, 2007-2018 GPLV3
import os, time, shutil, re, getpass
# Additional variables:
# exclude 1
# picdim [width=10cm]
# Configuration handed to the main.tex template; add_globals() below lets the
# host application inject/override entries (see the variables listed above).
settings = {
	'doc_content':'',
	'doc_class':'article',
	'all_latex':False,
	'header_off':'',
	'footer_off':'',
	'header_l':'',
	'header_c':'',
	'header_r':'',
	'footer_l':'',
	'footer_c':r'\thepage/\pageref{LastPage}',
	'footer_r':r'\today',
	'doc_title':'',
	'doc_title_off':'None',
	'doc_date':'',
	'doc_date_off':'None',
	'doc_author':getpass.getuser(),
	'doc_author_off':'None',
	'babel':'english', # frenchb
}
add_globals(settings)
# Output goes to <outdir>/<project name>; an existing folder is renamed with
# its mtime as a timestamp suffix instead of being overwritten.
outdir = sembind.get_var('outdir')+'/'+sembind.get_var('pname')
try:
	tm = time.strftime('.%d-%b-%y--%H-%M-%S', time.gmtime(os.stat(outdir).st_mtime))
	os.rename(outdir, outdir+tm)
except OSError:
	# no previous output folder -- nothing to rename
	pass
try:
	os.makedirs(outdir)
except OSError:
	debug("Cannot create folder " + outdir)
# do the document in latex ?
def tex_convert(s):
	"""Identity conversion, used when the whole document is already LaTeX."""
	return s
# When the document is not pure LaTeX, escape special characters instead.
if not settings.get('all_latex', 0):
	tex_convert = protect_tex
# Make the pictures available: copy every diagram file ("diag-<id>.<ext>")
# from the temp dir into the output dir, preferring the .pdf variant per id.
cwd = os.getcwd()
os.chdir(sembind.get_var('temp_dir'))
try:
	pics = {} # map the id to the picture file name
	for x in os.listdir('.'):
		if x.startswith('diag-'):
			key = x.split('.')[0].replace('diag-', '')
			# idiom fix: was "not key in pics"
			if x.endswith('.pdf') or key not in pics:
				pics[key] = x
			shutil.copy2(x, outdir)
finally:
	# robustness: always restore the working directory, even if a copy fails
	os.chdir(cwd)
buf = []
out = buf.append
def print_nodes(node, niv):
	"""Recursively emit LaTeX for *node* and its children via the out() buffer.

	niv is the nesting depth (0 for the root, which gets no heading). Depth
	selects the sectioning command; book/report classes get \\chapter on top.
	"""
	sm = tex_convert(node.get_val('summary'))
	# depth -> sectioning command (books/reports have one extra level)
	if settings['doc_class'] in ('book', 'report'):
		sections = ('\\chapter', '\\section', '\\subsection', '\\subsubsection', '\\paragraph')
	else:
		sections = ('\\section', '\\subsection', '\\subsubsection', '\\paragraph')
	if 1 <= niv <= len(sections):
		out('%s{%s}\n' % (sections[niv - 1], sm))
	typo = node.get_val('type')
	if typo == 'text':
		out(parse_string(node.get_val('text')))
	elif typo == 'table':
		rows = node.num_rows()
		cols = node.num_cols()
		if rows > 0 and cols > 0:
			# fix: was "caption = caption = node.get_val('summary')" (typo'd
			# double assignment); fall back to the summary when no caption set
			caption = node.get_var('caption') or node.get_val('summary')
			out('\\begin{table}\n')
			out('\\begin{center}\n')
			out('\\begin{tabular}{|%s}' % ('c|'*cols))
			out(' \\hline\n')
			for i in range(rows):
				for j in range(cols):
					cell = tex_convert(node.get_cell(i, j))
					# first row and first column render as bold headers
					if i == 0 or j == 0:
						out('\\textbf{%s}' % cell)
					else:
						out('%s' % cell)
					if j < cols - 1:
						out(" & ")
				out(' \\\\ \\hline\n')
			out('\\end{tabular}\n')
			out('\\end{center}\n')
			out('\\caption{%s}\n' % tex_convert(caption))
			out('\\end{table}\n')
			out('\n')
	elif typo == 'img' or typo == 'diag':
		pic_id = node.get_val('id')  # renamed from "id": avoid shadowing the builtin
		if pic_id in pics:
			# fix: same duplicated assignment as in the table branch
			caption = node.get_var('caption') or node.get_val('summary')
			restrict = node.get_var("picdim")
			if not restrict:
				# no explicit dimensions: cap very wide pictures at 5in
				w = int(node.get_val('pic_w'))
				restrict = ""
				if w > 5*72:
					restrict = "[width=5in]"
			if not restrict:
				restrict = "[width=\\textwidth,height=\\textheight,keepaspectratio]"
			out('\\begin{figure}[htbp]\n')
			out(' \\begin{center}\n')
			out(' \\includegraphics%s{%s}\n' % (restrict, pics[pic_id]))
			out(' \\caption{\\footnotesize{%s}}\n' % tex_convert(caption))
			out('%% %s\n' % protect_tex(node.get_val('pic_location')))
			out('%% %s\n' % node.get_val('pic_w'))
			out('%% %s\n' % node.get_val('pic_h'))
			out(' \\end{center}\n')
			out('\\end{figure}\n')
	for i in range(node.child_count()):
		print_nodes(node.child_num(i), niv+1)
# Fill the title/date/author metadata from the document root, then render.
root = Root()
title = root.get_val('title')
if title:
	settings['doc_title_off']=''
	settings['doc_title']=title
date = r'\today' #root.get_val('date')
if date:
	# NOTE(review): unlike the title/author branches this leaves *_off at
	# 'None' -- confirm against the main.tex template whether that is intended.
	settings['doc_date_off']='None'
	settings['doc_date']=date
author = root.get_val('pname')
if author:
	settings['doc_author_off']=''
	settings['doc_author']=author
# the main document
print_nodes(root, 0);
settings['doc_content'] = "".join(buf)
# now write main.tex
transform("/pdflatex/main.tex", outdir+'/main.tex', settings)
# anciliary files (waf build script + runner)
shutil.copy2(template_dir()+'/pdflatex/wscript', outdir+'/wscript')
shutil.copy2(template_dir()+'/waf', outdir+'/waf')
os.chmod(outdir+'/waf', 0o755)
f = open(outdir + '/run.sh', 'w')
try:
	f.write('#! /bin/sh\npython waf configure build --view\n')
finally:
	f.close()
os.chmod(outdir + '/run.sh', 0o755)
# load the preview on main.tex
visualize('pdflatex', outdir+'/main.tex')
|
ita1024/semantik
|
src/templates/pdflatex.sem.py
|
Python
|
gpl-3.0
| 4,634
|
#!/usr/bin/env python
"""
exons2gff.py <exons file> <gff file> <reference> <source>
"""
import re
import csv
import sys
# Documented column order of a GFF2 row (kept for reference; not used below).
gffFields = [
    'reference',
    'source',
    'type',
    'start',
    'end',
    'score',
    'strand',
    'phase',
    'group'
]

# Map the exon-file direction marker to a GFF strand symbol.
strand = {'>': '+', '<': '-'}


def exons2gff(iFilename, oFilename, reference, source, _type='mRNA', offset=0, grouptype='mRNA'):
    """Convert an exons file to tab-separated GFF.

    Gene lines start with '>' or '<' (direction), followed by start, end and
    name. A '+ start end' line sets the translated (CDS) region of the current
    gene; every other non-comment line is an exon, which is split into
    UTR/CDS rows relative to that region. `offset` is added to every
    coordinate; `_type`/`grouptype` control the feature type and group label.
    """
    iFile = open(iFilename)
    oFile = open(oFilename, 'w')
    try:
        writer = csv.writer(oFile, dialect='excel-tab', doublequote=False)

        # Stuff to remember between lines
        name = None
        direction = None
        cdsStart = None
        cdsEnd = None

        for line in iFile:
            line = line.strip()
            if line == '' or line[0] == '#':  # blank/comment line
                continue
            elif line[0] == '>' or line[0] == '<':  # Start gene
                # next(m) instead of m.next(): works on Python 2.6+ and 3.
                matches = re.finditer(' ', line)
                i1 = next(matches).start()
                i2 = next(matches).start()
                i3 = next(matches).start()
                direction = line[0:i1]
                start = int(line[i1 + 1:i2]) + offset
                end = int(line[i2 + 1:i3]) + offset
                name = line[i3 + 1:]
                group = '%s %s' % (grouptype, name)
                writer.writerow([reference, source, _type, start, end, '.',
                                 strand[direction], '.', group])
                # New gene so blank these
                cdsStart = None
                cdsEnd = None
            elif line[0] == '+':  # Translated region
                x = line.split(' ')
                cdsStart = int(x[1]) + offset
                cdsEnd = int(x[2]) + offset
            else:  # Exon line: "start end"
                x = line.split(' ')
                start = int(x[0]) + offset
                end = int(x[1]) + offset
                if cdsStart is None or cdsEnd is None:
                    # BUG FIX: the original fell through into the comparisons
                    # below with cdsStart/cdsEnd still None, emitting a
                    # spurious UTR row on Python 2 and raising TypeError on
                    # Python 3. With no known CDS, the whole exon is CDS.
                    writer.writerow([reference, source, 'CDS', start, end, '.', strand[direction], '.', group])
                elif end < cdsStart or start > cdsEnd:  # UTR
                    writer.writerow([reference, source, 'UTR', start, end, '.', strand[direction], '.', group])
                elif start > cdsStart and end < cdsEnd:  # CDS
                    writer.writerow([reference, source, 'CDS', start, end, '.', strand[direction], '.', group])
                elif start < cdsStart and end > cdsStart:  # UTR + CDS
                    writer.writerow([reference, source, 'UTR', start, cdsStart - 1, '.', strand[direction], '.', group])
                    writer.writerow([reference, source, 'CDS', cdsStart, end, '.', strand[direction], '.', group])
                elif start < cdsEnd and end > cdsEnd:  # CDS + UTR
                    writer.writerow([reference, source, 'CDS', start, cdsEnd, '.', strand[direction], '.', group])
                    writer.writerow([reference, source, 'UTR', cdsEnd + 1, end, '.', strand[direction], '.', group])
                # NOTE(review): exons that exactly abut the CDS boundary
                # (e.g. start == cdsStart) match none of the branches above
                # and are silently dropped, as in the original -- confirm
                # whether that is intended before changing.
    finally:
        # robustness: the original never closed either file
        iFile.close()
        oFile.close()
if __name__=='__main__':
    # CLI entry point: exons2gff.py <exons file> <gff file> <reference> <source> [offset]
    if '-h' in sys.argv or '--help' in sys.argv or len(sys.argv)<5:
        sys.exit(__doc__)

    try:
        iFilename = sys.argv[1]
        oFilename = sys.argv[2]
        reference = sys.argv[3]
        source = sys.argv[4]
        offset = 0
        if len(sys.argv)==6:
            offset = int(sys.argv[5])
    except Exception:
        # parenthesised form prints identically on Python 2 and 3
        print("Error obtaining args")
        sys.exit(__doc__)

    # BUG FIX: offset was passed positionally and landed in the 5th parameter
    # (_type), so the feature type became the offset value while the actual
    # offset stayed 0. Pass it by keyword instead.
    exons2gff(iFilename, oFilename, reference, source, offset=offset)
|
PapenfussLab/Mungo
|
snippets/exons2gff.py
|
Python
|
artistic-2.0
| 3,622
|
# This is work in progress. The aim of this file is to create a common template
# so the look and feel of all ABM embedded screens is uniform
# screen has always been 600 x 500 so stick with that.
# button size was always 140 x 40 so stick with that.
# use 8 px margin either side of buttons and 4 px margin above and below
# button bar width = 8 + 140 + 8 + 140 + 8 + 140 + 8 + 140 + 8 = 600
# button bar height = 4 + 40 + 4 = 48
from __future__ import print_function
import os
from enigma import getDesktop
# Set this to True to print the embedded skins.
# This debug is printed on enigma2 startup, not when using the plugin.
extraDebug = False
# values common to all templates (pixel values are for the 720px HD design;
# insertValues() below scales them to the real screen height)
fontSize = 22
menuFontSize = fontSize + 2
descriptionsFontSize = fontSize - 2 # hints texts
windowWidth = 600 # button bar needs a minimum of 600
marginTop = 2 # for config lists
marginTopTexts = 10 # for text windows
marginLeft = 8
buttonWidth = 140 # this is the real width of the buttons graphics
buttonHeight = 40 # this is the real height of the buttons graphics
buttonMargin = 8
buttonMarginBottom = 4
configListLength = 15 # minimum 15. changing this should force the window height change in all screens without breaking anything.
configItemHeight = 30
configItemHeightMainMenu = 40
# dynamic variables (derived from the constants above)
windowHeight = (configListLength * configItemHeight) + marginTop + buttonHeight + (buttonMarginBottom * 2) # 500 based on configListLength = 15
widgetWidth = windowWidth - (marginLeft * 2)
# These button colours have been selected specially so anti-aliasing around the button
# text will be done to the correct shade. This is necessary even though the button text
# widget is transparent, to avoid a black halo around the button text.
colours = {"red": 0x9f1313, "green": 0x1f771f, "yellow": 0xa08500, "blue": 0x18188b}
def insertValues(xml, values):
	"""Substitute *values* into the XML template, scaled to the screen height.

	The skin template is designed for a 720px-high (HD) screen, so every
	value is multiplied by screen_height/720 and rounded UP (the double
	negation turns floor division into a ceiling division).
	"""
	# hoisted: one getDesktop() lookup instead of one per value
	screen_height = getDesktop(0).size().height()
	return xml % tuple(int(-(x * screen_height // (-720))) for x in values)
def header():
	"""Return the opening, centred <screen> element sized for this skin."""
	xml = '\n<screen position="center,center" size="%d,%d">'
	return insertValues(xml, [windowWidth, windowHeight])
def footer():
	"""Return the closing tag of the skin's <screen> element."""
	return "\n</screen>"
def buttonBar():
	"""Render the four colour buttons (red/green/yellow/blue) along the bottom edge."""
	buttonFontSize = fontSize + 1
	buttonBarElevation = buttonHeight + buttonMarginBottom
	buttonPath = "%s/images/" % os.path.dirname(os.path.realpath(__file__))
	# One text widget plus one pixmap per colour; both positioned from the
	# bottom edge ("e-%d"). The %d placeholders are filled by insertValues().
	buttonBarXML = ''.join(['\n\t<widget name="key_' + c + '" conditional="key_' + c + '" position="%d,e-%d" size="%d,%d" valign="center" halign="center" font="Regular;%d" backgroundColor="#' + "%x" % colours[c] + '" foregroundColor="#ffffff" transparent="1" zPosition="+2"/>\n\t<ePixmap name="' + c + '" conditional="key_' + c + '" position="%d,e-%d" size="%d,%d" pixmap="' + buttonPath + 'key_' + c + '.png" transparent="1" zPosition="+1" alphatest="on" scale="1"/>' for c in ("red", "green", "yellow", "blue")])
	buttonBarValues = []
	# Nine values per colour: five for the text widget, four for the pixmap.
	for x in range(4):
		buttonBarValues += [buttonMargin + ((buttonWidth + buttonMargin) * x), buttonBarElevation, buttonWidth, buttonHeight, buttonFontSize, buttonMargin + ((buttonWidth + buttonMargin) * x), buttonBarElevation, buttonWidth, buttonHeight]
	return insertValues(buttonBarXML, buttonBarValues)
def templateOne():
	# templateOne is for hidesections and keepbouquets
	# 3-column listbox: selection pixmap, left-aligned name, right-aligned value.
	templateOneHeight = configItemHeight * configListLength # make the template 15 lines high
	templateOneXML = """
	<widget source="list" render="Listbox" position="%d,%d" size="%d,%d" scrollbarMode="showOnDemand">
		<convert type="TemplatedMultiContent">
			{"template": [
				MultiContentEntryPixmapAlphaTest(pos = (%d, %d), size = (%d, %d), flags = BT_SCALE, png = 0),
				MultiContentEntryText(pos = (%d, %d), size = (%d, %d), font=0, flags = RT_HALIGN_LEFT|RT_VALIGN_TOP, text = 1),
				MultiContentEntryText(pos = (%d, %d), size = (%d, %d), font=0, flags = RT_HALIGN_RIGHT|RT_VALIGN_TOP, text = 2),
				],
				"fonts": [gFont("Regular", %d)],
				"itemHeight": %d
			}
		</convert>
	</widget>"""
	templateOneValues = [
		marginLeft, marginTop, widgetWidth, templateOneHeight, # templateOneXML line 1
		2, 1, 25, 24, # templateOneXML line 4
		35, 2, 300, configItemHeight - 2, # templateOneXML line 5
		350, 2, 210, configItemHeight - 2, # templateOneXML line 6
		fontSize,
		configItemHeight
	]
	return insertValues(templateOneXML, templateOneValues)
def templateTwo():
	# template two is for the main menu
	# 2-column listbox: icon pixmap plus menu label, taller rows than templateOne.
	templateTwoWidgetHeight = configItemHeightMainMenu * 11 # make the template 11 lines high. Currently there are 9 menu items.
	templateTwoXML = """
	<widget source="list" render="Listbox" position="%d,%d" size="%d,%d" scrollbarMode="showOnDemand">
		<convert type="TemplatedMultiContent">
			{"template": [
				MultiContentEntryPixmapAlphaTest(pos = (%d, %d), size = (%d, %d), flags = BT_SCALE, png = 0),
				MultiContentEntryText(pos = (%d, %d), size = (%d, %d), font=0, flags = RT_HALIGN_LEFT|RT_VALIGN_TOP, text = 1),
				],
				"fonts": [gFont("Regular", %d)],
				"itemHeight": %d
			}
		</convert>
	</widget>"""
	templateTwoValues = [
		marginLeft, marginTop, widgetWidth, templateTwoWidgetHeight, # templateTwoXML line 1
		2, 4, 32, 32, # templateTwoXML line 4
		44, 4, 530, configItemHeightMainMenu - 4, # templateTwoXML line 5
		menuFontSize,
		configItemHeightMainMenu
	]
	return insertValues(templateTwoXML, templateTwoValues)
def templateThree():
	# template three is for about
	# "oea logo" is fixed size from plugin image folder, 176 x 142
	# (anchored to the bottom-right corner via the "e-%d-..." coordinates)
	templateThreeXML = """
	<widget name="about" conditional="about" position="%d,%d" size="%d,%d" font="Regular;%d" transparent="1"/>
	<widget name="oealogo" conditional="oealogo" position="e-%d-176,e-%d-142" size="176,142" zPosition="-1" transparent="1" alphatest="blend"/>"""
	templateThreeValues = [
		marginLeft, marginTopTexts, widgetWidth, configItemHeight * configListLength, fontSize, # templateThreeXML line 1
		buttonMargin, buttonMarginBottom # templateThreeXML line 2
	]
	return insertValues(templateThreeXML, templateThreeValues)
def templateFour():
	# template four is for ordering
	# Single-column listbox plus a centred "pleasewait" overlay label.
	templateFourHeight = configItemHeight * configListLength # make the template 15 lines high
	templateFourXML = """
	<widget source="list" render="Listbox" position="%d,%d" size="%d,%d" scrollbarMode="showOnDemand">
		<convert type="TemplatedMultiContent">
			{"template": [
				MultiContentEntryText(pos = (%d, %d), size = (%d, %d), font=0, flags = RT_HALIGN_LEFT|RT_VALIGN_TOP, text = 0),
				],
				"fonts": [gFont("Regular", %d)],
				"itemHeight": %d
			}
		</convert>
	</widget>
	<widget name="pleasewait" position="%d,%d" size="%d,%d" font="Regular;%d" halign="center" valign="center" transparent="0" zPosition="+1"/>"""
	templateFourValues = [
		marginLeft, marginTop, widgetWidth, templateFourHeight, # templateFourXML line 1
		2, 2, widgetWidth - 4, configItemHeight - 2, # templateFourXML line 4
		fontSize,
		configItemHeight,
		0, templateFourHeight // 2, widgetWidth, configItemHeight, fontSize # templateFourXML line 11
	]
	return insertValues(templateFourXML, templateFourValues)
def templateFive():
	"""Skin body for the log screen: a single scrolling list widget."""
	xml = '\n\t<widget name="list" position="%d,%d" size="%d,%d" itemHeight="%d" font="Regular;%d" scrollbarMode="showOnDemand"/>'
	return insertValues(xml, [marginLeft, marginTop, widgetWidth,
				configItemHeight * configListLength,
				configItemHeight, fontSize])
def templateSix():
	# template six is for setup
	# Config list on top, 4-line description below, centred "pleasewait" overlay.
	templateSixHeight = configItemHeight * (configListLength - 5) # leave 5 lines for description widget
	templateSixDescHeight = configItemHeight * 4 # make the description 4 lines high
	templateSixXML = """
	<widget name="config" position="%d,%d" size="%d,%d" itemHeight="%d" font="Regular;%d" scrollbarMode="showOnDemand"/>
	<widget name="description" position="%d,%d" size="%d,%d" font="Regular;%d" halign="center" valign="top" transparent="0"/>
	<widget name="pleasewait" position="%d,%d" size="%d,%d" font="Regular;%d" halign="center" valign="center" transparent="0" zPosition="+1"/>"""
	templateSixValues = [
		marginLeft, marginTop, widgetWidth, templateSixHeight, configItemHeight, fontSize, # templateSixXML line 1
		marginLeft, templateSixHeight + configItemHeight, widgetWidth, templateSixDescHeight, descriptionsFontSize, # templateSixXML line 3
		0, templateSixHeight // 2, widgetWidth, configItemHeight, fontSize # templateSixXML line 4 ("pleasewait" widget)
	]
	return insertValues(templateSixXML, templateSixValues)
def downloadBar():
	# download bar is for scanner, frequency finder, update proviers
	# A full-width borderless strip: action text, status text, tuner lock
	# icons, tuner letter, SNR readout and progress text/percentage.
	downloadBarHeight = 36
	textBoxHeight = 30
	textBoxTopMargin = 4
	actionBoxLeftAlign = 7
	actionBoxWidth = 433
	statusBoxLeftAlign = 466
	statusBoxWidth = 433
	lockImageLeftAlign = 929
	lockImageTopMargin = 3
	lockImageWidth = 25
	lockImageHeight = 24
	tunerLetterLeftAlign = 955
	tunerLetterWidth = fontSize
	snrBoxLeftAlign = 980
	snrBoxWidth = 87 # up to 7 chars, e.g. "16.2 dB"
	progressTextBoxLeftAlign = 1080
	progressTextBoxWidth = 87
	progressPercentLeftAlign = 1187
	progressPercentBoxWidth = 73
	downloadBarXML = """
	<screen name="DownloadBar" position="0,0" size="%d,%d" flags="wfNoBorder" backgroundColor="#54111112">
		<widget name="action" position="%d,%d" size="%d,%d" font="Regular;%d" transparent="1" foregroundColor="#ffffff"/>
		<widget name="status" position="%d,%d" size="%d,%d" font="Regular;%d" halign="center" transparent="1" foregroundColor="#ffffff"/>
		<widget source="Frontend" conditional="Frontend" render="Pixmap" pixmap="icons/lock_on.png" position="%d,%d" size="%d,%d" alphatest="on" scale="1">
			<convert type="FrontendInfo">LOCK</convert>
			<convert type="ConditionalShowHide"/>
		</widget>
		<widget source="Frontend" conditional="Frontend" render="Pixmap" pixmap="icons/lock_off.png" position="%d,%d" size="%d,%d" alphatest="on" scale="1">
			<convert type="FrontendInfo">LOCK</convert>
			<convert type="ConditionalShowHide">Invert</convert>
		</widget>
		<widget name="tuner_text" conditional="tuner_text" position="%d,%d" size="%d,%d" font="Regular;%d" halign="center" transparent="1" foregroundColor="#ffffff"/>
		<widget source="Frontend" conditional="Frontend" render="Label" position="%d,%d" size="%d,%d" font="Regular;%d" halign="left" transparent="1" foregroundColor="#ffffff">
			<convert type="FrontendInfo">SNRdB</convert>
		</widget>
		<widget source="progress_text" render="Label" position="%d,%d" size="%d,%d" font="Regular;%d" halign="right" transparent="1" foregroundColor="#ffffff">
			<convert type="ProgressToText">InText</convert>
		</widget>
		<widget source="progress_text" render="Label" position="%d,%d" size="%d,%d" font="Regular;%d" halign="left" transparent="1" foregroundColor="#ffffff">
			<convert type="ProgressToText">InPercent</convert>
		</widget>
	</screen>"""
	downloadBarValues = [
		getDesktop(0).size().width(), downloadBarHeight, # downloadBarXML line 1, "screen" element
		actionBoxLeftAlign, textBoxTopMargin, actionBoxWidth, textBoxHeight, fontSize, # downloadBarXML line 2, "action" widget
		statusBoxLeftAlign, textBoxTopMargin, statusBoxWidth, textBoxHeight, fontSize, # downloadBarXML line 3, "status" widget
		lockImageLeftAlign, lockImageTopMargin, lockImageWidth, lockImageHeight, # downloadBarXML, "lock_on" widget
		lockImageLeftAlign, lockImageTopMargin, lockImageWidth, lockImageHeight, # downloadBarXML, "lock_off" widget
		tunerLetterLeftAlign, textBoxTopMargin, tunerLetterWidth, textBoxHeight, fontSize, # downloadBarXML, "tuner letter" widget
		snrBoxLeftAlign, textBoxTopMargin, snrBoxWidth, textBoxHeight, fontSize, # downloadBarXML, "SNR" widget
		progressTextBoxLeftAlign, textBoxTopMargin, progressTextBoxWidth, textBoxHeight, fontSize, # downloadBarXML, "progress text" widget
		progressPercentLeftAlign, textBoxTopMargin, progressPercentBoxWidth, textBoxHeight, fontSize, # downloadBarXML, "progress percent" widget
	]
	return insertValues(downloadBarXML, downloadBarValues)
# ------------------------------------------------------------------
def skin_mainmenu():
	"""Assemble the main-menu screen skin (button bar + menu list)."""
	skin = "".join((header(), buttonBar(), templateTwo(), footer()))
	if extraDebug:
		print("[ABM-SkinTemplates] skin_mainmenu:", skin)
	return skin
def skin_about():
	"""Assemble the about screen skin (button bar + text/logo layout)."""
	skin = "".join((header(), buttonBar(), templateThree(), footer()))
	if extraDebug:
		print("[ABM-SkinTemplates] skin_about:", skin)
	return skin
def skin_hidesections():
	"""Assemble the hide-sections screen skin (button bar + 3-column list)."""
	skin = "".join((header(), buttonBar(), templateOne(), footer()))
	if extraDebug:
		print("[ABM-SkinTemplates] skin_hidesections:", skin)
	return skin
def skin_keepbouquets():
	"""The keep-bouquets screen reuses the hide-sections layout verbatim."""
	return skin_hidesections()
def skin_log():
	"""Assemble the log screen skin (button bar + plain scrolling list)."""
	skin = "".join((header(), buttonBar(), templateFive(), footer()))
	if extraDebug:
		print("[ABM-SkinTemplates] skin_log:", skin)
	return skin
def skin_ordering():
	"""Assemble the ordering screen skin (button bar + list + wait overlay)."""
	skin = "".join((header(), buttonBar(), templateFour(), footer()))
	if extraDebug:
		print("[ABM-SkinTemplates] skin_ordering:", skin)
	return skin
def skin_setup():
	"""Assemble the setup screen skin (button bar + config list + description)."""
	skin = "".join((header(), buttonBar(), templateSix(), footer()))
	if extraDebug:
		print("[ABM-SkinTemplates] skin_setup:", skin)
	return skin
def skin_downloadBar():
	"""Assemble the full-width download progress bar skin."""
	skin = downloadBar()
	if extraDebug:
		print("[ABM-SkinTemplates] skin_downloadBar:", skin)
	return skin
|
oe-alliance/AutoBouquetsMaker
|
AutoBouquetsMaker/src/skin_templates.py
|
Python
|
gpl-3.0
| 13,277
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .entities_operations import EntitiesOperations
# Explicit public API of this auto-generated operations package.
__all__ = [
    'EntitiesOperations',
]
|
lmazuel/azure-sdk-for-python
|
azure-cognitiveservices-search-entitysearch/azure/cognitiveservices/search/entitysearch/operations/__init__.py
|
Python
|
mit
| 567
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-02 01:22
from __future__ import unicode_literals
import django.db.models.deletion
import jsonfield.fields
from django.conf import settings
from django.db import migrations, models
import openslides.utils.models
class Migration(migrations.Migration):
    """Initial schema for the motions app.

    Creates Category, Motion, MotionLog, MotionOption, MotionPoll,
    MotionVersion, MotionVote, State and Workflow, then wires up the
    cross-model foreign keys that reference models created later in the list.
    Generated by Django; do not edit the operations by hand.
    """

    initial = True
    dependencies = [
        ("mediafiles", "0001_initial"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("core", "0001_initial"),
    ]
    operations = [
        migrations.CreateModel(
            name="Category",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=255)),
                ("prefix", models.CharField(blank=True, max_length=32)),
            ],
            options={"ordering": ["prefix"], "default_permissions": ()},
            bases=(openslides.utils.models.RESTModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name="Motion",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "identifier",
                    models.CharField(
                        blank=True, max_length=255, null=True, unique=True
                    ),
                ),
                ("identifier_number", models.IntegerField(null=True)),
            ],
            options={
                "verbose_name": "Motion",
                "permissions": (
                    ("can_see", "Can see motions"),
                    ("can_create", "Can create motions"),
                    ("can_support", "Can support motions"),
                    ("can_manage", "Can manage motions"),
                ),
                "ordering": ("identifier",),
                "default_permissions": (),
            },
            bases=(openslides.utils.models.RESTModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name="MotionLog",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("message_list", jsonfield.fields.JSONField()),
                ("time", models.DateTimeField(auto_now=True)),
                (
                    "motion",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="log_messages",
                        to="motions.Motion",
                    ),
                ),
                (
                    "person",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={"ordering": ["-time"], "default_permissions": ()},
            bases=(openslides.utils.models.RESTModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name="MotionOption",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                )
            ],
            options={"default_permissions": ()},
            bases=(openslides.utils.models.RESTModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name="MotionPoll",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "votesvalid",
                    openslides.utils.models.MinMaxIntegerField(blank=True, null=True),
                ),
                (
                    "votesinvalid",
                    openslides.utils.models.MinMaxIntegerField(blank=True, null=True),
                ),
                (
                    "votescast",
                    openslides.utils.models.MinMaxIntegerField(blank=True, null=True),
                ),
                (
                    "motion",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="polls",
                        to="motions.Motion",
                    ),
                ),
            ],
            options={"default_permissions": ()},
            bases=(openslides.utils.models.RESTModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name="MotionVersion",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("version_number", models.PositiveIntegerField(default=1)),
                ("title", models.CharField(max_length=255)),
                ("text", models.TextField()),
                ("reason", models.TextField(blank=True, null=True)),
                ("creation_time", models.DateTimeField(auto_now=True)),
                (
                    "motion",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="versions",
                        to="motions.Motion",
                    ),
                ),
            ],
            options={"default_permissions": ()},
            bases=(openslides.utils.models.RESTModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name="MotionVote",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("weight", models.IntegerField(default=1, null=True)),
                ("value", models.CharField(max_length=255, null=True)),
                (
                    "option",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="motions.MotionOption",
                    ),
                ),
            ],
            options={"default_permissions": ()},
            bases=(openslides.utils.models.RESTModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name="State",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=255)),
                ("action_word", models.CharField(max_length=255)),
                ("css_class", models.CharField(default="primary", max_length=255)),
                (
                    "required_permission_to_see",
                    models.CharField(blank=True, max_length=255),
                ),
                ("allow_support", models.BooleanField(default=False)),
                ("allow_create_poll", models.BooleanField(default=False)),
                ("allow_submitter_edit", models.BooleanField(default=False)),
                ("versioning", models.BooleanField(default=False)),
                ("leave_old_version_active", models.BooleanField(default=False)),
                ("dont_set_identifier", models.BooleanField(default=False)),
                ("next_states", models.ManyToManyField(to="motions.State")),
            ],
            options={"default_permissions": ()},
            bases=(openslides.utils.models.RESTModelMixin, models.Model),
        ),
        migrations.CreateModel(
            name="Workflow",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=255)),
                (
                    "first_state",
                    models.OneToOneField(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="+",
                        to="motions.State",
                    ),
                ),
            ],
            options={"default_permissions": ()},
            bases=(openslides.utils.models.RESTModelMixin, models.Model),
        ),
        # Foreign keys added after model creation to resolve forward references.
        migrations.AddField(
            model_name="state",
            name="workflow",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                related_name="states",
                to="motions.Workflow",
            ),
        ),
        migrations.AddField(
            model_name="motionoption",
            name="poll",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, to="motions.MotionPoll"
            ),
        ),
        migrations.AddField(
            model_name="motion",
            name="active_version",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="active_version",
                to="motions.MotionVersion",
            ),
        ),
        migrations.AddField(
            model_name="motion",
            name="attachments",
            field=models.ManyToManyField(blank=True, to="mediafiles.Mediafile"),
        ),
        migrations.AddField(
            model_name="motion",
            name="category",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="motions.Category",
            ),
        ),
        migrations.AddField(
            model_name="motion",
            name="parent",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="amendments",
                to="motions.Motion",
            ),
        ),
        migrations.AddField(
            model_name="motion",
            name="state",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="motions.State",
            ),
        ),
        migrations.AddField(
            model_name="motion",
            name="submitters",
            field=models.ManyToManyField(
                blank=True,
                related_name="motion_submitters",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AddField(
            model_name="motion",
            name="supporters",
            field=models.ManyToManyField(
                blank=True,
                related_name="motion_supporters",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AddField(
            model_name="motion",
            name="tags",
            field=models.ManyToManyField(blank=True, to="core.Tag"),
        ),
        migrations.AlterUniqueTogether(
            name="motionversion", unique_together=set([("motion", "version_number")])
        ),
    ]
|
FinnStutzenstein/OpenSlides
|
server/openslides/motions/migrations/0001_initial.py
|
Python
|
mit
| 12,740
|
"""
Implements the EN track check, described on pp 7 and 21 of
http://www.metoffice.gov.uk/hadobs/en3/OQCpaper.pdf
"""
import numpy as np
import util.main as main
import util.geo as geo
import copy, datetime, math, sys, calendar, sqlite3
# module constants (resolutions used by the EN track check; see the EN3 OQC
# paper referenced in the module docstring)
DistRes = 20000. # meters
TimeRes = 600. # seconds
def test(p, parameters):
    """
    Runs the quality control check on profile p and returns a numpy array
    of quality control decisions with False where the data value has
    passed the check and True where it failed.

    parameters["table"] names the database table holding the profiles; the
    decision for every profile on the same track is written back to it.
    """

    country = p.primary_header['Country code']
    cruise = p.cruise()
    originator_cruise = p.originator_cruise()
    uid = p.uid()

    # don't bother if this has already been analyzed
    # NOTE(review): queries here are assembled by string concatenation, which
    # is only acceptable because table name and uid come from trusted,
    # in-house sources -- never feed untrusted input through this path.
    command = 'SELECT en_track_check FROM ' + parameters["table"] + ' WHERE uid = ' + str(uid) + ';'
    en_track_result = main.dbinteract(command)
    if en_track_result[0][0] is not None:
        en_track_result = main.unpack_row(en_track_result[0])[0]
        result = np.zeros(1, dtype=bool)
        result[0] = np.any(en_track_result)
        return result

    # make sure this profile makes sense in the track check
    if not assess_usability(p):
        return np.zeros(1, dtype=bool)

    # fetch all profiles on track, sorted chronologically, earliest first (None sorted as highest), then by uid
    command = 'SELECT uid, year, month, day, time, lat, long, probe, raw FROM ' + parameters["table"] + ' WHERE cruise = ' + str(cruise) + ' and country = "' + str(country) + '" and ocruise = "' + str(originator_cruise) + '" and year is not null and month is not null and day is not null and time is not null ORDER BY year, month, day, time, uid ASC;'
    track_rows = main.dbinteract(command)

    # avoid inappropriate profiles (tr[8] is the raw WOD text, quoted)
    track_rows = [tr for tr in track_rows if assess_usability_raw(tr[8][1:-1])]

    # start all as passing by default
    EN_track_results = {}
    for i in range(len(track_rows)):
        EN_track_results[track_rows[i][0]] = np.zeros(1, dtype=bool)

    # copy the list of headers;
    # remove entries as they are flagged.
    passed_rows = copy.deepcopy(track_rows)
    rejects = findOutlier(passed_rows, EN_track_results)
    while rejects != []:
        passed_index = [x for x in range(len(passed_rows)) if x not in rejects ]
        passed_rows = [passed_rows[index] for index in passed_index ]
        rejects = findOutlier(passed_rows, EN_track_results)

    # if more than half got rejected, reject everyone
    # NOTE(review): `/ 2` is integer division on Python 2 but float division
    # on Python 3, which shifts this threshold for odd-length tracks --
    # confirm the intended rounding before porting.
    if len(passed_rows) < len(track_rows) / 2:
        for i in range(len(track_rows)):
            EN_track_results[track_rows[i][0]][0] = True

    # write all to db
    result = []
    for i in range(len(track_rows)):
        result.append((main.pack_array(EN_track_results[track_rows[i][0]]), track_rows[i][0]))
    # BUG FIX: this UPDATE previously used sys.argv[1] as the table name while
    # every other query in this function uses parameters["table"]; that broke
    # any caller that does not pass the table on the command line.
    query = "UPDATE " + parameters["table"] + " SET en_track_check=? WHERE uid=?"
    main.interact_many(query, result)

    return EN_track_results[uid]
#def sliceTrack(p, rows, margin=7):
# '''
# remove all table rows from rows whose dates are more than margin days before or after the month that p falls in
# each row has row[1] = year, row[2] = month, row[3] = day
# '''
# m = datetime.timedelta(days=margin)
# earliest = datetime.datetime(p.year(), p.month(), 1) - m
# latest = datetime.datetime(p.year(), p.month(), calendar.monthrange(p.year(), p.month())[1] ) + m
# inrange = []
# for row in rows:
# date = datetime.datetime(row[1], row[2], row[3])
# if date >= earliest and date <= latest:
# inrange.append(row)
# return inrange
def assess_usability(p):
    '''
    Decide whether profile p is a suitable candidate for the track check.
    Returns False when identifying metadata is missing or the platform
    cannot be assessed by this test, True otherwise.
    '''
    # a real cruise number is required
    if p.cruise() in [0, None]:
        return False
    # a complete timestamp is required
    if None in [p.year(), p.month(), p.day(), p.time()]:
        return False
    # country code 99 marks an unknown country
    if str(p.primary_header['Country code']) == '99':
        return False
    # a real originator cruise is required
    if p.originator_cruise() in [0, None]:
        return False
    # profiles with no probe type cannot be assessed; don't raise a flag
    if p.probe_type() is None:
        return False
    # aircraft platforms are out of scope for the track check
    return not isAircraft(p)
def assess_usability_raw(raw):
    '''
    Parse a raw WOD text record and report whether the resulting
    profile is suitable for the track check.
    '''
    profile = main.text2wod(raw)
    return assess_usability(profile)
def isAircraft(profile):
    '''
    Return True when the profile's platform code (secondary header 3)
    matches one of the known aircraft platform codes.
    '''
    aircraft_codes = (2635, 1053, 5178, 6876, 305, 879, 7841, 6743, 2911, 183)
    code = profile.extract_secondary_header(3)
    if code is None:
        # no platform information recorded: not identifiable as aircraft
        return False
    return int(code) in aircraft_codes
def aircraft_raw(raw):
    '''
    Parse a raw WOD text record and report whether its platform
    is an aircraft.
    '''
    profile = main.text2wod(raw)
    return isAircraft(profile)
def findOutlier(rows, results):
    '''
    Locate the fastest leg of the track described by the time-ordered
    list <rows>; if it is implausibly fast, flag the offending profile(s)
    in <results> (keyed by uid, column 0 of each row) and return the list
    of rejected indices. Returns [] once the fastest leg is within limits
    (or when rows is empty).
    '''
    maxShipSpeed = 15.  # m/s
    maxBuoySpeed = 2.   # m/s
    if rows == []:
        return []
    # per-leg speeds and turning angles for the whole track
    speeds, angles = calculateTraj(rows)
    # buoys move much more slowly than ships; pick the limit accordingly
    limit = maxBuoySpeed if isBuoy(rows[0][7]) else maxShipSpeed
    fastest = speeds.index(max(speeds))
    if not detectExcessiveSpeed(speeds, angles, fastest, limit):
        return []
    # decide which of the two profiles bounding the fast leg to drop,
    # record the rejection, and report the dropped indices
    rejects = chooseReject(rows, speeds, angles, fastest, limit)
    for idx in rejects:
        results[rows[idx][0]][0] = True
    return rejects
def chooseReject(rows, speeds, angles, index, maxSpeed):
    '''
    decide which row to reject, rows[index] or rows[index-1], or both,
    and return a list of indices to reject.

    Delegates the decision to the chain condition_a -> ... -> condition_h;
    a return of -1 from the chain means no condition could decide.
    '''
    # chain of tests breaks when a reject is found:
    reject, rejecting_condition = condition_a(rows, speeds, angles, index, maxSpeed)
    # condition i needs to run at the end of the chain in all cases:
    # if no decision, reject both:
    if reject == -1:
        reject = [index-1, index]
    # if excessive speed is created by removing the flag, reject both instead
    # can't create new excessive speed by removing last profile.
    elif reject < len(rows)-1:
        # re-run the speed test on the track with the candidate removed
        new_rows = copy.deepcopy(rows)
        del new_rows[reject]
        newSpeeds, newAngles = calculateTraj(new_rows)
        # NOTE(review): after the deletion, index <reject> in the shortened
        # track refers to the leg joining the candidate's neighbours --
        # presumably intentional (that is the newly created leg), but worth
        # confirming against the reference.
        flag = detectExcessiveSpeed(newSpeeds, newAngles, reject, maxSpeed)
        if flag:
            reject = [index-1, index]
        else:
            reject = [reject]
    else:
        reject = [reject]
    return reject
def calculateTraj(rows):
    '''
    Compute per-leg speeds and turning angles for the time-ordered track
    <rows>. Element i describes the leg arriving at rows[i]; element 0 is
    always None, and the final angle is None (undefined at the endpoints).
    '''
    speeds = [None]
    angles = [None]
    for i in range(1, len(rows)):
        speeds.append(trackSpeed(rows[i - 1], rows[i]))
        if i < len(rows) - 1:
            # deflection from a straight course at point i
            # (lat = column 5, long = column 6)
            angles.append(abs(math.pi - geo.haversineAngle(
                rows[i - 1][5], rows[i - 1][6],
                rows[i][5], rows[i][6],
                rows[i + 1][5], rows[i + 1][6])))
        else:
            # no angle at the last point
            angles.append(None)
    return speeds, angles
def isBuoy(probeindex):
    '''
    Return True when the probe-type code corresponds to a buoy-mounted
    instrument.
    '''
    buoy_codes = (4, 7, 9, 10, 11, 12, 13, 15)
    return probeindex in buoy_codes
def detectExcessiveSpeed(speeds, angles, index, maxSpeed):
    '''
    Report whether the leg at <index> is implausibly fast: either it
    exceeds maxSpeed outright, or it is close to the limit (over 80%)
    while the track turns sharply (over 90 degrees) just before or
    just after the leg.
    '''
    tooFast = speeds[index] > maxSpeed
    if index > 0:
        nearLimit = speeds[index] > 0.8 * maxSpeed
        return tooFast or (nearLimit and
                           (angles[index] > math.pi / 2 or angles[index - 1] > math.pi / 2))
    return tooFast
def meanSpeed(speeds, rows, maxSpeed):
    '''
    determine mean speed, neglecting missing data, intervals less than 1h, and speeds over maxspeed, for use in condition (f)
    '''
    # running sum and count of the legs that survive the filters below
    meanSpeed = 0
    speedCount = 0
    for iSpeed, speed in enumerate(speeds):
        if speed == None or iSpeed == 0:
            #missing values
            continue
        elif iSpeed > 0 and geo.deltaTime((rows[iSpeed-1][1], rows[iSpeed-1][2], rows[iSpeed-1][3], rows[iSpeed-1][4]), (rows[iSpeed][1], rows[iSpeed][2], rows[iSpeed][3], rows[iSpeed][4])):
            #too close together in time
            # NOTE(review): this branch fires whenever geo.deltaTime(...) is
            # truthy, i.e. any non-zero interval -- yet the docstring says only
            # intervals under 1h should be excluded. Confirm the intended
            # threshold (and geo.deltaTime's units) before relying on this.
            continue
        elif speed > maxSpeed:
            #too fast
            continue
        else:
            meanSpeed += speed
            speedCount += 1
    # avoid division by zero when every leg was filtered out
    if speedCount > 0:
        meanSpeed = meanSpeed / speedCount
    return meanSpeed
def trackSpeed(prev_row, row):
    '''
    Speed of the leg from prev_row to row, with the distance and time
    resolution tolerances (DistRes, TimeRes) from the reference applied.
    Returns None when any required field (timestamp, lat, long) is missing.
    '''
    # columns 1-4 are year/month/day/time, 5-6 are lat/long
    required = (1, 2, 3, 4, 5, 6)
    for r in (prev_row, row):
        if any(r[col] is None for col in required):
            return None
    dist = geo.haversineDistance(prev_row[5], prev_row[6], row[5], row[6])
    DTime = geo.deltaTime((prev_row[1], prev_row[2], prev_row[3], prev_row[4]),
                          (row[1], row[2], row[3], row[4]))
    # subtract the distance tolerance and floor the interval at the time
    # resolution so rounding cannot inflate the speed
    return (dist - DistRes) / max(DTime, TimeRes)
def condition_a(rows, speeds, angles, index, maxSpeed):
    '''
    assess condition (a) from the text

    Handles the cases where the flagged leg touches the start or the end
    of the track; otherwise defers to condition (b). Returns a tuple of
    (row index to reject, letter of the deciding condition).
    '''
    if index == 1 and len(rows) == 2:
        # only two profiles: no further context, reject the first
        return 0, 'a'
    elif index == 1 and len(rows) > 2: # note 'M' in the text seems to count from 1, not 0.
        # speed implied by skipping the first profile entirely
        impliedSpeed = trackSpeed(rows[0], rows[2])
        # NOTE(review): impliedSpeed can be None (missing fields); the
        # comparison below only behaves under Python 2 ordering rules.
        if impliedSpeed < maxSpeed and (speeds[2]>maxSpeed or angles[2]>45./180.*math.pi):
            return 1, 'a'
        else:
            return 0, 'a'
    elif index == len(rows)-1 and len(rows)>3: # why not >=? seems to cause problems, investigate.
        impliedSpeed = trackSpeed(rows[-3], rows[-1])
        # NOTE(review): the start-of-track branch pairs speeds[2] with
        # angles[2]; this mirrored branch pairs speeds[-2] with angles[-3] --
        # confirm the intended offset against the reference text.
        if impliedSpeed < maxSpeed and (speeds[-2] > maxSpeed or angles[-3]>45./180.*math.pi):
            return index-1, 'a'
        else:
            return index, 'a'
    else:
        return condition_b(rows, speeds, angles, index, maxSpeed)
def condition_b(rows, speeds, angles, index, maxSpeed):
    '''
    Condition (b): when a leg adjacent to the flagged one is itself too
    fast, the profile they share is the culprit. Falls through to
    condition (c) when neither neighbouring leg is excessive.
    '''
    if speeds[index - 1] > maxSpeed:
        # the preceding leg is also excessive: rows[index-1] is shared
        return index - 1, 'b'
    hasNext = index < len(speeds) - 1
    if hasNext and speeds[index + 1] > maxSpeed:
        # the following leg is also excessive: rows[index] is shared
        return index, 'b'
    return condition_c(rows, speeds, angles, index, maxSpeed)
def condition_c(rows, speeds, angles, index, maxSpeed):
    '''
    Condition (c): skipping over one candidate profile should not itself
    imply an excessive speed; whichever skip stays too fast implicates
    the other candidate. Falls through to condition (d).
    '''
    interior = 0 < index < len(rows) - 1
    if interior:
        # speed implied by jumping over rows[index]
        skipCurrent = trackSpeed(rows[index - 1], rows[index + 1])
        if skipCurrent > maxSpeed:
            return index - 1, 'c'
    if index > 1:
        # speed implied by jumping over rows[index-1]
        skipPrevious = trackSpeed(rows[index - 2], rows[index])
        if skipPrevious > maxSpeed:
            return index, 'c'
    return condition_d(rows, speeds, angles, index, maxSpeed)
def condition_d(rows, speeds, angles, index, maxSpeed):
    '''
    Condition (d): compare the turning angles at the two candidates; the
    one producing a turn sharper by more than 45 degrees is rejected.
    Falls through to condition (e) when neither dominates.
    '''
    prevAngle = angles[index - 1]
    thisAngle = angles[index]
    fortyFive = 45. / 180. * math.pi
    if prevAngle is not None and thisAngle is not None:
        if prevAngle > fortyFive + thisAngle:
            return index - 1, 'd'
        if thisAngle > fortyFive + prevAngle:
            return index, 'd'
    return condition_e(rows, speeds, angles, index, maxSpeed)
def condition_e(rows, speeds, angles, index, maxSpeed):
    '''
    Condition (e): look one step further out along the track. A sharp
    turn (over 45 degrees) two legs back that exceeds the turn one leg
    ahead implicates rows[index-1]; otherwise a sharp turn one leg ahead
    implicates rows[index]. Falls through to condition (f).
    '''
    fortyFive = 45. / 180. * math.pi
    if len(rows) > max(2, index + 1):
        before = angles[index - 2]
        after = angles[index + 1]
        if before is not None and after is not None:
            if before > fortyFive and before > after:
                return index - 1, 'e'
        if after is not None and after > fortyFive:
            return index, 'e'
    return condition_f(rows, speeds, angles, index, maxSpeed)
def condition_f(rows, speeds, angles, index, maxSpeed):
    '''
    Condition (f): compare the legs on the far side of each candidate
    against the track's mean speed; the candidate whose far leg is
    anomalously slow (below half the mean and below the other far leg)
    is rejected. Falls through to condition (g).
    '''
    if 0 < index < len(speeds) - 1:
        avg = meanSpeed(speeds, rows, maxSpeed)
        before = speeds[index - 1]
        after = speeds[index + 1]
        if before is not None and after is not None:
            if before < min([after, 0.5 * avg]):
                return index - 1, 'f'
            if after < min([before, 0.5 * avg]):
                return index, 'f'
    return condition_g(rows, speeds, angles, index, maxSpeed)
def condition_g(rows, speeds, angles, index, maxSpeed):
    '''
    Condition (g): compare the total path length with each candidate
    skipped; whichever removal shortens the track by more than a 10%
    tolerance marks that candidate as the outlier. Falls through to
    condition (h).
    '''
    if 1 < index < len(rows) - 1:
        def leg(a, b):
            # great-circle distance between rows a and b (lat=col 5, long=col 6)
            return geo.haversineDistance(rows[a][5], rows[a][6], rows[b][5], rows[b][6])
        # path length with rows[index-1] skipped: index-2 -> index -> index+1
        distSkipPrev = leg(index, index - 2) + leg(index + 1, index)
        # path length with rows[index] skipped: index-2 -> index-1 -> index+1
        distSkipCurr = leg(index - 1, index - 2) + leg(index + 1, index - 1)
        # tolerance: 10% of the full three-leg path, floored at DistRes
        fullPath = leg(index - 1, index - 2)
        fullPath += leg(index, index - 1)
        fullPath += leg(index + 1, index)
        tolerance = max(DistRes, 0.1 * fullPath)
        if distSkipPrev < distSkipCurr - tolerance:
            return index - 1, 'g'
        if distSkipCurr < distSkipPrev - tolerance:
            return index, 'g'
    return condition_h(rows, speeds, angles, index, maxSpeed)
def condition_h(rows, speeds, angles, index, maxSpeed):
    '''
    assess condition (h) from the text

    Compares each candidate's fractional position along the skip-path in
    space (PD) against its fractional position in time (PT); the candidate
    whose space/time fractions disagree by more than 0.1 beyond the
    other's is rejected. Returns (-1, 'i') when nothing could decide,
    signalling the caller (chooseReject) to reject both candidates.
    '''
    if index > 1 and index < len(rows) - 1:
        # dist1: path skipping rows[index-1]; dist2: path skipping rows[index]
        dist1 = geo.haversineDistance(rows[index][5], rows[index][6], rows[index-2][5], rows[index-2][6]) + geo.haversineDistance(rows[index+1][5], rows[index+1][6], rows[index][5], rows[index][6])
        dist2 = geo.haversineDistance(rows[index-1][5], rows[index-1][6], rows[index-2][5], rows[index-2][6]) + geo.haversineDistance(rows[index+1][5], rows[index+1][6], rows[index-1][5], rows[index-1][6])
        # fractional distance of each candidate along its skip-path
        # NOTE(review): divides by dist1/dist2 and by the total deltaTime below;
        # coincident positions or identical timestamps would raise
        # ZeroDivisionError -- confirm upstream filtering rules this out.
        PD1 = geo.haversineDistance(rows[index-1][5], rows[index-1][6], rows[index-2][5], rows[index-2][6]) / dist2
        PD2 = geo.haversineDistance(rows[index][5], rows[index][6], rows[index-2][5], rows[index-2][6]) / dist1
        # fractional time of each candidate between rows[index-2] and rows[index+1]
        PT1 = geo.deltaTime((rows[index-2][1], rows[index-2][2], rows[index-2][3], rows[index-2][4]), (rows[index-1][1], rows[index-1][2], rows[index-1][3], rows[index-1][4])) / geo.deltaTime((rows[index-2][1], rows[index-2][2], rows[index-2][3], rows[index-2][4]), (rows[index+1][1], rows[index+1][2], rows[index+1][3], rows[index+1][4]))
        PT2 = geo.deltaTime((rows[index-2][1], rows[index-2][2], rows[index-2][3], rows[index-2][4]), (rows[index][1], rows[index][2], rows[index][3], rows[index][4])) / geo.deltaTime((rows[index-2][1], rows[index-2][2], rows[index-2][3], rows[index-2][4]), (rows[index+1][1], rows[index+1][2], rows[index+1][3], rows[index+1][4]))
        if abs(PD1-PT1) > 0.1 + abs(PD2-PT2):
            return index-1, 'h'
        if abs(PD2 - PT2) > 0.1 + abs(PD1 - PT1):
            return index, 'h'
    # no condition decided: caller treats -1 as "reject both" (condition i)
    return -1, 'i'
|
s-good/AutoQC
|
qctests/EN_track_check.py
|
Python
|
mit
| 15,479
|
#!/usr/bin/python2.5
#
# Copyright 2010 the Melange authors.
# Copyright 2009 Jake McGuire.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing a profiler tuned for GAE requests.
"""
__authors__ = [
'"Jake McGuire" <jaekmcguire@gmail.com>',
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
import cProfile
import logging
import os
import ppstats
import random
import string
import zlib
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.api.labs import taskqueue
from soc.profiling import storage
# Pool of characters used to build random 6-character profile keys.
alphanumeric = string.letters + string.digits
# Shared memcache client used to stage profile data before the task
# queue moves it to durable storage.
mc_client = memcache.Client()
class GAEProfiler(object):
    """Profiler class that contains profiling data.

    Wraps a cProfile.Profile: either records a fresh profile for a request
    path, or re-loads a previously stored profile identified by a numeric
    storage key.
    """
    def __init__(self, path=None, key=None):
        """Constructs a new profiler for the given path.

        Args:
          path: request path this profile describes (used when recording).
          key: storage key of an existing profile to load; only used when
            it is a digit string.
        """
        self._profiler = cProfile.Profile()
        self.loaded = False
        # task queue endpoint that persists the staged profile
        self.task_url = '/profiler/store'
        # only numeric keys can refer to stored profiles
        key = int(key) if key and key.isdigit() else None
        data = storage.from_key(key) if key else None
        if data:
            # existing profile: hydrate the stats from the stored gzip blob
            self.profile_key = key
            self.pstats_obj = ppstats.from_gz(data.profile)
            self.path = data.path
            self.loaded = True
        else:
            # fresh profile: generate a random 6-character key
            key = [random.choice(alphanumeric) for x in range(6)]
            self.profile_key = ''.join(key)
            self.pstats_obj = None
            self.path = path
    def get_pstats(self):
        """Return a ppstats object from current profile data.

        The result is cached on the instance. Directory prefixes for the
        GAE SDK, the system Python installation and the application are
        collapsed to short aliases so reports stay readable.
        """
        if self.pstats_obj:
            return self.pstats_obj
        # derive the three directory prefixes to hide from module paths
        gae_base_dir = '/'.join(webapp.__file__.split('/')[:-5])
        sys_base_dir = '/'.join(logging.__file__.split('/')[:-2])
        app_base_dir = '/'.join(storage.__file__.split('/')[:-3])
        logging.info(app_base_dir)
        stats = ppstats.Stats(self._profiler)
        stats.hide_directory(gae_base_dir, 'GAEHome')
        stats.hide_directory(sys_base_dir, 'SysHome')
        stats.hide_directory(app_base_dir, 'AppHome')
        stats.strip_dirs()
        # cache for subsequent calls
        self.pstats_obj = stats
        return stats
    def runcall(self, func, *args, **kwargs):
        """Profile one call, saving stats.

        Returns whatever func returns; the stats are queued for
        persistence as a side effect.
        """
        # drop any cached stats -- they would mask this run's data
        self.pstats_obj = None
        ret = self._profiler.runcall(func, *args, **kwargs)
        self.save_pstats_with_task()
        return ret
    def save_pstats_with_task(self):
        """Save stats from profiler object to memcache.

        Stages the compressed pickled stats in memcache under a key derived
        from profile_key, then enqueues a task on the 'profiler' queue to
        move them to durable storage.
        """
        ps = self.get_pstats()
        output = ps.dump_stats_pickle()
        # compression level 3: cheap, and pickled stats compress well
        compressed_data = zlib.compress(output, 3)
        cache_key = cache_key_for_profile(self.profile_key)
        mc_client.set(cache_key, compressed_data)
        new_task = taskqueue.Task(url=self.task_url, params={
            'key': self.profile_key,
            'path': self.path,
            'user': users.get_current_user(),
            'version': os.environ.get('CURRENT_VERSION_ID'),
        })
        new_task.add('profiler')
        logging.info("Queued pstats save with key '%s' on path '%s' of size %d" % (
            self.profile_key, self.path, len(compressed_data)))
def cache_key_for_profile(profile_key):
    """Build the memcache key under which a profile's staged stats live.
    """
    return "ProfileData.{0}".format(profile_key)
|
MatthewWilkes/mw4068-packaging
|
src/melange/src/soc/profiling/profiler.py
|
Python
|
apache-2.0
| 3,602
|
#!/usr/bin/env python
# Copyright (C) 2009,2010 Junta de Andalucia
#
# Authors:
# Roberto Majadas <roberto.majadas at openshine.com>
# Cesar Garcia Tapia <cesar.garcia.tapia at openshine.com>
# Luis de Bethencourt <luibg at openshine.com>
# Pablo Vieytes <pvieytes at openshine.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
import gobject
import os
import subprocess
from twisted.internet import reactor
from time import localtime, strftime
# Application identifiers shared with the rest of the daemon; SESSION_APPID
# covers the whole session, the others map to the service groups below.
(
SESSION_APPID,
WEB_APPID,
MAIL_APPID,
IM_APPID) = range(4)
# Lowercase weekday keys mapped to the capitalized names iptables expects.
# NOTE(review): not referenced in this file -- presumably consumed elsewhere.
iptables_weekdays = {"mon" : "Mon",
                     "tue" : "Tue",
                     "wed" : "Wed",
                     "thu" : "Thu",
                     "fri" : "Fri",
                     "sat" : "Sat",
                     "sun" : "Sun"}
# TCP ports (comma-separated) filtered for each application id:
# web (HTTP/HTTPS/alt-HTTP), mail (SMTP/POP/IMAP variants), IM (MSN/XMPP).
services_ports = {WEB_APPID : "80, 443, 8080",
                  MAIL_APPID : "25, 110, 109, 995, 143, 220, 993, 465",
                  IM_APPID : "1863, 5222, 5269",
                  }
class Win32Firewall(gobject.GObject) :
    # Thin wrapper around the Windows IPsec command line tools.
    # Detects which tool is available (ipseccmd.exe on XP, ipsecpol.exe on
    # 2000, "netsh ipsec static" on 7) and exposes start/stop plus
    # per-service block/unblock of the TCP ports in services_ports.
    def __init__(self) :
        gobject.GObject.__init__(self)
        # last block state applied per app id (None == not yet set)
        self.fw_status = [None, None, None, None]
        self.platform = None
        if self.__find_in_path("ipseccmd.exe") != None :
            self.platform = "xp"
            self.fw = "ipseccmd.exe"
        elif self.__find_in_path("ipsecpol.exe") != None :
            self.platform = "2000"
            self.fw = "ipsecpol.exe"
        else:
            # probe netsh for ipsec support (Windows 7 path)
            p = subprocess.Popen(["netsh", "ipsec"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            if p.wait () != 0 :
                pass
            else:
                self.platform = "7"
                self.fw = "netsh ipsec static"
    def get_platform(self):
        # None means no usable firewall tool was found on this system
        return self.platform
    def set_appid_fwstatus(self, appid, block):
        # Block (block=True) or allow (block=False) the ports of one service.
        # Skip redundant work when the requested state is already applied.
        if self.fw_status[appid] != None and self.fw_status[appid] == block :
            return
        if self.platform == "xp" or self.platform == "2000":
            # build one -f filter argument per port of this service
            port_params = ''
            for port in services_ports[appid].replace(" ","").split(",") :
                port_params = port_params + '-f 0=*:%s:TCP ' % port
            block_param = "-n BLOCK "
            if block == False:
                block_param = "-n PASS"
            os.system('%s -x -w REG -p "nanny_firewall" -r "nanny_appid_%s" %s %s > NUL' % (self.fw, appid, port_params, block_param))
            if self.fw_status[appid] != block :
                appid_name = { 1: "WEB", 2: "MAIL", 3: "IM"}
                print "[W32Filtering] %s ports block == %s" % (appid_name[appid], block)
                self.fw_status[appid] = block
        elif self.platform == '7' :
            # on Windows 7 we switch the rule's filter action between the
            # pre-created nanny_block / nanny_permit actions (see start())
            block_param = "nanny_block"
            if block == False:
                block_param = "nanny_permit"
            appid_name = { 1: "WEB", 2: "MAIL", 3: "IM"}
            appid_netsh_id = { 1: "nanny_web", 2: "nanny_mail", 3: "nanny_im"}
            os.system("%s set rule name=%s_r policy=nanny_policy filterlist=%s_fl filteraction=%s > NUL" % (self.fw,
                                                                                                            appid_netsh_id[appid],
                                                                                                            appid_netsh_id[appid],
                                                                                                            block_param))
            print "[W32Filtering] %s ports block == %s" % (appid_name[appid], block)
            self.fw_status[appid] = block
    def start(self):
        # Install the firewall policy scaffolding and start with every
        # service allowed.
        if self.platform == "xp" or self.platform == "2000":
            os.system('%s -w reg -p "nanny_firewall" -y > NUL' % self.fw)
            os.system('%s -w reg -p "nanny_firewall" -o > NUL' % self.fw)
            self.set_appid_fwstatus(WEB_APPID, False)
            self.set_appid_fwstatus(MAIL_APPID, False)
            self.set_appid_fwstatus(IM_APPID, False)
        elif self.platform == "7" :
            # create one filter list per service, the two filter actions,
            # the per-port filters, and the policy with permissive rules
            os.system("%s add filterlist name=nanny_web_fl > NUL" % self.fw)
            os.system("%s add filterlist name=nanny_mail_fl > NUL" % self.fw)
            os.system("%s add filterlist name=nanny_im_fl > NUL" % self.fw)
            os.system("%s add filteraction name=nanny_block action=block > NUL" % self.fw)
            os.system("%s add filteraction name=nanny_permit action=permit > NUL" % self.fw)
            for port in services_ports[WEB_APPID].replace(" ","").split(",") :
                os.system("%s add filter filterlist=nanny_web_fl srcaddr=me dstaddr=any protocol=TCP srcport=0 dstport=%s > NUL" % (self.fw, port) )
            for port in services_ports[MAIL_APPID].replace(" ","").split(",") :
                os.system("%s add filter filterlist=nanny_mail_fl srcaddr=me dstaddr=any protocol=TCP srcport=0 dstport=%s > NUL" % (self.fw, port) )
            for port in services_ports[IM_APPID].replace(" ","").split(",") :
                os.system("%s add filter filterlist=nanny_im_fl srcaddr=me dstaddr=any protocol=TCP srcport=0 dstport=%s > NUL" % (self.fw, port) )
            os.system("%s add policy name=nanny_policy assign=yes > NUL" % self.fw)
            os.system("%s add rule name=nanny_web_r policy=nanny_policy filterlist=nanny_web_fl filteraction=nanny_permit > NUL" % self.fw)
            os.system("%s add rule name=nanny_mail_r policy=nanny_policy filterlist=nanny_mail_fl filteraction=nanny_permit > NUL" % self.fw)
            os.system("%s add rule name=nanny_im_r policy=nanny_policy filterlist=nanny_im_fl filteraction=nanny_permit > NUL" % self.fw)
        else:
            # no supported firewall tool: nothing to do
            pass
    def stop(self):
        # Tear down everything start() created, in reverse order.
        if self.platform == "xp" or self.platform == "2000":
            os.system('%s -w reg -p "nanny_firewall" -y > NUL' % self.fw)
            os.system('%s -w reg -p "nanny_firewall" -o > NUL' % self.fw)
        elif self.platform == "7" :
            os.system("%s del rule name=nanny_web_r > NUL" % self.fw)
            os.system("%s del rule name=nanny_mail_r > NUL" % self.fw)
            os.system("%s del rule name=nanny_im_r > NUL" % self.fw)
            os.system("%s del policy name=nanny_policy > NUL" % self.fw)
            for port in services_ports[WEB_APPID].replace(" ","").split(",") :
                os.system("%s del filter filterlist=nanny_web_fl srcaddr=me dstaddr=any protocol=TCP srcport=0 dstport=%s > NUL" % (self.fw, port) )
            for port in services_ports[MAIL_APPID].replace(" ","").split(",") :
                os.system("%s del filter filterlist=nanny_mail_fl srcaddr=me dstaddr=any protocol=TCP srcport=0 dstport=%s > NUL" % (self.fw, port) )
            for port in services_ports[IM_APPID].replace(" ","").split(",") :
                os.system("%s del filter filterlist=nanny_im_fl srcaddr=me dstaddr=any protocol=TCP srcport=0 dstport=%s > NUL" % (self.fw, port) )
            os.system("%s del filteraction name=nanny_block > NUL" % self.fw)
            os.system("%s del filteraction name=nanny_permit > NUL" % self.fw)
            os.system("%s del filterlist name=nanny_web_fl > NUL" % self.fw)
            os.system("%s del filterlist name=nanny_mail_fl > NUL" % self.fw)
            os.system("%s del filterlist name=nanny_im_fl > NUL" % self.fw)
        else:
            pass
    def __find_in_path(self, program):
        # Return the full path of <program> if it is on PATH, else None.
        for dir in os.environ["PATH"].split(";") :
            if os.path.exists(os.path.join(dir, program)) :
                return os.path.join(dir, program)
        return None
class Win32Filtering(gobject.GObject) :
    # Drives Win32Firewall from the quarterback's per-user block lists.
    # Hooks the twisted reactor lifecycle and re-evaluates once per second.
    def __init__(self, quarterback) :
        gobject.GObject.__init__(self)
        self.quarterback = quarterback
        # bring the firewall up/down with the reactor
        reactor.addSystemEventTrigger("before", "startup", self.start)
        reactor.addSystemEventTrigger("before", "shutdown", self.stop)
    def start(self):
        print "Start Win32 Filtering"
        self.win32fw = Win32Firewall()
        if self.win32fw.get_platform() != None:
            print "[W32Filtering] found Windows %s fw_tool" % self.win32fw.get_platform()
            self.win32fw.start()
            # poll the block lists every second
            gobject.timeout_add(1000, self.__update_rules)
    def stop(self):
        print "Stop Win32 Filtering"
        if self.win32fw.get_platform() != None:
            self.win32fw.stop()
    def __update_rules(self):
        # Periodic callback: returning True keeps the gobject timeout alive.
        if self.win32fw.get_platform() == None:
            return True
        # session uid 0 == nobody logged in: allow everything
        if self.quarterback.win32top.get_current_user_session() == 0 :
            self.win32fw.set_appid_fwstatus(WEB_APPID, False)
            self.win32fw.set_appid_fwstatus(MAIL_APPID, False)
            self.win32fw.set_appid_fwstatus(IM_APPID, False)
            return True
        session_uid = str(self.quarterback.win32top.get_current_user_session())
        blocks = self.quarterback.blocks
        for user_id in blocks.keys() :
            # only the currently logged-in user's rules apply
            if int(user_id) == int(session_uid):
                for app_id in blocks[user_id].keys() :
                    # SESSION_APPID governs the session itself, not ports
                    if app_id == SESSION_APPID :
                        continue
                    # out of allotted time: hard block
                    if self.quarterback.get_available_time(user_id, app_id) == 0 :
                        self.win32fw.set_appid_fwstatus(int(app_id), True)
                        continue
                    try:
                        block_status, next_block = self.quarterback.is_blocked(user_id, app_id)
                    except:
                        # best-effort: fail open when the schedule lookup breaks
                        print "[W32Filtering] Fail getting self.quarterback.is_blocked"
                        block_status = False
                    if block_status == True :
                        self.win32fw.set_appid_fwstatus(int(app_id), True)
                    else:
                        self.win32fw.set_appid_fwstatus(int(app_id), False)
        return True
# Register both classes with the GObject type system.
gobject.type_register(Win32Filtering)
gobject.type_register(Win32Firewall)
|
hychen/gnome-nanny
|
daemon/src/Win32Filtering.py
|
Python
|
gpl-2.0
| 10,565
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
""" tests for sisl/geom """
|
zerothi/sisl
|
sisl/geom/tests/__init__.py
|
Python
|
mpl-2.0
| 228
|
#!/usr/bin/env ${python_interpreter}
import os
import sys
import unittest
import numpy
# This is a hack for nw. What this does is, add the build directory to python's
# path, so that it can find the module mtca4u.
sys.path.insert(0,os.path.abspath(os.curdir))
import mtca4u
import versionnumbers as vn
#to lock the mtcadummy driver against simulteneous usage by other tests
import fcntl
class TestPCIEDevice(unittest.TestCase):
# TODO: Refactor to take care of the harcoded values used for comparisions
    def setUp(self):
        # Point the library at the test crate's dmap file before each test.
        mtca4u.set_dmap_location("deviceInformation/exampleCrate.dmap")
    def testRead(self):
        """Exercise Device.read on devices opened via sdm URI and via dmap
        alias, both with and without a module ('BOARD') prefix."""
        self.__prepareDataOnCards()
        device = mtca4u.Device("sdm://./pci:mtcadummys1=deviceInformation/mtcadummy.map")
        self.__testRead(device, "", device.read)
        device = mtca4u.Device("sdm://./pci:mtcadummys1=deviceInformation/modular_mtcadummy.map")
        self.__testRead(device, "BOARD", device.read)
        device = mtca4u.Device("CARD_WITH_MODULES")
        self.__testRead(device, "BOARD", device.read)
        device = mtca4u.Device("CARD_WITH_OUT_MODULES")
        self.__testRead(device, "", device.read)
    def testWrite(self):
        """Exercise Device.write on the same four device/module combinations
        as testRead."""
        device = mtca4u.Device("sdm://./pci:mtcadummys1=deviceInformation/mtcadummy.map")
        self.__testWrite(device, "", device.write)
        device = mtca4u.Device("sdm://./pci:mtcadummys1=deviceInformation/modular_mtcadummy.map")
        self.__testWrite(device, "BOARD", device.write)
        device = mtca4u.Device("CARD_WITH_MODULES")
        self.__testWrite(device, "BOARD", device.write)
        device = mtca4u.Device("CARD_WITH_OUT_MODULES")
        self.__testWrite(device, "", device.write)
    def testReadRaw(self):
        """Same coverage as testRead but through the raw (fixed-point
        unconverted) read path."""
        self.__prepareDataOnCards()
        device = mtca4u.Device("sdm://./pci:mtcadummys1=deviceInformation/mtcadummy.map")
        self.__testRead(device, "", device.read_raw)
        device = mtca4u.Device("sdm://./pci:mtcadummys1=deviceInformation/modular_mtcadummy.map")
        self.__testRead(device, "BOARD", device.read_raw)
        device = mtca4u.Device("CARD_WITH_OUT_MODULES")
        self.__testRead(device, "", device.read_raw)
        device = mtca4u.Device("CARD_WITH_MODULES")
        self.__testRead(device, "BOARD", device.read_raw)
    def testwriteRaw(self):
        """Same coverage as testWrite but through the raw write path."""
        device = mtca4u.Device("sdm://./pci:mtcadummys1=deviceInformation/mtcadummy.map")
        self.__testWrite(device, "", device.write_raw)
        device = mtca4u.Device("sdm://./pci:mtcadummys1=deviceInformation/modular_mtcadummy.map")
        self.__testWrite(device, "BOARD", device.write_raw)
        device = mtca4u.Device("CARD_WITH_OUT_MODULES")
        self.__testWrite(device, "", device.write_raw)
        device = mtca4u.Device("CARD_WITH_MODULES")
        self.__testWrite(device, "BOARD", device.write_raw)
    def testreadDMARaw(self):
        """Exercise the DMA raw-read helper on all four device/module
        combinations."""
        device = mtca4u.Device("sdm://./pci:mtcadummys1=deviceInformation/mtcadummy.map")
        self.__testreadDMARaw(device, "")
        device = mtca4u.Device("sdm://./pci:mtcadummys1=deviceInformation/modular_mtcadummy.map")
        self.__testreadDMARaw(device, "BOARD")
        device = mtca4u.Device("CARD_WITH_OUT_MODULES")
        self.__testreadDMARaw(device, "")
        device = mtca4u.Device("CARD_WITH_MODULES")
        self.__testreadDMARaw(device, "BOARD")
    def testReadSequences(self):
        """Exercise multiplexed-sequence reads on all four device/module
        combinations."""
        device = mtca4u.Device("sdm://./pci:mtcadummys1=deviceInformation/mtcadummy.map")
        self.__testSequences(device, "")
        device = mtca4u.Device("sdm://./pci:mtcadummys1=deviceInformation/modular_mtcadummy.map")
        self.__testSequences(device, "BOARD")
        device = mtca4u.Device("CARD_WITH_OUT_MODULES")
        self.__testSequences(device, "")
        device = mtca4u.Device("CARD_WITH_MODULES")
        self.__testSequences(device, "BOARD")
# http://stackoverflow.com/questions/4219717/how-to-assert-output-with-nosetest-unittest-in-python
    def testGetInfo(self):
        """Check that get_info prints the module and deviceaccess version
        banner expected from the versionnumbers module."""
        # StringIO moved to io in Python 3; support both
        try:
            from StringIO import StringIO
        except ImportError:
            from io import StringIO
        expectedString = "mtca4uPy v" +vn.moduleVersion + ", linked with mtca4u-deviceaccess v"+ vn.deviceaccessVersion
        outStream = StringIO()
        mtca4u.get_info(outStream)
        returnedString = outStream.getvalue().strip()
        self.assertTrue(expectedString == returnedString)
    def testException(self):
        """A raw write whose byte count exceeds the supplied array size must
        raise RuntimeError."""
        device = mtca4u.mtca4udeviceaccess.createDevice("sdm://./pci:mtcadummys1=deviceInformation/modular_mtcadummy.map")
        array = numpy.array([1, 2, 3, 4], dtype = numpy.int32)
        # ask to write one byte more than the array holds
        self.assertRaisesRegexp(RuntimeError, "size to write is more than the supplied array size",
                                device.writeRaw, 'BOARD.WORD_STATUS', 0, array, (array.size * 4) + 1)
    def testDeviceCreation(self):
        """Constructor misuse: bad alias, bad map file, wrong argument
        counts, and a missing dmap file must all raise."""
        with self.assertRaises(RuntimeError):
            mtca4u.Device("NON_EXISTENT_ALIAS_NAME") # Not testing for the printed
                                                     # message. This comes from the
                                                     # device access library and the
                                                     # text has been chainging to
                                                     # often/ can change in the
                                                     # future. What is being
                                                     # prioritized is to check that we
                                                     # have an exception for an
                                                     # incorrect usage.
        self.assertRaisesRegexp(RuntimeError, "Cannot open file \"NON_EXISTENT_MAPFILE\"", mtca4u.Device,
                                "sdm://./pci:mtcadummys1=NON_EXISTENT_MAPFILE")
        # zero arguments and three arguments are both invalid signatures
        self.assertRaisesRegexp(SyntaxError, "Syntax Error: please see help\(mtca4u.Device\) for usage instructions.",
                                mtca4u.Device)
        self.assertRaisesRegexp(SyntaxError, "Syntax Error: please see help\(mtca4u.Device\) for usage instructions.",
                                mtca4u.Device, "BogusText", "BogusText", "BogusText")
        # clearing the dmap location makes alias lookup fail
        # NOTE(review): the saved dmapFilePath is never restored afterwards --
        # later tests rely on setUp() resetting it.
        dmapFilePath = mtca4u.get_dmap_location()
        mtca4u.set_dmap_location("")
        with self.assertRaises(RuntimeError):
            mtca4u.Device("CARD_WITH_OUT_MODULES")
    def testSetGetDmapfile(self):
        """get_dmap_location must return the path set in setUp()."""
        # set by the test setUp method
        self.assertTrue(mtca4u.get_dmap_location() == "deviceInformation/exampleCrate.dmap")
"""
The idea here is to preset data on registers that is then read in and
verified later. The following registers on each card are set:
- WORD_STATUS (Offset: 8)
- WORD_CLK_MUX (Offset: 32)
- WORD_INCOMPLETE_2 (Offset: 100)
The memory map for each device has been kept identical. The map files all
contain unique register names which are at the same address on each card
(despite being in different modules on individual cards).
A copy of the data that gets written is stored in these variables:
- word_status_content
- word_clk_mux_content
- word_incomplete_content
"""
    def __prepareDataOnCards(self):
        # Generate fixture arrays and write them to both dummy cards.
        self.__prepareDataToWrite()
        self.__writeDataToDevices()
    def __prepareDataToWrite(self):
        # Random int32 fixtures for WORD_STATUS (1 word) and WORD_CLK_MUX
        # (4 words); WORD_INCOMPLETE_2 gets the fixed raw value 544, which
        # reads back as 2.125 through the fixed-point conversion.
        self.word_status_content = self.__createRandomArray(1)
        self.word_clk_mux_content = self.__createRandomArray(4)
        self.word_incomplete_2_content = numpy.array([544], dtype = numpy.int32)
    def __writeDataToDevices(self):
        # Test Read from a module register
        # set up the register with a known values
        device = mtca4u.mtca4udeviceaccess.createDevice("sdm://./pci:mtcadummys1=deviceInformation/modular_mtcadummy.map")
        self.__preSetValuesOnCard(device, True)
        # second dummy card uses the flat (module-less) map file
        device = mtca4u.mtca4udeviceaccess.createDevice("sdm://./pci:llrfdummys4=deviceInformation/mtcadummy.map")
        self.__preSetValuesOnCard(device)
def __createRandomArray(self, arrayLength):
array = numpy.random.randint(0, 1073741824, arrayLength)
return array.astype(numpy.int32)
    def __preSetValuesOnCard(self, device, modular=False):
        # Write the prepared fixture arrays to one card; register names get
        # a 'BOARD.' prefix when the card uses the modular map file.
        if modular == False:
            word_status = 'WORD_STATUS'
            word_clk_mux = 'WORD_CLK_MUX'
            word_incomplete_2 = 'WORD_INCOMPLETE_2'
        else:
            word_status = 'BOARD.WORD_STATUS'
            word_clk_mux = 'BOARD.WORD_CLK_MUX'
            word_incomplete_2 = 'BOARD.WORD_INCOMPLETE_2'
        bytesToWrite = self.word_status_content.size * 4 # 1 32 bit word -> 1 element
        device.writeRaw(word_status, 0, self.word_status_content,
                        bytesToWrite)
        bytesToWrite = self.word_clk_mux_content.size * 4
        device.writeRaw(word_clk_mux, 0, self.word_clk_mux_content, bytesToWrite)
        bytesToWrite = self.word_incomplete_2_content.size * 4
        device.writeRaw(word_incomplete_2, 0, self.word_incomplete_2_content, bytesToWrite)
    def __testRead(self, device, module, readCommand):
        # Shared body for testRead/testReadRaw: verify the preset fixture
        # values come back through <readCommand> (device.read or
        # device.read_raw), plus slicing, offsets and error cases.
        dtype = self.__getDtypeToUse(device, readCommand)
        word_status_content = self.word_status_content.astype(dtype)
        word_clk_mux_content = self.word_clk_mux_content.astype(dtype)
        # Test the read from module functionality
        readInValues = readCommand(str(module), "WORD_STATUS")
        self.assertTrue(readInValues.dtype == dtype)
        self.assertTrue(numpy.array_equiv(readInValues, word_status_content))
        # same register addressed through the registerPath keyword
        readInValues = readCommand(registerPath = '/' + str(module)+ "/WORD_STATUS")
        self.assertTrue(readInValues.dtype == dtype)
        self.assertTrue(numpy.array_equiv(readInValues, word_status_content))
        # This section checks the read register code for the Device class
        # check if function reads values correctly
        # Run this only for device.read and not device.read_raw
        if readCommand == device.read:
            # raw 544 converts to 2.125 through WORD_INCOMPLETE_2's fixed-point settings
            readInValues = readCommand(str(module), "WORD_INCOMPLETE_2")
            self.assertTrue(readInValues.dtype == dtype)
            self.assertTrue(readInValues.tolist() == [2.125])
        readInValues = readCommand(str(module), "WORD_CLK_MUX")
        self.assertTrue(readInValues.dtype == dtype)
        self.assertTrue(numpy.array_equiv(readInValues, word_clk_mux_content))
        # partial reads: (count), (count, offset) variants
        readInValues = readCommand(str(module), "WORD_CLK_MUX", 1)
        self.assertTrue(readInValues[0] == word_clk_mux_content[0])
        readInValues = readCommand(str(module), "WORD_CLK_MUX", 1, 2)
        self.assertTrue(readInValues[0] == word_clk_mux_content[2])
        readInValues = readCommand(str(module), "WORD_CLK_MUX", 2, 2)
        self.assertTrue(numpy.array_equiv(readInValues, word_clk_mux_content[2:]))
        # check for corner cases
        # Register Not Found
        # hack
        exceptionMessage = self.__returnRegisterNotFoundExceptionMsg(module, "BAD_REGISTER_NAME")
        self.assertRaisesRegexp(RuntimeError, exceptionMessage, readCommand, str(module),
                                "BAD_REGISTER_NAME")
        # Num of elements specified is more than register size
        registerName = "WORD_CLK_MUX"
        elementsToRead = 5
        offset = 2
        self.assertRaises(RuntimeError, readCommand, str(module) ,registerName, elementsToRead, offset)
        # bad value for number of elements
        self.assertRaises(OverflowError,
                          readCommand,
                          str(module),
                          registerName,
                          numberOfElementsToRead=-1)
        # offset exceeds register size
        offset = 5
        elementsToRead = 5
        self.assertRaises(RuntimeError,
                          readCommand, str(module),
                          registerName, elementIndexInRegister = offset)
def __testWrite(self, device, module, writeCommand ):
    """Exercise *writeCommand* (Device.write or Device.write_raw) against the
    dummy card and verify each write by reading the value back.

    NOTE(review): the indentation of this block was reconstructed; the
    fixed-point and scalar-input tests are assumed to run only for
    device.write (as the inline comments state) -- confirm against the
    original file.
    """
    module = str(module)
    # Expected dtype of read-back data: float64 for cooked, int32 for raw.
    dtype = self.__getDtypeToUse(device, writeCommand)
    # Read back through the matching abstraction level.
    if writeCommand == device.write:
        readCommand = device.read
    else:
        readCommand = device.read_raw
    word_status_content = self.__createRandomArray(1).astype(dtype)
    word_clk_mux_content = self.__createRandomArray(4).astype(dtype)
    # Round-trip addressed by (module, register name).
    writeCommand(module, "WORD_STATUS", word_status_content)
    readInValues = readCommand(module, "WORD_STATUS")
    self.assertTrue(readInValues.dtype == dtype)
    self.assertTrue(numpy.array_equiv(readInValues, word_status_content))
    # test register path
    writeCommand(registerPath = '/' + str(module) + '/WORD_STATUS', dataToWrite=word_status_content)
    readInValues = readCommand(module, "WORD_STATUS")
    self.assertTrue(readInValues.dtype == dtype)
    self.assertTrue(numpy.array_equiv(readInValues, word_status_content))
    # These set of commands will be run for Device.write only
    word_incomplete_register = "WORD_INCOMPLETE_2"
    if writeCommand == device.write:
        # write to WORD_INCOMPLETE_2, this is 13 bits wide and supports 8
        # fractional bits
        # check the write functionality
        # check functionalty when using dtype numpy.float32
        writeCommand(module, word_incomplete_register,
                     numpy.array([2.125], dtype))
        readInValue = readCommand(module, word_incomplete_register)
        self.assertTrue(readInValue.dtype == numpy.float64)
        self.assertTrue(readInValue.tolist() == [2.125])
        # check functionalty when using dtype numpy.float64
        writeCommand(module, word_incomplete_register,
                     numpy.array([3.125], dtype = numpy.float64))
        readInValue = readCommand(module, word_incomplete_register)
        self.assertTrue(readInValue.dtype == dtype)
        self.assertTrue(readInValue.tolist() == [3.125])
        # check functionalty when using dtype numpy.int32
        writeCommand(module, word_incomplete_register,
                     numpy.array([2], dtype = numpy.int32))
        readInValue = readCommand(module, word_incomplete_register)
        self.assertTrue(readInValue.dtype == dtype)
        self.assertTrue(readInValue.tolist() == [2.])
        # check functionalty when using dtype numpy.int64
        writeCommand(module, word_incomplete_register,
                     numpy.array([25], dtype = numpy.int64))
        readInValue = readCommand(module, word_incomplete_register)
        self.assertTrue(readInValue.dtype == dtype)
        self.assertTrue(readInValue.tolist() == [15.99609375]) # This is the
        # valid fp converted
        # value of int 25
        # for this reg
        writeCommand(module, word_incomplete_register,[2.5])
        readInValues = readCommand(module, word_incomplete_register)
        self.assertTrue(readInValues.tolist() == [2.5])
        # continue tests for checking if method accepts int/float/list/numpyarray as valid dataToWrite
        # input a list
        writeCommand(module, "WORD_CLK_MUX", word_status_content, 1)
        readInValues = readCommand(module, "WORD_CLK_MUX", 1, 1)
        self.assertTrue(numpy.array_equiv(readInValues, word_status_content))
        writeCommand(module, word_incomplete_register, 3.5)
        readInValues = readCommand(module, word_incomplete_register)
        self.assertTrue(readInValues.tolist() == [3.5])
        writeCommand(module, word_incomplete_register, 14)
        readInValues = readCommand(module, word_incomplete_register)
        self.assertTrue(readInValues.tolist() == [14])
        writeCommand(module, "WORD_CLK_MUX", 5)
        readInValues = readCommand(module, "WORD_CLK_MUX", 1, 0)
        self.assertTrue(readInValues.tolist() == [5])
        # A plain string is not an accepted data type.
        self.assertRaisesRegexp(RuntimeError, "Data format used is unsupported",
                                writeCommand, module, word_incomplete_register,
                                "")
        # Test for Unsupported dtype eg. dtype = numpy.int8
        self.assertRaisesRegexp(RuntimeError, "Data format used is unsupported",
                                writeCommand, module, word_incomplete_register,
                                numpy.array([2], dtype = numpy.int8))
    # check offset functionality
    writeCommand(module, "WORD_CLK_MUX", word_clk_mux_content)
    readInValues = readCommand(module, "WORD_CLK_MUX")
    self.assertTrue(numpy.array_equiv(readInValues, word_clk_mux_content))
    word_clk_mux_register = "WORD_CLK_MUX"
    writeCommand(module, word_clk_mux_register, word_clk_mux_content[0:2],
                 elementIndexInRegister = 2)
    readInValue = readCommand(module, word_clk_mux_register,numberOfElementsToRead = 2,
                              elementIndexInRegister = 2)
    self.assertTrue(readInValue.dtype == dtype)
    self.assertTrue(numpy.array_equiv(readInValue, word_clk_mux_content[0:2]))
    # Check corner cases
    # Bogus register name
    exceptionMessage = self.__returnRegisterNotFoundExceptionMsg(module, "BAD_REGISTER_NAME")
    self.assertRaisesRegexp(RuntimeError, exceptionMessage, writeCommand, module,
                            "BAD_REGISTER_NAME",
                            numpy.array([2.125], dtype = dtype))
    # supplied array size exceeds register capacity: !regex /BOARD can be there 1 o 0 times. () and ? have special meaning in regex.
    self.assertRaisesRegexp(RuntimeError, "Requested number of words exceeds the size of the register '(/BOARD)?/WORD_INCOMPLETE_2'!",
                            writeCommand, module, word_incomplete_register,
                            word_clk_mux_content)
    # supplied offset exceeds register span
    self.assertRaises(RuntimeError, writeCommand, module,
                      word_incomplete_register, word_clk_mux_content,
                      elementIndexInRegister=1)
    # write nothing
    initialValue = readCommand(module, "WORD_STATUS")
    writeCommand(module,"WORD_STATUS", numpy.array([], dtype = dtype))
    valueAfterEmptyWrite = readCommand(module, "WORD_STATUS")
    self.assertTrue(numpy.array_equiv(initialValue, valueAfterEmptyWrite))
def __returnRegisterNotFoundExceptionMsg(self, module, registerName):
    """Return the exception message expected for a register that is not in
    the map file.

    The wording depends on whether the register is looked up in the
    top-level map (empty module name) or inside a module of the modular
    map file.

    Fix: the original built the message but fell off the end of the
    function without returning it, so every caller received None.
    """
    if not str(module):
        exceptionMessage = "Cannot find register " + str(registerName) + \
            " in map file: deviceInformation/mtcadummy.map"
    else:
        exceptionMessage = "Cannot find register " + str(module) + "." + str(registerName) + \
            " in map file: deviceInformation/modular_mtcadummy.map"
    return exceptionMessage
def __getDtypeToUse(self, device, command):
    """Return the numpy dtype that *command* produces/consumes:
    float64 for the cooked accessors (read/write), int32 for the raw
    ones (read_raw/write_raw)."""
    if command in (device.read, device.write):
        return numpy.float64
    if command in (device.read_raw, device.write_raw):
        return numpy.int32
def __testreadDMARaw(self, device, module):
    """Verify Device.read_dma_raw on the DMA region of the dummy card,
    which returns square numbers once WORD_ADC_ENA is set."""
    module = str(module)
    # Set the parabolic values in the DMA region by writing 1 to WORD_ADC_ENA
    # register
    device.write(module, "WORD_ADC_ENA", numpy.array([1], dtype = numpy.float32))
    # Read in the parabolic values from the function
    readInValues = device.read_dma_raw(module, "AREA_DMA_VIA_DMA",
                                       numberOfElementsToRead= 10)
    self.assertTrue(readInValues.dtype == numpy.int32)
    self.assertTrue(readInValues.tolist() == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81])
    # Check offset read
    readInValues = device.read_dma_raw(module, "AREA_DMA_VIA_DMA",
                                       numberOfElementsToRead=10,
                                       elementIndexInRegister=3)
    self.assertTrue(readInValues.dtype == numpy.int32)
    self.assertTrue(readInValues.tolist() == [9, 16, 25, 36, 49, 64, 81, 100, \
                                              121, 144])
    # corner cases:
    # bad register name
    # NOTE(review): exceptionText is computed but never asserted against.
    exceptionText = self.__returnRegisterNotFoundExceptionMsg(module, "BAD_REGISTER_NAME")
    # bad element size
    # bad offset
    # FIXME: Not checking this; size of AREA_DMA_VIA_DMA is big 1024 elements
def __testSequences(self, device, module):
    """Verify Device.read_sequences: demultiplexing of multiplexed 2D
    register areas into a matrix (rows = samples, columns = sequences)."""
    module = str(module)
    # Basic Interface: Currently supports read of all sequences only
    #device.write("", "WORD_ADC_ENA", 1)
    # Arrange the data on the card: 12 raw 32-bit words which demultiplex
    # into the 6x4 expectedMatrix below.
    predefinedSequence = numpy.array([0x00010000,
                                      0x00030002,
                                      0x00050004,
                                      0x00070006,
                                      0x00090008,
                                      0x000b000a,
                                      0x000d000c,
                                      0x000f000e,
                                      0x00110010,
                                      0x00130012,
                                      0x00150014,
                                      0x00170016], dtype=numpy.int32)
    device.write_raw(module, 'AREA_DMAABLE', predefinedSequence)
    expectedMatrix = numpy.array([[0, 1, 2, 3],
                                  [4, 5, 6, 7],
                                  [8, 9, 10, 11],
                                  [12, 13, 14, 15],
                                  [16, 17, 18, 19],
                                  [20, 21, 22, 23]], dtype=numpy.double)
    readInMatrix = device.read_sequences(module, 'DMA')
    self.assertTrue(numpy.array_equiv(readInMatrix, expectedMatrix))
    self.assertTrue(readInMatrix.dtype == numpy.double)
    # Same read addressed via an absolute register path.
    readInMatrix = device.read_sequences(registerPath = '/' + str(module)+ '/DMA')
    self.assertTrue(numpy.array_equiv(readInMatrix, expectedMatrix))
    self.assertTrue(readInMatrix.dtype == numpy.double)
    # Check that 32 bit data can be transfered without precision loss (Hack by using double.
    # This is not clean, but works sufficiently well.)
    predefinedSequence = numpy.array([0x12345678, 0x90abcdef, 0xa5a5a5a5,
                                      0x5a5a5a5a, 0xffeeffee, 0xcc33cc33,
                                      0x33cc33cc, 0xdeadbeef, 0x87654321,
                                      0xfdecba09, 0xb0b00b0b, 0x73533537], dtype=numpy.int32)
    device.write_raw(module, 'AREA_MULTIPLEXED_SEQUENCE_UNSIGNED_INT', predefinedSequence)
    # Use dtype=numpy.int32 to make sure we don't have rounding errors in the expected values.
    # The comparison array_equiv still works, even if we compare to a different dtype.
    expectedMatrix = numpy.array([[0x12345678, 0x90abcdef, 0xa5a5a5a5],
                                  [0x5a5a5a5a, 0xffeeffee, 0xcc33cc33],
                                  [0x33cc33cc, 0xdeadbeef, 0x87654321],
                                  [0xfdecba09, 0xb0b00b0b, 0x73533537]], dtype=numpy.uint32)
    readInMatrix = device.read_sequences(module, 'UNSIGNED_INT')
    self.assertTrue(numpy.array_equiv(readInMatrix, expectedMatrix))
    self.assertTrue(readInMatrix.dtype == numpy.double)
if __name__ == '__main__':
    # Lock the kernel driver dummies against simultaneous usage by other
    # test runs. First make sure the lock directory is there; the locking
    # calls would fail otherwise.
    try:
        os.makedirs('/var/run/lock/mtcadummy')
    except OSError:
        # We end up here if the directory already exists, which is fine.
        # Only re-raise when the directory is really missing.
        if not os.path.isdir('/var/run/lock/mtcadummy'):
            raise
    s1 = open('/var/run/lock/mtcadummy/mtcadummys1','w+')
    fcntl.flock(s1, fcntl.LOCK_EX)
    s4 = open('/var/run/lock/mtcadummy/llrfdummys4','w+')
    fcntl.flock(s4, fcntl.LOCK_EX)
    try:
        unittest.main()
    finally:
        # Fix: unittest.main() raises SystemExit, so unlock statements
        # placed after it were never executed; release the locks in a
        # finally block instead.
        fcntl.flock(s4, fcntl.LOCK_UN)
        fcntl.flock(s1, fcntl.LOCK_UN)
        s4.close()
        s1.close()
|
ChimeraTK/DeviceAccess-PythonBindings
|
tests/testMtca4upy.py
|
Python
|
lgpl-3.0
| 22,852
|
import sys
from robotremoteserver import RobotRemoteServer
from sikuli import *
class SikuliRemoteLibrary:
    """Robot Framework remote library wrapping basic Sikuli screen actions.

    Python 2 / Jython syntax (``except FindFailed, err``); expects to run
    inside the Sikuli environment, which provides Screen, Pattern and
    FindFailed via ``from sikuli import *``.
    """
    def __init__(self):
        # Whole-screen handle and a reusable pattern slot.
        self.SS = Screen()
        self.PT = Pattern()
    def _wait(self, imgFile, timeOut, similarity):
        # Wait until imgFile is visible on screen with at least the given
        # similarity (0..1), or raise AssertionError after timeOut seconds.
        try:
            self.PT = Pattern(imgFile)
            self.PT = self.PT.similar(float(similarity))
            self.SS.wait(self.PT, float(timeOut))
        except FindFailed, err:
            print "ERR: _wait"
            raise AssertionError(err)
    def click_object(self, imgFile, timeOut, similarity):
        """Wait for imgFile to appear, then click it."""
        try:
            self._wait(imgFile, timeOut, similarity)
            self.SS.click(imgFile)
        except FindFailed, err:
            raise AssertionError("Cannot click [" + imgFile + "]")
    def object_exists(self, imgFile, similarity, timeOut):
        """Assert that imgFile appears on screen within timeOut seconds."""
        # NOTE(review): argument order differs from the other keywords
        # (similarity before timeOut) -- confirm callers.
        try:
            self._wait(imgFile, timeOut, similarity)
        except FindFailed, err:
            raise AssertionError("Could not find [" + imgFile + "]")
    def type_at_object(self, imgFile, txt, timeOut, similarity):
        """Wait for imgFile to appear, then type txt at it."""
        try:
            self._wait(imgFile, timeOut, similarity)
            self.SS.type(imgFile, txt)
        except FindFailed, err:
            raise AssertionError("Cannot type at [" + imgFile + "]")
    def paste_at_object(self, imgFile, txt, timeOut, similarity):
        """Wait for imgFile to appear, then paste txt at it."""
        try:
            self._wait(imgFile, timeOut, similarity)
            self.SS.paste(imgFile, txt)
        except FindFailed, err:
            raise AssertionError("Cannot paste at [" + imgFile + "]")
if __name__ == '__main__':
    # Expose the library over the Robot Framework remote protocol;
    # optional CLI arguments (e.g. host/port) are forwarded to the server.
    SRL = SikuliRemoteLibrary()
    RobotRemoteServer(SRL, *sys.argv[1:])
|
priyesingh/rijenpy
|
libs/SikuliRemoteLibrary.py
|
Python
|
gpl-3.0
| 1,635
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend sinertalers received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a sinertalerd or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
# Change below this amount is considered too small to bother returning.
BASE_FEE = Decimal("0.001")

def check_json_precision():
    """Make sure json library being used does not lose precision converting SIN values"""
    value = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(value)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the sinertaler data directory"""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Bitcoin/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
    # Anything else (Linux, BSD, ...) uses a dot-directory in $HOME.
    return os.path.expanduser("~/.sinertaler")
def read_sinertaler_config(dbdir):
    """Read the sinertaler.conf file from dbdir, returns dictionary of settings"""
    from ConfigParser import SafeConfigParser  # Python 2 module
    class FakeSecHead(object):
        # ConfigParser requires a section header, but sinertaler.conf has
        # none; this file wrapper injects a fake [all] section and strips
        # '#' comments from each line as it is read.
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                # Emit the fake header exactly once.
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s
    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "sinertaler.conf"))))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a sinertaler JSON-RPC server.

    Exits the process with an error message when the connection fails or
    when the daemon's testnet setting does not match the config.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        config['rpcport'] = 49876 if testnet else 39876
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch
        # connection errors, but also to learn the daemon's testnet setting.
        daemon_testnet = result.getmininginfo()['testnet']
    except Exception:
        # Fix: was a bare 'except:', which also caught the SystemExit raised
        # by the testnet-mismatch branch below (then inside this try) and
        # reported the wrong error.
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
    if daemon_testnet != testnet:
        sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
        sys.exit(1)
    return result
def unlock_wallet(sinertalerd):
    """Ensure the wallet is unlocked, prompting for the passphrase if needed.

    Returns True when the wallet is not encrypted or is (now) unlocked,
    False when the supplied passphrase was wrong (callers loop until True).
    """
    info = sinertalerd.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            # Unlock for 5 seconds -- just long enough to sign.
            sinertalerd.walletpassphrase(passphrase, 5)
        except Exception:
            # Fix: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit, making Ctrl-C at the prompt
            # loop forever.
            sys.stderr.write("Wrong passphrase\n")
    info = sinertalerd.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(sinertalerd):
    """Return {address: {"total", "outputs", "account"}} for all unspent
    outputs known to the daemon (including unconfirmed, minconf=0)."""
    address_summary = dict()
    # Map address -> account label for display purposes.
    address_to_account = dict()
    for info in sinertalerd.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]
    unspent = sinertalerd.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        # recover the address from the output's raw transaction.
        rawtx = sinertalerd.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]
        # This code only deals with ordinary pay-to-sinertaler-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue
        address = pk["addresses"][0]
        if address in address_summary:
            address_summary[address]["total"] += vout["value"]
            address_summary[address]["outputs"].append(output)
        else:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
                }
    return address_summary
def select_coins(needed, inputs):
    """Greedy coin selection: take inputs in order until *needed* is covered.

    Returns (outputs, change): outputs is the list of selected
    {"txid", "vout"} dicts, change is the amount gathered minus needed
    (negative when the inputs could not cover the target).
    """
    selected = []
    gathered = Decimal("0.0")
    for candidate in inputs:
        if gathered >= needed:
            break
        selected.append({ "txid":candidate["txid"], "vout":candidate["vout"]})
        gathered += candidate["amount"]
    return (selected, gathered-needed)
def create_tx(sinertalerd, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction spending from *fromaddresses*.

    Change larger than BASE_FEE is returned to the last from-address.
    Exits the process when funds are insufficient or signing is
    incomplete. Returns the signed transaction as hex.
    """
    all_coins = list_available(sinertalerd)
    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    # Collect every unspent output belonging to the given addresses.
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]
    if total_available < needed:
        sys.stderr.write("Error, only %f SIN available, need %f\n"%(total_available, needed));
        sys.exit(1)
    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to sinertalerd.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE: # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)
    rawtx = sinertalerd.createrawtransaction(inputs, outputs)
    signed_rawtx = sinertalerd.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]
    return txdata
def compute_amount_in(sinertalerd, txinfo):
    """Sum the values of all inputs of decoded transaction *txinfo*,
    looking up each referenced previous output via the daemon."""
    total = Decimal("0.0")
    for vin in txinfo['vin']:
        prev_tx = sinertalerd.getrawtransaction(vin['txid'], 1)
        total += prev_tx['vout'][vin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of all outputs of decoded transaction *txinfo*."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(sinertalerd, txdata_hex, max_fee):
    """Abort the process (exit 1) if the transaction's implied fee looks
    unreasonable: above *max_fee*, or missing on a large or tiny-amount
    transaction.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = sinertalerd.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(sinertalerd, txinfo)
        total_out = compute_amount_out(txinfo)
        # Fix: 'fee' was used below but never defined, raising NameError
        # whenever those checks were reached. The fee is whatever value
        # the inputs do not pay back out.
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000 # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Command-line entry point: with no --amount, list spendable funds per
    address; otherwise build, sanity-check and (unless --dry_run) broadcast
    a transaction."""
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get sinertalers from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send sinertalers to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of sinertaler.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")
    (options, args) = parser.parse_args()
    check_json_precision()
    config = read_sinertaler_config(options.datadir)
    if options.testnet: config['testnet'] = True
    sinertalerd = connect_JSON(config)
    if options.amount is None:
        # No amount given: just list available funds. (Python 2 iteritems.)
        address_summary = list_available(sinertalerd)
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(sinertalerd) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(sinertalerd, options.fromaddresses.split(","), options.to, amount, fee)
        # Refuse to broadcast when the implied fee exceeds 1% of the amount.
        sanity_test_fee(sinertalerd, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = sinertalerd.sendrawtransaction(txdata)
            print(txid)

if __name__ == '__main__':
    main()
|
fourty2/sinertaler
|
contrib/spendfrom/spendfrom.py
|
Python
|
mit
| 10,168
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-20 22:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: creates the Answers, Questions and
    Quiz models and wires up their foreign keys (Questions -> Quiz,
    Answers -> Questions)."""
    initial = True

    dependencies = [
        ('quiz_view', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Answers',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('answer_text', models.CharField(max_length=255, unique=True)),
                ('weight', models.IntegerField(default=0)),
                ('choice_int', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Questions',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.CharField(max_length=255, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Quiz',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, unique=True)),
            ],
        ),
        # Foreign keys are added after the models so all targets exist.
        migrations.AddField(
            model_name='questions',
            name='quizes',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quiz_view.Quiz'),
        ),
        migrations.AddField(
            model_name='answers',
            name='questions',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quiz_view.Questions'),
        ),
    ]
|
tetsucceed/quiz-app
|
qapp/quiz_view/migrations/0002_auto_20170920_2216.py
|
Python
|
mit
| 1,785
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from datetime import datetime
from helpers.loghelpers import LOG
from .trigger import Trigger
from .triggertype import TriggerType
from validators.validators import valid_amount, valid_timestamp
class RecurringTrigger(Trigger):
    """Trigger that activates repeatedly every *interval* seconds, from an
    optional begin_time until an optional end_time (unix timestamps)."""

    def __init__(self, trigger_id):
        super(RecurringTrigger, self).__init__(trigger_id=trigger_id)
        self.trigger_type = TriggerType.RECURRING
        # Timing fields (unix timestamps / seconds); set via configure().
        self.next_activation = None
        self.begin_time = None
        self.end_time = None
        self.interval = None

    def conditions_fulfilled(self):
        """Return True when the trigger is due to activate now."""
        if self.interval is None or self.begin_time is None:
            # Not fully configured yet.
            return False

        if self.end_time is None:
            return self.next_activation <= int(time.time())
        elif self.end_time <= int(time.time()):
            # Past the end time: mark the trigger finished and persist it.
            LOG.info('Recurring trigger %s has reached its end time' % self.id)
            self.status = 'Succeeded'
            self.save()
            return False

        return self.next_activation <= int(time.time()) <= self.end_time

    def activate(self):
        """Run the trigger's actions, then schedule the next activation."""
        super(RecurringTrigger, self).activate()

        if self.end_time is None or self.next_activation + self.interval <= self.end_time:
            self.next_activation += self.interval  # Todo what if trigger was activated after interval has passed??
            LOG.info('Setting next activation of recurring trigger %s to %s' % (self.id, datetime.fromtimestamp(self.next_activation)))
            self.save()

    def configure(self, **config):
        """Apply validated configuration values on top of the base class's."""
        super(RecurringTrigger, self).configure(**config)
        if 'interval' in config and valid_amount(config['interval']):
            self.interval = config['interval']
        if 'begin_time' in config and valid_timestamp(config['begin_time']):
            self.begin_time = config['begin_time']
        if 'end_time' in config and valid_timestamp(config['end_time']):
            self.end_time = config['end_time']
        if 'next_activation' in config and valid_timestamp(config['next_activation']):
            self.next_activation = config['next_activation']
        elif self.begin_time is not None:
            # No explicit next_activation: the first activation is begin_time.
            self.next_activation = self.begin_time
            LOG.info('Setting first activation of recurring trigger %s to %s' % (self.id, datetime.fromtimestamp(self.next_activation)))
        # Recurring triggers may activate more than once.
        self.multi = True

    def json_encodable(self):
        """Extend the base JSON-encodable dict with the recurring fields."""
        ret = super(RecurringTrigger, self).json_encodable()
        ret.update({
            'begin_time': self.begin_time,
            'end_time': self.end_time,
            'interval': self.interval,
            'next_activation': self.next_activation})
        return ret
|
ValyrianTech/BitcoinSpellbook-v0.3
|
trigger/recurringtrigger.py
|
Python
|
gpl-3.0
| 2,696
|
# This file is part of CherryPy <http://www.cherrypy.org/>
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8
from hashlib import md5
import cherrypy
from cherrypy._cpcompat import ntob
from cherrypy.lib import auth_basic
from cherrypy.test import helper
class BasicAuthTest(helper.CPWebCase):
    """Functional tests for the cherrypy.lib.auth_basic tool, using both a
    plaintext password dict and a custom hash-based checker."""

    @staticmethod
    def setup_server():
        class Root:
            @cherrypy.expose
            def index(self):
                return 'This is public.'

        class BasicProtected:
            @cherrypy.expose
            def index(self):
                return "Hello %s, you've been authorized." % (
                    cherrypy.request.login)

        class BasicProtected2:
            @cherrypy.expose
            def index(self):
                return "Hello %s, you've been authorized." % (
                    cherrypy.request.login)

        userpassdict = {'xuser': 'xpassword'}
        userhashdict = {'xuser': md5(b'xpassword').hexdigest()}

        def checkpasshash(realm, user, password):
            # Custom checker: compare the md5 of the offered password
            # against the stored hash.
            p = userhashdict.get(user)
            return p and p == md5(ntob(password)).hexdigest() or False

        basic_checkpassword_dict = auth_basic.checkpassword_dict(userpassdict)
        # /basic uses the dict-based checker, /basic2 the hash-based one.
        conf = {
            '/basic': {
                'tools.auth_basic.on': True,
                'tools.auth_basic.realm': 'wonderland',
                'tools.auth_basic.checkpassword': basic_checkpassword_dict
            },
            '/basic2': {
                'tools.auth_basic.on': True,
                'tools.auth_basic.realm': 'wonderland',
                'tools.auth_basic.checkpassword': checkpasshash
            },
        }
        root = Root()
        root.basic = BasicProtected()
        root.basic2 = BasicProtected2()
        cherrypy.tree.mount(root, config=conf)

    def testPublic(self):
        """The root page needs no credentials."""
        self.getPage('/')
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', 'text/html;charset=utf-8')
        self.assertBody('This is public.')

    def testBasic(self):
        """Dict-based checker: 401 without/with wrong creds, 200 with right."""
        self.getPage('/basic/')
        self.assertStatus(401)
        self.assertHeader('WWW-Authenticate', 'Basic realm="wonderland"')

        # base64 of 'xuser:xpassworW' -- deliberately wrong password.
        self.getPage('/basic/',
                     [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3JX')])
        self.assertStatus(401)

        # base64 of 'xuser:xpassword' -- correct credentials.
        self.getPage('/basic/',
                     [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3Jk')])
        self.assertStatus('200 OK')
        self.assertBody("Hello xuser, you've been authorized.")

    def testBasic2(self):
        """Hash-based checker: same scenarios as testBasic."""
        self.getPage('/basic2/')
        self.assertStatus(401)
        self.assertHeader('WWW-Authenticate', 'Basic realm="wonderland"')

        self.getPage('/basic2/',
                     [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3JX')])
        self.assertStatus(401)

        self.getPage('/basic2/',
                     [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3Jk')])
        self.assertStatus('200 OK')
        self.assertBody("Hello xuser, you've been authorized.")
|
Southpaw-TACTIC/TACTIC
|
3rd_party/python2/site-packages/cherrypy/test/test_auth_basic.py
|
Python
|
epl-1.0
| 3,034
|
'''
In Python 1.6 and later, most string operations are also available as
string methods, as shown below; many functions in the ``string`` module
are just wrappers around the corresponding string method.
'''
# NOTE: Python 2 print statements. The original docstring was mis-encoded
# (GBK read as Latin-1) Chinese; translated to English above.
text = "Monty Python's Flying Circus"
print "upper", "=>", text.upper()
print "lower", "=>", text.lower()
print "split", "=>", text.split()
print "join", "=>", "+".join(text.split())
print "replace", "=>", text.replace("Python", "Perl")
print "find", "=>", text.find("Python"), text.find("Perl")
print "count", "=>", text.count("n")
|
iamweilee/pylearn
|
string-example-2.py
|
Python
|
mit
| 478
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.utils import filters
# Hook letting deployments plug in a policy-enforcement function via
# settings.POLICY_CHECK_FUNCTION; the default permits every action.
POLICY_CHECK = getattr(settings, "POLICY_CHECK_FUNCTION",
                       lambda policy, request, target: True)
class DeleteGroup(tables.DeleteAction):
    """Table/row action that deletes security groups (except 'default')."""
    data_type_singular = _("Security Group")
    data_type_plural = _("Security Groups")

    def get_policy_target(self, request, datum=None):
        # Scope the policy check to the owning project when known.
        project_id = None
        if datum:
            project_id = getattr(datum, 'tenant_id', None)
        return {"project_id": project_id}

    def allowed(self, request, security_group=None):
        policy_target = self.get_policy_target(request, security_group)
        # Pick the rule set for whichever service handles security groups.
        if api.base.is_service_enabled(request, "network"):
            policy = (("network", "delete_security_group"),)
        else:
            policy = (("compute", "compute_extension:security_groups"),)

        if not POLICY_CHECK(policy, request, policy_target):
            return False

        if not security_group:
            return True
        # The 'default' group must never be deleted.
        return security_group.name != 'default'

    def delete(self, request, obj_id):
        api.network.security_group_delete(request, obj_id)
class CreateGroup(tables.LinkAction):
    """Table action opening the 'create security group' modal."""
    name = "create"
    verbose_name = _("Create Security Group")
    url = "horizon:project:access_and_security:security_groups:create"
    classes = ("ajax-modal",)
    icon = "plus"

    def allowed(self, request, security_group=None):
        # Pick the rule set for whichever service handles security groups.
        if api.base.is_service_enabled(request, "network"):
            policy = (("network", "create_security_group"),)
        else:
            policy = (("compute", "compute_extension:security_groups"),)
        return POLICY_CHECK(policy, request, target={})
class EditGroup(tables.LinkAction):
    """Row action opening the 'edit security group' modal (not for 'default')."""
    name = "edit"
    verbose_name = _("Edit Security Group")
    url = "horizon:project:access_and_security:security_groups:update"
    classes = ("ajax-modal",)
    icon = "pencil"

    def get_policy_target(self, request, datum=None):
        # Scope the policy check to the owning project when known.
        project_id = None
        if datum:
            project_id = getattr(datum, 'tenant_id', None)
        return {"project_id": project_id}

    def allowed(self, request, security_group=None):
        policy_target = self.get_policy_target(request, security_group)
        if api.base.is_service_enabled(request, "network"):
            policy = (("network", "update_security_group"),)
        else:
            policy = (("compute", "compute_extension:security_groups"),)

        if not POLICY_CHECK(policy, request, policy_target):
            return False

        if not security_group:
            return True
        # The 'default' group cannot be edited.
        return security_group.name != 'default'
class ManageRules(tables.LinkAction):
    """Row action linking to a security group's rule-management page."""
    name = "manage_rules"
    verbose_name = _("Manage Rules")
    url = "horizon:project:access_and_security:security_groups:detail"
    icon = "pencil"

    def get_policy_target(self, request, datum=None):
        # Scope the policy check to the owning project when known.
        project_id = None
        if datum:
            project_id = getattr(datum, 'tenant_id', None)
        return {"project_id": project_id}

    def allowed(self, request, security_group=None):
        policy_target = self.get_policy_target(request, security_group)
        if api.base.is_service_enabled(request, "network"):
            policy = (("network", "get_security_group"),)
        else:
            policy = (("compute", "compute_extension:security_groups"),)
        return POLICY_CHECK(policy, request, policy_target)
class SecurityGroupsTable(tables.DataTable):
    """Table listing the project's security groups."""
    name = tables.Column("name", verbose_name=_("Name"))
    description = tables.Column("description", verbose_name=_("Description"))

    def sanitize_id(self, obj_id):
        # Normalize the row id to an int or UUID (see utils.filters).
        return filters.get_int_or_uuid(obj_id)

    class Meta:
        name = "security_groups"
        verbose_name = _("Security Groups")
        table_actions = (CreateGroup, DeleteGroup)
        row_actions = (ManageRules, EditGroup, DeleteGroup)
class CreateRule(tables.LinkAction):
    """Table action opening the 'add rule' modal for one security group."""
    name = "add_rule"
    verbose_name = _("Add Rule")
    url = "horizon:project:access_and_security:security_groups:add_rule"
    classes = ("ajax-modal",)
    icon = "plus"

    def allowed(self, request, security_group_rule=None):
        if api.base.is_service_enabled(request, "network"):
            policy = (("network", "create_security_group_rule"),)
        else:
            policy = (("compute", "compute_extension:security_groups"),)
        return POLICY_CHECK(policy, request, target={})

    def get_link_url(self):
        # The add-rule URL needs the group id from the table's kwargs.
        return reverse(self.url, args=[self.table.kwargs['security_group_id']])
class DeleteRule(tables.DeleteAction):
    """Row action deleting a single security group rule."""
    data_type_singular = _("Rule")
    data_type_plural = _("Rules")

    def allowed(self, request, security_group_rule=None):
        if api.base.is_service_enabled(request, "network"):
            policy = (("network", "delete_security_group_rule"),)
        else:
            policy = (("compute", "compute_extension:security_groups"),)
        return POLICY_CHECK(policy, request, target={})

    def delete(self, request, obj_id):
        api.network.security_group_rule_delete(request, obj_id)

    def get_success_url(self, request):
        # Return to the detail page of the group the rule belonged to.
        sg_id = self.table.kwargs['security_group_id']
        return reverse("horizon:project:access_and_security:"
                       "security_groups:detail", args=[sg_id])
def get_remote(rule):
    """Return a display string for a rule's remote: a CIDR, a source group
    name, or None when neither is present.

    Fix: the local variable previously shadowed the built-in ``range``;
    renamed to ``cidr``.
    """
    if 'cidr' in rule.ip_range:
        if rule.ip_range['cidr'] is None:
            # An unset CIDR means "anywhere"; pick the wildcard matching
            # the rule's address family.
            cidr = '::/0' if rule.ethertype == 'IPv6' else '0.0.0.0/0'
        else:
            cidr = rule.ip_range['cidr']
        return cidr + ' (CIDR)'
    elif 'name' in rule.group:
        return rule.group['name']
    else:
        return None
def get_port_range(rule):
    """Render a rule's port range, substituting rule-template names
    (via check_rule_template) when they are known."""
    proto = rule.ip_protocol
    low = check_rule_template(rule.from_port, proto)
    if rule.from_port == rule.to_port:
        return low
    high = check_rule_template(rule.to_port, proto)
    return u"%(from)s - %(to)s" % {'from': low, 'to': high}
def filter_direction(direction):
    """Translate a rule direction for display; missing means ingress."""
    is_ingress = direction is None or direction.lower() == 'ingress'
    return _('Ingress') if is_ingress else _('Egress')
def filter_protocol(protocol):
    """Return the display form of an IP protocol name.

    ``None`` (meaning "any protocol") renders as the translated "Any";
    any other value is upper-cased.
    """
    if protocol is None:
        return _('Any')
    # Fix: the original unbound call ``unicode.upper(protocol)`` raised
    # TypeError for non-unicode strings (and does not exist on Python 3);
    # calling the method on the instance handles both string types.
    return protocol.upper()
def check_rule_template(port, ip_proto):
    """Return ``port``, annotated with a rule-template name when one matches.

    Looks up the ``SECURITY_GROUP_RULES`` setting for a template whose
    from/to ports both equal ``port`` and whose protocol matches; when
    found, returns ``"<port> (<template name>)"``, otherwise the port
    unchanged.
    """
    rules_dict = getattr(settings, 'SECURITY_GROUP_RULES', {})
    if not rules_dict:
        return port
    # Fix: replaced filter()+lambda over a redundant list comprehension
    # with a single comprehension; under Python 3 filter() returns a lazy
    # iterator, which would have made the truthiness test below always
    # succeed.
    matches = [rule for rule in rules_dict.values()
               if str(port) == rule['from_port']
               and str(port) == rule['to_port']
               and ip_proto == rule['ip_protocol']]
    if matches:
        return u"%(from_port)s (%(name)s)" % matches[0]
    return port
class RulesTable(tables.DataTable):
    # Columns mirror attributes of a security-group-rule API object; the
    # ``filters`` callables translate raw values into user-facing text.
    direction = tables.Column("direction",
                              verbose_name=_("Direction"),
                              filters=(filter_direction,))
    ethertype = tables.Column("ethertype",
                              verbose_name=_("Ether Type"))
    protocol = tables.Column("ip_protocol",
                             verbose_name=_("IP Protocol"),
                             filters=(filter_protocol,))
    port_range = tables.Column(get_port_range,
                               verbose_name=_("Port Range"))
    remote = tables.Column(get_remote, verbose_name=_("Remote"))

    def sanitize_id(self, obj_id):
        # Rule ids are ints under nova-network, UUIDs under neutron.
        return filters.get_int_or_uuid(obj_id)

    def get_object_display(self, rule):
        # Text shown in confirmation / success messages for row actions.
        return unicode(rule)

    class Meta:
        name = "rules"
        verbose_name = _("Security Group Rules")
        table_actions = (CreateRule, DeleteRule)
        row_actions = (DeleteRule,)
|
JioCloud/horizon
|
openstack_dashboard/dashboards/project/access_and_security/security_groups/tables.py
|
Python
|
apache-2.0
| 8,526
|
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
try:
import json
except ImportError:
import simplejson as json
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.rds2 import exceptions
class RDSConnection(AWSQueryConnection):
    """
    Amazon Relational Database Service

    Amazon Relational Database Service (Amazon RDS) is a web service
    that makes it easier to set up, operate, and scale a relational
    database in the cloud. It provides cost-efficient, resizable
    capacity for an industry-standard relational database and manages
    common database administration tasks, freeing up developers to
    focus on what makes their applications and businesses unique.

    Amazon RDS gives you access to the capabilities of a familiar
    MySQL or Oracle database server. This means the code,
    applications, and tools you already use today with your existing
    MySQL or Oracle databases work with Amazon RDS without
    modification. Amazon RDS automatically backs up your database and
    maintains the database software that powers your DB instance.
    Amazon RDS is flexible: you can scale your database instance's
    compute resources and storage capacity to meet your application's
    demand. As with all Amazon Web Services, there are no up-front
    investments, and you pay only for the resources you use.

    This is the Amazon RDS API Reference . It contains a comprehensive
    description of all Amazon RDS Query APIs and data types. Note that
    this API is asynchronous and some actions may require polling to
    determine when an action has been applied. See the parameter
    description to determine if a change is applied immediately or on
    the next instance reboot or during the maintenance window. For
    more information on Amazon RDS concepts and usage scenarios, go to
    the `Amazon RDS User Guide`_.
    """
    # Service API version this client targets.
    APIVersion = "2013-09-09"
    # Defaults used by __init__ when no explicit region is supplied.
    DefaultRegionName = "us-east-1"
    DefaultRegionEndpoint = "rds.us-east-1.amazonaws.com"
    # Error responses are parsed as JSON fault documents.
    ResponseError = JSONResponseError

    # Maps service fault codes (from JSON error responses) to the
    # exception classes raised for them.
    _faults = {
        "InvalidSubnet": exceptions.InvalidSubnet,
        "DBParameterGroupQuotaExceeded": exceptions.DBParameterGroupQuotaExceeded,
        "DBSubnetGroupAlreadyExists": exceptions.DBSubnetGroupAlreadyExists,
        "DBSubnetGroupQuotaExceeded": exceptions.DBSubnetGroupQuotaExceeded,
        "InstanceQuotaExceeded": exceptions.InstanceQuotaExceeded,
        "InvalidRestore": exceptions.InvalidRestore,
        "InvalidDBParameterGroupState": exceptions.InvalidDBParameterGroupState,
        "AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceeded,
        "DBSecurityGroupAlreadyExists": exceptions.DBSecurityGroupAlreadyExists,
        "InsufficientDBInstanceCapacity": exceptions.InsufficientDBInstanceCapacity,
        "ReservedDBInstanceQuotaExceeded": exceptions.ReservedDBInstanceQuotaExceeded,
        "DBSecurityGroupNotFound": exceptions.DBSecurityGroupNotFound,
        "DBInstanceAlreadyExists": exceptions.DBInstanceAlreadyExists,
        "ReservedDBInstanceNotFound": exceptions.ReservedDBInstanceNotFound,
        "DBSubnetGroupDoesNotCoverEnoughAZs": exceptions.DBSubnetGroupDoesNotCoverEnoughAZs,
        "InvalidDBSecurityGroupState": exceptions.InvalidDBSecurityGroupState,
        "InvalidVPCNetworkState": exceptions.InvalidVPCNetworkState,
        "ReservedDBInstancesOfferingNotFound": exceptions.ReservedDBInstancesOfferingNotFound,
        "SNSTopicArnNotFound": exceptions.SNSTopicArnNotFound,
        "SNSNoAuthorization": exceptions.SNSNoAuthorization,
        "SnapshotQuotaExceeded": exceptions.SnapshotQuotaExceeded,
        "OptionGroupQuotaExceeded": exceptions.OptionGroupQuotaExceeded,
        "DBParameterGroupNotFound": exceptions.DBParameterGroupNotFound,
        "SNSInvalidTopic": exceptions.SNSInvalidTopic,
        "InvalidDBSubnetGroupState": exceptions.InvalidDBSubnetGroupState,
        "DBSubnetGroupNotFound": exceptions.DBSubnetGroupNotFound,
        "InvalidOptionGroupState": exceptions.InvalidOptionGroupState,
        "SourceNotFound": exceptions.SourceNotFound,
        "SubscriptionCategoryNotFound": exceptions.SubscriptionCategoryNotFound,
        "EventSubscriptionQuotaExceeded": exceptions.EventSubscriptionQuotaExceeded,
        "DBSecurityGroupNotSupported": exceptions.DBSecurityGroupNotSupported,
        "InvalidEventSubscriptionState": exceptions.InvalidEventSubscriptionState,
        "InvalidDBSubnetState": exceptions.InvalidDBSubnetState,
        "InvalidDBSnapshotState": exceptions.InvalidDBSnapshotState,
        "SubscriptionAlreadyExist": exceptions.SubscriptionAlreadyExist,
        "DBSecurityGroupQuotaExceeded": exceptions.DBSecurityGroupQuotaExceeded,
        "ProvisionedIopsNotAvailableInAZ": exceptions.ProvisionedIopsNotAvailableInAZ,
        "AuthorizationNotFound": exceptions.AuthorizationNotFound,
        "OptionGroupAlreadyExists": exceptions.OptionGroupAlreadyExists,
        "SubscriptionNotFound": exceptions.SubscriptionNotFound,
        "DBUpgradeDependencyFailure": exceptions.DBUpgradeDependencyFailure,
        "PointInTimeRestoreNotEnabled": exceptions.PointInTimeRestoreNotEnabled,
        "AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExists,
        "DBSubnetQuotaExceeded": exceptions.DBSubnetQuotaExceeded,
        "OptionGroupNotFound": exceptions.OptionGroupNotFound,
        "DBParameterGroupAlreadyExists": exceptions.DBParameterGroupAlreadyExists,
        "DBInstanceNotFound": exceptions.DBInstanceNotFound,
        "ReservedDBInstanceAlreadyExists": exceptions.ReservedDBInstanceAlreadyExists,
        "InvalidDBInstanceState": exceptions.InvalidDBInstanceState,
        "DBSnapshotNotFound": exceptions.DBSnapshotNotFound,
        "DBSnapshotAlreadyExists": exceptions.DBSnapshotAlreadyExists,
        "StorageQuotaExceeded": exceptions.StorageQuotaExceeded,
        "SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse,
    }
def __init__(self, **kwargs):
    """Create an RDS connection.

    Accepts the usual AWSQueryConnection keyword arguments plus an
    optional ``region`` (a RegionInfo); when absent, the class defaults
    for region name and endpoint are used.
    """
    region = kwargs.pop('region', None)
    if not region:
        region = RegionInfo(self, self.DefaultRegionName,
                            self.DefaultRegionEndpoint)
    # Honor an explicit host override; otherwise talk to the region's
    # endpoint.
    kwargs.setdefault('host', region.endpoint)
    super(RDSConnection, self).__init__(**kwargs)
    self.region = region
def _required_auth_capability(self):
    # RDS requests are signed with AWS Signature Version 4.
    return ['hmac-v4']
def add_source_identifier_to_subscription(self, subscription_name,
                                          source_identifier):
    """
    Adds a source identifier to an existing RDS event notification
    subscription.

    :type subscription_name: string
    :param subscription_name: Name of the event notification
        subscription to update.

    :type source_identifier: string
    :param source_identifier: Identifier of the event source to add;
        must start with a letter, contain only ASCII letters, digits,
        and hyphens, and not end with a hyphen or contain "--".
        Depending on the subscription's source type this is a
        DBInstanceIdentifier, DBSecurityGroupName,
        DBParameterGroupName, or DBSnapshotIdentifier.
    """
    params = {'SubscriptionName': subscription_name,
              'SourceIdentifier': source_identifier}
    return self._make_request(action='AddSourceIdentifierToSubscription',
                              verb='POST', path='/', params=params)
def add_tags_to_resource(self, resource_name, tags):
    """
    Adds metadata tags to an Amazon RDS resource.  Tags can be used for
    cost allocation reporting or in IAM policy Condition statements.

    :type resource_name: string
    :param resource_name: Amazon Resource Name (ARN) of the RDS
        resource to tag.

    :type tags: list
    :param tags: (key, value) pairs to assign to the resource.
    """
    params = {'ResourceName': resource_name}
    self.build_complex_list_params(params, tags,
                                   'Tags.member', ('Key', 'Value'))
    return self._make_request(action='AddTagsToResource', verb='POST',
                              path='/', params=params)
def authorize_db_security_group_ingress(self, db_security_group_name,
                                        cidrip=None,
                                        ec2_security_group_name=None,
                                        ec2_security_group_id=None,
                                        ec2_security_group_owner_id=None):
    """
    Enables ingress to a DBSecurityGroup.  Authorization takes one of
    two forms: an EC2/VPC security group (for applications running on
    EC2 or VPC instances) or an IP (CIDR) range (for applications on
    the Internet).  Supply one of: a CIDR range, EC2SecurityGroupId
    (VPC), or EC2SecurityGroupOwnerId together with
    EC2SecurityGroupName or EC2SecurityGroupId (non-VPC).
    Cross-region and cross-VPC authorization is not supported.

    :type db_security_group_name: string
    :param db_security_group_name: DB security group to add the
        authorization to.

    :type cidrip: string
    :param cidrip: The IP range to authorize.

    :type ec2_security_group_name: string
    :param ec2_security_group_name: Name of the EC2 security group to
        authorize (non-VPC form).

    :type ec2_security_group_id: string
    :param ec2_security_group_id: Id of the EC2 security group to
        authorize (required for VPC DB security groups).

    :type ec2_security_group_owner_id: string
    :param ec2_security_group_owner_id: AWS account number of the owner
        of the named EC2 security group (not an AWS Access Key ID).
    """
    params = {'DBSecurityGroupName': db_security_group_name}
    # Forward only the selectors the caller actually supplied.
    optional = (('CIDRIP', cidrip),
                ('EC2SecurityGroupName', ec2_security_group_name),
                ('EC2SecurityGroupId', ec2_security_group_id),
                ('EC2SecurityGroupOwnerId', ec2_security_group_owner_id))
    for key, value in optional:
        if value is not None:
            params[key] = value
    return self._make_request(action='AuthorizeDBSecurityGroupIngress',
                              verb='POST', path='/', params=params)
def copy_db_snapshot(self, source_db_snapshot_identifier,
                     target_db_snapshot_identifier, tags=None):
    """
    Copies the specified DBSnapshot.  The source DBSnapshot must be in
    the "available" state.

    :type source_db_snapshot_identifier: string
    :param source_db_snapshot_identifier: Identifier of a valid system
        snapshot in the "available" state, e.g.
        `rds:mydb-2012-04-02-00-01`.

    :type target_db_snapshot_identifier: string
    :param target_db_snapshot_identifier: Identifier for the copy:
        1 to 255 alphanumeric characters or hyphens, first character a
        letter, no trailing hyphen and no "--"; e.g. `my-db-snapshot`.

    :type tags: list
    :param tags: A list of tags.
    """
    params = {
        'SourceDBSnapshotIdentifier': source_db_snapshot_identifier,
        'TargetDBSnapshotIdentifier': target_db_snapshot_identifier,
    }
    if tags is not None:
        self.build_complex_list_params(params, tags,
                                       'Tags.member', ('Key', 'Value'))
    return self._make_request(action='CopyDBSnapshot', verb='POST',
                              path='/', params=params)
def create_db_instance(self, db_instance_identifier, allocated_storage,
                       db_instance_class, engine, master_username,
                       master_user_password, db_name=None,
                       db_security_groups=None,
                       vpc_security_group_ids=None,
                       availability_zone=None, db_subnet_group_name=None,
                       preferred_maintenance_window=None,
                       db_parameter_group_name=None,
                       backup_retention_period=None,
                       preferred_backup_window=None, port=None,
                       multi_az=None, engine_version=None,
                       auto_minor_version_upgrade=None,
                       license_model=None, iops=None,
                       option_group_name=None, character_set_name=None,
                       publicly_accessible=None, tags=None):
    """
    Creates a new DB instance.

    :type db_instance_identifier: string
    :param db_instance_identifier: The DB instance identifier, stored
        as a lowercase string: 1-63 alphanumeric characters or hyphens
        (1-15 for SQL Server), first character a letter, no trailing
        hyphen and no "--".  Example: `mydbinstance`.

    :type allocated_storage: integer
    :param allocated_storage: Storage (in gigabytes) to allocate
        initially.  MySQL: 5-1024; Oracle: 10-1024; SQL Server:
        200-1024 (Standard/Enterprise) or 30-1024 (Express/Web).

    :type db_instance_class: string
    :param db_instance_class: Compute and memory capacity of the
        instance.  Valid values: `db.t1.micro | db.m1.small |
        db.m1.medium | db.m1.large | db.m1.xlarge | db.m2.xlarge
        | db.m2.2xlarge | db.m2.4xlarge`.

    :type engine: string
    :param engine: Database engine: `MySQL` | `oracle-se1` |
        `oracle-se` | `oracle-ee` | `sqlserver-ee` | `sqlserver-se` |
        `sqlserver-ex` | `sqlserver-web`.

    :type master_username: string
    :param master_username: Name of the master user.  Must start with a
        letter, not be an engine reserved word, and be at most 16
        (MySQL), 30 (Oracle), or 128 (SQL Server) alphanumeric
        characters.

    :type master_user_password: string
    :param master_user_password: Master user password; any printable
        ASCII character except "/", '"', or "@".  Length: 8-41 (MySQL),
        8-30 (Oracle), or 8-128 (SQL Server) characters.

    :type db_name: string
    :param db_name: Engine-specific.  MySQL: name of a database to
        create in the instance (none created when omitted; 1-64
        alphanumeric characters, not a reserved word).  Oracle: the SID
        (default `ORCL`, at most 8 characters).  SQL Server: not
        applicable, must be null.

    :type db_security_groups: list
    :param db_security_groups: DB security groups to associate
        (default: the engine's default DB security group).

    :type vpc_security_group_ids: list
    :param vpc_security_group_ids: EC2 VPC security groups to associate
        (default: the default EC2 VPC security group of the subnet
        group's VPC).

    :type availability_zone: string
    :param availability_zone: EC2 Availability Zone for the instance,
        e.g. `us-east-1d`; must be in the endpoint's region and may not
        be combined with MultiAZ=True.

    :type db_subnet_group_name: string
    :param db_subnet_group_name: DB subnet group to associate; when
        omitted the instance is non-VPC.

    :type preferred_maintenance_window: string
    :param preferred_maintenance_window: Weekly UTC maintenance window
        in `ddd:hh24:mi-ddd:hh24:mi` format, minimum 30 minutes.

    :type db_parameter_group_name: string
    :param db_parameter_group_name: DB parameter group to associate
        (engine default when omitted): 1-255 alphanumeric characters,
        first a letter, no trailing hyphen and no "--".

    :type backup_retention_period: integer
    :param backup_retention_period: Days automated backups are
        retained; 0 disables (not allowed for a master with read
        replicas).  Default 1; range 0-8.

    :type preferred_backup_window: string
    :param preferred_backup_window: Daily UTC backup window in
        `hh24:mi-hh24:mi` format, at least 30 minutes, not conflicting
        with the maintenance window.

    :type port: integer
    :param port: Port the database accepts connections on.  Defaults:
        MySQL 3306, Oracle 1521, SQL Server 1433.  Valid values
        1150-65535 (SQL Server excludes 1434 and 3389).

    :type multi_az: boolean
    :param multi_az: Whether to create a Multi-AZ deployment;
        incompatible with an explicit AvailabilityZone.

    :type engine_version: string
    :param engine_version: Engine version, e.g. `5.1.42` (MySQL),
        `11.2.0.2.v2` (Oracle), `10.50.2789.0.v1` (SQL Server).

    :type auto_minor_version_upgrade: boolean
    :param auto_minor_version_upgrade: Apply minor engine upgrades
        automatically during the maintenance window (default True).

    :type license_model: string
    :param license_model: `license-included` |
        `bring-your-own-license` | `general-public-license`.

    :type iops: integer
    :param iops: Provisioned IOPS to allocate initially; an integer
        greater than 1000.

    :type option_group_name: string
    :param option_group_name: Option group to associate.  Permanent
        options (e.g. Oracle TDE) cannot later be removed from the
        group, nor the group from the instance.

    :type character_set_name: string
    :param character_set_name: CharacterSet to associate, for supported
        engines.

    :type publicly_accessible: boolean
    :param publicly_accessible: True for an Internet-facing instance
        with a publicly resolvable DNS name, False for an internal one.
        Default depends on the VPC configuration of the request.

    :type tags: list
    :param tags: A list of tags.
    """
    params = {
        'DBInstanceIdentifier': db_instance_identifier,
        'AllocatedStorage': allocated_storage,
        'DBInstanceClass': db_instance_class,
        'Engine': engine,
        'MasterUsername': master_username,
        'MasterUserPassword': master_user_password,
    }
    if db_name is not None:
        params['DBName'] = db_name
    if db_security_groups is not None:
        self.build_list_params(params, db_security_groups,
                               'DBSecurityGroups.member')
    if vpc_security_group_ids is not None:
        self.build_list_params(params, vpc_security_group_ids,
                               'VpcSecurityGroupIds.member')
    # Scalar options are forwarded verbatim, only when supplied.
    for key, value in (
            ('AvailabilityZone', availability_zone),
            ('DBSubnetGroupName', db_subnet_group_name),
            ('PreferredMaintenanceWindow', preferred_maintenance_window),
            ('DBParameterGroupName', db_parameter_group_name),
            ('BackupRetentionPeriod', backup_retention_period),
            ('PreferredBackupWindow', preferred_backup_window),
            ('Port', port),
            ('EngineVersion', engine_version),
            ('LicenseModel', license_model),
            ('Iops', iops),
            ('OptionGroupName', option_group_name),
            ('CharacterSetName', character_set_name)):
        if value is not None:
            params[key] = value
    # Booleans are serialized as the lowercase strings "true"/"false".
    for key, flag in (
            ('MultiAZ', multi_az),
            ('AutoMinorVersionUpgrade', auto_minor_version_upgrade),
            ('PubliclyAccessible', publicly_accessible)):
        if flag is not None:
            params[key] = str(flag).lower()
    if tags is not None:
        self.build_complex_list_params(params, tags,
                                       'Tags.member', ('Key', 'Value'))
    return self._make_request(action='CreateDBInstance', verb='POST',
                              path='/', params=params)
def create_db_instance_read_replica(self, db_instance_identifier,
                                    source_db_instance_identifier,
                                    db_instance_class=None,
                                    availability_zone=None, port=None,
                                    auto_minor_version_upgrade=None,
                                    iops=None, option_group_name=None,
                                    publicly_accessible=None, tags=None):
    """
    Creates a DB instance that acts as a read replica of a source DB
    instance.  Replicas are created as Single-AZ deployments with
    backups disabled; all other attributes (including DB security and
    parameter groups) are inherited from the source except as
    overridden below.  The source must have backup retention enabled.

    :type db_instance_identifier: string
    :param db_instance_identifier: Identifier of the read replica,
        stored as a lowercase string.

    :type source_db_instance_identifier: string
    :param source_db_instance_identifier: Identifier of an existing DB
        instance (not itself a read replica) to replicate; a source
        supports up to five read replicas.

    :type db_instance_class: string
    :param db_instance_class: Compute/memory capacity of the replica,
        e.g. `db.m1.small` ... `db.m2.4xlarge`; inherited from the
        source when omitted.

    :type availability_zone: string
    :param availability_zone: EC2 Availability Zone for the replica,
        e.g. `us-east-1d`; a random zone in the endpoint's region by
        default.

    :type port: integer
    :param port: Port the replica uses for connections (1150-65535);
        inherited from the source when omitted.

    :type auto_minor_version_upgrade: boolean
    :param auto_minor_version_upgrade: Apply minor engine upgrades
        automatically during the maintenance window; inherited from the
        source when omitted.

    :type iops: integer
    :param iops: Provisioned IOPS to allocate initially.

    :type option_group_name: string
    :param option_group_name: Option group to associate (the engine's
        default group when omitted).

    :type publicly_accessible: boolean
    :param publicly_accessible: True for an Internet-facing instance
        with a publicly resolvable DNS name, False for an internal one.
        Default depends on the VPC configuration of the request.

    :type tags: list
    :param tags: A list of tags.
    """
    params = {
        'DBInstanceIdentifier': db_instance_identifier,
        'SourceDBInstanceIdentifier': source_db_instance_identifier,
    }
    for key, value in (('DBInstanceClass', db_instance_class),
                       ('AvailabilityZone', availability_zone),
                       ('Port', port),
                       ('Iops', iops),
                       ('OptionGroupName', option_group_name)):
        if value is not None:
            params[key] = value
    # Booleans are serialized as lowercase strings.
    for key, flag in (
            ('AutoMinorVersionUpgrade', auto_minor_version_upgrade),
            ('PubliclyAccessible', publicly_accessible)):
        if flag is not None:
            params[key] = str(flag).lower()
    if tags is not None:
        self.build_complex_list_params(params, tags,
                                       'Tags.member', ('Key', 'Value'))
    return self._make_request(action='CreateDBInstanceReadReplica',
                              verb='POST', path='/', params=params)
def create_db_parameter_group(self, db_parameter_group_name,
                              db_parameter_group_family, description,
                              tags=None):
    """
    Creates a new DB parameter group, initially populated with the
    default parameters of the engine.  Customize it afterwards with
    ModifyDBParameterGroup and attach it with ModifyDBInstance; the DB
    instance must be rebooted for the new group's settings to take
    effect.

    :type db_parameter_group_name: string
    :param db_parameter_group_name: Group name (stored lowercase):
        1-255 alphanumeric characters, first a letter, no trailing
        hyphen and no "--".

    :type db_parameter_group_family: string
    :param db_parameter_group_family: Parameter group family; a group
        belongs to exactly one family and applies only to instances
        running a compatible engine and version.

    :type description: string
    :param description: The description for the DB parameter group.

    :type tags: list
    :param tags: A list of tags.
    """
    params = {
        'DBParameterGroupName': db_parameter_group_name,
        'DBParameterGroupFamily': db_parameter_group_family,
        'Description': description,
    }
    if tags is not None:
        self.build_complex_list_params(params, tags,
                                       'Tags.member', ('Key', 'Value'))
    return self._make_request(action='CreateDBParameterGroup',
                              verb='POST', path='/', params=params)
def create_db_security_group(self, db_security_group_name,
db_security_group_description, tags=None):
"""
Creates a new DB security group. DB security groups control
access to a DB instance.
:type db_security_group_name: string
:param db_security_group_name: The name for the DB security group. This
value is stored as a lowercase string.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
+ Must not be "Default"
+ May not contain spaces
Example: `mysecuritygroup`
:type db_security_group_description: string
:param db_security_group_description: The description for the DB
security group.
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBSecurityGroupName': db_security_group_name,
'DBSecurityGroupDescription': db_security_group_description,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBSecurityGroup',
verb='POST',
path='/', params=params)
def create_db_snapshot(self, db_snapshot_identifier,
db_instance_identifier, tags=None):
"""
Creates a DBSnapshot. The source DBInstance must be in
"available" state.
:type db_snapshot_identifier: string
:param db_snapshot_identifier: The identifier for the DB snapshot.
Constraints:
+ Cannot be null, empty, or blank
+ Must contain from 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `my-snapshot-id`
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier. This is the unique key that identifies a DB
instance. This parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBSnapshotIdentifier': db_snapshot_identifier,
'DBInstanceIdentifier': db_instance_identifier,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBSnapshot',
verb='POST',
path='/', params=params)
def create_db_subnet_group(self, db_subnet_group_name,
db_subnet_group_description, subnet_ids,
tags=None):
"""
Creates a new DB subnet group. DB subnet groups must contain
at least one subnet in at least two AZs in the region.
:type db_subnet_group_name: string
:param db_subnet_group_name: The name for the DB subnet group. This
value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters or
hyphens. Must not be "Default".
Example: `mySubnetgroup`
:type db_subnet_group_description: string
:param db_subnet_group_description: The description for the DB subnet
group.
:type subnet_ids: list
:param subnet_ids: The EC2 Subnet IDs for the DB subnet group.
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBSubnetGroupName': db_subnet_group_name,
'DBSubnetGroupDescription': db_subnet_group_description,
}
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBSubnetGroup',
verb='POST',
path='/', params=params)
def create_event_subscription(self, subscription_name, sns_topic_arn,
source_type=None, event_categories=None,
source_ids=None, enabled=None, tags=None):
"""
Creates an RDS event notification subscription. This action
requires a topic ARN (Amazon Resource Name) created by either
the RDS console, the SNS console, or the SNS API. To obtain an
ARN with SNS, you must create a topic in Amazon SNS and
subscribe to the topic. The ARN is displayed in the SNS
console.
You can specify the type of source (SourceType) you want to be
notified of, provide a list of RDS sources (SourceIds) that
triggers the events, and provide a list of event categories
(EventCategories) for events you want to be notified of. For
example, you can specify SourceType = db-instance, SourceIds =
mydbinstance1, mydbinstance2 and EventCategories =
Availability, Backup.
If you specify both the SourceType and SourceIds, such as
SourceType = db-instance and SourceIdentifier = myDBInstance1,
you will be notified of all the db-instance events for the
specified source. If you specify a SourceType but do not
specify a SourceIdentifier, you will receive notice of the
events for that source type for all your RDS sources. If you
do not specify either the SourceType nor the SourceIdentifier,
you will be notified of events generated from all RDS sources
belonging to your customer account.
:type subscription_name: string
:param subscription_name: The name of the subscription.
Constraints: The name must be less than 255 characters.
:type sns_topic_arn: string
:param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic
created for event notification. The ARN is created by Amazon SNS
when you create a topic and subscribe to it.
:type source_type: string
:param source_type: The type of source that will be generating the
events. For example, if you want to be notified of events generated
by a DB instance, you would set this parameter to db-instance. if
this value is not specified, all events are returned.
Valid values: db-instance | db-parameter-group | db-security-group |
db-snapshot
:type event_categories: list
:param event_categories: A list of event categories for a SourceType
that you want to subscribe to. You can see a list of the categories
for a given SourceType in the `Events`_ topic in the Amazon RDS
User Guide or by using the **DescribeEventCategories** action.
:type source_ids: list
:param source_ids:
The list of identifiers of the event sources for which events will be
returned. If not specified, then all sources are included in the
response. An identifier must begin with a letter and must contain
only ASCII letters, digits, and hyphens; it cannot end with a
hyphen or contain two consecutive hyphens.
Constraints:
+ If SourceIds are supplied, SourceType must also be provided.
+ If the source type is a DB instance, then a `DBInstanceIdentifier`
must be supplied.
+ If the source type is a DB security group, a `DBSecurityGroupName`
must be supplied.
+ If the source type is a DB parameter group, a `DBParameterGroupName`
must be supplied.
+ If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be
supplied.
:type enabled: boolean
:param enabled: A Boolean value; set to **true** to activate the
subscription, set to **false** to create the subscription but not
active it.
:type tags: list
:param tags: A list of tags.
"""
params = {
'SubscriptionName': subscription_name,
'SnsTopicArn': sns_topic_arn,
}
if source_type is not None:
params['SourceType'] = source_type
if event_categories is not None:
self.build_list_params(params,
event_categories,
'EventCategories.member')
if source_ids is not None:
self.build_list_params(params,
source_ids,
'SourceIds.member')
if enabled is not None:
params['Enabled'] = str(
enabled).lower()
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateEventSubscription',
verb='POST',
path='/', params=params)
def create_option_group(self, option_group_name, engine_name,
major_engine_version, option_group_description,
tags=None):
"""
Creates a new option group. You can create up to 20 option
groups.
:type option_group_name: string
:param option_group_name: Specifies the name of the option group to be
created.
Constraints:
+ Must be 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `myoptiongroup`
:type engine_name: string
:param engine_name: Specifies the name of the engine that this option
group should be associated with.
:type major_engine_version: string
:param major_engine_version: Specifies the major version of the engine
that this option group should be associated with.
:type option_group_description: string
:param option_group_description: The description of the option group.
:type tags: list
:param tags: A list of tags.
"""
params = {
'OptionGroupName': option_group_name,
'EngineName': engine_name,
'MajorEngineVersion': major_engine_version,
'OptionGroupDescription': option_group_description,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateOptionGroup',
verb='POST',
path='/', params=params)
def delete_db_instance(self, db_instance_identifier,
skip_final_snapshot=None,
final_db_snapshot_identifier=None):
"""
The DeleteDBInstance action deletes a previously provisioned
DB instance. A successful response from the web service
indicates the request was received correctly. When you delete
a DB instance, all automated backups for that instance are
deleted and cannot be recovered. Manual DB snapshots of the DB
instance to be deleted are not deleted.
If a final DB snapshot is requested the status of the RDS
instance will be "deleting" until the DB snapshot is created.
The API action `DescribeDBInstance` is used to monitor the
status of this operation. The action cannot be canceled or
reverted once submitted.
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier for the DB instance to be deleted. This
parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type skip_final_snapshot: boolean
:param skip_final_snapshot: Determines whether a final DB snapshot is
created before the DB instance is deleted. If `True` is specified,
no DBSnapshot is created. If false is specified, a DB snapshot is
created before the DB instance is deleted.
The FinalDBSnapshotIdentifier parameter must be specified if
SkipFinalSnapshot is `False`.
Default: `False`
:type final_db_snapshot_identifier: string
:param final_db_snapshot_identifier:
The DBSnapshotIdentifier of the new DBSnapshot created when
SkipFinalSnapshot is set to `False`.
Specifying this parameter and also setting the SkipFinalShapshot
parameter to true results in an error.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if skip_final_snapshot is not None:
params['SkipFinalSnapshot'] = str(
skip_final_snapshot).lower()
if final_db_snapshot_identifier is not None:
params['FinalDBSnapshotIdentifier'] = final_db_snapshot_identifier
return self._make_request(
action='DeleteDBInstance',
verb='POST',
path='/', params=params)
def delete_db_parameter_group(self, db_parameter_group_name):
"""
Deletes a specified DBParameterGroup. The DBParameterGroup
cannot be associated with any RDS instances to be deleted.
The specified DB parameter group cannot be associated with any
DB instances.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be the name of an existing DB parameter group
+ You cannot delete a default DB parameter group
+ Cannot be associated with any DB instances
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
return self._make_request(
action='DeleteDBParameterGroup',
verb='POST',
path='/', params=params)
def delete_db_security_group(self, db_security_group_name):
"""
Deletes a DB security group.
The specified DB security group must not be associated with
any DB instances.
:type db_security_group_name: string
:param db_security_group_name:
The name of the DB security group to delete.
You cannot delete the default DB security group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
+ Must not be "Default"
+ May not contain spaces
"""
params = {'DBSecurityGroupName': db_security_group_name, }
return self._make_request(
action='DeleteDBSecurityGroup',
verb='POST',
path='/', params=params)
def delete_db_snapshot(self, db_snapshot_identifier):
"""
Deletes a DBSnapshot.
The DBSnapshot must be in the `available` state to be deleted.
:type db_snapshot_identifier: string
:param db_snapshot_identifier: The DBSnapshot identifier.
Constraints: Must be the name of an existing DB snapshot in the
`available` state.
"""
params = {'DBSnapshotIdentifier': db_snapshot_identifier, }
return self._make_request(
action='DeleteDBSnapshot',
verb='POST',
path='/', params=params)
def delete_db_subnet_group(self, db_subnet_group_name):
"""
Deletes a DB subnet group.
The specified database subnet group must not be associated
with any DB instances.
:type db_subnet_group_name: string
:param db_subnet_group_name:
The name of the database subnet group to delete.
You cannot delete the default subnet group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
"""
params = {'DBSubnetGroupName': db_subnet_group_name, }
return self._make_request(
action='DeleteDBSubnetGroup',
verb='POST',
path='/', params=params)
def delete_event_subscription(self, subscription_name):
"""
Deletes an RDS event notification subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to delete.
"""
params = {'SubscriptionName': subscription_name, }
return self._make_request(
action='DeleteEventSubscription',
verb='POST',
path='/', params=params)
def delete_option_group(self, option_group_name):
"""
Deletes an existing option group.
:type option_group_name: string
:param option_group_name:
The name of the option group to be deleted.
You cannot delete default option groups.
"""
params = {'OptionGroupName': option_group_name, }
return self._make_request(
action='DeleteOptionGroup',
verb='POST',
path='/', params=params)
def describe_db_engine_versions(self, engine=None, engine_version=None,
db_parameter_group_family=None,
max_records=None, marker=None,
default_only=None,
list_supported_character_sets=None):
"""
Returns a list of the available DB engines.
:type engine: string
:param engine: The database engine to return.
:type engine_version: string
:param engine_version: The database engine version to return.
Example: `5.1.49`
:type db_parameter_group_family: string
:param db_parameter_group_family:
The name of a specific DB parameter group family to return details for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more than the `MaxRecords` value is available, a
pagination token called a marker is included in the response so
that the following results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
:type default_only: boolean
:param default_only: Indicates that only the default version of the
specified engine or engine and major version combination is
returned.
:type list_supported_character_sets: boolean
:param list_supported_character_sets: If this parameter is specified,
and if the requested engine supports the CharacterSetName parameter
for CreateDBInstance, the response includes a list of supported
character sets for each engine version.
"""
params = {}
if engine is not None:
params['Engine'] = engine
if engine_version is not None:
params['EngineVersion'] = engine_version
if db_parameter_group_family is not None:
params['DBParameterGroupFamily'] = db_parameter_group_family
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
if default_only is not None:
params['DefaultOnly'] = str(
default_only).lower()
if list_supported_character_sets is not None:
params['ListSupportedCharacterSets'] = str(
list_supported_character_sets).lower()
return self._make_request(
action='DescribeDBEngineVersions',
verb='POST',
path='/', params=params)
def describe_db_instances(self, db_instance_identifier=None,
filters=None, max_records=None, marker=None):
"""
Returns information about provisioned RDS instances. This API
supports pagination.
:type db_instance_identifier: string
:param db_instance_identifier:
The user-supplied instance identifier. If this parameter is specified,
information from only the specific DB instance is returned. This
parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeDBInstances request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords` .
"""
params = {}
if db_instance_identifier is not None:
params['DBInstanceIdentifier'] = db_instance_identifier
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBInstances',
verb='POST',
path='/', params=params)
def describe_db_log_files(self, db_instance_identifier,
filename_contains=None, file_last_written=None,
file_size=None, max_records=None, marker=None):
"""
Returns a list of DB log files for the DB instance.
:type db_instance_identifier: string
:param db_instance_identifier:
The customer-assigned name of the DB instance that contains the log
files you want to list.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type filename_contains: string
:param filename_contains: Filters the available log files for log file
names that contain the specified string.
:type file_last_written: long
:param file_last_written: Filters the available log files for files
written since the specified date, in POSIX timestamp format.
:type file_size: long
:param file_size: Filters the available log files for files larger than
the specified size.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified MaxRecords
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
:type marker: string
:param marker: The pagination token provided in the previous request.
If this parameter is specified the response includes only records
beyond the marker, up to MaxRecords.
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if filename_contains is not None:
params['FilenameContains'] = filename_contains
if file_last_written is not None:
params['FileLastWritten'] = file_last_written
if file_size is not None:
params['FileSize'] = file_size
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBLogFiles',
verb='POST',
path='/', params=params)
def describe_db_parameter_groups(self, db_parameter_group_name=None,
filters=None, max_records=None,
marker=None):
"""
Returns a list of `DBParameterGroup` descriptions. If a
`DBParameterGroupName` is specified, the list will contain
only the description of the specified DB parameter group.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of a specific DB parameter group to return details for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeDBParameterGroups` request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords`.
"""
params = {}
if db_parameter_group_name is not None:
params['DBParameterGroupName'] = db_parameter_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBParameterGroups',
verb='POST',
path='/', params=params)
def describe_db_parameters(self, db_parameter_group_name, source=None,
max_records=None, marker=None):
"""
Returns the detailed parameter list for a particular DB
parameter group.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of a specific DB parameter group to return details for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type source: string
:param source: The parameter types to return.
Default: All parameter types returned
Valid Values: `user | system | engine-default`
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeDBParameters` request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
if source is not None:
params['Source'] = source
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBParameters',
verb='POST',
path='/', params=params)
def describe_db_security_groups(self, db_security_group_name=None,
filters=None, max_records=None,
marker=None):
"""
Returns a list of `DBSecurityGroup` descriptions. If a
`DBSecurityGroupName` is specified, the list will contain only
the descriptions of the specified DB security group.
:type db_security_group_name: string
:param db_security_group_name: The name of the DB security group to
return details for.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeDBSecurityGroups request. If this parameter is specified,
the response includes only records beyond the marker, up to the
value specified by `MaxRecords`.
"""
params = {}
if db_security_group_name is not None:
params['DBSecurityGroupName'] = db_security_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBSecurityGroups',
verb='POST',
path='/', params=params)
def describe_db_snapshots(self, db_instance_identifier=None,
db_snapshot_identifier=None,
snapshot_type=None, filters=None,
max_records=None, marker=None):
"""
Returns information about DB snapshots. This API supports
pagination.
:type db_instance_identifier: string
:param db_instance_identifier:
A DB instance identifier to retrieve the list of DB snapshots for.
Cannot be used in conjunction with `DBSnapshotIdentifier`. This
parameter is not case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type db_snapshot_identifier: string
:param db_snapshot_identifier:
A specific DB snapshot identifier to describe. Cannot be used in
conjunction with `DBInstanceIdentifier`. This value is stored as a
lowercase string.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
+ If this is the identifier of an automated snapshot, the
`SnapshotType` parameter must also be specified.
:type snapshot_type: string
:param snapshot_type: The type of snapshots that will be returned.
Values can be "automated" or "manual." If not specified, the
returned results will include all snapshots types.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeDBSnapshots` request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {}
if db_instance_identifier is not None:
params['DBInstanceIdentifier'] = db_instance_identifier
if db_snapshot_identifier is not None:
params['DBSnapshotIdentifier'] = db_snapshot_identifier
if snapshot_type is not None:
params['SnapshotType'] = snapshot_type
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBSnapshots',
verb='POST',
path='/', params=params)
def describe_db_subnet_groups(self, db_subnet_group_name=None,
filters=None, max_records=None,
marker=None):
"""
Returns a list of DBSubnetGroup descriptions. If a
DBSubnetGroupName is specified, the list will contain only the
descriptions of the specified DBSubnetGroup.
For an overview of CIDR ranges, go to the `Wikipedia
Tutorial`_.
:type db_subnet_group_name: string
:param db_subnet_group_name: The name of the DB subnet group to return
details for.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeDBSubnetGroups request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {}
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBSubnetGroups',
verb='POST',
path='/', params=params)
def describe_engine_default_parameters(self, db_parameter_group_family,
max_records=None, marker=None):
"""
Returns the default engine and system parameter information
for the specified database engine.
:type db_parameter_group_family: string
:param db_parameter_group_family: The name of the DB parameter group
family.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeEngineDefaultParameters` request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords`.
"""
params = {
'DBParameterGroupFamily': db_parameter_group_family,
}
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEngineDefaultParameters',
verb='POST',
path='/', params=params)
def describe_event_categories(self, source_type=None):
"""
Displays a list of categories for all event source types, or,
if specified, for a specified source type. You can see a list
of the event categories and source types in the ` Events`_
topic in the Amazon RDS User Guide.
:type source_type: string
:param source_type: The type of source that will be generating the
events.
Valid values: db-instance | db-parameter-group | db-security-group |
db-snapshot
"""
params = {}
if source_type is not None:
params['SourceType'] = source_type
return self._make_request(
action='DescribeEventCategories',
verb='POST',
path='/', params=params)
def describe_event_subscriptions(self, subscription_name=None,
filters=None, max_records=None,
marker=None):
"""
Lists all the subscription descriptions for a customer
account. The description for a subscription includes
SubscriptionName, SNSTopicARN, CustomerID, SourceType,
SourceID, CreationTime, and Status.
If you specify a SubscriptionName, lists the description for
that subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to describe.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeOrderableDBInstanceOptions request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords` .
"""
params = {}
if subscription_name is not None:
params['SubscriptionName'] = subscription_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEventSubscriptions',
verb='POST',
path='/', params=params)
def describe_events(self, source_identifier=None, source_type=None,
start_time=None, end_time=None, duration=None,
event_categories=None, max_records=None, marker=None):
"""
Returns events related to DB instances, DB security groups, DB
snapshots, and DB parameter groups for the past 14 days.
Events specific to a particular DB instance, DB security
group, database snapshot, or DB parameter group can be
obtained by providing the name as a parameter. By default, the
past hour of events are returned.
:type source_identifier: string
:param source_identifier:
The identifier of the event source for which events will be returned.
If not specified, then all sources are included in the response.
Constraints:
+ If SourceIdentifier is supplied, SourceType must also be provided.
+ If the source type is `DBInstance`, then a `DBInstanceIdentifier`
must be supplied.
+ If the source type is `DBSecurityGroup`, a `DBSecurityGroupName` must
be supplied.
+ If the source type is `DBParameterGroup`, a `DBParameterGroupName`
must be supplied.
+ If the source type is `DBSnapshot`, a `DBSnapshotIdentifier` must be
supplied.
+ Cannot end with a hyphen or contain two consecutive hyphens.
:type source_type: string
:param source_type: The event source to retrieve events for. If no
value is specified, all events are returned.
:type start_time: timestamp
:param start_time: The beginning of the time interval to retrieve
events for, specified in ISO 8601 format. For more information
about ISO 8601, go to the `ISO8601 Wikipedia page.`_
Example: 2009-07-08T18:00Z
:type end_time: timestamp
:param end_time: The end of the time interval for which to retrieve
events, specified in ISO 8601 format. For more information about
ISO 8601, go to the `ISO8601 Wikipedia page.`_
Example: 2009-07-08T18:00Z
:type duration: integer
:param duration: The number of minutes to retrieve events for.
Default: 60
:type event_categories: list
:param event_categories: A list of event categories that trigger
notifications for a event notification subscription.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeEvents request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {}
if source_identifier is not None:
params['SourceIdentifier'] = source_identifier
if source_type is not None:
params['SourceType'] = source_type
if start_time is not None:
params['StartTime'] = start_time
if end_time is not None:
params['EndTime'] = end_time
if duration is not None:
params['Duration'] = duration
if event_categories is not None:
self.build_list_params(params,
event_categories,
'EventCategories.member')
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEvents',
verb='POST',
path='/', params=params)
def describe_option_group_options(self, engine_name,
major_engine_version=None,
max_records=None, marker=None):
"""
Describes all available options.
:type engine_name: string
:param engine_name: A required parameter. Options available for the
given Engine name will be described.
:type major_engine_version: string
:param major_engine_version: If specified, filters the results to
include only options for the specified major engine version.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
"""
params = {'EngineName': engine_name, }
if major_engine_version is not None:
params['MajorEngineVersion'] = major_engine_version
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeOptionGroupOptions',
verb='POST',
path='/', params=params)
def describe_option_groups(self, option_group_name=None, filters=None,
marker=None, max_records=None,
engine_name=None, major_engine_version=None):
"""
Describes the available option groups.
:type option_group_name: string
:param option_group_name: The name of the option group to describe.
Cannot be supplied together with EngineName or MajorEngineVersion.
:type filters: list
:param filters:
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeOptionGroups request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type engine_name: string
:param engine_name: Filters the list of option groups to only include
groups associated with a specific database engine.
:type major_engine_version: string
:param major_engine_version: Filters the list of option groups to only
include groups associated with a specific database engine version.
If specified, then EngineName must also be specified.
"""
params = {}
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if marker is not None:
params['Marker'] = marker
if max_records is not None:
params['MaxRecords'] = max_records
if engine_name is not None:
params['EngineName'] = engine_name
if major_engine_version is not None:
params['MajorEngineVersion'] = major_engine_version
return self._make_request(
action='DescribeOptionGroups',
verb='POST',
path='/', params=params)
def describe_orderable_db_instance_options(self, engine,
engine_version=None,
db_instance_class=None,
license_model=None, vpc=None,
max_records=None, marker=None):
"""
Returns a list of orderable DB instance options for the
specified engine.
:type engine: string
:param engine: The name of the engine to retrieve DB instance options
for.
:type engine_version: string
:param engine_version: The engine version filter value. Specify this
parameter to show only the available offerings matching the
specified engine version.
:type db_instance_class: string
:param db_instance_class: The DB instance class filter value. Specify
this parameter to show only the available offerings matching the
specified DB instance class.
:type license_model: string
:param license_model: The license model filter value. Specify this
parameter to show only the available offerings matching the
specified license model.
:type vpc: boolean
:param vpc: The VPC filter value. Specify this parameter to show only
the available VPC or non-VPC offerings.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeOrderableDBInstanceOptions request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords` .
"""
params = {'Engine': engine, }
if engine_version is not None:
params['EngineVersion'] = engine_version
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if license_model is not None:
params['LicenseModel'] = license_model
if vpc is not None:
params['Vpc'] = str(
vpc).lower()
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeOrderableDBInstanceOptions',
verb='POST',
path='/', params=params)
def describe_reserved_db_instances(self, reserved_db_instance_id=None,
reserved_db_instances_offering_id=None,
db_instance_class=None, duration=None,
product_description=None,
offering_type=None, multi_az=None,
filters=None, max_records=None,
marker=None):
"""
Returns information about reserved DB instances for this
account, or about a specified reserved DB instance.
:type reserved_db_instance_id: string
:param reserved_db_instance_id: The reserved DB instance identifier
filter value. Specify this parameter to show only the reservation
that matches the specified reservation ID.
:type reserved_db_instances_offering_id: string
:param reserved_db_instances_offering_id: The offering identifier
filter value. Specify this parameter to show only purchased
reservations matching the specified offering identifier.
:type db_instance_class: string
:param db_instance_class: The DB instance class filter value. Specify
this parameter to show only those reservations matching the
specified DB instances class.
:type duration: string
:param duration: The duration filter value, specified in years or
seconds. Specify this parameter to show only reservations for this
duration.
Valid Values: `1 | 3 | 31536000 | 94608000`
:type product_description: string
:param product_description: The product description filter value.
Specify this parameter to show only those reservations matching the
specified product description.
:type offering_type: string
:param offering_type: The offering type filter value. Specify this
parameter to show only the available offerings matching the
specified offering type.
Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy
Utilization" `
:type multi_az: boolean
:param multi_az: The Multi-AZ filter value. Specify this parameter to
show only those reservations matching the specified Multi-AZ
parameter.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more than the `MaxRecords` value is available, a
pagination token called a marker is included in the response so
that the following results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
"""
params = {}
if reserved_db_instance_id is not None:
params['ReservedDBInstanceId'] = reserved_db_instance_id
if reserved_db_instances_offering_id is not None:
params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if duration is not None:
params['Duration'] = duration
if product_description is not None:
params['ProductDescription'] = product_description
if offering_type is not None:
params['OfferingType'] = offering_type
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedDBInstances',
verb='POST',
path='/', params=params)
def describe_reserved_db_instances_offerings(self,
reserved_db_instances_offering_id=None,
db_instance_class=None,
duration=None,
product_description=None,
offering_type=None,
multi_az=None,
max_records=None,
marker=None):
"""
Lists available reserved DB instance offerings.
:type reserved_db_instances_offering_id: string
:param reserved_db_instances_offering_id: The offering identifier
filter value. Specify this parameter to show only the available
offering that matches the specified reservation identifier.
Example: `438012d3-4052-4cc7-b2e3-8d3372e0e706`
:type db_instance_class: string
:param db_instance_class: The DB instance class filter value. Specify
this parameter to show only the available offerings matching the
specified DB instance class.
:type duration: string
:param duration: Duration filter value, specified in years or seconds.
Specify this parameter to show only reservations for this duration.
Valid Values: `1 | 3 | 31536000 | 94608000`
:type product_description: string
:param product_description: Product description filter value. Specify
this parameter to show only the available offerings matching the
specified product description.
:type offering_type: string
:param offering_type: The offering type filter value. Specify this
parameter to show only the available offerings matching the
specified offering type.
Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy
Utilization" `
:type multi_az: boolean
:param multi_az: The Multi-AZ filter value. Specify this parameter to
show only the available offerings matching the specified Multi-AZ
parameter.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more than the `MaxRecords` value is available, a
pagination token called a marker is included in the response so
that the following results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
"""
params = {}
if reserved_db_instances_offering_id is not None:
params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if duration is not None:
params['Duration'] = duration
if product_description is not None:
params['ProductDescription'] = product_description
if offering_type is not None:
params['OfferingType'] = offering_type
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedDBInstancesOfferings',
verb='POST',
path='/', params=params)
def download_db_log_file_portion(self, db_instance_identifier,
log_file_name, marker=None,
number_of_lines=None):
"""
Downloads the last line of the specified log file.
:type db_instance_identifier: string
:param db_instance_identifier:
The customer-assigned name of the DB instance that contains the log
files you want to list.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type log_file_name: string
:param log_file_name: The name of the log file to be downloaded.
:type marker: string
:param marker: The pagination token provided in the previous request.
If this parameter is specified the response includes only records
beyond the marker, up to MaxRecords.
:type number_of_lines: integer
:param number_of_lines: The number of lines remaining to be downloaded.
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'LogFileName': log_file_name,
}
if marker is not None:
params['Marker'] = marker
if number_of_lines is not None:
params['NumberOfLines'] = number_of_lines
return self._make_request(
action='DownloadDBLogFilePortion',
verb='POST',
path='/', params=params)
def list_tags_for_resource(self, resource_name):
"""
Lists all tags on an Amazon RDS resource.
For an overview on tagging an Amazon RDS resource, see
`Tagging Amazon RDS Resources`_.
:type resource_name: string
:param resource_name: The Amazon RDS resource with tags to be listed.
This value is an Amazon Resource Name (ARN). For information about
creating an ARN, see ` Constructing an RDS Amazon Resource Name
(ARN)`_.
"""
params = {'ResourceName': resource_name, }
return self._make_request(
action='ListTagsForResource',
verb='POST',
path='/', params=params)
def modify_db_instance(self, db_instance_identifier,
allocated_storage=None, db_instance_class=None,
db_security_groups=None,
vpc_security_group_ids=None,
apply_immediately=None, master_user_password=None,
db_parameter_group_name=None,
backup_retention_period=None,
preferred_backup_window=None,
preferred_maintenance_window=None, multi_az=None,
engine_version=None,
allow_major_version_upgrade=None,
auto_minor_version_upgrade=None, iops=None,
option_group_name=None,
new_db_instance_identifier=None):
"""
Modify settings for a DB instance. You can change one or more
database configuration parameters by specifying these
parameters and the new values in the request.
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier. This value is stored as a lowercase string.
Constraints:
+ Must be the identifier for an existing DB instance
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type allocated_storage: integer
:param allocated_storage: The new storage capacity of the RDS instance.
Changing this parameter does not result in an outage and the change
is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request.
**MySQL**
Default: Uses existing setting
Valid Values: 5-1024
Constraints: Value supplied must be at least 10% greater than the
current value. Values that are not at least 10% greater than the
existing value are rounded up so that they are 10% greater than the
current value.
Type: Integer
**Oracle**
Default: Uses existing setting
Valid Values: 10-1024
Constraints: Value supplied must be at least 10% greater than the
current value. Values that are not at least 10% greater than the
existing value are rounded up so that they are 10% greater than the
current value.
**SQL Server**
Cannot be modified.
If you choose to migrate your DB instance from using standard storage
to using Provisioned IOPS, or from using Provisioned IOPS to using
standard storage, the process can take time. The duration of the
migration depends on several factors such as database load, storage
size, storage type (standard or Provisioned IOPS), amount of IOPS
provisioned (if any), and the number of prior scale storage
operations. Typical migration times are under 24 hours, but the
process can take up to several days in some cases. During the
migration, the DB instance will be available for use, but may
experience performance degradation. While the migration takes
place, nightly backups for the instance will be suspended. No other
Amazon RDS operations can take place for the instance, including
modifying the instance, rebooting the instance, deleting the
instance, creating a read replica for the instance, and creating a
DB snapshot of the instance.
:type db_instance_class: string
:param db_instance_class: The new compute and memory capacity of the DB
instance. To determine the instance classes that are available for
a particular DB engine, use the DescribeOrderableDBInstanceOptions
action.
Passing a value for this parameter causes an outage during the change
and is applied during the next maintenance window, unless the
`ApplyImmediately` parameter is specified as `True` for this
request.
Default: Uses existing setting
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge`
:type db_security_groups: list
:param db_security_groups:
A list of DB security groups to authorize on this DB instance. Changing
this parameter does not result in an outage and the change is
asynchronously applied as soon as possible.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type vpc_security_group_ids: list
:param vpc_security_group_ids:
A list of EC2 VPC security groups to authorize on this DB instance.
This change is asynchronously applied as soon as possible.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type apply_immediately: boolean
:param apply_immediately: Specifies whether or not the modifications in
this request and any pending modifications are asynchronously
applied as soon as possible, regardless of the
`PreferredMaintenanceWindow` setting for the DB instance.
If this parameter is passed as `False`, changes to the DB instance are
applied on the next call to RebootDBInstance, the next maintenance
reboot, or the next failure reboot, whichever occurs first. See
each parameter to determine when a change is applied.
Default: `False`
:type master_user_password: string
:param master_user_password:
The new password for the DB instance master user. Can be any printable
ASCII character except "/", '"', or "@".
Changing this parameter does not result in an outage and the change is
asynchronously applied as soon as possible. Between the time of the
request and the completion of the request, the `MasterUserPassword`
element exists in the `PendingModifiedValues` element of the
operation response.
Default: Uses existing setting
Constraints: Must be 8 to 41 alphanumeric characters (MySQL), 8 to 30
alphanumeric characters (Oracle), or 8 to 128 alphanumeric
characters (SQL Server).
Amazon RDS API actions never return the password, so this action
provides a way to regain access to a master instance user if the
password is lost.
:type db_parameter_group_name: string
:param db_parameter_group_name: The name of the DB parameter group to
apply to this DB instance. Changing this parameter does not result
in an outage and the change is applied during the next maintenance
window unless the `ApplyImmediately` parameter is set to `True` for
this request.
Default: Uses existing setting
Constraints: The DB parameter group must be in the same DB parameter
group family as this DB instance.
:type backup_retention_period: integer
:param backup_retention_period:
The number of days to retain automated backups. Setting this parameter
to a positive number enables backups. Setting this parameter to 0
disables automated backups.
Changing this parameter can result in an outage if you change from 0 to
a non-zero value or from a non-zero value to 0. These changes are
applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request. If
you change the parameter from one non-zero value to another non-
zero value, the change is asynchronously applied as soon as
possible.
Default: Uses existing setting
Constraints:
+ Must be a value from 0 to 8
+ Cannot be set to 0 if the DB instance is a master instance with read
replicas or if the DB instance is a read replica
:type preferred_backup_window: string
:param preferred_backup_window:
The daily time range during which automated backups are created if
automated backups are enabled, as determined by the
`BackupRetentionPeriod`. Changing this parameter does not result in
an outage and the change is asynchronously applied as soon as
possible.
Constraints:
+ Must be in the format hh24:mi-hh24:mi
+ Times should be Universal Time Coordinated (UTC)
+ Must not conflict with the preferred maintenance window
+ Must be at least 30 minutes
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which system maintenance can occur, which may result in an
outage. Changing this parameter does not result in an outage,
except in the following situation, and the change is asynchronously
applied as soon as possible. If there are pending actions that
cause a reboot, and the maintenance window is changed to include
the current time, then changing this parameter will cause a reboot
of the DB instance. If moving this window to the current time,
there must be at least 30 minutes between the current time and end
of the window to ensure pending changes are applied.
Default: Uses existing setting
Format: ddd:hh24:mi-ddd:hh24:mi
Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
Constraints: Must be at least 30 minutes
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
Changing this parameter does not result in an outage and the change
is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request.
Constraints: Cannot be specified if the DB instance is a read replica.
:type engine_version: string
:param engine_version: The version number of the database engine to
upgrade to. Changing this parameter results in an outage and the
change is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request.
For major version upgrades, if a non-default DB parameter group is
currently in use, a new DB parameter group in the DB parameter
group family for the new engine version must be specified. The new
DB parameter group can be the default for that DB parameter group
family.
Example: `5.1.42`
:type allow_major_version_upgrade: boolean
:param allow_major_version_upgrade: Indicates that major version
upgrades are allowed. Changing this parameter does not result in an
outage and the change is asynchronously applied as soon as
possible.
Constraints: This parameter must be set to true when specifying a value
for the EngineVersion parameter that is a different major version
than the DB instance's current version.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor version
upgrades will be applied automatically to the DB instance during
the maintenance window. Changing this parameter does not result in
an outage except in the following case and the change is
asynchronously applied as soon as possible. An outage will result
if this parameter is set to `True` during the maintenance window,
and a newer minor version is available, and RDS has enabled auto
patching for that engine version.
:type iops: integer
:param iops: The new Provisioned IOPS (I/O operations per second) value
for the RDS instance. Changing this parameter does not result in an
outage and the change is applied during the next maintenance window
unless the `ApplyImmediately` parameter is set to `True` for this
request.
Default: Uses existing setting
Constraints: Value supplied must be at least 10% greater than the
current value. Values that are not at least 10% greater than the
existing value are rounded up so that they are 10% greater than the
current value.
Type: Integer
If you choose to migrate your DB instance from using standard storage
to using Provisioned IOPS, or from using Provisioned IOPS to using
standard storage, the process can take time. The duration of the
migration depends on several factors such as database load, storage
size, storage type (standard or Provisioned IOPS), amount of IOPS
provisioned (if any), and the number of prior scale storage
operations. Typical migration times are under 24 hours, but the
process can take up to several days in some cases. During the
migration, the DB instance will be available for use, but may
experience performance degradation. While the migration takes
place, nightly backups for the instance will be suspended. No other
Amazon RDS operations can take place for the instance, including
modifying the instance, rebooting the instance, deleting the
instance, creating a read replica for the instance, and creating a
DB snapshot of the instance.
:type option_group_name: string
:param option_group_name: Indicates that the DB instance should be
associated with the specified option group. Changing this parameter
does not result in an outage except in the following case and the
change is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request. If
the parameter change results in an option group that enables OEM,
this change can cause a brief (sub-second) period during which new
connections are rejected but existing connections are not
interrupted.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance
:type new_db_instance_identifier: string
:param new_db_instance_identifier:
The new DB instance identifier for the DB instance when renaming a DB
Instance. This value is stored as a lowercase string.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if allocated_storage is not None:
params['AllocatedStorage'] = allocated_storage
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if db_security_groups is not None:
self.build_list_params(params,
db_security_groups,
'DBSecurityGroups.member')
if vpc_security_group_ids is not None:
self.build_list_params(params,
vpc_security_group_ids,
'VpcSecurityGroupIds.member')
if apply_immediately is not None:
params['ApplyImmediately'] = str(
apply_immediately).lower()
if master_user_password is not None:
params['MasterUserPassword'] = master_user_password
if db_parameter_group_name is not None:
params['DBParameterGroupName'] = db_parameter_group_name
if backup_retention_period is not None:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window is not None:
params['PreferredBackupWindow'] = preferred_backup_window
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if engine_version is not None:
params['EngineVersion'] = engine_version
if allow_major_version_upgrade is not None:
params['AllowMajorVersionUpgrade'] = str(
allow_major_version_upgrade).lower()
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if new_db_instance_identifier is not None:
params['NewDBInstanceIdentifier'] = new_db_instance_identifier
return self._make_request(
action='ModifyDBInstance',
verb='POST',
path='/', params=params)
def modify_db_parameter_group(self, db_parameter_group_name, parameters):
    """Modify up to 20 parameters of a DB parameter group.

    :type db_parameter_group_name: string
    :param db_parameter_group_name: Name of an existing DB parameter
        group (1-255 alphanumeric characters, starting with a letter,
        no trailing hyphen or consecutive hyphens).

    :type parameters: list
    :param parameters: Parameter descriptions; each needs at least a
        name, a value and an apply method.  The apply method is
        `immediate` (dynamic parameters only) or `pending-reboot`
        (dynamic and static; applied at the next instance reboot).
    """
    params = {'DBParameterGroupName': db_parameter_group_name}
    self.build_complex_list_params(
        params, parameters, 'Parameters.member',
        ('ParameterName', 'ParameterValue', 'Description', 'Source',
         'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable',
         'MinimumEngineVersion', 'ApplyMethod'))
    return self._make_request(action='ModifyDBParameterGroup',
                              verb='POST', path='/', params=params)
def modify_db_subnet_group(self, db_subnet_group_name, subnet_ids,
                           db_subnet_group_description=None):
    """Modify an existing DB subnet group.

    The group must span at least one subnet in at least two AZs of the
    region.

    :type db_subnet_group_name: string
    :param db_subnet_group_name: Name of the DB subnet group (stored
        lowercase; at most 255 alphanumeric characters or hyphens; must
        not be "Default").

    :type subnet_ids: list
    :param subnet_ids: EC2 subnet IDs for the group.

    :type db_subnet_group_description: string
    :param db_subnet_group_description: Optional new description.
    """
    params = {'DBSubnetGroupName': db_subnet_group_name}
    if db_subnet_group_description is not None:
        params['DBSubnetGroupDescription'] = db_subnet_group_description
    self.build_list_params(params, subnet_ids, 'SubnetIds.member')
    return self._make_request(action='ModifyDBSubnetGroup',
                              verb='POST', path='/', params=params)
def modify_event_subscription(self, subscription_name,
                              sns_topic_arn=None, source_type=None,
                              event_categories=None, enabled=None):
    """Modify an existing RDS event notification subscription.

    Source identifiers cannot be changed here; use the
    Add/RemoveSourceIdentifier*Subscription calls for that.

    :type subscription_name: string
    :param subscription_name: Name of the subscription to modify.

    :type sns_topic_arn: string
    :param sns_topic_arn: ARN of the SNS topic for notifications.

    :type source_type: string
    :param source_type: Event source type (`db-instance` |
        `db-parameter-group` | `db-security-group` | `db-snapshot`);
        omit to receive events for all sources.

    :type event_categories: list
    :param event_categories: Event categories to subscribe to for the
        chosen source type.

    :type enabled: boolean
    :param enabled: Set True to activate the subscription.
    """
    params = {'SubscriptionName': subscription_name}
    # Plain optional scalars are packed only when supplied.
    for key, value in (('SnsTopicArn', sns_topic_arn),
                       ('SourceType', source_type)):
        if value is not None:
            params[key] = value
    if event_categories is not None:
        self.build_list_params(params, event_categories,
                               'EventCategories.member')
    if enabled is not None:
        # Booleans are serialized as lowercase 'true'/'false'.
        params['Enabled'] = str(enabled).lower()
    return self._make_request(action='ModifyEventSubscription',
                              verb='POST', path='/', params=params)
def modify_option_group(self, option_group_name, options_to_include=None,
                        options_to_remove=None, apply_immediately=None):
    """Modify an existing option group.

    Permanent options (e.g. Oracle Advanced Security TDE) cannot be
    removed once associated with a DB instance.

    :type option_group_name: string
    :param option_group_name: Name of the option group to modify.

    :type options_to_include: list
    :param options_to_include: Options to add, or whose configuration
        should be updated if already present.

    :type options_to_remove: list
    :param options_to_remove: Options to remove from the group.

    :type apply_immediately: boolean
    :param apply_immediately: Apply now instead of during each
        instance's next maintenance window.
    """
    params = {'OptionGroupName': option_group_name}
    if options_to_include is not None:
        self.build_complex_list_params(
            params, options_to_include, 'OptionsToInclude.member',
            ('OptionName', 'Port', 'DBSecurityGroupMemberships',
             'VpcSecurityGroupMemberships', 'OptionSettings'))
    if options_to_remove is not None:
        self.build_list_params(params, options_to_remove,
                               'OptionsToRemove.member')
    if apply_immediately is not None:
        # Serialize booleans as lowercase 'true'/'false'.
        params['ApplyImmediately'] = str(apply_immediately).lower()
    return self._make_request(action='ModifyOptionGroup',
                              verb='POST', path='/', params=params)
def promote_read_replica(self, db_instance_identifier,
                         backup_retention_period=None,
                         preferred_backup_window=None):
    """Promote a read replica to a standalone DB instance.

    :type db_instance_identifier: string
    :param db_instance_identifier: Identifier of an existing read
        replica (1-63 alphanumeric characters or hyphens, starting with
        a letter, no trailing hyphen or consecutive hyphens).

    :type backup_retention_period: integer
    :param backup_retention_period: Days to retain automated backups
        (0 disables them; default 1; per the service docs, 0-8).

    :type preferred_backup_window: string
    :param preferred_backup_window: Daily backup window in UTC as
        `hh24:mi-hh24:mi`, at least 30 minutes, not overlapping the
        maintenance window.
    """
    params = {'DBInstanceIdentifier': db_instance_identifier}
    optional = (('BackupRetentionPeriod', backup_retention_period),
                ('PreferredBackupWindow', preferred_backup_window))
    for key, value in optional:
        if value is not None:
            params[key] = value
    return self._make_request(action='PromoteReadReplica',
                              verb='POST', path='/', params=params)
def purchase_reserved_db_instances_offering(self,
                                            reserved_db_instances_offering_id,
                                            reserved_db_instance_id=None,
                                            db_instance_count=None,
                                            tags=None):
    """Purchase a reserved DB instance offering.

    :type reserved_db_instances_offering_id: string
    :param reserved_db_instances_offering_id: ID of the offering to
        purchase (e.g. `438012d3-4052-4cc7-b2e3-8d3372e0e706`).

    :type reserved_db_instance_id: string
    :param reserved_db_instance_id: Customer-chosen identifier used to
        track this reservation.

    :type db_instance_count: integer
    :param db_instance_count: Number of instances to reserve
        (default 1).

    :type tags: list
    :param tags: Tags as (Key, Value) mappings.
    """
    params = {
        'ReservedDBInstancesOfferingId': reserved_db_instances_offering_id,
    }
    for key, value in (('ReservedDBInstanceId', reserved_db_instance_id),
                       ('DBInstanceCount', db_instance_count)):
        if value is not None:
            params[key] = value
    if tags is not None:
        self.build_complex_list_params(params, tags, 'Tags.member',
                                       ('Key', 'Value'))
    return self._make_request(action='PurchaseReservedDBInstancesOffering',
                              verb='POST', path='/', params=params)
def reboot_db_instance(self, db_instance_identifier, force_failover=None):
    """Reboot a DB instance, applying any pending parameter-group changes.

    Causes a momentary outage while the instance status is `rebooting`;
    for MultiAZ deployments the reboot may be performed as a failover.
    An RDS event is emitted when the reboot completes.

    :type db_instance_identifier: string
    :param db_instance_identifier: Instance identifier (stored
        lowercase; 1-63 alphanumeric characters or hyphens, starting
        with a letter, no trailing hyphen or consecutive hyphens).

    :type force_failover: boolean
    :param force_failover: When True, reboot via a MultiAZ failover.
        Only valid for MultiAZ-configured instances.
    """
    params = {'DBInstanceIdentifier': db_instance_identifier}
    if force_failover is not None:
        # Serialize booleans as lowercase 'true'/'false'.
        params['ForceFailover'] = str(force_failover).lower()
    return self._make_request(action='RebootDBInstance',
                              verb='POST', path='/', params=params)
def remove_source_identifier_from_subscription(self, subscription_name,
                                               source_identifier):
    """Remove a source identifier from an RDS event subscription.

    :type subscription_name: string
    :param subscription_name: Name of the event notification
        subscription to edit.

    :type source_identifier: string
    :param source_identifier: Identifier to remove, e.g. a DB instance
        identifier or security group name.
    """
    return self._make_request(
        action='RemoveSourceIdentifierFromSubscription',
        verb='POST', path='/',
        params={'SubscriptionName': subscription_name,
                'SourceIdentifier': source_identifier})
def remove_tags_from_resource(self, resource_name, tag_keys):
    """Remove metadata tags from an Amazon RDS resource.

    :type resource_name: string
    :param resource_name: Amazon Resource Name (ARN) of the resource to
        untag.

    :type tag_keys: list
    :param tag_keys: Keys (names) of the tags to remove.
    """
    params = {'ResourceName': resource_name}
    self.build_list_params(params, tag_keys, 'TagKeys.member')
    return self._make_request(action='RemoveTagsFromResource',
                              verb='POST', path='/', params=params)
def reset_db_parameter_group(self, db_parameter_group_name,
                             reset_all_parameters=None, parameters=None):
    """Reset DB parameter group parameters to engine defaults.

    Dynamic parameters reset immediately; static parameters are marked
    `pending-reboot` and take effect at the next instance restart.

    :type db_parameter_group_name: string
    :param db_parameter_group_name: Name of the DB parameter group
        (1-255 alphanumeric characters, starting with a letter, no
        trailing hyphen or consecutive hyphens).

    :type reset_all_parameters: boolean
    :param reset_all_parameters: Reset every parameter in the group
        (default True per the service).

    :type parameters: list
    :param parameters: Specific parameters to reset; each needs at
        least a name and an apply method.  MySQL accepts `immediate` or
        `pending-reboot`; Oracle accepts `pending-reboot` only.
    """
    params = {'DBParameterGroupName': db_parameter_group_name}
    if reset_all_parameters is not None:
        # Serialize booleans as lowercase 'true'/'false'.
        params['ResetAllParameters'] = str(reset_all_parameters).lower()
    if parameters is not None:
        self.build_complex_list_params(
            params, parameters, 'Parameters.member',
            ('ParameterName', 'ParameterValue', 'Description', 'Source',
             'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable',
             'MinimumEngineVersion', 'ApplyMethod'))
    return self._make_request(action='ResetDBParameterGroup',
                              verb='POST', path='/', params=params)
def restore_db_instance_from_db_snapshot(self, db_instance_identifier,
                                         db_snapshot_identifier,
                                         db_instance_class=None,
                                         port=None,
                                         availability_zone=None,
                                         db_subnet_group_name=None,
                                         multi_az=None,
                                         publicly_accessible=None,
                                         auto_minor_version_upgrade=None,
                                         license_model=None,
                                         db_name=None, engine=None,
                                         iops=None,
                                         option_group_name=None,
                                         tags=None):
    """Create a new DB instance from a DB snapshot.

    The new instance keeps the source database's configuration except
    that it is created with the default security group.

    :type db_instance_identifier: string
    :param db_instance_identifier: Identifier for the new DB instance
        (1-63 alphanumeric characters or hyphens, starting with a
        letter, no trailing hyphen or consecutive hyphens).

    :type db_snapshot_identifier: string
    :param db_snapshot_identifier: Identifier of the DB snapshot to
        restore from (not case sensitive).

    :type db_instance_class: string
    :param db_instance_class: Compute/memory class, e.g. `db.m1.small`.

    :type port: integer
    :param port: Listener port (1150-65535; default: same as source).

    :type availability_zone: string
    :param availability_zone: Target AZ; cannot be combined with
        `multi_az=True`.

    :type db_subnet_group_name: string
    :param db_subnet_group_name: DB subnet group for the new instance.

    :type multi_az: boolean
    :param multi_az: Create as a Multi-AZ deployment.

    :type publicly_accessible: boolean
    :param publicly_accessible: True for an Internet-facing instance
        with a publicly resolvable DNS name; default depends on the
        VPC / subnet-group configuration.

    :type auto_minor_version_upgrade: boolean
    :param auto_minor_version_upgrade: Apply minor engine upgrades
        automatically during the maintenance window.

    :type license_model: string
    :param license_model: `license-included` | `bring-your-own-license`
        | `general-public-license` (default: same as source).

    :type db_name: string
    :param db_name: Database name for the restored instance (not
        applicable to MySQL).

    :type engine: string
    :param engine: Database engine; must be compatible with the source.

    :type iops: integer
    :param iops: Provisioned IOPS (> 1000).  0 converts the instance to
        non-PIOPS; unspecified takes the value from the backup.

    :type option_group_name: string
    :param option_group_name: Option group for the restored instance.
        Permanent options cannot later be removed.

    :type tags: list
    :param tags: Tags as (Key, Value) mappings.
    """
    params = {
        'DBInstanceIdentifier': db_instance_identifier,
        'DBSnapshotIdentifier': db_snapshot_identifier,
    }
    # Optional scalars are sent verbatim when supplied.
    scalars = (('DBInstanceClass', db_instance_class),
               ('Port', port),
               ('AvailabilityZone', availability_zone),
               ('DBSubnetGroupName', db_subnet_group_name),
               ('LicenseModel', license_model),
               ('DBName', db_name),
               ('Engine', engine),
               ('Iops', iops),
               ('OptionGroupName', option_group_name))
    for key, value in scalars:
        if value is not None:
            params[key] = value
    # Booleans are serialized as lowercase 'true'/'false'.
    flags = (('MultiAZ', multi_az),
             ('PubliclyAccessible', publicly_accessible),
             ('AutoMinorVersionUpgrade', auto_minor_version_upgrade))
    for key, flag in flags:
        if flag is not None:
            params[key] = str(flag).lower()
    if tags is not None:
        self.build_complex_list_params(params, tags, 'Tags.member',
                                       ('Key', 'Value'))
    return self._make_request(action='RestoreDBInstanceFromDBSnapshot',
                              verb='POST', path='/', params=params)
def restore_db_instance_to_point_in_time(self,
                                         source_db_instance_identifier,
                                         target_db_instance_identifier,
                                         restore_time=None,
                                         use_latest_restorable_time=None,
                                         db_instance_class=None,
                                         port=None,
                                         availability_zone=None,
                                         db_subnet_group_name=None,
                                         multi_az=None,
                                         publicly_accessible=None,
                                         auto_minor_version_upgrade=None,
                                         license_model=None,
                                         db_name=None, engine=None,
                                         iops=None,
                                         option_group_name=None,
                                         tags=None):
    """Restore a DB instance to an arbitrary point in time.

    Any time before the source's latest restorable time (up to the
    backup retention period) may be chosen.  The new instance keeps the
    source configuration except that it gets the default security
    group.

    :type source_db_instance_identifier: string
    :param source_db_instance_identifier: Identifier of the existing
        source DB instance.

    :type target_db_instance_identifier: string
    :param target_db_instance_identifier: Name of the new DB instance
        (1-63 alphanumeric characters or hyphens, starting with a
        letter, no trailing hyphen or consecutive hyphens).

    :type restore_time: timestamp
    :param restore_time: UTC time to restore to, e.g.
        `2009-09-07T23:45:00Z`; mutually exclusive with
        `use_latest_restorable_time`.

    :type use_latest_restorable_time: boolean
    :param use_latest_restorable_time: Restore from the latest backup
        time (default False); mutually exclusive with `restore_time`.

    :type db_instance_class: string
    :param db_instance_class: Compute/memory class (default: same as
        the source instance).

    :type port: integer
    :param port: Listener port (1150-65535; default: same as source).

    :type availability_zone: string
    :param availability_zone: Target AZ; cannot be combined with
        `multi_az=True`.

    :type db_subnet_group_name: string
    :param db_subnet_group_name: DB subnet group for the new instance.

    :type multi_az: boolean
    :param multi_az: Create as a Multi-AZ deployment.

    :type publicly_accessible: boolean
    :param publicly_accessible: True for an Internet-facing instance;
        default depends on the VPC / subnet-group configuration.

    :type auto_minor_version_upgrade: boolean
    :param auto_minor_version_upgrade: Apply minor engine upgrades
        automatically during the maintenance window.

    :type license_model: string
    :param license_model: `license-included` | `bring-your-own-license`
        | `general-public-license` (default: same as source).

    :type db_name: string
    :param db_name: Database name for the restored instance (not used
        for MySQL).

    :type engine: string
    :param engine: Database engine; must be compatible with the source.

    :type iops: integer
    :param iops: Initial provisioned IOPS (> 1000).

    :type option_group_name: string
    :param option_group_name: Option group for the restored instance.
        Permanent options cannot later be removed.

    :type tags: list
    :param tags: Tags as (Key, Value) mappings.
    """
    params = {
        'SourceDBInstanceIdentifier': source_db_instance_identifier,
        'TargetDBInstanceIdentifier': target_db_instance_identifier,
    }
    # Optional scalars are sent verbatim when supplied.
    scalars = (('RestoreTime', restore_time),
               ('DBInstanceClass', db_instance_class),
               ('Port', port),
               ('AvailabilityZone', availability_zone),
               ('DBSubnetGroupName', db_subnet_group_name),
               ('LicenseModel', license_model),
               ('DBName', db_name),
               ('Engine', engine),
               ('Iops', iops),
               ('OptionGroupName', option_group_name))
    for key, value in scalars:
        if value is not None:
            params[key] = value
    # Booleans are serialized as lowercase 'true'/'false'.
    flags = (('UseLatestRestorableTime', use_latest_restorable_time),
             ('MultiAZ', multi_az),
             ('PubliclyAccessible', publicly_accessible),
             ('AutoMinorVersionUpgrade', auto_minor_version_upgrade))
    for key, flag in flags:
        if flag is not None:
            params[key] = str(flag).lower()
    if tags is not None:
        self.build_complex_list_params(params, tags, 'Tags.member',
                                       ('Key', 'Value'))
    return self._make_request(action='RestoreDBInstanceToPointInTime',
                              verb='POST', path='/', params=params)
def revoke_db_security_group_ingress(self, db_security_group_name,
                                     cidrip=None,
                                     ec2_security_group_name=None,
                                     ec2_security_group_id=None,
                                     ec2_security_group_owner_id=None):
    """Revoke previously authorized ingress from a DB security group.

    Supply exactly one of: CIDRIP, EC2SecurityGroupId (for VPC), or
    EC2SecurityGroupOwnerId plus EC2SecurityGroupName or
    EC2SecurityGroupId.

    :type db_security_group_name: string
    :param db_security_group_name: DB security group to revoke ingress
        from.

    :type cidrip: string
    :param cidrip: CIDR IP range to revoke; mutually exclusive with the
        EC2 security group parameters.

    :type ec2_security_group_name: string
    :param ec2_security_group_name: Name of the EC2 security group to
        revoke access from.

    :type ec2_security_group_id: string
    :param ec2_security_group_id: ID of the EC2 security group;
        required for VPC DB security groups.

    :type ec2_security_group_owner_id: string
    :param ec2_security_group_owner_id: AWS account number owning the
        named EC2 security group (not an access key ID).
    """
    params = {'DBSecurityGroupName': db_security_group_name}
    optional = (('CIDRIP', cidrip),
                ('EC2SecurityGroupName', ec2_security_group_name),
                ('EC2SecurityGroupId', ec2_security_group_id),
                ('EC2SecurityGroupOwnerId', ec2_security_group_owner_id))
    for key, value in optional:
        if value is not None:
            params[key] = value
    return self._make_request(action='RevokeDBSecurityGroupIngress',
                              verb='POST', path='/', params=params)
def _make_request(self, action, verb, path, params):
    """Issue an RDS API request and decode the JSON response.

    :param action: RDS API action name (e.g. 'ModifyDBInstance').
    :param verb: HTTP verb for the request.
    :param path: Request path.
    :param params: Flattened request parameters; mutated here to ask
        the service for a JSON response.
    :returns: Decoded JSON response body on HTTP 200.
    :raises: The fault class mapped from the response's Error/Code in
        ``self._faults`` (``self.ResponseError`` when unmapped) for any
        non-200 status.
    """
    params['ContentType'] = 'JSON'
    # Bug fix: `verb` and `path` were accepted but ignored (hard-coded
    # to 'POST' and '/').  Every in-file caller passes exactly those
    # values, so honoring the arguments is behavior-compatible.
    response = self.make_request(action=action, verb=verb,
                                 path=path, params=params)
    body = response.read()
    boto.log.debug(body)
    if response.status == 200:
        return json.loads(body)
    json_body = json.loads(body)
    fault_name = json_body.get('Error', {}).get('Code', None)
    exception_class = self._faults.get(fault_name, self.ResponseError)
    raise exception_class(response.status, response.reason,
                          body=json_body)
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/eggs/boto-2.27.0-py2.7.egg/boto/rds2/layer1.py
|
Python
|
gpl-3.0
| 158,263
|
#!/usr/bin/env python3
# Copyright (C) 2012-2015 ASTRON (Netherlands Institute for Radio Astronomy)
# P.O. Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
from lofar.messagebus.message import MessageContent
class TaskFeedbackProcessing(MessageContent):
    """Message content for the ``task.feedback.processing`` protocol.

    Wraps processing feedback for a task in a MessageContent envelope
    (protocol version 1.0.0), carrying the feedback as the payload.
    """

    def __init__(self, from_, forUser, summary, momID, sasID, feedback):
        protocol, version = "task.feedback.processing", "1.0.0"
        super(TaskFeedbackProcessing, self).__init__(
            from_, forUser, summary, protocol, version, momID, sasID)
        # The feedback object is carried as its string representation.
        self.payload = str(feedback)
|
kernsuite-debian/lofar
|
LCS/MessageBus/src/Protocols/taskfeedbackprocessing.py
|
Python
|
gpl-3.0
| 1,244
|
###########################################################################
## PyBot ##
## Copyright (C) 2015, Kyle Repinski ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
###########################################################################
import __main__, requests
from pybotutils import fixHTMLChars, strbetween
# Plugin metadata — presumably read by the bot core in __main__: "names"
# are the chat triggers, "access" the minimum permission level, "version"
# the plugin version.  TODO(review): confirm against the plugin loader.
info = { "names" : [ "pun", "joke" ], "access" : 0, "version" : 1 }
def command( message, user, recvfrom ):
    """Fetch a random pun from punoftheday.com and send it to *recvfrom*.

    Scrapes the first ``<p>`` element of the response; falls back to a
    stock message when nothing is found. Always returns True so the bot
    treats the command as handled.
    """
    # timeout added: without it a dead/slow site blocks the bot's command
    # loop indefinitely
    response = requests.get( "http://www.punoftheday.com/cgi-bin/randompun.pl", timeout=15 )
    pun = fixHTMLChars( strbetween( response.text, "<p>", "</p>" ) )
    if pun == "":
        pun = "No pun for you!"
    __main__.sendMessage( pun, recvfrom )
    return True
|
MWisBest/PyBot
|
Commands/pun/pun.py
|
Python
|
gpl-3.0
| 1,692
|
# coding=utf-8
from django.shortcuts import render,render_to_response,get_object_or_404
from django.http import HttpResponse,HttpResponseRedirect
from interfapp.interface_form import InterfaceForm,ProjectForm,CaseForm,OwnerForm
from interfapp.models import Project,Interfaces,Case
from django.views.decorators.csrf import csrf_exempt
from django.template import RequestContext
from django.core.paginator import Paginator,EmptyPage,PageNotAnInteger
from django import forms
from interfapp.models import Owner
import requests
import json
import copy
from django.db.models.deletion import ProtectedError
# Create your views here.
def projconf(request):
    """List all projects, paginated 15 per page."""
    page_size = 15
    paginator = Paginator(Project.objects.all(), page_size)
    requested_page = request.GET.get('page')
    try:
        page_obj = paginator.page(requested_page)
    except PageNotAnInteger:
        # Non-numeric page parameter: fall back to the first page.
        page_obj = paginator.page(1)
    except EmptyPage:
        # Page number beyond the end: clamp to the final page.
        page_obj = paginator.page(paginator.num_pages)
    return render_to_response('projconf.html', {'data': page_obj})
@csrf_exempt
def projadd(request):
    """Create a project: validate ProjectForm on POST, else show a blank form.

    NOTE(review): the failure branch renders the template with ``locals()``,
    so the local names here (``form``, ``error``, ``data``) are part of the
    template contract — do not rename them.
    """
    error = []  # exposed to the template via locals(); never populated here
    if request.method == 'POST':
        form = ProjectForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            projectName = data['projectName']
            name = data['name']
            # rebind ``data`` to the new model row and persist it
            data = Project(projectName = projectName,name=name)
            data.save()
            return HttpResponseRedirect('/interfapp/projconf/')
        else:
            # invalid form: re-render with the bound form so errors display
            return render_to_response("projadd.html", locals(), RequestContext(request))
    else:
        form = ProjectForm()
        return render_to_response('projadd.html', {'form': form}, context_instance=RequestContext(request))
def projdel(request, id):
    """Delete a project unless child cases still reference it.

    Returns a plain-text refusal when related Case rows exist, otherwise
    deletes the row and redirects to the project list. Responds 404 when
    the project does not exist.
    """
    # Fix: the original fetched the same row twice (get_object_or_404 and
    # Project.objects.get); the second lookup could raise DoesNotExist
    # (HTTP 500) instead of the intended 404. One lookup is enough.
    project = get_object_or_404(Project, pk=int(id))
    if project.case_set.all().count() > 0:
        return HttpResponse('有关联项请先删除子项')
    project.delete()
    return HttpResponseRedirect('/interfapp/projconf')
@csrf_exempt
def projupd(request,id):
    """Update a project's name/owner (POST) or render the edit form (GET).

    NOTE(review): the GET branch renders with ``locals()``, so ``data``
    and ``names`` are part of the template contract — do not rename them.
    """
    if request.method=='POST':
        projectName = request.POST['projectName']
        name = request.POST['name']
        # Look up the owner by display name; raises DoesNotExist (HTTP 500)
        # for an unknown name — TODO confirm that is acceptable.
        owner = Owner.objects.get(name=name)
        # Stores the owner's id into Project.name — presumably a FK column
        # despite the field name; verify against the model definition.
        Project.objects.filter(id=int(id)).update(projectName=projectName,name=owner.id)
        return HttpResponseRedirect('/interfapp/projconf/')
    else:
        data = Project.objects.filter(id=int(id))
        # candidate owners for the dropdown: role '开发' (developer)
        names = Owner.objects.filter(role='开发')
        return render(request,'projupd.html',locals())
def owqry(request):
    """List all owners ordered by id, paginated 15 per page."""
    page_size = 15
    paginator = Paginator(Owner.objects.all().order_by("id"), page_size)
    requested_page = request.GET.get('page')
    try:
        page_obj = paginator.page(requested_page)
    except PageNotAnInteger:
        # Non-numeric page parameter: show the first page.
        page_obj = paginator.page(1)
    except EmptyPage:
        # Out-of-range page number: clamp to the last page.
        page_obj = paginator.page(paginator.num_pages)
    return render_to_response('owcf.html', {'data': page_obj})
@csrf_exempt
def owadd(request):
    """Create an owner from OwnerForm (POST) or render a blank form.

    NOTE(review): the failure branch renders with ``locals()``, so the
    local names here (``form``, ``error``, ``data``) are part of the
    template contract — do not rename them.
    """
    error = []  # exposed to the template via locals(); never populated here
    if request.method == 'POST':
        form = OwnerForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            name = data['name']
            um = data['um']
            role = data['role']
            # rebind ``data`` to the new model row and persist it
            data = Owner(name=name,um=um,role=role)
            data.save()
            return HttpResponseRedirect('/interfapp/owcf/')
        else:
            return render_to_response("owadd.html", locals(), RequestContext(request))
    else:
        form = OwnerForm()
        return render_to_response('owadd.html', {'form': form}, context_instance=RequestContext(request))
@csrf_exempt
def owdel(request, id):
    """Delete an owner unless projects or cases still reference it.

    Returns a plain-text refusal when related rows exist, otherwise
    deletes the owner and redirects to the owner list. Responds 404 when
    the owner does not exist.
    """
    # BUG FIX: the original compared a QuerySet to an int
    # (`p.case_set.all() > 0`), which raises TypeError on Python 3 whenever
    # the project check is False; count the related rows instead. Also
    # dropped the redundant second fetch of the same row.
    owner = get_object_or_404(Owner, pk=int(id))
    if owner.project_set.all().count() > 0 or owner.case_set.all().count() > 0:
        return HttpResponse('有关联项请先删除子项')
    print('无关联', owner)
    owner.delete()
    return HttpResponseRedirect('/interfapp/owcf/')
@csrf_exempt
def owupd(request, id):
    """Update an owner's name/um (POST) or show the edit form (GET)."""
    owner_id = int(id)
    if request.method != 'POST':
        rows = Owner.objects.filter(id=owner_id)
        return render(request, 'owupd.html', {'data': rows})
    # Note: the form field for the name is 'name1', not 'name'.
    new_name = request.POST['name1']
    new_um = request.POST['um']
    print(new_name)
    Owner.objects.filter(id=owner_id).update(name=new_name, um=new_um)
    return HttpResponseRedirect('/interfapp/owcf/')
@csrf_exempt
def intfcf(request):
    """Interface list view with optional POST filters and pagination.

    POST filters by id, interface name and/or project name; GET (or a POST
    with all fields empty) lists everything. Results are paginated 10 per
    page via the ``page`` GET parameter.
    """
    if request.method=='POST':
        id = request.POST[u'id']
        interfName = request.POST[u'interfName']
        projectName = request.POST[u'projectName']
        if id:
            # exact id match takes priority over all other filters
            data = Interfaces.objects.filter(id=int(id)).order_by("interfName")
        elif interfName :
            data = Interfaces.objects.filter(interfName=interfName).order_by("interfName")
            # when a project is also given, narrow further by project pk
            # (Project.objects.get raises DoesNotExist for an unknown
            # project name — TODO confirm that is acceptable)
            if interfName and projectName:
                p = Project.objects.get(projectName=projectName)
                data = Interfaces.objects.filter(interfName=interfName,projectName=p.id).order_by("interfName")
        elif projectName :
            p = Project.objects.get(projectName=projectName)
            data = Interfaces.objects.filter(projectName=p.id).order_by("interfName")
        else:
            data = Interfaces.objects.all().order_by("projectName","interfName")
    else:
        data = Interfaces.objects.all().order_by("projectName","interfName")
    limit = 10  # records per page
    paginator = Paginator(data, limit)
    page = request.GET.get('page')
    try:
        data = paginator.page(page)
    except PageNotAnInteger:
        # non-numeric page parameter: show the first page
        data = paginator.page(1)
    except EmptyPage:
        # out-of-range page number: clamp to the last page
        data = paginator.page(paginator.num_pages)
    return render_to_response('intfcf.html',{'data':data})
@csrf_exempt
def intfadd(request):
    """Create an interface from InterfaceForm (POST) or render a blank form.

    NOTE(review): the failure branch renders with ``locals()``, so the
    local names here are part of the template contract — do not rename.
    """
    # error = []
    if request.method == 'POST':
        form = InterfaceForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            projectName = data['projectName']
            interfName = data['interfName']
            interfDns = data['interfDns']
            interfPath = data['interfPath']
            interfMethod = data['interfMethod']
            interfParams = data['interfParams']
            header = data['header']
            # rebind ``data`` to the new model row and persist it
            data = Interfaces(projectName=projectName,interfName=interfName,interfDns=interfDns,header=header,
                              interfPath=interfPath,interfParams=interfParams,interfMethod=interfMethod)
            data.save()
            return HttpResponseRedirect('/interfapp/intfcf/')
        else:
            return render_to_response("intfadd.html", locals(), RequestContext(request))
    else:
        form = InterfaceForm()
        return render_to_response('intfadd.html', {'form': form}, context_instance=RequestContext(request))
@csrf_exempt
def intfrun(request,id):
    """Show a case's run page (GET) or record a verification result (POST).

    ``id`` is a Case primary key; the POSTed ``vresult`` is written onto
    the case's related Interfaces row.

    NOTE(review): a request that is neither GET nor POST falls through and
    returns None (HTTP 500) — confirm only GET/POST can reach this view.
    """
    inid = Case.objects.get(id=id)
    # Fetched but never used afterwards — presumably kept for its
    # DoesNotExist side effect; TODO confirm it can be dropped.
    data = Interfaces.objects.get(id=inid.interfName.id)
    if request.method=="GET":
        reqobj = Case.objects.filter(id=id)
        return render(request,'intfrun.html',{'data':reqobj})
    if request.method=="POST":
        result = request.POST['vresult']
        Interfaces.objects.filter(id=inid.interfName.id).update(result=result)
        return HttpResponseRedirect('/interfapp/casecf')
@csrf_exempt
def intfupd(request, id):
    """Edit an interface's name/params/header (POST) or show the form (GET)."""
    interface_id = int(id)
    if request.method != 'POST':
        rows = Interfaces.objects.filter(id=interface_id)
        return render(request, 'intfupd.html', {'data': rows})
    new_name = request.POST['interfName']
    new_params = request.POST['interfParams']
    new_header = request.POST['header']
    Interfaces.objects.filter(id=interface_id).update(
        interfName=new_name, interfParams=new_params, header=new_header)
    return HttpResponseRedirect('/interfapp/intfcf')
@csrf_exempt
def intfclo(request, id):
    """Clone an interface and open it for editing.

    POST inserts a copy of the interface carrying the submitted
    name/path/params; GET shows the edit form for the original row.
    """
    if request.method != 'POST':
        rows = Interfaces.objects.filter(id=int(id))
        return render(request, 'intfupd.html', {'data': rows})
    clone = copy.deepcopy(Interfaces.objects.get(pk=int(id)))
    clone.interfName = request.POST['interfName']
    clone.interfPath = request.POST['interfPath']
    clone.interfParams = request.POST['interfParams']
    clone.pk = None  # dropping the pk makes save() INSERT a new row
    clone.save()
    return HttpResponseRedirect('/interfapp/intfcf')
# Replay an HTTP request for a stored interface definition.
@csrf_exempt
def sendreq(request):
    """Proxy an HTTP request.

    GET: fetch the ``url`` query parameter and return the response body.
    POST: look up the Interfaces row by ``id`` and replay its stored
    request (DNS + path, headers, params) as an HTTP POST, returning the
    response.
    """
    if request.method=='GET':
        url = request.GET['url']
        # timeout added so a dead endpoint cannot hang the worker forever
        responses = requests.get(url, timeout=30)
        return HttpResponse(responses.text)
    else:
        id = request.POST['id']
        datas = Interfaces.objects.filter(id=int(id))
        for i in datas:
            url1=i.interfDns
            url2=i.interfPath
            data=i.interfParams
            headers1=i.header
            url=(url1+'/'+url2).encode('utf-8')
            # SECURITY: eval() on database-stored text executes arbitrary
            # Python. These fields come from the admin UI, but this should
            # be migrated to json.loads / ast.literal_eval.
            data1=eval(data)   # stored text -> dict
            header=eval(headers1)
            if header["Content-Type"]=="application/json":
                data1=json.dumps(data1)
            responses = requests.post(url,headers=header,data=data1, timeout=30)
        # NOTE(review): if no Interfaces row matched, ``responses`` is
        # unbound and this raises NameError (HTTP 500) — confirm ids are
        # always valid before tightening this.
        return HttpResponse(responses)
def intfdel(request, id):
    """Delete an interface unless cases still reference it.

    Returns a plain-text refusal when related Case rows exist, otherwise
    deletes the row and redirects to the interface list. Responds 404 when
    the interface does not exist.
    """
    # Fix: the original fetched the same row twice (get_object_or_404 and
    # Interfaces.objects.get); the second lookup could raise DoesNotExist
    # (HTTP 500) instead of the intended 404. One lookup is enough.
    interface = get_object_or_404(Interfaces, pk=int(id))
    if interface.case_set.all().count() > 0:
        return HttpResponse('有关联项请先删除子项')
    print('无关联', interface)
    interface.delete()
    return HttpResponseRedirect('/interfapp/intfcf/')
@csrf_exempt
def casecf(request):
    """Case list view with optional POST filters and pagination.

    POST filters by case id, interface name and/or project name; GET (or a
    POST with all fields empty) lists everything. Results are paginated 10
    per page.

    NOTE(review): the template is rendered with ``locals()``, so local
    names here are part of the template contract — do not rename them.
    """
    if request.method=='POST':
        id = request.POST[u'id']
        interfName = request.POST[u'interfName']
        projectName = request.POST[u'projectName']
        if id:
            # exact case id takes priority over all other filters
            data = Case.objects.filter(id=int(id)).order_by("interfName")
        elif interfName :
            # resolve the interface name to its pk first (raises
            # DoesNotExist for an unknown name — TODO confirm acceptable)
            intf = Interfaces.objects.get(interfName=interfName)
            data = Case.objects.filter(interfName=intf.id).order_by("interfName")
            # when a project is also given, narrow further by project pk
            if interfName and projectName:
                p = Project.objects.get(projectName=projectName)
                data = Case.objects.filter(interfName=intf.id,projectName=p.id).order_by("interfName")
        elif projectName :
            p = Project.objects.get(projectName=projectName)
            data = Case.objects.filter(projectName=p.id).order_by("interfName")
        else:
            data = Case.objects.all().order_by("interfName")
    else:
        data = Case.objects.all().order_by("interfName","projectName")
    limit = 10  # records per page
    paginator = Paginator(data, limit)
    page = request.GET.get('page')
    try:
        data = paginator.page(page)
    except PageNotAnInteger:
        # non-numeric page parameter: show the first page
        data = paginator.page(1)
    except EmptyPage:
        # out-of-range page number: clamp to the last page
        data = paginator.page(paginator.num_pages)
    return render_to_response('casecf.html',locals())
@csrf_exempt
def caseadd(request):
    """Create a test case from CaseForm (POST) or render a blank form.

    NOTE(review): the failure branch renders with ``locals()``, so the
    local names here are part of the template contract — do not rename.
    """
    if request.method == 'POST':
        form = CaseForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            summary = data['summary']
            details = data['details']
            name = data['name']
            projectName = data['projectName']
            checkPoint = data['checkPoint']
            interfName=data['interfName']
            # rebind ``data`` to the new model row and persist it
            data = Case(summary=summary,details=details,projectName=projectName,name=name,checkPoint=checkPoint,interfName=interfName)
            data.save()
            return HttpResponseRedirect('/interfapp/casecf/')
        else:
            return render_to_response("caseadd.html", locals(), RequestContext(request))
    else:
        form = CaseForm()
        return render_to_response('caseadd.html', {'form': form}, context_instance=RequestContext(request))
def casedel(request, id):
    """Delete a single test case (404 if missing) and return to the list."""
    case = get_object_or_404(Case, pk=int(id))
    case.delete()
    return HttpResponseRedirect('/interfapp/casecf/')
def caseupd(request, id):
    """Edit a case's checkPoint/summary (POST) or show the edit form (GET)."""
    case_id = int(id)
    if request.method != 'POST':
        rows = Case.objects.filter(id=case_id)
        return render(request, 'caseupd.html', {'data': rows})
    new_check = request.POST['checkPoint']
    new_summary = request.POST['summary']
    current = Case.objects.get(id=case_id)
    print(current.interfName.id)  # trace which interface the case points at
    Case.objects.filter(id=case_id).update(checkPoint=new_check, summary=new_summary)
    return HttpResponseRedirect('/interfapp/casecf')
@csrf_exempt
def caseclo(request,id):
    """Clone a case together with a modified copy of its interface (POST),
    or render the edit form (GET).

    POST first clones the related Interfaces row with the submitted params
    (``pk = None`` + ``save()`` forces an INSERT), then clones the Case
    with the submitted summary/checkPoint, repointed at the new interface.
    """
    if request.method=='POST':
        interfParams = request.POST['interfParams']
        checkPoint = request.POST['checkPoint']
        summary = request.POST['summary']
        # read but never used afterwards — TODO confirm it can be dropped
        interfName = request.POST['interfName']
        # Case.objects.filter(id=int(id)).update(checkPoint=checkPoint,summary=summary)
        # Interfaces.objects.filter(interfName=interfName).update(interfParams=interfParams)
        s1=Case.objects.get(id=int(id))
        s =copy.deepcopy(s1)
        # clone the related interface first so the cloned case can point at it
        t = copy.deepcopy(Interfaces.objects.get(id=s1.interfName.id))
        print('修改前:',t.id)
        t.interfParams=interfParams
        t.pk = None  # forces save() to INSERT a new Interfaces row
        t.save()
        print('修改后:',t.id)
        s.summary=summary
        s.checkPoint=checkPoint
        s.interfName_id = t.id  # repoint the clone at the new interface
        s.pk=None
        s.save()
        return HttpResponseRedirect('/interfapp/casecf')
    else:
        data = Case.objects.filter(id=int(id))
        return render(request,'caseupd.html',{'data':data})
|
EmerJuny/djcasemg
|
interfmg/interfapp/views.py
|
Python
|
gpl-3.0
| 14,221
|
# prosaicweb
# Copyright (C) 2016 nathaniel smith
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
from functools import lru_cache
from flask_login import UserMixin
from prosaic.models import Base, Source, Corpus, Phrase, corpora_sources, Database
from sqlalchemy import create_engine, Column, Boolean, ForeignKey, Table
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.engine import Engine
from sqlalchemy.dialects.postgresql import ARRAY, TEXT, INTEGER, JSON
from sqlalchemy.ext.declarative import declarative_base
from werkzeug.security import generate_password_hash, check_password_hash
from .cfg import DB
def get_engine(db: Database) -> Engine:
    """Build a PostgreSQL Engine from the *db* settings mapping.

    *db* must supply ``user``, ``password``, ``host``, ``port`` and
    ``dbname`` keys.
    """
    url_template = 'postgresql://{user}:{password}@{host}:{port}/{dbname}'
    return create_engine(url_template.format(**db))
# Module-level session factory, bound at import time to the configured DB.
Session = sessionmaker()
engine = get_engine(DB)
Session.configure(bind=engine)
def get_session(db: Database):
    """Return a new ORM session bound to *db*.

    NOTE(review): this rebinds the module-global ``Session`` factory as a
    side effect, so every subsequent ``Session()`` call anywhere also uses
    *db* — confirm this is intended before relying on the default binding.
    """
    Session.configure(bind=get_engine(db))
    return Session()
# Many-to-many join tables linking users to the prosaic resources they own.
users_sources = Table('users_sources', Base.metadata,
                      Column('user_id', INTEGER, ForeignKey('users.id')),
                      Column('source_id', INTEGER, ForeignKey('sources.id')))
users_corpora = Table('users_corpora', Base.metadata,
                      Column('user_id', INTEGER, ForeignKey('users.id')),
                      Column('corpus_id', INTEGER, ForeignKey('corpora.id')))
users_templates = Table('users_templates', Base.metadata,
                        Column('user_id', INTEGER, ForeignKey('users.id')),
                        Column('template_id', INTEGER, ForeignKey('templates.id')))
class Template(Base):
    """A named, user-owned template: a JSON-stored list of rule lines."""
    __tablename__ = 'templates'
    id = Column(INTEGER, primary_key=True)
    name = Column(TEXT, nullable=False)
    lines = Column(JSON, nullable=False)

    @property
    def json(self) -> str:
        """All lines serialized as a single JSON document."""
        return json.dumps(self.lines)

    @property
    def pretty(self) -> str:
        """One JSON-encoded line per row, each newline-terminated."""
        # TODO wtf
        return ''.join(json.dumps(rule) + '\n' for rule in self.lines)

    def __repr__(self) -> str:
        return "Template<'{}'>".format(self.lines)
class User(Base, UserMixin):
    """An authenticated user plus the prosaic resources they own."""
    __tablename__ = 'users'
    id = Column(INTEGER, primary_key=True)
    pwhash = Column(TEXT, nullable=False)
    email = Column(TEXT, nullable=False, unique=True)
    # TODO check on table encoding, make sure it's utf-8
    sources = relationship('Source', secondary=users_sources)
    corpora = relationship('Corpus', secondary=users_corpora)
    templates = relationship('Template', secondary=users_templates)

    def get_id(self) -> str:
        """Flask-Login identity: the (unique) email address."""
        return self.email

    def __repr__(self) -> str:
        return "User(email='{}', pwhash='{}')".format(self.email, self.pwhash)
|
nathanielksmith/prosaicweb
|
prosaicweb/models.py
|
Python
|
agpl-3.0
| 3,373
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" This module defines all necessary config for the Exchange integration. """
from Cerebrum.config.configuration import (ConfigDescriptor,
Configuration,
Namespace)
from Cerebrum.config.loader import read, read_config
from Cerebrum.config.settings import (Boolean,
Integer,
Iterable,
String)
class ExchangeClientConfig(Configuration):
    u"""Configuration for the Exchange client."""
    # Test/development switch: replaces the real client with a mock.
    mock = ConfigDescriptor(
        Boolean,
        default=False,
        doc=u"Use mock client")
    # Service accounts (and their domains) used by the client.
    auth_user = ConfigDescriptor(
        String,
        default=u"cereauth",
        doc=u"User to authenticate with WinRM")
    auth_user_domain = ConfigDescriptor(
        String,
        default=u".",
        doc=u"Domain the authentication user resides in")
    exchange_admin = ConfigDescriptor(
        String,
        default=u"exchange_admin",
        doc=u"The user authorized to call Exchange-related commands")
    exchange_admin_domain = ConfigDescriptor(
        String,
        default=u".",
        doc=u"Domain the exchange admin user resides in")
    domain_reader = ConfigDescriptor(
        String,
        default=u"cerebrum_exchange_reader",
        doc=u"Account that can read from AD")
    domain_reader_domain = ConfigDescriptor(
        String,
        default=u".",
        doc=u"Domain the account reading from AD resides in")
    # Connection topology: management hosts and the WinRM jumphost.
    management_host = ConfigDescriptor(
        String,
        default=None,
        doc=u"The hostname which management operations can be run on")
    secondary_management_host = ConfigDescriptor(
        String,
        default=None,
        doc=u"The hostname which Connect-ExchangeServer connects to")
    jumphost = ConfigDescriptor(
        String,
        default=None,
        doc=u"The WinRM jumphost used for connecting to the management server")
    jumphost_port = ConfigDescriptor(
        Integer,
        default=5986,
        doc=u"Port to connect to")
    exchange_commands = ConfigDescriptor(
        Iterable,
        default=[],
        doc=u"A map of specialized commands")
    # TLS material and verification behavior for the jumphost connection.
    ca = ConfigDescriptor(
        String,
        default=None,
        doc=u"Certificate authority of the jumphost")
    client_key = ConfigDescriptor(
        String,
        default=None,
        doc=u"Path to clients private certificate")
    client_cert = ConfigDescriptor(
        String,
        default=None,
        doc=u"Path to clients cert file")
    hostname_verification = ConfigDescriptor(
        Boolean,
        default=True,
        doc=u"Check for hostname match in certificate")
    enabled_encryption = ConfigDescriptor(
        Boolean,
        default=True,
        doc=u"Communicate via TLS")
    # Placement of created objects in Exchange/AD.
    mailbox_path = ConfigDescriptor(
        String,
        default=None,
        doc=u"Path for mailbox placements")
    group_ou = ConfigDescriptor(
        String,
        default=None,
        doc=u"OU groups should reside in")
class ExchangeSelectionCriteria(Configuration):
    u"""Spreads and name rules selecting which entities go to Exchange."""
    mailbox_spread = ConfigDescriptor(
        String,
        default=u"exchange_account",
        doc=u"The spread to select target accounts to Exchange by")
    group_spread = ConfigDescriptor(
        String,
        default=u"exchange_group",
        doc=u"The spread to select target groups for Exchange by")
    shared_mbox_spread = ConfigDescriptor(
        String,
        default=u"exch_shared_mbox",
        doc=u"The spread to select target shared mailboxes for Exchange by")
    ad_spread = ConfigDescriptor(
        String,
        default=u"AD_account",
        doc=u"Filter criteria for accounts")
    group_name_translations = ConfigDescriptor(
        Iterable,
        default=[],
        doc=u"Map of group name translations")
    randzone_publishment_group = ConfigDescriptor(
        String,
        default=None,
        doc=u"Mempership in this group denotes publishment "
            "in address book for randzone users")
class ExchangeEventCollectorConfig(Configuration):
    u"""Configuration for the Exchange event collector."""
    # All intervals and delays below are in seconds.
    run_interval = ConfigDescriptor(
        Integer,
        minval=1,
        default=180,
        doc=u'How often (in seconds) we run notification')
    failed_limit = ConfigDescriptor(
        Integer,
        minval=1,
        default=10,
        doc=u'How many times we retry an event')
    failed_delay = ConfigDescriptor(
        Integer,
        minval=1,
        default=20*60,
        doc=(u'How long (seconds) should we wait before processesing the '
             'event again'))
    unpropagated_delay = ConfigDescriptor(
        Integer,
        minval=1,
        default=90*60,
        doc=(u'How old (seconds) should an event not registered as '
             'processesed be before we enqueue it'))
class ExchangeHandlerConfig(Configuration):
    u"""Configuration for the event handler."""
    # Presumably resolved at runtime as <handler_mod>.<handler_class> —
    # verify against the consumer that loads this config.
    handler_class = ConfigDescriptor(
        String,
        default=u"ExchangeEventHandler",
        doc=u"Handler class used for processing events")
    handler_mod = ConfigDescriptor(
        String,
        default=u"Cerebrum.modules.no.uio.exchange.consumer",
        doc=u"Handler module used for processing events")
class DeferredExchangeHandlerConfig(Configuration):
    u"""Configuration for the defered event handler."""
    # Both default to None: the deferred handler is optional.
    handler_class = ConfigDescriptor(
        String,
        default=None,
        doc=u"Deferred handler class used for processing events")
    handler_mod = ConfigDescriptor(
        String,
        default=None,
        doc=u"Deferred handler module used for processing events")
class ExchangeConfig(Configuration):
    u"""Configuration for the Exchange integration."""
    # Top-level namespaces: one sub-configuration per concern.
    client = ConfigDescriptor(
        Namespace,
        config=ExchangeClientConfig)
    selection_criteria = ConfigDescriptor(
        Namespace,
        config=ExchangeSelectionCriteria)
    eventcollector = ConfigDescriptor(
        Namespace,
        config=ExchangeEventCollectorConfig)
    handler = ConfigDescriptor(
        Namespace,
        config=ExchangeHandlerConfig)
    deferred_handler = ConfigDescriptor(
        Namespace,
        config=DeferredExchangeHandlerConfig)
def load_config(filepath=None):
    """Build, populate and validate an ExchangeConfig.

    When *filepath* is given the config is loaded from that file;
    otherwise the standard loader looks up the 'exchange' config.
    Raises on validation failure.
    """
    config = ExchangeConfig()
    if not filepath:
        read(config, 'exchange')
    else:
        config.load_dict(read_config(filepath))
    config.validate()
    return config
|
unioslo/cerebrum
|
Cerebrum/modules/exchange/config.py
|
Python
|
gpl-2.0
| 7,395
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.