repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
openelections/openelections-core | openelex/us/wi/datasource.py | 1 | 3806 | """
Wisconsin has Excel files containing precinct(ward)-level results for elections back to 2002.
All of the files are pre-processed into CSV and available on GitHub at
https://github.com/openelections/openelections-data-wi.
There is one file per election, including national and state offices.
The CSV files are named according to whether they're general/primary, and whether they're special.
"""
from future import standard_library
standard_library.install_aliases()
from os.path import join
import json
import datetime
import urllib.parse
from openelex import PROJECT_ROOT
from openelex.base.datasource import BaseDatasource
from openelex.lib import build_github_url, build_raw_github_url
class Datasource(BaseDatasource):
    """Wisconsin datasource.

    Maps each election to the pre-processed, ward-level CSV file hosted in
    the openelections-data-wi GitHub repository, falling back to the raw
    source URL when no pre-processed file exists.
    """

    # PUBLIC INTERFACE

    def mappings(self, year=None):
        """
        Return array of dicts containing source url and
        standardized filename for raw results file, along
        with other pieces of metadata
        """
        mappings = []
        for yr, elecs in self.elections(year).items():
            mappings.extend(self._build_metadata(yr, elecs))
        return mappings

    def target_urls(self, year=None):
        "Get list of source data urls, optionally filtered by year"
        return [item['raw_url'] for item in self.mappings(year)]

    def filename_url_pairs(self, year=None):
        """Return (standardized filename, fetchable url) pairs."""
        return [(item['generated_filename'], self._url_for_fetch(item))
                for item in self.mappings(year)]

    def mappings_for_url(self, url):
        """Return all mappings whose raw source URL matches ``url``."""
        return [mapping for mapping in self.mappings()
                if mapping['raw_url'] == url]

    # PRIVATE METHODS

    def _build_metadata(self, year, elections):
        """Build one metadata dict per election for the given year."""
        meta = []
        for election in elections:
            try:
                # In reality, the election may have multiple source files,
                # but we shouldn't be using the raw_url for anything.
                raw_url = election['direct_links'][0]
            except IndexError:
                raw_url = election['direct_link']
            generated_filename = self._generate_filename(election)
            meta.append({
                "generated_filename": generated_filename,
                "raw_url": raw_url,
                "pre_processed_url": build_raw_github_url(
                    self.state, election['start_date'][:4],
                    generated_filename),
                "ocd_id": 'ocd-division/country:us/state:wi',
                "name": "Wisconsin",
                "election": election['slug']
            })
        return meta

    def _generate_filename(self, election):
        """Return the standardized CSV filename for an election.

        Format: ``YYYYMMDD__wi__[special__]<race_type>__ward.csv``.
        """
        if election['special']:
            election_type = 'special__' + election['race_type']
        else:
            election_type = election['race_type']
        bits = [
            election['start_date'].replace('-', ''),
            self.state.lower(),
            election_type,
            'ward'
        ]
        return "__".join(bits) + '.csv'

    def _jurisdictions(self):
        """Wisconsin counties"""
        return [x for x in self.jurisdiction_mappings() if x['county'] != ""]

    def _ocd_id_for_county_map(self):
        """Map upper-cased county name -> OCD id, computed once and cached."""
        try:
            return self.ocd_id_for_county_map
        except AttributeError:
            m = self.jurisdiction_mappings()
            self.ocd_id_for_county_map = {
                j['county'].upper().strip(): j['ocd_id']
                for j in m if j['county'] != ""}
            return self.ocd_id_for_county_map

    def _url_for_fetch(self, mapping):
        """Prefer the pre-processed GitHub CSV; fall back to the raw URL.

        Fix: this method was previously defined twice in the class (one
        version using try/except KeyError, a later one using a truthiness
        test that silently shadowed it). ``dict.get`` covers both a
        missing and an empty ``pre_processed_url``.
        """
        if mapping.get('pre_processed_url'):
            return mapping['pre_processed_url']
        return mapping['raw_url']
| mit | 884c6dafcbc2047385acfb970033862b | 34.90566 | 163 | 0.604572 | 3.895599 | false | false | false | false |
openelections/openelections-core | openelex/us/in/load.py | 1 | 4428 | """Converts csv file data into RawResults for Indiana election results.
Indiana elections have pre-processed CSV results files for primary and
general elections beginning in 2002. These files contain county-level
and, where available, precinct-level elections data for each of the
state's counties. The CSV versions of those are contained in the
https://github.com/openelections/openelections-data-in repository.
"""
from __future__ import print_function
from builtins import object
import unicodecsv
from openelex.base.load import BaseLoader
from openelex.models import RawResult
from openelex.lib.text import ocd_type_id
from .datasource import Datasource
class LoadResults(object):
    """Entry point for data loading.

    Determines appropriate loader for file and triggers load process.
    """

    def run(self, mapping):
        # The pre-processed URL doubles as the discriminator: only
        # non-special, precinct-level files are currently supported.
        source_url = mapping['pre_processed_url']
        is_precinct_file = ('precinct' in source_url
                            and 'special' not in source_url)
        if not is_precinct_file:
            raise RuntimeError(
                'Cannot process election mapping {}'.format(mapping))
        INPrecinctLoader().run(mapping)
class INPrecinctLoader(BaseLoader):
    """Loads Indiana precinct-level results for the 2014 general election.

    Indiana has PDF files that have been converted to CSV files for
    precinct-level election data from 2012-2016.
    """
    datasource = Datasource()

    def load(self):
        """Read the mapped CSV file and bulk-insert one RawResult per row."""
        self._common_kwargs = self._build_common_election_kwargs()
        self._common_kwargs['reporting_level'] = 'precinct'
        # Store result instances for bulk loading
        results = []
        num_skipped = 0

        with self._file_handle as csvfile:
            reader = unicodecsv.DictReader(csvfile)
            for row in reader:
                if self._skip_row(row):
                    num_skipped += 1
                    continue
                rr_kwargs = self._common_kwargs.copy()
                rr_kwargs.update(self._build_contest_kwargs(row))
                rr_kwargs.update(self._build_candidate_kwargs(row))
                # The 'votes' column gets screwed up a lot, so handle it
                # by additionally printing debug information.
                try:
                    rr_kwargs.update({'votes': int(row['votes'])})
                except ValueError as e:
                    print('Bad votes in row {}'.format(row))
                    raise e
                county = row['county'].strip()
                county_ocd_id = self._get_county_ocd_id(county)
                precinct = row['precinct'].strip()
                if precinct:
                    # BUG FIX: a stray trailing comma previously made
                    # precinct_ocd_id a 1-tuple instead of a string.
                    precinct_ocd_id = "{}/precinct:{}".format(
                        county_ocd_id, ocd_type_id(precinct))
                    rr_kwargs.update({
                        'ocd_id': precinct_ocd_id,
                        'jurisdiction': precinct,
                        'parent_jurisdiction': county,
                    })
                else:
                    # No precinct given: fall back to county-level identity.
                    rr_kwargs.update({
                        'ocd_id': county_ocd_id,
                        'jurisdiction': county,
                        'parent_jurisdiction': 'ocd-division/country:us/state:in',
                    })
                results.append(RawResult(**rr_kwargs))

        print('\tInserting {} results (skipped {} rows)'.format(len(results),
                                                                num_skipped))
        RawResult.objects.insert(results)

    def _skip_row(self, row):
        """Return True for rows that carry no usable result data."""
        if not row['county'].strip():  # Some extraneous data
            return True
        elif row['votes'].strip() == '':  # Unreported data
            return True
        return False

    def _build_contest_kwargs(self, row):
        """Contest fields (office/district) extracted from a CSV row."""
        return {
            'office': row['office'].strip(),
            'district': row['district'].strip() or None,
        }

    def _build_candidate_kwargs(self, row):
        """Candidate fields (name/party) extracted from a CSV row."""
        return {
            'full_name': row['candidate'].strip(),
            'party': row['party'].strip(),
        }

    def _get_county_ocd_id(self, county):
        """Case-insensitively look up a county's OCD id.

        Raises:
            RuntimeError: if the county has no known mapping.
        """
        # Fetch the mappings once instead of re-querying for the error path.
        mappings = self.datasource.jurisdiction_mappings()
        for j in mappings:
            if j['county'].upper() == county.upper():
                return j['ocd_id']
        counties = [j['county'] for j in mappings]
        raise RuntimeError('Did not find county ocd id for {} in {}'.format(
            county, sorted(counties)))
| mit | 92f4cd56352e7c7389bbd83acf847b85 | 36.525424 | 80 | 0.565944 | 4.225191 | false | false | false | false |
viblo/pymunk | pymunk/examples/threaded_space.py | 1 | 1652 | import time
import pymunk
from pymunk.vec2d import Vec2d
class PyramidDemo:
    """Stacks a 25-row pyramid of boxes on a static floor for benchmarking
    the (optionally threaded) pymunk solver."""

    def __init__(self, threads=1):
        ### Init pymunk and create space
        self.space = pymunk.Space(threaded=(threads != 0))
        self.space.gravity = (0.0, -900.0)
        self.space.threads = threads

        ### ground
        floor = pymunk.Segment(self.space.static_body, (5, 100), (595, 100), 1.0)
        floor.friction = 1.0
        self.space.add(floor)

        ### pyramid
        row_step = Vec2d(0.5625, 1.1) * 20   # shift applied between rows
        col_step = Vec2d(1.125, 0.0) * 20    # shift applied within a row
        half = 10
        verts = [(-half, -half), (-half, half), (half, half), (half, -half)]
        row_start = Vec2d(-270, 7.5) + (300, 100)
        for row in range(25):
            pos = Vec2d(*row_start)
            # Each successive row holds one fewer box.
            for _ in range(row, 25):
                mass = 1.0
                moment = pymunk.moment_for_poly(mass, verts, (0, 0))
                body = pymunk.Body(mass, moment)
                body.position = pos
                box = pymunk.Poly(body, verts)
                box.friction = 1
                self.space.add(body, box)
                pos += col_step
            row_start += row_step

    def step(self, n=1):
        """Advance the simulation by n fixed 1/150 s steps."""
        dt = 1.0 / 150.0
        for _ in range(n):
            self.space.step(dt)
if __name__ == "__main__":
    # Benchmark the same pyramid drop with 0, 1 and 2 solver threads.
    for num_threads in (0, 1, 2):
        demo = PyramidDemo(threads=num_threads)
        started = time.time()
        demo.step(10000)
        elapsed = time.time() - started
        print("Threads {}, time {}".format(num_threads, elapsed))
| mit | 41b49c144a645abe8db0f0001f55f4e9 | 26.533333 | 85 | 0.487893 | 3.357724 | false | false | false | false |
openelections/openelections-core | openelex/tasks/publish.py | 1 | 1472 | from __future__ import print_function
from blinker import signal
import click
from openelex.base.publish import GitHubPublisher
from .utils import default_state_options
def log_publish_started(sender, **kwargs):
    """Signal handler: announce that a result file is being published."""
    print("Publishing {}".format(kwargs.get('filename')))
def log_publish_finished(sender, **kwargs):
    """Signal handler: announce that a result file finished publishing."""
    print("Finished publishing {}".format(kwargs.get('filename')))
@click.command(help="Publish baked result files")
@default_state_options
@click.option('--raw', is_flag=True,
    help="Publish raw result files. Default is to publish cleaned/"
         "standardized result files")
def publish(state, datefilter=None, raw=False):
    """
    Publish baked result files

    Args:
        state: Two-letter state-abbreviation, e.g. NY
        datefilter: Portion of a YYYYMMDD date, e.g. YYYY, YYYYMM, etc.
            Result files will only be published if the date portion of the
            filename matches the date string. Default is to publish all result
            files for the specified state.
        raw: Publish raw result files. Default is to publish
            cleaned/standardized result files.
    """
    # Wire progress logging to the publisher's blinker signals so each
    # file is announced before and after upload.
    # (Also fixes the "filess" typo in the --raw help text above.)
    pre_publish = signal('pre_publish')
    pre_publish.connect(log_publish_started)
    post_publish = signal('post_publish')
    post_publish.connect(log_publish_finished)

    publisher = GitHubPublisher()
    publisher.publish(state, datefilter=datefilter, raw=raw)
| mit | cb5912e4a052b158d7e5af3492a5d012 | 34.902439 | 78 | 0.694293 | 4.043956 | false | false | false | false |
openelections/openelections-core | openelex/base/fetch.py | 1 | 3453 | from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from os.path import exists, join
import urllib.request, urllib.parse, urllib.error
import urllib.parse
from .state import StateBase
class HTTPError(Exception):
    """HTTP error raised for failed downloads.

    Attributes:
        code: HTTP status code from the response (e.g. 404).
        reason: Reason phrase accompanying the status code.
    """

    def __init__(self, code, reason):
        # Also initialize the base Exception so ``err.args`` and ``repr``
        # carry the error details (the original left them empty).
        super(HTTPError, self).__init__(code, reason)
        self.code = code
        self.reason = reason

    def __str__(self):
        return "{}: {}".format(self.code, self.reason)
class ErrorHandlingURLopener(urllib.request.FancyURLopener):
    """URL opener that raises :class:`HTTPError` on 404 responses.

    Custom subclass of urllib.FancyURLopener that handles HTTP errors.
    See https://docs.python.org/2/library/urllib.html#urllib.FancyURLopener
    """

    def http_error_default(self, url, fp, errcode, errmsg, headers):
        if errcode == 404:
            raise HTTPError(errcode, errmsg)
        parent = urllib.request.FancyURLopener
        return parent.http_error_default(self, url, fp, errcode, errmsg,
                                         headers)
class BaseFetcher(StateBase):
    """
    Base class for interacting with source data.

    Primary use is fetching data files from source and standardizing names
    of files, which are then cached on S3 by their standardized name and
    used downstream to load results into data store.

    Intended to be subclassed in state-specific fetch.py modules.
    """

    def fetch(self, url, fname=None, overwrite=False):
        """Fetch and cache web page or data file

        Args:
            url: link to download
            fname: file name for local storage in cache directory
            overwrite: if True, overwrite cached copy with fresh download
        """
        local_file_name = self._standardized_filename(url, fname)
        retriever = ErrorHandlingURLopener()
        try:
            if overwrite:
                retriever.retrieve(url, local_file_name)
            elif exists(local_file_name):
                print("File is cached: {}".format(local_file_name))
            else:
                retriever.retrieve(url, local_file_name)
                print("Added to cache: {}".format(local_file_name))
        except HTTPError:
            print("Error downloading {}".format(url))
            # Explicitly clean up any partially written file rather than
            # relying on a bare assert (which disappears under -O).
            self._remove_local_file(local_file_name)

    def _standardized_filename(self, url, fname):
        """A standardized, fully qualified path name"""
        #TODO:apply filename standardization logic
        # non-result pages/files use default urllib name conventions
        # result files need standardization logic (TBD)
        if fname:
            filename = join(self.cache.abspath, fname)
        else:
            filename = self._filename_from_url(url)
        return filename

    def _filename_from_url(self, url):
        """Derive a cache-local filename from a URL's host and path."""
        #TODO: this is quick and dirty
        # see urlretrieve code for more robust conversion of
        # url to local filepath
        result = urllib.parse.urlsplit(url)
        bits = [
            self.cache.abspath,
            result.netloc + '_' + result.path.strip('/'),
        ]
        return join(*bits)

    def _remove_local_file(self, local_file_name):
        """
        Remove a downloaded file.

        This is mostly useful for removing a file when the request results
        in an HTTP error.

        Args:
            local_file_name: Absolute path to the downloaded file.
        """
        # Previously an unimplemented stub (marked "# BOOKMARK").
        import os
        if exists(local_file_name):
            os.remove(local_file_name)
openelections/openelections-core | openelex/us/wy/fetch.py | 1 | 2629 | from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
import os
import os.path
import urllib.request, urllib.parse, urllib.error
import urllib.parse
from zipfile import ZipFile
from bs4 import BeautifulSoup
import requests
from openelex.base.fetch import BaseFetcher
from openelex.us.wy.datasource import Datasource
class FetchResults(BaseFetcher):
    """Wyoming fetcher: downloads result files, transparently unpacking
    zip archives into the standardized filenames from the datasource."""

    def __init__(self):
        super(FetchResults, self).__init__()
        self._datasource = Datasource()
        # URLs already downloaded during this run; several output files
        # can map to one zip archive, so each URL is fetched only once.
        self._fetched = set()

    def fetch(self, url, fname=None, overwrite=False):
        # We keep track of URLs we've already fetched in this run since
        # there will be multiple output files mapped to a single zip
        # file. If we've already fetched this URL, exit early.
        if url in self._fetched:
            return

        if url.endswith('.zip'):
            # Fetch the zip file, using the automatically generated filename
            zip_fname = self._local_zip_file_name(url)
            super(FetchResults, self).fetch(url, zip_fname, overwrite)
            self._extract_zip(url, zip_fname, overwrite)
        else:
            # Plain (non-zip) file: delegate straight to the base fetcher.
            super(FetchResults, self).fetch(url, fname, overwrite)

        self._fetched.add(url)

    def _local_zip_file_name(self, url):
        """
        Return a normalized local file name for a results zip file.

        We don't care too much about the format because we can delete the
        zip file later.
        """
        parsed = urllib.parse.urlsplit(url)
        fname = parsed.path.split('/')[-1]
        return os.path.join(self.cache.abspath, fname)

    def _extract_zip(self, url, zip_fname=None, overwrite=False, remove=True):
        # Extract each mapped archive member into the cache directory,
        # renaming it to the standardized filename; optionally delete the
        # zip archive afterwards.
        if zip_fname is None:
            zip_fname = self._local_zip_file_name(url)

        with ZipFile(zip_fname, 'r') as zipf:
            for mapping in self._datasource.mappings_for_url(url):
                local_file_name = os.path.join(self.cache.abspath,
                    mapping['generated_filename'])
                if overwrite or not os.path.exists(local_file_name):
                    # Extract under the archive's internal name, then
                    # rename to the standardized filename.
                    zipf.extract(mapping['raw_extracted_filename'],
                        self.cache.abspath)
                    extracted_file_name = os.path.join(self.cache.abspath,
                        mapping['raw_extracted_filename'])
                    os.rename(extracted_file_name, local_file_name)
                    print("Added to cache: %s" % local_file_name)
                else:
                    print("File is cached: %s" % local_file_name)

        if remove:
            os.remove(zip_fname)
| mit | 3b32e264bb1f25d4d49a368463df2981 | 37.101449 | 78 | 0.611639 | 4.107813 | false | false | false | false |
viblo/pymunk | pymunk/examples/basic_test.py | 2 | 1447 | """Very simple example that does not depend on any third party library such
as pygame or pyglet like the other examples.
"""
import random
import sys
import pymunk
import pymunk.util
from pymunk import Vec2d
def main():
    print(f"basic example of pymunk {pymunk.version}")

    space = pymunk.Space()
    space.gravity = (0.0, -900.0)

    ## Balls
    balls = []
    countdown = 10
    for _ in range(5000):
        countdown -= 1
        if countdown <= 0:
            # Reset far beyond the remaining iterations, so effectively
            # only one ball is ever spawned.
            countdown = 10000
            mass, radius = 10, 25
            moment = pymunk.moment_for_circle(mass, 0, radius, (0, 0))
            ball_body = pymunk.Body(mass, moment)
            ball_body.position = random.randint(115, 350), 400
            ball_shape = pymunk.Circle(ball_body, radius, Vec2d(0, 0))
            space.add(ball_body, ball_shape)
            balls.append(ball_shape)

        # Drop any ball that has fallen below the screen.
        fallen = [b for b in balls if b.body.position.y < 0]
        for b in fallen:
            space.remove(b, b.body)
            balls.remove(b)

        if balls:
            p = balls[0].body.position
            print("(in on_draw): point = %.2f, %.2f" % (p.x, p.y))

        ### Update physics
        space.step(1 / 50.0)

    print("done!")
# Run the demo directly; main() returns None, so the exit status is 0.
if __name__ == "__main__":
    sys.exit(main())
| mit | b8e4c1ff5808a0689f783f92377f8e12 | 24.839286 | 76 | 0.538355 | 3.281179 | false | false | false | false |
benoitc/restkit | tests/_server_test.py | 2 | 14960 | # -*- coding: utf-8 -
#
# Copyright (c) 2008 (c) Benoit Chesneau <benoitc@e-engura.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import base64
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import cgi
import os
import socket
import tempfile
import threading
import unittest
import urlparse
import Cookie
try:
from urlparse import parse_qsl, parse_qs
except ImportError:
from cgi import parse_qsl, parse_qs
import urllib
from restkit.util import to_bytestring
# Test server binds to localhost on a port derived from the process id so
# that concurrent test runs on one machine do not collide.
HOST = 'localhost'
PORT = (os.getpid() % 31000) + 1024
class HTTPTestHandler(BaseHTTPRequestHandler):
    """Request handler backing restkit's test suite (Python 2).

    Each URL path exercises one client feature (auth, redirects, cookies,
    multipart uploads, chunked bodies, ...) and responds with a small
    plain-text body the tests can assert on.
    """

    def __init__(self, request, client_address, server):
        # Pre-computed Authorization header value for the test:test user.
        # NOTE(review): ``auth`` and ``count`` appear unused below.
        self.auth = 'Basic ' + base64.encodestring('test:test')[:-1]
        self.count = 0
        BaseHTTPRequestHandler.__init__(self, request, client_address, server)

    def do_GET(self):
        # Unquote the path, then split query string into a unicode dict.
        self.parsed_uri = urlparse.urlparse(urllib.unquote(self.path))
        self.query = {}
        for k, v in parse_qsl(self.parsed_uri[4]):
            self.query[k] = v.decode('utf-8')
        path = self.parsed_uri[2]
        if path == "/":
            extra_headers = [('Content-type', 'text/plain')]
            self._respond(200, extra_headers, "welcome")
        elif path == "/unicode":
            # Non-ASCII body to test response decoding.
            extra_headers = [('Content-type', 'text/plain')]
            self._respond(200, extra_headers, u"éàù@")
        elif path == "/json":
            # Requires the client to send a JSON content type.
            content_type = self.headers.get('content-type', 'text/plain')
            if content_type != "application/json":
                self.error_Response("bad type")
            else:
                extra_headers = [('Content-type', 'text/plain')]
                self._respond(200, extra_headers, "ok")
        elif path == "/éàù":
            # Non-ASCII path to test URL quoting in the client.
            extra_headers = [('Content-type', 'text/plain')]
            self._respond(200, extra_headers, "ok")
        elif path == "/test":
            extra_headers = [('Content-type', 'text/plain')]
            self._respond(200, extra_headers, "ok")
        elif path == "/query":
            # Expects ?test=testing in the query string.
            test = self.query.get("test", False)
            if test and test == "testing":
                extra_headers = [('Content-type', 'text/plain')]
                self._respond(200, extra_headers, "ok")
            else:
                self.error_Response()
        elif path == "/qint":
            # Integer query value arrives serialized as the string "1".
            test = self.query.get("test", False)
            if test and test == "1":
                extra_headers = [('Content-type', 'text/plain')]
                self._respond(200, extra_headers, "ok")
            else:
                self.error_Response()
        elif path == "/auth":
            # HTTP Basic auth: challenge, then verify test:test credentials.
            extra_headers = [('Content-type', 'text/plain')]
            if not 'Authorization' in self.headers:
                realm = "test"
                extra_headers.append(('WWW-Authenticate', 'Basic realm="%s"' % realm))
                self._respond(401, extra_headers, "")
            else:
                auth = self.headers['Authorization'][len('Basic')+1:]
                auth = base64.b64decode(auth).split(':')
                if auth[0] == "test" and auth[1] == "test":
                    self._respond(200, extra_headers, "ok")
                else:
                    self._respond(403, extra_headers, "niet!")
        elif path == "/redirect":
            # Relative redirect target.
            extra_headers = [('Content-type', 'text/plain'),
                             ('Location', '/complete_redirect')]
            self._respond(301, extra_headers, "")
        elif path == "/complete_redirect":
            extra_headers = [('Content-type', 'text/plain')]
            self._respond(200, extra_headers, "ok")
        elif path == "/redirect_to_url":
            # Absolute redirect target.
            extra_headers = [('Content-type', 'text/plain'),
                             ('Location', 'http://localhost:%s/complete_redirect' % PORT)]
            self._respond(301, extra_headers, "")
        elif path == "/pool":
            extra_headers = [('Content-type', 'text/plain')]
            self._respond(200, extra_headers, "ok")
        elif path == "/cookie":
            # Sets a single cookie.
            c = Cookie.SimpleCookie()
            c["fig"] = "newton"
            c['fig']['path'] = "/"
            for k in c.keys():
                extra_headers = [('Set-Cookie', str(c[k].output(header='')))]
            self._respond(200, extra_headers, "ok")
        elif path == "/cookies":
            # Sets two cookies via repeated Set-Cookie headers.
            c = Cookie.SimpleCookie()
            c["fig"] = "newton"
            c['fig']['path'] = "/"
            c["sugar"] = "wafer"
            c['sugar']['path'] = "/"
            extra_headers = []
            for k in c.keys():
                extra_headers.append(('Set-Cookie', str(c[k].output(header=''))))
            self._respond(200, extra_headers, "ok")
        else:
            self._respond(404,
                [('Content-type', 'text/plain')], "Not Found" )

    def do_POST(self):
        # Unlike do_GET, the path is not unquoted here.
        self.parsed_uri = urlparse.urlparse(self.path)
        self.query = {}
        for k, v in parse_qsl(self.parsed_uri[4]):
            self.query[k] = v.decode('utf-8')
        path = self.parsed_uri[2]
        extra_headers = []
        if path == "/":
            # Echo the request body back with the request's content type.
            content_type = self.headers.get('content-type', 'text/plain')
            extra_headers.append(('Content-type', content_type))
            content_length = int(self.headers.get('Content-length', '-1'))
            body = self.rfile.read(content_length)
            self._respond(200, extra_headers, body)
        elif path == "/bytestring":
            # Echo a raw byte-string body.
            content_type = self.headers.get('content-type', 'text/plain')
            extra_headers.append(('Content-type', content_type))
            content_length = int(self.headers.get('Content-length', '-1'))
            body = self.rfile.read(content_length)
            self._respond(200, extra_headers, body)
        elif path == "/unicode":
            # Echo a unicode body.
            content_type = self.headers.get('content-type', 'text/plain')
            extra_headers.append(('Content-type', content_type))
            content_length = int(self.headers.get('Content-length', '-1'))
            body = self.rfile.read(content_length)
            self._respond(200, extra_headers, body)
        elif path == "/json":
            # Echo the body, but only for a JSON content type.
            content_type = self.headers.get('content-type', 'text/plain')
            if content_type != "application/json":
                self.error_Response("bad type: %s" % content_type)
            else:
                extra_headers.append(('Content-type', content_type))
                content_length = int(self.headers.get('Content-length', 0))
                body = self.rfile.read(content_length)
                self._respond(200, extra_headers, body)
        elif path == "/empty":
            # Succeed only when the request body is empty.
            content_type = self.headers.get('content-type', 'text/plain')
            extra_headers.append(('Content-type', content_type))
            content_length = int(self.headers.get('Content-length', 0))
            body = self.rfile.read(content_length)
            if body == "":
                self._respond(200, extra_headers, "ok")
            else:
                self.error_Response()
        elif path == "/query":
            test = self.query.get("test", False)
            if test and test == "testing":
                extra_headers = [('Content-type', 'text/plain')]
                self._respond(200, extra_headers, "ok")
            else:
                self.error_Response()
        elif path == "/form":
            # Expects a url-encoded form with a=a and b=b.
            content_type = self.headers.get('content-type', 'text/plain')
            extra_headers.append(('Content-type', content_type))
            content_length = int(self.headers.get('Content-length', 0))
            body = self.rfile.read(content_length)
            form = parse_qs(body)
            if form['a'] == ["a"] and form["b"] == ["b"]:
                self._respond(200, extra_headers, "ok")
            else:
                self.error_Response()
        elif path == "/multivalueform":
            # Expects a repeated: a=a&a=c plus b=b.
            content_type = self.headers.get('content-type', 'text/plain')
            extra_headers.append(('Content-type', content_type))
            content_length = int(self.headers.get('Content-length', 0))
            body = self.rfile.read(content_length)
            form = parse_qs(body)
            if form['a'] == ["a", "c"] and form["b"] == ["b"]:
                self._respond(200, extra_headers, "ok")
            else:
                self.error_Response()
        elif path == "/multipart":
            # Echoes back the 't' field of a multipart form.
            ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
            content_length = int(self.headers.get('Content-length', 0))
            if ctype == 'multipart/form-data':
                req = cgi.parse_multipart(self.rfile, pdict)
                body = req['t'][0]
                extra_headers = [('Content-type', 'text/plain')]
                self._respond(200, extra_headers, body)
            else:
                self.error_Response()
        elif path == "/multipart2":
            # Multipart upload with a file 'f' and fields a/b (b repeated,
            # including a non-ASCII value); responds with the file length.
            ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
            content_length = int(self.headers.get('Content-length', 0))
            if ctype == 'multipart/form-data':
                req = cgi.parse_multipart(self.rfile, pdict)
                f = req['f'][0]
                if not req['a'] == ['aa']:
                    self.error_Response()
                if not req['b'] == ['bb','éàù@']:
                    self.error_Response()
                extra_headers = [('Content-type', 'text/plain')]
                self._respond(200, extra_headers, str(len(f)))
            else:
                self.error_Response()
        elif path == "/multipart3":
            # Like multipart2 but 'b' has a single non-ASCII value.
            ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
            content_length = int(self.headers.get('Content-length', 0))
            if ctype == 'multipart/form-data':
                req = cgi.parse_multipart(self.rfile, pdict)
                f = req['f'][0]
                if not req['a'] == ['aa']:
                    self.error_Response()
                if not req['b'] == ['éàù@']:
                    self.error_Response()
                extra_headers = [('Content-type', 'text/plain')]
                self._respond(200, extra_headers, str(len(f)))
            else:
                self.error_Response()
        elif path == "/multipart4":
            # Like multipart3 but echoes the file content itself.
            ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
            content_length = int(self.headers.get('Content-length', 0))
            if ctype == 'multipart/form-data':
                req = cgi.parse_multipart(self.rfile, pdict)
                f = req['f'][0]
                if not req['a'] == ['aa']:
                    self.error_Response()
                if not req['b'] == ['éàù@']:
                    self.error_Response()
                extra_headers = [('Content-type', 'text/plain')]
                self._respond(200, extra_headers, f)
            else:
                self.error_Response()
        elif path == "/1M":
            # Responds with the byte length of a large upload.
            content_type = self.headers.get('content-type', 'text/plain')
            extra_headers.append(('Content-type', content_type))
            content_length = int(self.headers.get('Content-length', 0))
            body = self.rfile.read(content_length)
            self._respond(200, extra_headers, str(len(body)))
        elif path == "/large":
            # Echoes a large body, with an explicit Content-Length header.
            content_type = self.headers.get('content-type', 'text/plain')
            extra_headers.append(('Content-Type', content_type))
            content_length = int(self.headers.get('Content-length', 0))
            body = self.rfile.read(content_length)
            extra_headers.append(('Content-Length', str(len(body))))
            self._respond(200, extra_headers, body)
        elif path == "/list":
            # Echoes a body sent as an iterable of chunks by the client.
            content_length = int(self.headers.get('Content-length', 0))
            body = self.rfile.read(content_length)
            extra_headers.append(('Content-Length', str(len(body))))
            self._respond(200, extra_headers, body)
        elif path == "/chunked":
            # Expects a chunked transfer encoding; reads the fixed-size
            # payload the test sends (29 bytes including chunk framing).
            te = (self.headers.get("transfer-encoding") == "chunked")
            if te:
                body = self.rfile.read(29)
                extra_headers.append(('Content-Length', "29"))
                self._respond(200, extra_headers, body)
            else:
                self.error_Response()
        else:
            self.error_Response('Bad path')

    # PUT is handled identically to POST.
    do_PUT = do_POST

    def do_DELETE(self):
        if self.path == "/delete":
            extra_headers = [('Content-type', 'text/plain')]
            self._respond(200, extra_headers, '')
        else:
            self.error_Response()

    def do_HEAD(self):
        if self.path == "/ok":
            extra_headers = [('Content-type', 'text/plain')]
            self._respond(200, extra_headers, '')
        else:
            self.error_Response()

    def error_Response(self, message=None):
        # Send a 400 response describing the request that failed.
        req = [
            ('HTTP method', self.command),
            ('path', self.path),
        ]
        if message:
            req.append(('message', message))
        body_parts = ['Bad request:\r\n']
        for k, v in req:
            body_parts.append(' %s: %s\r\n' % (k, v))
        body = ''.join(body_parts)
        self._respond(400, [('Content-type', 'text/plain'),
                            ('Content-Length', str(len(body)))], body)

    def _respond(self, http_code, extra_headers, body):
        # Write status line, the given headers, then the (byte-encoded)
        # body, closing the write side when done.
        self.send_response(http_code)
        keys = []
        for k, v in extra_headers:
            self.send_header(k, v)
            keys.append(k)
        if body:
            body = to_bytestring(body)
        #if body and "Content-Length" not in keys:
        #    self.send_header("Content-Length", len(body))
        self.end_headers()
        self.wfile.write(body)
        self.wfile.close()

    def finish(self):
        # Flush/close both halves of the connection if still open.
        if not self.wfile.closed:
            self.wfile.flush()
            self.wfile.close()
        self.rfile.close()
# Module-level handle on the background server thread (None until started).
server_thread = None

def run_server_test():
    """Start the test HTTP server once, in a daemonized background thread."""
    global server_thread
    if server_thread is None:
        httpd = HTTPServer((HOST, PORT), HTTPTestHandler)
        worker = threading.Thread(target=httpd.serve_forever)
        worker.setDaemon(True)
        worker.start()
        server_thread = worker
| mit | 9dfcc64df1db3cd2d39b55c743ad459c | 39.833333 | 86 | 0.531884 | 4.016393 | false | true | false | false |
benoitc/restkit | restkit/oauth2.py | 3 | 22155 | # -*- coding: utf-8 -
#
# This file is part of restkit released under the MIT license.
# See the NOTICE for more information.
import base64
import urllib
import time
import random
import urlparse
import hmac
import binascii
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
from restkit.util import to_bytestring
try:
from hashlib import sha1
sha = sha1
except ImportError:
# hashlib was added in Python 2.5
import sha
from restkit.version import __version__
# Protocol version advertised in signed requests.
OAUTH_VERSION = '1.0' # Hi Blaine!
# Defaults used when a request does not specify them explicitly.
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
    """Generic exception class for OAuth failures."""

    def __init__(self, message='OAuth error occurred.'):
        # Also initialize RuntimeError so ``err.args``/``repr`` carry the
        # message (the original left them empty).
        RuntimeError.__init__(self, message)
        self._message = message

    @property
    def message(self):
        """A hack to get around the deprecation errors in 2.6."""
        return self._message

    def __str__(self):
        return self._message
class MissingSignature(Error):
    # Raised when a signed request lacks its oauth_signature parameter.
    pass
def build_authenticate_header(realm=''):
    """Optional WWW-Authenticate header (401 error)"""
    challenge = 'OAuth realm="%s"' % realm
    return {'WWW-Authenticate': challenge}
def build_xoauth_string(url, consumer, token=None):
    """Build an XOAUTH string for use in SMTP/IMAP authentication."""
    request = Request.from_consumer_and_token(consumer, token, "GET", url)
    request.sign_request(SignatureMethod_HMAC_SHA1(), consumer, token)

    params = ['%s="%s"' % (k, escape(v))
              for k, v in sorted(request.iteritems())
              if v is not None]
    return "%s %s %s" % ("GET", url, ','.join(params))
def to_unicode(s):
    """ Convert to unicode, raise exception with instructive error
    message if s is not unicode, ascii, or utf-8. """
    # NOTE: Python 2 only (``unicode`` type, ``except ..., name`` syntax).
    if not isinstance(s, unicode):
        if not isinstance(s, str):
            raise TypeError('You are required to pass either unicode or string here, not: %r (%s)' % (type(s), s))
        try:
            # Byte strings must decode cleanly as utf-8.
            s = s.decode('utf-8')
        except UnicodeDecodeError, le:
            raise TypeError('You are required to pass either a unicode object or a utf-8 string here. You passed a Python string object which contained non-utf-8: %r. The UnicodeDecodeError that resulted from attempting to interpret it as utf-8 was: %s' % (s, le,))
    return s
def to_utf8(s):
    """Return *s* (unicode or utf-8 str) encoded as a utf-8 byte string."""
    decoded = to_unicode(s)
    return decoded.encode('utf-8')
def to_unicode_if_string(s):
    """Convert *s* to unicode when it is a string; pass through otherwise."""
    if not isinstance(s, basestring):
        return s
    return to_unicode(s)
def to_utf8_if_string(s):
    """Encode *s* as utf-8 when it is a string; pass through otherwise."""
    if not isinstance(s, basestring):
        return s
    return to_utf8(s)
def to_unicode_optional_iterator(x):
    """
    Raise TypeError if x is a str containing non-utf8 bytes or if x is
    an iterable which contains such a str.
    """
    # NOTE: Python 2 only (``basestring``, ``except ..., name`` syntax).
    if isinstance(x, basestring):
        return to_unicode(x)

    try:
        l = list(x)
    except TypeError, e:
        # Non-iterable values pass through unchanged.
        assert 'is not iterable' in str(e)
        return x
    else:
        return [ to_unicode(e) for e in l ]
def to_utf8_optional_iterator(x):
    """
    Raise TypeError if x is a str or if x is an iterable which
    contains a str.
    """
    # NOTE: Python 2 only (``basestring``, ``except ..., name`` syntax).
    if isinstance(x, basestring):
        return to_utf8(x)

    try:
        l = list(x)
    except TypeError, e:
        # Non-iterable values pass through unchanged.
        assert 'is not iterable' in str(e)
        return x
    else:
        return [ to_utf8_if_string(e) for e in l ]
def escape(s):
    """Escape a URL including any /."""
    # Only '~' is left unescaped, per the OAuth percent-encoding rules.
    encoded = s.encode('utf-8')
    return urllib.quote(encoded, safe='~')
def generate_timestamp():
    """Get seconds since epoch (UTC)."""
    now = time.time()
    return int(now)
def generate_nonce(length=8):
    """Generate pseudorandom number."""
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
def generate_verifier(length=8):
    """Generate pseudorandom number."""
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
class Consumer(object):
    """A consumer of OAuth-protected services (the OAuth client).

    A consumer registers with a service provider and receives a *key*
    (sent with every request to identify itself) and a *secret* (used
    only when signing requests, to prove the request really comes from
    that registered consumer). With these credentials the consumer can
    request a request token and begin the OAuth authorization flow.
    """

    key = None
    secret = None

    def __init__(self, key, secret):
        self.key = key
        self.secret = secret

        # Both credentials are mandatory.
        if key is None or secret is None:
            raise ValueError("Key and secret must be set.")

    def __str__(self):
        data = {
            'oauth_consumer_key': self.key,
            'oauth_consumer_secret': self.secret,
        }
        return urllib.urlencode(data)
class Token(object):
    """An OAuth credential used to request authorization or a protected
    resource.
    A token pairs a *key* with a *secret*: the key travels with each
    request to identify the token in use, while the secret never leaves
    the signature computation, proving the requester is the party the
    server issued the token to.
    During authorization the consumer first obtains a *request token*
    for the end user to approve with the service provider, then trades
    it for an *access token* that unlocks protected resources.
    """
    key = None
    secret = None
    callback = None
    callback_confirmed = None
    verifier = None
    def __init__(self, key, secret):
        # Both parts of the credential are mandatory.
        if key is None or secret is None:
            raise ValueError("Key and secret must be set.")
        self.key = key
        self.secret = secret
    def set_callback(self, callback):
        # Recording a callback marks it confirmed (OAuth 1.0a behavior).
        self.callback = callback
        self.callback_confirmed = 'true'
    def set_verifier(self, verifier=None):
        # Use the supplied verifier, or mint a pseudorandom one.
        self.verifier = verifier if verifier is not None else generate_verifier()
    def get_callback_url(self):
        """Return the callback URL, with oauth_verifier appended when
        both a callback and a verifier are known."""
        if not (self.callback and self.verifier):
            return self.callback
        scheme, netloc, path, params, query, fragment = \
            urlparse.urlparse(self.callback)[:6]
        extra = 'oauth_verifier=%s' % self.verifier
        query = '%s&%s' % (query, extra) if query else extra
        return urlparse.urlunparse((scheme, netloc, path, params,
                                    query, fragment))
    def to_string(self):
        """Serialize this token as a plain string, suitable for storage.
        The result contains the token's secret, so never expose or store
        it where a third party can read it.
        """
        data = {'oauth_token': self.key,
                'oauth_token_secret': self.secret}
        if self.callback_confirmed is not None:
            data['oauth_callback_confirmed'] = self.callback_confirmed
        return urllib.urlencode(data)
    @staticmethod
    def from_string(s):
        """Build a Token from a string produced by `to_string()`."""
        if not len(s):
            raise ValueError("Invalid parameter string.")
        params = parse_qs(s, keep_blank_values=False)
        if not len(params):
            raise ValueError("Invalid parameter string.")
        try:
            key = params['oauth_token'][0]
        except Exception:
            raise ValueError("'oauth_token' not found in OAuth request.")
        try:
            secret = params['oauth_token_secret'][0]
        except Exception:
            raise ValueError("'oauth_token_secret' not found in "
                "OAuth request.")
        tok = Token(key, secret)
        try:
            tok.callback_confirmed = params['oauth_callback_confirmed'][0]
        except KeyError:
            pass  # 1.0, no callback confirmed.
        return tok
    def __str__(self):
        return self.to_string()
def setter(attr):
    """Decorator building a write-through property from a setter function.
    The decorated function becomes the property's setter; the generated
    getter and deleter operate on the instance ``__dict__`` entry named
    after the function, raising AttributeError when the value is absent.
    """
    name = attr.__name__
    _missing = object()
    def read(self):
        value = self.__dict__.get(name, _missing)
        if value is _missing:
            raise AttributeError(name)
        return value
    def clear(self):
        del self.__dict__[name]
    return property(read, attr, clear)
class Request(dict):
    """The parameters and information for an HTTP request, suitable for
    authorizing with OAuth credentials.
    When a consumer wants to access a service's protected resources, it does
    so using a signed HTTP request identifying itself (the consumer) with its
    key, and providing an access token authorized by the end user to access
    those resources.
    """
    # OAuth protocol version advertised as 'oauth_version'; OAUTH_VERSION
    # is a module-level constant defined elsewhere in this file.
    version = OAUTH_VERSION
    def __init__(self, method=HTTP_METHOD, url=None, parameters=None,
                 body='', is_form_encoded=False):
        # Assigning self.url goes through the @setter property below,
        # which validates the scheme and computes self.normalized_url.
        if url is not None:
            self.url = to_unicode(url)
        self.method = method
        if parameters is not None:
            # Store every key/value as unicode so later encoding and
            # signing are deterministic; values may be iterables.
            for k, v in parameters.iteritems():
                k = to_unicode(k)
                v = to_unicode_optional_iterator(v)
                self[k] = v
        self.body = body
        self.is_form_encoded = is_form_encoded
    @setter
    def url(self, value):
        # Property setter: store the raw URL and derive
        # self.normalized_url (scheme://host/path with default ports,
        # params, query and fragment stripped) for the signature base.
        self.__dict__['url'] = value
        if value is not None:
            scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)
            # Exclude default port numbers.
            if scheme == 'http' and netloc[-3:] == ':80':
                netloc = netloc[:-3]
            elif scheme == 'https' and netloc[-4:] == ':443':
                netloc = netloc[:-4]
            if scheme not in ('http', 'https'):
                raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
            # Normalized URL excludes params, query, and fragment.
            self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
        else:
            self.normalized_url = None
            self.__dict__['url'] = None
    @setter
    def method(self, value):
        # Store the HTTP method upper-cased so 'get' and 'GET' behave
        # identically when signing.
        self.__dict__['method'] = value.upper()
    def _get_timestamp_nonce(self):
        # Convenience accessor for the two anti-replay parameters.
        return self['oauth_timestamp'], self['oauth_nonce']
    def get_nonoauth_parameters(self):
        """Get any non-OAuth parameters."""
        return dict([(k, v) for k, v in self.iteritems()
                    if not k.startswith('oauth_')])
    def to_header(self, realm=''):
        """Serialize as a header for an HTTPAuth request."""
        # Only oauth_* parameters belong in the Authorization header.
        oauth_params = ((k, v) for k, v in self.items()
                            if k.startswith('oauth_'))
        stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
        header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
        params_header = ', '.join(header_params)
        auth_header = 'OAuth realm="%s"' % realm
        if params_header:
            auth_header = "%s, %s" % (auth_header, params_header)
        return {'Authorization': auth_header}
    def to_postdata(self):
        """Serialize as post data for a POST request."""
        d = {}
        for k, v in self.iteritems():
            d[k.encode('utf-8')] = to_utf8_optional_iterator(v)
        # tell urlencode to deal with sequence values and map them correctly
        # to resulting querystring. for example self["k"] = ["v1", "v2"] will
        # result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
        return urllib.urlencode(d, True).replace('+', '%20')
    def to_url(self):
        """Serialize as a URL for a GET request."""
        base_url = urlparse.urlparse(self.url)
        try:
            query = base_url.query
        except AttributeError:
            # must be python <2.5
            query = base_url[4]
        query = parse_qs(query)
        # Merge this request's parameters into the URL's existing
        # query-string parameters (multiple values per key allowed).
        for k, v in self.items():
            if isinstance(v, unicode):
                v = v.encode("utf-8")
            query.setdefault(k, []).append(v)
        try:
            scheme = base_url.scheme
            netloc = base_url.netloc
            path = base_url.path
            params = base_url.params
            fragment = base_url.fragment
        except AttributeError:
            # must be python <2.5
            scheme = base_url[0]
            netloc = base_url[1]
            path = base_url[2]
            params = base_url[3]
            fragment = base_url[5]
        url = (scheme, netloc, path, params,
               urllib.urlencode(query, True), fragment)
        return urlparse.urlunparse(url)
    def get_parameter(self, parameter):
        # Like dict.get, but raises the module's Error when the
        # parameter is absent (or stored as None).
        ret = self.get(parameter)
        if ret is None:
            raise Error('Parameter not found: %s' % parameter)
        return ret
    def get_normalized_parameters(self):
        """Return a string that contains the parameters that must be signed."""
        items = []
        for key, value in self.iteritems():
            if key == 'oauth_signature':
                continue
            # 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
            # so we unpack sequence values into multiple items for sorting.
            if isinstance(value, basestring):
                items.append((to_utf8_if_string(key), to_utf8(value)))
            else:
                try:
                    value = list(value)
                except TypeError, e:
                    assert 'is not iterable' in str(e)
                    items.append((to_utf8_if_string(key), to_utf8_if_string(value)))
                else:
                    items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value)
        # Include any query string parameters from the provided URL
        query = urlparse.urlparse(self.url)[4]
        url_items = self._split_url_string(query).items()
        url_items = [(to_utf8(k), to_utf8(v)) for k, v in url_items if k != 'oauth_signature' ]
        items.extend(url_items)
        items.sort()
        encoded_str = urllib.urlencode(items)
        # Encode signature parameters per Oauth Core 1.0 protocol
        # spec draft 7, section 3.6
        # (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
        # Spaces must be encoded with "%20" instead of "+"
        return encoded_str.replace('+', '%20').replace('%7E', '~')
    def sign_request(self, signature_method, consumer, token):
        """Set the signature parameter to the result of sign."""
        if not self.is_form_encoded:
            # according to
            # http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
            # section 4.1.1 "OAuth Consumers MUST NOT include an
            # oauth_body_hash parameter on requests with form-encoded
            # request bodies."
            self['oauth_body_hash'] = base64.b64encode(sha(self.body).digest())
        if 'oauth_consumer_key' not in self:
            self['oauth_consumer_key'] = consumer.key
        if token and 'oauth_token' not in self:
            self['oauth_token'] = token.key
        self['oauth_signature_method'] = signature_method.name
        self['oauth_signature'] = signature_method.sign(self, consumer, token)
    @classmethod
    def make_timestamp(cls):
        """Get seconds since epoch (UTC)."""
        return str(int(time.time()))
    @classmethod
    def make_nonce(cls):
        """Generate pseudorandom number."""
        return str(random.randint(0, 100000000))
    @classmethod
    def from_request(cls, http_method, http_url, headers=None, parameters=None,
                     query_string=None):
        """Combines multiple parameter sources."""
        if parameters is None:
            parameters = {}
        # Headers
        if headers and 'Authorization' in headers:
            auth_header = headers['Authorization']
            # Check that the authorization header is OAuth.
            if auth_header[:6] == 'OAuth ':
                auth_header = auth_header[6:]
                try:
                    # Get the parameters from the header.
                    header_params = cls._split_header(auth_header)
                    parameters.update(header_params)
                except:
                    raise Error('Unable to parse OAuth parameters from '
                        'Authorization header.')
        # GET or POST query string.
        if query_string:
            query_params = cls._split_url_string(query_string)
            parameters.update(query_params)
        # URL parameters.
        param_str = urlparse.urlparse(http_url)[4] # query
        url_params = cls._split_url_string(param_str)
        parameters.update(url_params)
        if parameters:
            return cls(http_method, http_url, parameters)
        return None
    @classmethod
    def from_consumer_and_token(cls, consumer, token=None,
            http_method=HTTP_METHOD, http_url=None, parameters=None,
            body='', is_form_encoded=False):
        # Build a request pre-populated with the standard oauth_*
        # defaults; caller-supplied parameters override the defaults.
        if not parameters:
            parameters = {}
        defaults = {
            'oauth_consumer_key': consumer.key,
            'oauth_timestamp': cls.make_timestamp(),
            'oauth_nonce': cls.make_nonce(),
            'oauth_version': cls.version,
        }
        defaults.update(parameters)
        parameters = defaults
        if token:
            parameters['oauth_token'] = token.key
            if token.verifier:
                parameters['oauth_verifier'] = token.verifier
        return Request(http_method, http_url, parameters, body=body,
                       is_form_encoded=is_form_encoded)
    @classmethod
    def from_token_and_callback(cls, token, callback=None,
            http_method=HTTP_METHOD, http_url=None, parameters=None):
        # Used when sending the user to authorize a request token.
        if not parameters:
            parameters = {}
        parameters['oauth_token'] = token.key
        if callback:
            parameters['oauth_callback'] = callback
        return cls(http_method, http_url, parameters)
    @staticmethod
    def _split_header(header):
        """Turn Authorization: header into parameters."""
        params = {}
        parts = header.split(',')
        for param in parts:
            # Ignore realm parameter.
            if param.find('realm') > -1:
                continue
            # Remove whitespace.
            param = param.strip()
            # Split key-value.
            param_parts = param.split('=', 1)
            # Remove quotes and unescape the value.
            params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
        return params
    @staticmethod
    def _split_url_string(param_str):
        """Turn URL string into parameters."""
        # NOTE(review): only the first value of each repeated key is
        # kept; keep_blank_values=True preserves empty parameters.
        parameters = parse_qs(param_str.encode('utf-8'), keep_blank_values=True)
        for k, v in parameters.iteritems():
            parameters[k] = urllib.unquote(v[0])
        return parameters
class SignatureMethod(object):
    """Base class for request-signing strategies.
    OAuth lets consumers and service providers agree on how requests
    are signed. Subclasses implement `signing_base()` and `sign()`;
    the other `oauth` machinery calls those to produce and verify
    signatures.
    """
    def signing_base(self, request, consumer, token):
        """Calculate what needs to be signed.
        Returns a 2-tuple of (signing key, message). The message may be
        surfaced in error output to help clients debug their software.
        """
        raise NotImplementedError
    def sign(self, request, consumer, token):
        """Return the signature for *request* given the consumer and
        token.
        Implementations should build the message via `signing_base()`
        so debugging output stays useful.
        """
        raise NotImplementedError
    def check(self, request, consumer, token, signature):
        """Return whether *signature* matches what sign() produces for
        this request, consumer and token."""
        return self.sign(request, consumer, token) == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
    # Standard OAuth HMAC-SHA1 signature method.
    name = 'HMAC-SHA1'
    def signing_base(self, request, consumer, token):
        """Return (signing key, base string) for HMAC-SHA1.
        The base string joins the escaped method, normalized URL and
        normalized parameters with '&'; the key is the escaped consumer
        secret plus '&' plus the escaped token secret (when a token is
        present).
        """
        if getattr(request, 'normalized_url', None) is None:
            raise ValueError("Base URL for request is not set.")
        base = '&'.join((
            escape(request.method),
            escape(request.normalized_url),
            escape(request.get_normalized_parameters()),
        ))
        key = '%s&' % escape(consumer.secret)
        if token:
            key = key + escape(token.secret)
        return to_bytestring(key), base
    def sign(self, request, consumer, token):
        """Builds the base signature string."""
        key, raw = self.signing_base(request, consumer, token)
        mac = hmac.new(to_bytestring(key), raw, sha)
        # Base64 digest, minus the trailing newline b2a_base64 appends.
        return binascii.b2a_base64(mac.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
    # Trivial OAuth signature method: the "signature" is just the
    # escaped secrets themselves (only safe over TLS).
    name = 'PLAINTEXT'
    def signing_base(self, request, consumer, token):
        """Return the plaintext base: the escaped consumer secret (and
        escaped token secret, when present) joined by '&'.
        Key and message are the same string for PLAINTEXT.
        """
        secret = '%s&' % escape(consumer.secret)
        if token:
            secret += escape(token.secret)
        return secret, secret
    def sign(self, request, consumer, token):
        """The signature is simply the base string itself."""
        _, raw = self.signing_base(request, consumer, token)
        return raw
| mit | a599aafb833f8a1ee9bdb539c9093ebb | 31.822222 | 265 | 0.59657 | 4.188091 | false | false | false | false |
benoitc/restkit | restkit/contrib/webob_helper.py | 5 | 1808 | # -*- coding: utf-8 -
#
# This file is part of restkit released under the MIT license.
# See the NOTICE for more information.
import webob.exc
from restkit import errors
class WebobResourceError(webob.exc.WSGIHTTPException):
"""
Wrapper to return webob exceptions instead of restkit errors. Usefull
for those who want to build `WSGI <http://wsgi.org/wsgi/>`_ applications
speaking directly to others via HTTP.
To do it place somewhere in your application the function
`wrap_exceptions`::
wrap_exceptions()
It will automatically replace restkit errors by webob exceptions.
"""
def __init__(self, msg=None, http_code=None, response=None):
webob.exc.WSGIHTTPException.__init__(self)
http_code = http_code or 500
klass = webob.exc.status_map[http_code]
self.code = http_code
self.title = klass.title
self.status = '%s %s' % (self.code, self.title)
self.explanation = msg
self.response = response
# default params
self.msg = msg
def _status_int__get(self):
"""
The status as an integer
"""
return int(self.status.split()[0])
def _status_int__set(self, value):
self.status = value
status_int = property(_status_int__get, _status_int__set,
doc=_status_int__get.__doc__)
def _get_message(self):
return self.explanation
def _set_message(self, msg):
self.explanation = msg or ''
message = property(_get_message, _set_message)
webob_exceptions = False
def wrap_exceptions():
""" wrap restkit exception to return WebBob exceptions"""
global webob_exceptions
if webob_exceptions: return
errors.ResourceError = WebobResourceError
webob_exceptions = True
| mit | ed79a9b8e72c1d8befa66179209efc90 | 28.639344 | 76 | 0.639381 | 4.053812 | false | false | false | false |
geopy/geopy | test/proxy_server.py | 1 | 9539 | import base64
import http.server as SimpleHTTPServer
import select
import socket
import socketserver as SocketServer
import threading
from urllib.request import urlopen
def pipe_sockets(sock1, sock2, timeout):
"""Pipe data from one socket to another and vice-versa."""
sockets = [sock1, sock2]
try:
while True:
rlist, _, xlist = select.select(sockets, [], sockets, timeout)
if xlist:
break
for sock in rlist:
data = sock.recv(8096)
if not data: # disconnected
break
other = next(s for s in sockets if s is not sock)
other.sendall(data)
except OSError:
pass
finally:
for sock in sockets:
sock.close()
class Future:
# concurrent.futures.Future docs say that they shouldn't be instantiated
# directly, so this is a simple implementation which mimics the Future
# which can safely be instantiated!
def __init__(self):
self._event = threading.Event()
self._result = None
self._exc = None
def result(self, timeout=None):
if not self._event.wait(timeout):
raise AssertionError("Future timed out")
if self._exc is not None:
raise self._exc
return self._result
def set_result(self, result):
self._result = result
self._event.set()
def set_exception(self, exception):
self._exc = exception
self._event.set()
class ProxyServerThread(threading.Thread):
spinup_timeout = 10
def __init__(self, timeout=None):
self.proxy_host = 'localhost'
self.proxy_port = None # randomly selected by OS
self.timeout = timeout
self.proxy_server = None
self.socket_created_future = Future()
self.requests = []
self.auth = None
super().__init__()
self.daemon = True
def reset(self):
self.requests.clear()
self.auth = None
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
self.join()
def set_auth(self, username, password):
self.auth = "%s:%s" % (username, password)
def get_proxy_url(self, with_scheme=True):
assert self.socket_created_future.result(self.spinup_timeout)
if self.auth:
auth = "%s@" % self.auth
else:
auth = ""
if with_scheme:
scheme = "http://"
else:
scheme = ""
return "%s%s%s:%s" % (scheme, auth, self.proxy_host, self.proxy_port)
def run(self):
assert not self.proxy_server, ("This class is not reentrable. "
"Please create a new instance.")
requests = self.requests
proxy_thread = self
class Proxy(SimpleHTTPServer.SimpleHTTPRequestHandler):
timeout = self.timeout
def check_auth(self):
if proxy_thread.auth is not None:
auth_header = self.headers.get('Proxy-Authorization')
b64_auth = base64.standard_b64encode(
proxy_thread.auth.encode()
).decode()
expected_auth = "Basic %s" % b64_auth
if auth_header != expected_auth:
self.send_response(401)
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(
(
"not authenticated. Expected %r, received %r"
% (expected_auth, auth_header)
).encode()
)
self.connection.close()
return False
return True
def do_GET(self):
if not self.check_auth():
return
requests.append(self.path)
req = urlopen(self.path, timeout=self.timeout)
self.send_response(req.getcode())
content_type = req.info().get('content-type', None)
if content_type:
self.send_header('Content-Type', content_type)
self.send_header('Connection', 'close')
self.end_headers()
self.copyfile(req, self.wfile)
self.connection.close()
req.close()
def do_CONNECT(self):
if not self.check_auth():
return
requests.append(self.path)
# Make a raw TCP connection to the target server
host, port = self.path.split(':')
try:
addr = host, int(port)
other_connection = \
socket.create_connection(addr, timeout=self.timeout)
except OSError:
self.send_error(502, 'Bad gateway')
return
# Respond that a tunnel has been created
self.send_response(200)
self.send_header('Connection', 'close')
self.end_headers()
pipe_sockets(self.connection, # it closes sockets
other_connection, self.timeout)
# ThreadingTCPServer offloads connections to separate threads, so
# the serve_forever loop doesn't block until connection is closed
# (unlike TCPServer). This allows to shutdown the serve_forever loop
# even if there's an open connection.
try:
self.proxy_server = SocketServer.ThreadingTCPServer(
(self.proxy_host, 0),
Proxy
)
# don't hang if there're some open connections
self.proxy_server.daemon_threads = True
self.proxy_port = self.proxy_server.server_address[1]
except Exception as e:
self.socket_created_future.set_exception(e)
raise
else:
self.socket_created_future.set_result(True)
self.proxy_server.serve_forever()
def stop(self):
self.proxy_server.shutdown() # stop serve_forever()
self.proxy_server.server_close()
class HttpServerThread(threading.Thread):
spinup_timeout = 10
def __init__(self, timeout=None):
self.server_host = 'localhost'
self.server_port = None # randomly selected by OS
self.timeout = timeout
self.http_server = None
self.socket_created_future = Future()
super().__init__()
self.daemon = True
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
self.join()
def get_server_url(self):
assert self.socket_created_future.result(self.spinup_timeout)
return "http://%s:%s" % (self.server_host, self.server_port)
def run(self):
assert not self.http_server, ("This class is not reentrable. "
"Please create a new instance.")
class Server(SimpleHTTPServer.SimpleHTTPRequestHandler):
timeout = self.timeout
def do_GET(self):
if self.path == "/":
self.send_response(200)
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(b"Hello world")
elif self.path == "/json":
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(b'{"hello":"world"}')
elif self.path == "/json/plain":
self.send_response(200)
self.send_header('Content-type', 'text/plain;charset=utf-8')
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(b'{"hello":"world"}')
else:
self.send_response(404)
self.send_header('Connection', 'close')
self.send_header('X-test-header', 'hello')
self.end_headers()
self.wfile.write(b"Not found")
self.connection.close()
# ThreadingTCPServer offloads connections to separate threads, so
# the serve_forever loop doesn't block until connection is closed
# (unlike TCPServer). This allows to shutdown the serve_forever loop
# even if there's an open connection.
try:
self.http_server = SocketServer.ThreadingTCPServer(
(self.server_host, 0),
Server
)
# don't hang if there're some open connections
self.http_server.daemon_threads = True
self.server_port = self.http_server.server_address[1]
except Exception as e:
self.socket_created_future.set_exception(e)
raise
else:
self.socket_created_future.set_result(True)
self.http_server.serve_forever()
def stop(self):
self.http_server.shutdown() # stop serve_forever()
self.http_server.server_close()
| mit | b706bcdbe786a4ba4c48512ae177ce11 | 33.189964 | 80 | 0.525736 | 4.55105 | false | false | false | false |
geopy/geopy | test/test_location.py | 2 | 4902 | import pickle
import unittest
from geopy.location import Location
from geopy.point import Point
GRAND_CENTRAL_STR = "89 E 42nd St New York, NY 10017"
GRAND_CENTRAL_COORDS_STR = "40.752662,-73.9773"
GRAND_CENTRAL_COORDS_TUPLE = (40.752662, -73.9773, 0)
GRAND_CENTRAL_POINT = Point(GRAND_CENTRAL_COORDS_STR)
GRAND_CENTRAL_RAW = {
'id': '1',
'class': 'place',
'lat': '40.752662',
'lon': '-73.9773',
'display_name':
"89, East 42nd Street, New York, "
"New York, 10017, United States of America",
}
class LocationTestCase(unittest.TestCase):
def _location_iter_test(
self,
loc,
ref_address=GRAND_CENTRAL_STR,
ref_longitude=GRAND_CENTRAL_COORDS_TUPLE[0],
ref_latitude=GRAND_CENTRAL_COORDS_TUPLE[1]
):
address, (latitude, longitude) = loc
self.assertEqual(address, ref_address)
self.assertEqual(latitude, ref_longitude)
self.assertEqual(longitude, ref_latitude)
def _location_properties_test(self, loc, raw=None):
self.assertEqual(loc.address, GRAND_CENTRAL_STR)
self.assertEqual(loc.latitude, GRAND_CENTRAL_COORDS_TUPLE[0])
self.assertEqual(loc.longitude, GRAND_CENTRAL_COORDS_TUPLE[1])
self.assertEqual(loc.altitude, GRAND_CENTRAL_COORDS_TUPLE[2])
if raw is not None:
self.assertEqual(loc.raw, raw)
def test_location_str(self):
loc = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_COORDS_STR, {})
self._location_iter_test(loc)
self.assertEqual(loc.point, GRAND_CENTRAL_POINT)
def test_location_point(self):
loc = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_POINT, {})
self._location_iter_test(loc)
self.assertEqual(loc.point, GRAND_CENTRAL_POINT)
def test_location_none(self):
with self.assertRaises(TypeError):
Location(GRAND_CENTRAL_STR, None, {})
def test_location_iter(self):
loc = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_COORDS_TUPLE, {})
self._location_iter_test(loc)
self.assertEqual(loc.point, GRAND_CENTRAL_POINT)
def test_location_point_typeerror(self):
with self.assertRaises(TypeError):
Location(GRAND_CENTRAL_STR, 1, {})
def test_location_array_access(self):
loc = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_COORDS_TUPLE, {})
self.assertEqual(loc[0], GRAND_CENTRAL_STR)
self.assertEqual(loc[1][0], GRAND_CENTRAL_COORDS_TUPLE[0])
self.assertEqual(loc[1][1], GRAND_CENTRAL_COORDS_TUPLE[1])
def test_location_properties(self):
loc = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_POINT, {})
self._location_properties_test(loc)
def test_location_raw(self):
loc = Location(
GRAND_CENTRAL_STR, GRAND_CENTRAL_POINT, raw=GRAND_CENTRAL_RAW
)
self._location_properties_test(loc, GRAND_CENTRAL_RAW)
def test_location_string(self):
loc = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_POINT, {})
self.assertEqual(str(loc), loc.address)
def test_location_len(self):
loc = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_POINT, {})
self.assertEqual(len(loc), 2)
def test_location_eq(self):
loc1 = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_POINT, {})
loc2 = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_COORDS_TUPLE, {})
self.assertEqual(loc1, loc2)
def test_location_ne(self):
loc1 = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_POINT, {})
loc2 = Location(GRAND_CENTRAL_STR, Point(0, 0), {})
self.assertNotEqual(loc1, loc2)
def test_location_repr(self):
address = (
"22, Ksi\u0119dza Paw\u0142a Po\u015bpiecha, "
"Centrum Po\u0142udnie, Zabrze, wojew\xf3dztwo "
"\u015bl\u0105skie, 41-800, Polska"
)
point = (0.0, 0.0, 0.0)
loc = Location(address, point, {})
self.assertEqual(
repr(loc),
"Location(%s, %r)" % (address, point)
)
def test_location_is_picklable(self):
loc = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_POINT, {})
# https://docs.python.org/2/library/pickle.html#data-stream-format
for protocol in (0, 1, 2, -1):
pickled = pickle.dumps(loc, protocol=protocol)
loc_unp = pickle.loads(pickled)
self.assertEqual(loc, loc_unp)
def test_location_with_unpicklable_raw(self):
some_class = type('some_class', (object,), {})
raw_unpicklable = dict(missing=some_class())
del some_class
loc_unpicklable = Location(GRAND_CENTRAL_STR, GRAND_CENTRAL_POINT,
raw_unpicklable)
for protocol in (0, 1, 2, -1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickle.dumps(loc_unpicklable, protocol=protocol)
| mit | bdbdbc277159b53ed5c463c1c5b77368 | 36.136364 | 75 | 0.626479 | 3.088847 | false | true | false | false |
geopy/geopy | geopy/geocoders/bing.py | 2 | 9344 | import collections.abc
from functools import partial
from urllib.parse import quote, urlencode
from geopy.exc import (
GeocoderAuthenticationFailure,
GeocoderInsufficientPrivileges,
GeocoderRateLimited,
GeocoderServiceError,
GeocoderUnavailable,
)
from geopy.geocoders.base import DEFAULT_SENTINEL, Geocoder
from geopy.location import Location
from geopy.util import join_filter, logger
__all__ = ("Bing", )
class Bing(Geocoder):
"""Geocoder using the Bing Maps Locations API.
Documentation at:
https://msdn.microsoft.com/en-us/library/ff701715.aspx
"""
structured_query_params = {
'addressLine',
'locality',
'adminDistrict',
'countryRegion',
'postalCode',
}
geocode_path = '/REST/v1/Locations'
reverse_path = '/REST/v1/Locations/%(point)s'
def __init__(
self,
api_key,
*,
scheme=None,
timeout=DEFAULT_SENTINEL,
proxies=DEFAULT_SENTINEL,
user_agent=None,
ssl_context=DEFAULT_SENTINEL,
adapter_factory=None
):
"""
:param str api_key: Should be a valid Bing Maps API key
(https://www.microsoft.com/en-us/maps/create-a-bing-maps-key).
:param str scheme:
See :attr:`geopy.geocoders.options.default_scheme`.
:param int timeout:
See :attr:`geopy.geocoders.options.default_timeout`.
:param dict proxies:
See :attr:`geopy.geocoders.options.default_proxies`.
:param str user_agent:
See :attr:`geopy.geocoders.options.default_user_agent`.
:type ssl_context: :class:`ssl.SSLContext`
:param ssl_context:
See :attr:`geopy.geocoders.options.default_ssl_context`.
:param callable adapter_factory:
See :attr:`geopy.geocoders.options.default_adapter_factory`.
.. versionadded:: 2.0
"""
super().__init__(
scheme=scheme,
timeout=timeout,
proxies=proxies,
user_agent=user_agent,
ssl_context=ssl_context,
adapter_factory=adapter_factory,
)
self.api_key = api_key
domain = 'dev.virtualearth.net'
self.geocode_api = '%s://%s%s' % (self.scheme, domain, self.geocode_path)
self.reverse_api = '%s://%s%s' % (self.scheme, domain, self.reverse_path)
def geocode(
self,
query,
*,
exactly_one=True,
user_location=None,
timeout=DEFAULT_SENTINEL,
culture=None,
include_neighborhood=None,
include_country_code=False
):
"""
Return a location point by address.
:param query: The address or query you wish to geocode.
For a structured query, provide a dictionary whose keys
are one of: `addressLine`, `locality` (city),
`adminDistrict` (state), `countryRegion`, or `postalCode`.
:type query: str or dict
:param bool exactly_one: Return one result or a list of results, if
available.
:param user_location: Prioritize results closer to
this location.
:type user_location: :class:`geopy.point.Point`
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param str culture: Affects the language of the response,
must be a two-letter country code.
:param bool include_neighborhood: Sets whether to include the
neighborhood field in the response.
:param bool include_country_code: Sets whether to include the
two-letter ISO code of the country in the response (field name
'countryRegionIso2').
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
if isinstance(query, collections.abc.Mapping):
params = {
key: val
for key, val
in query.items()
if key in self.structured_query_params
}
params['key'] = self.api_key
else:
params = {
'query': query,
'key': self.api_key
}
if user_location:
params['userLocation'] = ",".join(
(str(user_location.latitude), str(user_location.longitude))
)
if exactly_one:
params['maxResults'] = 1
if culture:
params['culture'] = culture
if include_neighborhood is not None:
params['includeNeighborhood'] = include_neighborhood
if include_country_code:
params['include'] = 'ciso2' # the only acceptable value
url = "?".join((self.geocode_api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
callback = partial(self._parse_json, exactly_one=exactly_one)
return self._call_geocoder(url, callback, timeout=timeout)
def reverse(
self,
query,
*,
exactly_one=True,
timeout=DEFAULT_SENTINEL,
culture=None,
include_country_code=False
):
"""
Return an address by location point.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param str culture: Affects the language of the response,
must be a two-letter country code.
:param bool include_country_code: Sets whether to include the
two-letter ISO code of the country in the response (field name
'countryRegionIso2').
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
point = self._coerce_point_to_string(query)
params = {'key': self.api_key}
if culture:
params['culture'] = culture
if include_country_code:
params['include'] = 'ciso2' # the only acceptable value
quoted_point = quote(point.encode('utf-8'))
url = "?".join((self.reverse_api % dict(point=quoted_point),
urlencode(params)))
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
callback = partial(self._parse_json, exactly_one=exactly_one)
return self._call_geocoder(url, callback, timeout=timeout)
    def _parse_json(self, doc, exactly_one=True):
        """
        Parse a location name, latitude, and longitude from an JSON response.
        """
        # Bing embeds an HTTP-like status code in the response body; a
        # missing statusCode is treated as success.
        status_code = doc.get("statusCode", 200)
        if status_code != 200:
            err = doc.get("errorDetails", "")
            # Map Bing's status codes onto geopy's exception hierarchy.
            if status_code == 401:
                raise GeocoderAuthenticationFailure(err)
            elif status_code == 403:
                raise GeocoderInsufficientPrivileges(err)
            elif status_code == 429:
                raise GeocoderRateLimited(err)
            elif status_code == 503:
                raise GeocoderUnavailable(err)
            else:
                raise GeocoderServiceError(err)

        resources = doc['resourceSets'][0]['resources']
        if resources is None or not len(resources):
            # No matches found.
            return None

        def parse_resource(resource):
            """
            Parse each return object.
            """
            stripchars = ", \n"
            addr = resource['address']

            address = addr.get('addressLine', '').strip(stripchars)
            city = addr.get('locality', '').strip(stripchars)
            state = addr.get('adminDistrict', '').strip(stripchars)
            zipcode = addr.get('postalCode', '').strip(stripchars)
            country = addr.get('countryRegion', '').strip(stripchars)

            # Build a human-readable "address, city state zip, country"
            # string, skipping any empty components.
            city_state = join_filter(", ", [city, state])
            place = join_filter(" ", [city_state, zipcode])
            location = join_filter(", ", [address, place, country])

            latitude = resource['point']['coordinates'][0] or None
            longitude = resource['point']['coordinates'][1] or None
            # NOTE(review): the `or None` above maps a coordinate of exactly
            # 0.0 to None, so points on the equator/prime meridian lose that
            # axis -- presumably intentional upstream; confirm.
            if latitude and longitude:
                latitude = float(latitude)
                longitude = float(longitude)

            return Location(location, (latitude, longitude), resource)

        if exactly_one:
            return parse_resource(resources[0])
        else:
            return [parse_resource(resource) for resource in resources]
| mit | 881ccdeab8fcac25e9ab9ef97f204cb1 | 34.393939 | 81 | 0.574379 | 4.245343 | false | false | false | false |
geopy/geopy | test/geocoders/__init__.py | 2 | 10806 | import importlib
import inspect
import pkgutil
import docutils.core
import docutils.utils
import pytest
import geopy.geocoders
from geopy.geocoders.base import DEFAULT_SENTINEL, Geocoder
# Geocoder modules that must not be introspected by the meta-tests below.
skip_modules = [
    "geopy.geocoders.base",  # doesn't contain actual geocoders
    "geopy.geocoders.googlev3",  # deprecated
    "geopy.geocoders.osm",  # deprecated
]

# All geocoder submodules of the geopy.geocoders package (minus
# skip_modules), sorted by module name for stable test parametrization.
geocoder_modules = sorted(
    [
        importlib.import_module(name)
        for _, name, _ in pkgutil.iter_modules(
            geopy.geocoders.__path__, "geopy.geocoders."
        )
        if name not in skip_modules
    ],
    key=lambda m: m.__name__,
)

# Every concrete Geocoder subclass found in those modules, deduplicated
# via a set comprehension and sorted by class name.
geocoder_classes = sorted(
    {
        v
        for v in (
            getattr(module, name) for module in geocoder_modules for name in dir(module)
        )
        if inspect.isclass(v) and issubclass(v, Geocoder) and v is not Geocoder
    },
    key=lambda cls: cls.__name__,
)
def assert_no_varargs(sig):
    """Fail if the signature *sig* declares ``*args`` or ``**kwargs``."""
    variadic_kinds = (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD)
    offenders = [str(p) for p in sig.parameters.values() if p.kind in variadic_kinds]
    assert not offenders, (
        "Geocoders must not have any (*args) or (**kwargs). "
        "See CONTRIBUTING.md for explanation."
    )
def assert_rst(sig, doc, allowed_rtypes=(None,)):
    """Validate that the RST field lists of docstring *doc* match *sig*.

    Checks that:
    * there is at most one ``:rtype:`` directive and its text is one of
      ``allowed_rtypes``;
    * the ``:param ...:`` directives cover exactly the method's parameters,
      in the same order;
    * every parameter has a type, either inline (``:param str query:``) or
      via a separate ``:type query:`` directive, with no duplicate or
      extraneous type definitions.
    """
    # Parse RST from the docstring and generate an XML tree:
    doctree = docutils.core.publish_doctree(
        doc,
        settings_overrides={
            # Only report SEVERE-and-above, i.e. effectively suppress
            # warnings/errors emitted while parsing the docstring.
            "report_level": docutils.utils.Reporter.SEVERE_LEVEL + 1,
        },
    ).asdom()

    def get_all_text(node):
        # Recursively collect the text of `node`, re-wrapping literal nodes
        # in double backquotes so the text matches the original RST source.
        if node.nodeType == node.TEXT_NODE:
            return node.data
        else:
            text_string = ""
            for child_node in node.childNodes:
                if child_node.nodeName == "system_message":
                    # skip warnings/errors
                    continue
                if child_node.nodeName == "literal":
                    tmpl = "``%s``"
                else:
                    tmpl = "%s"
                text_string += tmpl % (get_all_text(child_node),)
            return text_string

    documented_rtype = None
    documented_types = {}
    documented_params = []
    for field in doctree.getElementsByTagName("field"):
        field_name = get_all_text(field.getElementsByTagName("field_name")[0])
        if field_name == "rtype":
            assert documented_rtype is None, "There must be single :rtype: directive"
            field_body = get_all_text(field.getElementsByTagName("field_body")[0])
            assert field_body, ":rtype: directive must have a value"
            documented_rtype = field_body.replace("\n", " ")
        if field_name.startswith("type"):
            # Standalone type directive, e.g. ":type ssl_context:".
            parts = field_name.split(" ")  # ['type', 'ssl_context']
            param_name = parts[-1]
            assert param_name not in documented_types, "Duplicate `type` definition"
            field_body = get_all_text(field.getElementsByTagName("field_body")[0])
            documented_types[param_name] = field_body
        if field_name.startswith("param"):
            parts = field_name.split(" ")  # ['param', 'str', 'query']
            param_name = parts[-1]
            documented_params.append(param_name)
            if len(parts) == 3:
                # Inline type annotation, e.g. ":param str query:".
                assert param_name not in documented_types, "Duplicate `type` definition"
                documented_types[param_name] = parts[1]

    method_params = list(sig.parameters.keys())[1:]  # skip `self`
    assert method_params == documented_params, (
        "Actual method params set or order doesn't match the documented "
        ":param ...: directives in the docstring."
    )
    missing_types = set(documented_params) - documented_types.keys()
    assert not missing_types, "Not all params have types"
    assert set(documented_types.keys()) == set(documented_params), (
        "There are extraneous :type: directives"
    )
    assert documented_rtype in allowed_rtypes
def test_all_geocoders_are_exported_from_package():
    """Every geocoder class must be importable from geopy.geocoders."""
    exported_names = set(dir(geopy.geocoders))
    missing = {cls.__name__ for cls in geocoder_classes} - exported_names
    assert not missing, (
        "These geocoders must be exported (via imports) "
        "in geopy/geocoders/__init__.py"
    )
def test_all_geocoders_are_listed_in_all():
    """Every geocoder class must appear in geopy.geocoders.__all__."""
    declared = set(geopy.geocoders.__all__)
    missing = {cls.__name__ for cls in geocoder_classes} - declared
    assert not missing, (
        "These geocoders must be listed in the `__all__` tuple "
        "in geopy/geocoders/__init__.py"
    )
def test_all_geocoders_are_listed_in_service_to_geocoder():
    """SERVICE_TO_GEOCODER must map onto exactly the geocoder classes."""
    registered = set(geopy.geocoders.SERVICE_TO_GEOCODER.values())
    assert registered == set(geocoder_classes), (
        "All geocoders must be listed in the `SERVICE_TO_GEOCODER` dict "
        "in geopy/geocoders/__init__.py"
    )
@pytest.mark.parametrize("geocoder_module", geocoder_modules, ids=lambda m: m.__name__)
def test_geocoder_module_all(geocoder_module):
current_all = geocoder_module.__all__
expected_all = tuple(
cls.__name__
for cls in geocoder_classes
if cls.__module__ == geocoder_module.__name__
)
assert expected_all == current_all
@pytest.mark.parametrize("geocoder_cls", geocoder_classes)
def test_init_method_signature(geocoder_cls):
method = geocoder_cls.__init__
sig = inspect.signature(method)
assert_no_varargs(sig)
sig_timeout = sig.parameters["timeout"]
assert sig_timeout.kind == inspect.Parameter.KEYWORD_ONLY
assert sig_timeout.default is DEFAULT_SENTINEL
sig_proxies = sig.parameters["proxies"]
assert sig_proxies.kind == inspect.Parameter.KEYWORD_ONLY
assert sig_proxies.default is DEFAULT_SENTINEL
sig_user_agent = sig.parameters["user_agent"]
assert sig_user_agent.kind == inspect.Parameter.KEYWORD_ONLY
assert sig_user_agent.default is None
sig_ssl_context = sig.parameters["ssl_context"]
assert sig_ssl_context.kind == inspect.Parameter.KEYWORD_ONLY
assert sig_ssl_context.default is DEFAULT_SENTINEL
sig_adapter_factory = sig.parameters["adapter_factory"]
assert sig_adapter_factory.kind == inspect.Parameter.KEYWORD_ONLY
assert sig_adapter_factory.default is None
assert_rst(sig, method.__doc__)
@pytest.mark.parametrize("geocoder_cls", geocoder_classes)
def test_geocode_method_signature(geocoder_cls):
# Every geocoder should have at least a `geocode` method.
method = geocoder_cls.geocode
sig = inspect.signature(method)
assert_no_varargs(sig)
# The first arg (except self) must be called `query`:
sig_query = list(sig.parameters.values())[1]
assert sig_query.name == "query"
assert sig_query.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
# The rest must be kwargs-only:
sig_kwargs = list(sig.parameters.values())[2:]
assert all(p.kind == inspect.Parameter.KEYWORD_ONLY for p in sig_kwargs), (
"All method args except `query` must be keyword-only "
"(i.e. separated with an `*`)."
)
# kwargs must contain `exactly_one`:
sig_exactly_one = sig.parameters["exactly_one"]
assert sig_exactly_one.default is True, "`exactly_one` must be True"
# kwargs must contain `timeout`:
sig_timeout = sig.parameters["timeout"]
assert sig_timeout.default is DEFAULT_SENTINEL, "`timeout` must be DEFAULT_SENTINEL"
assert_rst(
sig,
method.__doc__,
allowed_rtypes=[
":class:`geopy.location.Location` or a list of them, "
"if ``exactly_one=False``.", # what3words
"``None``, :class:`geopy.location.Location` or a list of them, "
"if ``exactly_one=False``.",
],
)
@pytest.mark.parametrize(
    "geocoder_cls",
    [cls for cls in geocoder_classes if getattr(cls, "reverse", None)],
)
def test_reverse_method_signature(geocoder_cls):
    """`reverse`, where implemented, must follow the common signature."""
    method = geocoder_cls.reverse
    sig = inspect.signature(method)
    assert_no_varargs(sig)

    params = list(sig.parameters.values())
    # First arg (except self) must be called `query`:
    query_param = params[1]
    assert query_param.name == "query"
    assert query_param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
    # The rest must be kwargs-only:
    assert all(p.kind == inspect.Parameter.KEYWORD_ONLY for p in params[2:]), (
        "All method args except `query` must be keyword-only "
        "(i.e. separated with an `*`)."
    )
    assert sig.parameters["exactly_one"].default is True, "`exactly_one` must be True"
    assert sig.parameters["timeout"].default is DEFAULT_SENTINEL, "`timeout` must be DEFAULT_SENTINEL"

    assert_rst(
        sig,
        method.__doc__,
        allowed_rtypes=[
            ":class:`geopy.location.Location` or a list of them, "  # what3words
            "if ``exactly_one=False``.",
            "``None``, :class:`geopy.location.Location` or a list of them, "
            "if ``exactly_one=False``.",
        ],
    )
@pytest.mark.parametrize(
    "geocoder_cls",
    [cls for cls in geocoder_classes if getattr(cls, "reverse_timezone", None)],
)
def test_reverse_timezone_method_signature(geocoder_cls):
    """`reverse_timezone`, where implemented, must follow the common signature."""
    method = geocoder_cls.reverse_timezone
    sig = inspect.signature(method)
    assert_no_varargs(sig)

    params = list(sig.parameters.values())
    # First arg (except self) must be called `query`:
    query_param = params[1]
    assert query_param.name == "query"
    assert query_param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
    # The rest must be kwargs-only:
    assert all(p.kind == inspect.Parameter.KEYWORD_ONLY for p in params[2:]), (
        "All method args except `query` must be keyword-only "
        "(i.e. separated with an `*`)."
    )
    assert sig.parameters["timeout"].default is DEFAULT_SENTINEL, "`timeout` must be DEFAULT_SENTINEL"

    assert_rst(
        sig,
        method.__doc__,
        allowed_rtypes=[
            ":class:`geopy.timezone.Timezone`.",
            "``None`` or :class:`geopy.timezone.Timezone`.",
        ],
    )
@pytest.mark.parametrize("geocoder_cls", geocoder_classes)
def test_no_extra_public_methods(geocoder_cls):
methods = {
n
for n in dir(geocoder_cls)
if not n.startswith("_") and inspect.isfunction(getattr(geocoder_cls, n))
}
allowed = {
"geocode",
"reverse",
"reverse_timezone",
}
assert methods <= allowed, (
"Geopy geocoders are currently allowed to only have these methods: %s" % allowed
)
| mit | 20b2908364ee4594333febb43bafbafa | 33.196203 | 88 | 0.629187 | 3.700685 | false | true | false | false |
geopy/geopy | geopy/units.py | 2 | 2950 | """``geopy.units`` module provides utility functions for performing
angle and distance unit conversions.
Some shortly named aliases are provided for convenience (e.g.
:func:`.km` is an alias for :func:`.kilometers`).
"""
import math
# Angles
def degrees(radians=0, arcminutes=0, arcseconds=0):
    """Convert an angle given in radians/arcminutes/arcseconds to degrees."""
    total = math.degrees(radians) if radians else 0.
    if arcminutes:
        total += arcminutes / arcmin(degrees=1.)
    if arcseconds:
        total += arcseconds / arcsec(degrees=1.)
    return total
def radians(degrees=0, arcminutes=0, arcseconds=0):
    """Convert an angle given in degrees/arcminutes/arcseconds to radians."""
    total_degrees = degrees
    if arcminutes:
        total_degrees += arcminutes / arcmin(degrees=1.)
    if arcseconds:
        total_degrees += arcseconds / arcsec(degrees=1.)
    return math.radians(total_degrees)
def arcminutes(degrees=0, radians=0, arcseconds=0):
    """Convert an angle given in degrees/radians/arcseconds to arcminutes."""
    total_degrees = degrees
    if radians:
        total_degrees += math.degrees(radians)
    if arcseconds:
        total_degrees += arcseconds / arcsec(degrees=1.)
    return total_degrees * 60.
def arcseconds(degrees=0, radians=0, arcminutes=0):
    """Convert an angle given in degrees/radians/arcminutes to arcseconds."""
    total_degrees = degrees
    if radians:
        total_degrees += math.degrees(radians)
    if arcminutes:
        total_degrees += arcminutes / arcmin(degrees=1.)
    return total_degrees * 3600.
# Lengths
def kilometers(meters=0, miles=0, feet=0, nautical=0):
    """Convert a distance given in meters/miles/feet/nautical miles to km."""
    total = 0.
    if meters:
        total += meters / 1000.
    if feet:
        total += feet / ft(1.)
    if nautical:
        total += nautical / nm(1.)
    # Miles are always added last (conversion factor: 1 mi = 1.609344 km).
    return total + miles * 1.609344
def meters(kilometers=0, miles=0, feet=0, nautical=0):
    """Convert a distance given in km/miles/feet/nautical miles to meters."""
    total_km = kilometers + km(nautical=nautical, miles=miles, feet=feet)
    return total_km * 1000
def miles(kilometers=0, meters=0, feet=0, nautical=0):
    """Convert a distance given in km/meters/feet/nautical miles to miles."""
    total_km = kilometers
    if nautical:
        total_km += nautical / nm(1.)
    if feet:
        total_km += feet / ft(1.)
    if meters:
        total_km += meters / 1000.
    # 1 mi = 1.609344 km.
    return total_km / 1.609344
def feet(kilometers=0, meters=0, miles=0, nautical=0):
    """Convert a distance given in km/meters/miles/nautical miles to feet."""
    total_km = kilometers
    if nautical:
        total_km += nautical / nm(1.)
    if meters:
        total_km += meters / 1000.
    total_miles = miles
    if total_km:
        total_miles += mi(kilometers=total_km)
    # 1 mi = 5280 ft.
    return total_miles * 5280.
def nautical(kilometers=0, meters=0, miles=0, feet=0):
    """Convert a distance given in km/meters/miles/feet to nautical miles."""
    total_km = kilometers
    if feet:
        total_km += feet / ft(1.)
    if miles:
        total_km += km(miles=miles)
    if meters:
        total_km += meters / 1000.
    # 1 nautical mile = 1.852 km.
    return total_km / 1.852
# Compatible names
# Short aliases kept for backwards compatibility and used internally by the
# conversion functions above (e.g. kilometers() calls ft()/nm()).
rad = radians
arcmin = arcminutes
arcsec = arcseconds
km = kilometers
m = meters
mi = miles
ft = feet
nm = nautical
| mit | 3cf9e838d1f9dc4594fe9915de956419 | 20.376812 | 78 | 0.600339 | 3.281424 | false | false | false | false |
geopy/geopy | test/geocoders/arcgis.py | 1 | 3150 | import pytest
from geopy import exc
from geopy.geocoders import ArcGIS
from geopy.point import Point
from test.geocoders.util import BaseTestGeocoder, env
class TestUnitArcGIS:
    """Offline unit tests for ArcGIS (no network access required)."""

    def test_user_agent_custom(self):
        custom_agent = 'my_user_agent/1.0'
        geocoder_instance = ArcGIS(user_agent=custom_agent)
        assert geocoder_instance.headers['User-Agent'] == custom_agent
class TestArcGIS(BaseTestGeocoder):
    """Online tests for the anonymous (unauthenticated) ArcGIS geocoder."""

    @classmethod
    def make_geocoder(cls, **kwargs):
        # Short timeout so an unresponsive service fails fast.
        return ArcGIS(timeout=3, **kwargs)

    async def test_missing_password_error(self):
        # Supplying a username without a password must be rejected up front.
        with pytest.raises(exc.ConfigurationError):
            ArcGIS(username='a')

    async def test_scheme_config_error(self):
        # Credentials + referer with plain http must be rejected.
        with pytest.raises(exc.ConfigurationError):
            ArcGIS(
                username='a',
                password='b',
                referer='http://www.example.com',
                scheme='http'
            )

    async def test_geocode(self):
        await self.geocode_run(
            {"query": "435 north michigan ave, chicago il 60611 usa"},
            {"latitude": 41.890, "longitude": -87.624},
        )

    async def test_empty_response(self):
        # Gibberish input should produce an empty (failed) geocode result.
        await self.geocode_run(
            {"query": "dksahdksahdjksahdoufydshf"},
            {},
            expect_failure=True
        )

    async def test_geocode_with_out_fields_string(self):
        # `out_fields` may be passed as a single comma-separated string.
        result = await self.geocode_run(
            {"query": "Trafalgar Square, London",
             "out_fields": "Country"},
            {}
        )
        assert result.raw['attributes'] == {'Country': 'GBR'}

    async def test_geocode_with_out_fields_list(self):
        # ... or as a list of field names.
        result = await self.geocode_run(
            {"query": "Trafalgar Square, London",
             "out_fields": ["City", "Type"]},
            {}
        )
        assert result.raw['attributes'] == {
            'City': 'London', 'Type': 'Tourist Attraction'
        }

    async def test_reverse_point(self):
        location = await self.reverse_run(
            {"query": Point(40.753898, -73.985071)},
            {"latitude": 40.75376406311989, "longitude": -73.98489005863667},
        )
        assert 'New York' in location.address

    async def test_reverse_not_exactly_one(self):
        await self.reverse_run(
            {"query": Point(40.753898, -73.985071), "exactly_one": False},
            {"latitude": 40.75376406311989, "longitude": -73.98489005863667},
        )

    async def test_reverse_long_label_address(self):
        # Open-water coordinates resolve to a feature label, not a street.
        await self.reverse_run(
            {"query": (35.173809, -37.485351)},
            {"address": "Atlantic Ocean"},
        )
class TestArcGISAuthenticated(BaseTestGeocoder):
    """Online tests for ArcGIS using credentials taken from the env config."""

    @classmethod
    def make_geocoder(cls, **kwargs):
        return ArcGIS(
            username=env['ARCGIS_USERNAME'],
            password=env['ARCGIS_PASSWORD'],
            referer=env['ARCGIS_REFERER'],
            timeout=3,
            **kwargs
        )

    async def test_basic_address(self):
        await self.geocode_run(
            {"query": "Potsdamer Platz, Berlin, Deutschland"},
            {"latitude": 52.5094982, "longitude": 13.3765983, "delta": 4},
        )
| mit | cc6ff082689231d1ec73dc40185e2afd | 29.288462 | 77 | 0.565714 | 3.604119 | false | true | false | false |
geopy/geopy | test/geocoders/pelias.py | 1 | 3216 | import warnings
from geopy.geocoders import Pelias
from geopy.point import Point
from test.geocoders.util import BaseTestGeocoder, env
class BaseTestPelias(BaseTestGeocoder):
    """Shared online test scenarios for Pelias-based geocoders.

    Concrete subclasses provide ``make_geocoder`` for a specific deployment.
    """

    # Allowed coordinate deviation (in degrees) when comparing results.
    delta = 0.04
    # Localized names for the same region (German / English).
    known_state_de = "Verwaltungsregion Ionische Inseln"
    known_state_en = "Ionian Islands Periphery"

    async def test_geocode(self):
        await self.geocode_run(
            {"query": "435 north michigan ave, chicago il 60611 usa"},
            {"latitude": 41.890, "longitude": -87.624},
        )
        await self.geocode_run(
            {"query": "san josé california"},
            {"latitude": 37.33939, "longitude": -121.89496},
        )

    async def test_reverse(self):
        await self.reverse_run(
            {"query": Point(40.75376406311989, -73.98489005863667)},
            {"latitude": 40.75376406311989, "longitude": -73.98489005863667}
        )

    async def test_boundary_rect(self):
        # The bounding box disambiguates "moscow" to Moscow, Idaho (USA).
        await self.geocode_run(
            {"query": "moscow",  # Idaho USA
             "boundary_rect": [[50.1, -130.1], [44.1, -100.9]]},
            {"latitude": 46.7323875, "longitude": -117.0001651},
        )

    async def test_geocode_language_parameter(self):
        query = "Graben 7, Wien"
        result_geocode = await self.geocode_run(
            {"query": query, "language": "de"}, {}
        )
        assert result_geocode.raw['properties']['country'] == "Österreich"

        result_geocode = await self.geocode_run(
            {"query": query, "language": "en"}, {}
        )
        assert result_geocode.raw['properties']['country'] == "Austria"

    async def test_reverse_language_parameter(self):
        query = "48.198674, 16.348388"

        result_reverse_de = await self.reverse_run(
            {"query": query, "language": "de"},
            {},
        )
        assert result_reverse_de.raw['properties']['country'] == "Österreich"

        result_reverse_en = await self.reverse_run(
            {"query": query, "language": "en"},
            {},
        )
        assert result_reverse_en.raw['properties']['country'] == "Austria"

    async def test_geocode_country_bias(self):
        # Without a bias, "moscow" resolves to Moscow, Russia.
        await self.geocode_run(
            {"query": "moscow"},
            {"latitude": 55.7504461, "longitude": 37.6174943},
        )
        # `country_bias` still works but must emit exactly one warning
        # (presumably a deprecation warning -- confirm the category).
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            await self.geocode_run(
                {"query": "moscow",  # Idaho USA
                 "country_bias": "USA"},
                {"latitude": 46.7323875, "longitude": -117.0001651},
            )
        assert len(w) == 1

    async def test_geocode_countries(self):
        await self.geocode_run(
            {"query": "moscow"},
            {"latitude": 55.7504461, "longitude": 37.6174943},
        )
        await self.geocode_run(
            {"query": "moscow",  # Idaho USA
             "countries": ["USA", "CAN"]},
            {"latitude": 46.7323875, "longitude": -117.0001651},
        )
class TestPelias(BaseTestPelias):
    """Run the shared Pelias scenarios against the deployment configured
    in the test environment (PELIAS_DOMAIN / PELIAS_KEY)."""

    @classmethod
    def make_geocoder(cls, **kwargs):
        domain = env['PELIAS_DOMAIN']
        return Pelias(domain, api_key=env['PELIAS_KEY'], **kwargs)
| mit | 7d1febb65b02e63efe831e5c28913eb6 | 32.123711 | 77 | 0.549331 | 3.523026 | false | true | false | false |
materialsproject/pymatgen-db | pymatgen/db/query_engine.py | 1 | 27387 | """
This module provides a QueryEngine that simplifies queries for Mongo databases
generated using hive.
"""
__author__ = "Shyue Ping Ong, Michael Kocher, Dan Gunter"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "2.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Mar 2 2013"
import itertools
import json
import logging
import os
import zlib
from collections import OrderedDict
from collections.abc import Iterable
import gridfs
import pymongo
from pymatgen.core import Composition, Structure
from pymatgen.electronic_structure.core import Orbital, Spin
from pymatgen.electronic_structure.dos import CompleteDos, Dos
from pymatgen.entries.computed_entries import ComputedEntry, ComputedStructureEntry
_log = logging.getLogger("mg." + __name__)
class QueryEngine:
"""This class defines a QueryEngine interface to a Mongo Collection based on
a set of aliases. This query engine also provides convenient translation
between various pymatgen objects and database objects.
The major difference between the QueryEngine's query() method and pymongo's
find() method is the treatment of nested fields. QueryEngine's query
will map the final result to a root level string, while pymmongo will
return the doc as is. For example, let's say you have a document
that is of the following form::
{"a": {"b" : 1}}
Using pymongo.find({}, fields=["a.b"]), you will get a doc where you need
to do doc["a"]["b"] to access the final result (1). Using
QueryEngine.query(properties=["a.b"], you will obtain a result that can be
accessed simply as doc["a.b"].
"""
# avoid hard-coding these in other places
ALIASES_CONFIG_KEY = "aliases_config"
COLLECTION_KEY = "collection"
HOST_KEY = "host"
PORT_KEY = "port"
DB_KEY = "database"
USER_KEY = "user"
PASSWORD_KEY = "password"
# Aliases and defaults
aliases = None #: See `aliases` arg to constructor
default_criteria = None #: See `default_criteria` arg to constructor
default_properties = None #: See `default_properties` arg to constructor
# Post-processing operations
query_post = None #: See `query_post` arg to constructor
result_post = None #: See `result_post` arg to constructor
    def __init__(
        self,
        host="127.0.0.1",
        port=27017,
        database="vasp",
        user=None,
        password=None,
        collection="tasks",
        aliases_config=None,
        default_properties=None,
        query_post=None,
        result_post=None,
        connection=None,
        replicaset=None,
        **ignore,
    ):
        """Constructor.

        Args:
            host (str): Hostname of database machine.
            port (int): Port for db access.
            database (str): Name of database to access.
            user (str): User for db access. `None` means no authentication.
            password (str): Password for db access. `None` means no auth.
            collection (str): Collection to query. Defaults to "tasks".
            connection (pymongo.Connection): If given, ignore 'host' and 'port'
                and use existing connection.
            replicaset (str): Name of a MongoDB replica set to connect to.
                `None` means a standalone connection.
            aliases_config(dict):
                An alias dict to use. Defaults to None, which means the default
                aliases defined in "aliases.json" is used. The aliases config
                should be of the following format::

                    {
                        "aliases": {
                            "e_above_hull": "analysis.e_above_hull",
                            "energy": "output.final_energy",
                            ....
                        },
                        "defaults": {
                            "state": "successful"
                        }
                    }

                aliases (dict): Keys are the incoming property, values are the
                    property it will be translated to. This makes it easier
                    to organize the doc format in a way that is different from the
                    query format.
                defaults (dict): Criteria that should be applied
                    by default to all queries. For example, a collection may
                    contain data from both successful and unsuccessful runs but
                    for most querying purposes, you may want just successful runs
                    only. Note that defaults do not affect explicitly specified
                    criteria, i.e., if you supply a query for {"state": "killed"},
                    this will override the default for {"state": "successful"}.
            default_properties (list): Property names (strings) to use by
                default, if no `properties` are given to query().
            query_post (list): Functions to post-process the `criteria` passed
                to `query()`, after aliases are resolved.
                Function takes two args, the criteria dict and list of
                result properties. Both may be modified in-place.
            result_post (list): Functions to post-process the cursor records.
                Function takes one arg, the document for the current record,
                that is modified in-place.
            **ignore: Any extra keyword arguments are silently discarded,
                which allows passing a larger settings dict directly.
        """
        self.host = host
        self.port = port
        self.replicaset = replicaset
        self.database_name = database
        if connection is None:
            # can't pass replicaset=None to MongoClient (fails validation)
            if self.replicaset:
                self.connection = pymongo.MongoClient(self.host, self.port, replicaset=self.replicaset)
            else:
                self.connection = pymongo.MongoClient(self.host, self.port)
        else:
            self.connection = connection
        self.db = self.connection[database]
        if user:
            # NOTE(review): Database.authenticate() was deprecated in pymongo 3
            # and removed in pymongo 4 -- presumably this targets an older
            # driver; confirm before upgrading pymongo.
            self.db.authenticate(user, password)
        # Assigning through the property also caches the Collection handle.
        self.collection_name = collection
        self.set_aliases_and_defaults(aliases_config=aliases_config, default_properties=default_properties)
        # Post-processing functions
        self.query_post = query_post or []
        self.result_post = result_post or []
@property
def collection_name(self):
"""
Returns collection name.
"""
return self._collection_name
    @collection_name.setter
    def collection_name(self, value):
        """Switch to another collection.

        Note that you may have to set the aliases and default properties if the
        schema of the new collection differs from the current collection.
        """
        self._collection_name = value
        # Keep the cached Collection handle in sync with the name.
        self.collection = self.db[value]
def set_aliases_and_defaults(self, aliases_config=None, default_properties=None):
"""
Set the alias config and defaults to use. Typically used when
switching to a collection with a different schema.
Args:
aliases_config:
An alias dict to use. Defaults to None, which means the default
aliases defined in "aliases.json" is used. See constructor
for format.
default_properties:
List of property names (strings) to use by default, if no
properties are given to the 'properties' argument of
query().
"""
if aliases_config is None:
with open(os.path.join(os.path.dirname(__file__), "aliases.json")) as f:
d = json.load(f)
self.aliases = d.get("aliases", {})
self.default_criteria = d.get("defaults", {})
else:
self.aliases = aliases_config.get("aliases", {})
self.default_criteria = aliases_config.get("defaults", {})
# set default properties
if default_properties is None:
self._default_props, self._default_prop_dict = None, None
else:
self._default_props, self._default_prop_dict = self._parse_properties(default_properties)
def __enter__(self):
"""Allows for use with the 'with' context manager"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Allows for use with the 'with' context manager"""
self.close()
def close(self):
"""Disconnects the connection."""
self.connection.disconnect()
def get_entries_in_system(
self,
elements,
inc_structure=False,
optional_data=None,
additional_criteria=None,
):
"""
Gets all entries in a chemical system, e.g. Li-Fe-O will return all
Li-O, Fe-O, Li-Fe, Li-Fe-O compounds.
.. note::
The get_entries_in_system and get_entries methods should be used
with care. In essence, all entries, GGA, GGA+U or otherwise,
are returned. The dataset is very heterogeneous and not
directly comparable. It is highly recommended that you perform
post-processing using pymatgen.entries.compatibility.
Args:
elements:
Sequence of element symbols, e.g. ['Li','Fe','O']
inc_structure:
Optional parameter as to whether to include a structure with
the ComputedEntry. Defaults to False. Use with care - including
structures with a large number of entries can potentially slow
down your code to a crawl.
optional_data:
Optional data to include with the entry. This allows the data
to be access via entry.data[key].
additional_criteria:
Added ability to provide additional criteria other than just
the chemical system.
Returns:
List of ComputedEntries in the chemical system.
"""
chemsys_list = []
for i in range(len(elements)):
for combi in itertools.combinations(elements, i + 1):
chemsys = "-".join(sorted(combi))
chemsys_list.append(chemsys)
crit = {"chemsys": {"$in": chemsys_list}}
if additional_criteria is not None:
crit.update(additional_criteria)
return self.get_entries(crit, inc_structure, optional_data=optional_data)
    def get_entries(self, criteria, inc_structure=False, optional_data=None):
        """
        Get ComputedEntries satisfying a particular criteria.

        .. note::

            The get_entries_in_system and get_entries methods should be used
            with care. In essence, all entries, GGA, GGA+U or otherwise,
            are returned.  The dataset is very heterogeneous and not
            directly comparable.  It is highly recommended that you perform
            post-processing using pymatgen.entries.compatibility.

        Args:
            criteria:
                Criteria obeying the same syntax as query.
            inc_structure:
                Optional parameter as to whether to include a structure with
                the ComputedEntry. Defaults to False. Use with care - including
                structures with a large number of entries can potentially slow
                down your code to a crawl.
            optional_data:
                Optional data to include with the entry. This allows the data
                to be access via entry.data[key].

        Returns:
            List of pymatgen.entries.ComputedEntries satisfying criteria.
        """
        all_entries = []
        # Copy so the caller's list is not mutated; "oxide_type" is always
        # fetched in addition to any caller-requested optional fields.
        optional_data = [] if not optional_data else list(optional_data)
        optional_data.append("oxide_type")
        # Fields needed to reconstruct a ComputedEntry from a task doc.
        fields = list(optional_data)
        fields.extend(
            [
                "task_id",
                "unit_cell_formula",
                "energy",
                "is_hubbard",
                "hubbards",
                "pseudo_potential.labels",
                "pseudo_potential.functional",
                "run_type",
                "input.is_lasph",
                "input.xc_override",
                "input.potcar_spec",
            ]
        )
        if inc_structure:
            fields.append("output.crystal")
        for c in self.query(fields, criteria):
            func = c["pseudo_potential.functional"]
            labels = c["pseudo_potential.labels"]
            # POTCAR symbols of the form "<functional> <label>".
            symbols = [f"{func} {label}" for label in labels]
            parameters = {
                "run_type": c["run_type"],
                "is_hubbard": c["is_hubbard"],
                "hubbards": c["hubbards"],
                "potcar_symbols": symbols,
                "is_lasph": c.get("input.is_lasph") or False,
                "potcar_spec": c.get("input.potcar_spec"),
                "xc_override": c.get("input.xc_override"),
            }
            # NOTE(review): this rebinds `optional_data` (list -> dict) on the
            # first iteration; iterating the dict afterwards still yields the
            # same keys, so behavior is unchanged, but the name reuse is
            # fragile.
            optional_data = {k: c[k] for k in optional_data}
            if inc_structure:
                struct = Structure.from_dict(c["output.crystal"])
                entry = ComputedStructureEntry(
                    struct,
                    c["energy"],
                    0.0,
                    parameters=parameters,
                    data=optional_data,
                    entry_id=c["task_id"],
                )
            else:
                entry = ComputedEntry(
                    Composition(c["unit_cell_formula"]),
                    c["energy"],
                    0.0,
                    parameters=parameters,
                    data=optional_data,
                    entry_id=c["task_id"],
                )
            all_entries.append(entry)
        return all_entries
    def _parse_criteria(self, criteria):
        """
        Internal method to perform mapping of criteria to proper mongo queries
        using aliases, as well as some useful sanitization. For example, string
        formulas such as "Fe2O3" are auto-converted to proper mongo queries of
        {"Fe":2, "O":3}.

        If 'criteria' is None, returns an empty dict. Putting this logic here
        simplifies callers and allows subclasses to insert something even
        when there are no criteria.
        """
        if criteria is None:
            return {}

        parsed_crit = {}
        # Apply default criteria first; explicitly given keys below win.
        for k, v in self.default_criteria.items():
            if k not in criteria:
                parsed_crit[self.aliases.get(k, k)] = v
        for key, crit in list(criteria.items()):
            if key in ["normalized_formula", "reduced_cell_formula"]:
                # Normalize any formula string to its reduced form.
                comp = Composition(crit)
                parsed_crit["pretty_formula"] = comp.reduced_formula
            elif key == "unit_cell_formula":
                # Expand a formula like "Fe2O3" into per-element amount
                # criteria plus derived nelements/pretty_formula fields.
                comp = Composition(crit)
                crit = comp.as_dict()
                for el, amt in crit.items():
                    parsed_crit[f"{self.aliases[key]}.{el}"] = amt
                parsed_crit["nelements"] = len(crit)
                parsed_crit["pretty_formula"] = comp.reduced_formula
            elif key in ["$or", "$and"]:
                # Recurse into boolean operators so nested criteria get
                # alias-mapped as well.
                parsed_crit[key] = [self._parse_criteria(m) for m in crit]
            else:
                parsed_crit[self.aliases.get(key, key)] = crit
        return parsed_crit
    def ensure_index(self, key, unique=False):
        """Wrapper for pymongo.Collection.ensure_index"""
        # NOTE(review): Collection.ensure_index is deprecated/removed in modern
        # pymongo (use create_index); kept as-is to preserve the public API.
        return self.collection.ensure_index(key, unique=unique)
    def query(self, properties=None, criteria=None, distinct_key=None, **kwargs):
        r"""
        Convenience method for database access. All properties and criteria
        can be specified using simplified names defined in Aliases. You can
        use the supported_properties property to get the list of supported
        properties.
        Results are returned as an iterator of dicts to ensure memory and cpu
        efficiency.
        Note that the dict returned have keys also in the simplified names
        form, not in the mongo format. For example, if you query for
        "analysis.e_above_hull", the returned result must be accessed as
        r['analysis.e_above_hull'] instead of mongo's
        r['analysis']['e_above_hull']. This is a *feature* of the query engine
        to allow simple access to deeply nested docs without having to resort
        to some recursion to go deep into the result.
        However, if you query for 'analysis', the entire 'analysis' key is
        returned as r['analysis'] and then the subkeys can be accessed in the
        usual form, i.e., r['analysis']['e_above_hull']
        :param properties: Properties to query for. Defaults to None which means all supported properties.
        :param criteria: Criteria to query for as a dict.
        :param distinct_key: If not None, the key for which to get distinct results
        :param \*\*kwargs: Other kwargs supported by pymongo.collection.find.
            Useful examples are limit, skip, sort, etc.
        :return: A QueryResults Iterable, which is somewhat like pymongo's
            cursor except that it performs mapping. In general, the dev does
            not need to concern himself with the form. It is sufficient to know
            that the results are in the form of an iterable of dicts.
        """
        if properties is not None:
            props, prop_dict = self._parse_properties(properties)
        else:
            # No projection: return full documents, unmapped.
            props, prop_dict = None, None
        crit = self._parse_criteria(criteria)
        # Hooks registered in self.query_post may mutate crit/props in place.
        if self.query_post:
            for func in self.query_post:
                func(crit, props)
        cur = self.collection.find(filter=crit, projection=props, **kwargs)
        if distinct_key is not None:
            # distinct() returns a plain list, hence the list-backed wrapper.
            cur = cur.distinct(distinct_key)
            return QueryListResults(prop_dict, cur, postprocess=self.result_post)
        return QueryResults(prop_dict, cur, postprocess=self.result_post)
    def _parse_properties(self, properties):
        """Make list of properties into 2 things:
        (1) dictionary of { 'aliased-field': 1, ... } for a mongodb query eg. {''}
        (2) dictionary, keyed by aliased field, for display
        `properties` may be a list of names or a dict of name -> projection
        value; aliases are applied when known.
        """
        props = {}
        # TODO: clean up prop_dict?
        prop_dict = OrderedDict()
        # We use a dict instead of list to provide for a richer syntax
        for p in properties:
            if p in self.aliases:
                if isinstance(properties, dict):
                    props[self.aliases[p]] = properties[p]
                else:
                    props[self.aliases[p]] = 1
                # Pre-split the mongo path for fast mapping in _mapped_result.
                prop_dict[p] = self.aliases[p].split(".")
            else:
                if isinstance(properties, dict):
                    props[p] = properties[p]
                else:
                    props[p] = 1
                prop_dict[p] = p.split(".")
        # including a lower-level key after a higher level key e.g.:
        # {'output': 1, 'output.crystal': 1} instead of
        # {'output.crystal': 1, 'output': 1}
        # causes mongo to skip the other higher level keys.
        # this is a (sketchy) workaround for that. Note this problem
        # doesn't appear often in python2 because the dictionary ordering
        # is more stable.
        props = OrderedDict(sorted(props.items(), reverse=True))
        return props, prop_dict
def query_one(self, *args, **kwargs):
"""Return first document from :meth:`query`, with same parameters."""
for r in self.query(*args, **kwargs):
return r
return None
    def get_structure_from_id(self, task_id, final_structure=True):
        """
        Returns a structure from the database given the task id.
        Args:
            task_id:
                The task_id to query for.
            final_structure:
                Whether to obtain the final or initial structure. Defaults to
                True.
        Raises:
            QueryError: if no document, or more than one document, matches
                the given task_id.
        """
        args = {"task_id": task_id}
        # Final structure lives under output.crystal, initial under input.crystal.
        field = "output.crystal" if final_structure else "input.crystal"
        results = tuple(self.query([field], args))
        if len(results) > 1:
            raise QueryError(f"More than one result found for task_id {task_id}!")
        if len(results) == 0:
            raise QueryError(f"No structure found for task_id {task_id}!")
        c = results[0]
        return Structure.from_dict(c[field])
def __repr__(self):
return f"QueryEngine: {self.host}:{self.port}/{self.database_name}"
    @staticmethod
    def from_config(config_file, use_admin=False):
        """
        Initialize a QueryEngine from a JSON config file generated using mgdb
        init.
        Args:
            config_file:
                Filename of config file.
            use_admin:
                If True, the admin user and password in the config file is
                used. Otherwise, the readonly_user and password is used.
                Defaults to False.
        Returns:
            QueryEngine
        Raises:
            KeyError: if the config file is missing a required field.
        """
        with open(config_file) as f:
            d = json.load(f)
        user = d["admin_user"] if use_admin else d["readonly_user"]
        password = d["admin_password"] if use_admin else d["readonly_password"]
        return QueryEngine(
            host=d["host"],
            port=d["port"],
            database=d["database"],
            user=user,
            password=password,
            collection=d["collection"],
            aliases_config=d.get("aliases_config", None),
        )
def __getitem__(self, item):
"""Support pymongo.Database syntax db['collection'] to access collections.
Simply delegate this to the pymongo.Database instance, so behavior is the same.
"""
return self.db[item]
    def get_dos_from_id(self, task_id):
        """
        Overrides the get_dos_from_id for the MIT gridfs format.
        Reads the DOS blob referenced by the last calculation's dos_fs_id and
        builds a CompleteDos; returns None when no dos_fs_id is recorded.
        """
        args = {"task_id": task_id}
        fields = ["calculations"]
        structure = self.get_structure_from_id(task_id)
        dosid = None
        # Take the dos_fs_id of the final calculation for the task.
        for r in self.query(fields, args):
            dosid = r["calculations"][-1]["dos_fs_id"]
        if dosid is not None:
            self._fs = gridfs.GridFS(self.db, "dos_fs")
            with self._fs.get(dosid) as dosfile:
                s = dosfile.read()
                try:
                    d = json.loads(s)
                except Exception:
                    # Older entries store zlib-compressed JSON.
                    s = zlib.decompress(s)
                    d = json.loads(s.decode("utf-8"))
                tdos = Dos.from_dict(d)
                pdoss = {}
                # Rebuild per-site, per-orbital projected DOS keyed by Spin.
                for i in range(len(d["pdos"])):
                    ados = d["pdos"][i]
                    all_ados = {}
                    for j in range(len(ados)):
                        orb = Orbital(j)
                        odos = ados[str(orb)]
                        all_ados[orb] = {Spin(int(k)): v for k, v in odos["densities"].items()}
                    pdoss[structure[i]] = all_ados
                return CompleteDos(structure, tdos, pdoss)
        return None
class QueryResults(Iterable):
"""
Iterable wrapper for results from QueryEngine.
Like pymongo's cursor, this object should generally not be instantiated,
but should be obtained from a queryengine.
It delegates many attributes to the underlying pymongo cursor, and should
support nearly all cursor like attributes such as count(), explain(),
hint(), etc. Please see pymongo cursor documentation for details.
"""
def __init__(self, prop_dict, result_cursor, postprocess=None):
"""Constructor.
:param prop_dict: Properties
:param result_cursor: Iterable returning records
:param postprocess: List of functions, each taking a record and
modifying it in-place, or None, or an empty list
"""
self._results = result_cursor
self._prop_dict = prop_dict
self._pproc = postprocess or [] # make empty values iterable
def _wrapper(self, func):
"""
This function wraps all callable objects returned by self.__getattr__.
If the result is a cursor, wrap it into a QueryResults object
so that you can invoke postprocess functions in self._pproc
"""
def wrapped(*args, **kwargs):
ret_val = func(*args, **kwargs)
if isinstance(ret_val, pymongo.cursor.Cursor):
ret_val = self.from_cursor(ret_val)
return ret_val
return wrapped
def __getattr__(self, attr):
"""
Override getattr to make QueryResults inherit all pymongo cursor
attributes.
Wrap any callable object with _wrapper to intercept cursors and wrap them
as a QueryResults object.
"""
if hasattr(self._results, attr):
ret_val = getattr(self._results, attr)
# wrap callable objects to convert returned cursors into QueryResults
if callable(ret_val):
return self._wrapper(ret_val)
return ret_val
raise AttributeError
def clone(self):
"""
Provide a clone of the QueryResults.
"""
return QueryResults(self._prop_dict, self._results.clone())
def from_cursor(self, cursor):
"""
Create a QueryResults object from a cursor object
"""
return QueryResults(self._prop_dict, cursor, self._pproc)
def __len__(self):
"""Return length as a `count()` on the MongoDB cursor."""
return len(list(self._results.clone()))
def __getitem__(self, i):
return self._mapped_result(self._results[i])
def __iter__(self):
return self._result_generator()
def _mapped_result(self, r):
"""Transform/map a result."""
# Apply result_post funcs for pulling out sandbox properties
for func in self._pproc:
func(r)
# If we haven't asked for specific properties, just return object
if not self._prop_dict:
result = r
else:
result = {}
# Map aliased keys back to original key
for k, v in self._prop_dict.items():
try:
result[k] = self._mapped_result_path(v[1:], data=r[v[0]])
except (IndexError, KeyError, ValueError):
result[k] = None
return result
@staticmethod
def _mapped_result_path(path, data=None):
if not path:
return data
if isinstance(data, list):
return [QueryResults._mapped_result_path(path, d) for d in data]
try:
return QueryResults._mapped_result_path(path[1:], data[path[0]])
except (IndexError, KeyError, ValueError):
return None
def _result_generator(self):
for r in self._results:
yield self._mapped_result(r)
class QueryListResults(QueryResults):
    """QueryResults backed by an in-memory list instead of a MongoDB cursor."""
    def clone(self):
        """Return a clone of the QueryListResults (shallow-copies the list)."""
        return QueryResults(self._prop_dict, self._results[:])
    def __len__(self):
        """Length of the backing iterable; defer to the cursor-based
        implementation when it has no __len__."""
        if not hasattr(self._results, "__len__"):
            return QueryResults.__len__(self)
        return len(self._results)
class QueryError(Exception):
    """
    Exception class for errors occurring during queries.
    """
    pass
| mit | b3794d8f67b0ab164a4a798ba1c8cd44 | 38.405755 | 107 | 0.573776 | 4.474269 | false | false | false | false |
ab77/netflix-proxy | auth/auth.py | 1 | 22236 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""auth.py: basic web front-end to auth/de-auth ipaddrs using iptables.
author: anton@belodedenko.me
"""
from subprocess import Popen, PIPE
from collections import defaultdict
import datetime, traceback, sys, socket
from settings import *
try:
import web
except ImportError:
sys.stderr.write('ERROR: Python module "web.py" not found, please run "pip install web.py".\n')
sys.exit(1)
try:
from passlib.hash import pbkdf2_sha256
except ImportError:
sys.stderr.write('ERROR: Python module "passlib" not found, please run "pip install passlib".\n')
sys.exit(1)
try:
from dns import (resolver, reversename)
except ImportError:
sys.stderr.write('ERROR: Python module "dnspython" not found, please run "pip install dnspython".\n')
sys.exit(1)
def run_ipt_cmd(ipaddr, op):
    """Insert ('I') or delete ('D') an iptables NAT ACCEPT rule for a single
    IPv4 address on the public interface, then persist the ruleset.
    Returns (returncode, stderr, stdout) of the shell pipeline.
    Raises ValueError on an invalid op or IPv4 address: both values are
    interpolated into a shell=True command line, so validate defensively even
    though callers pre-validate.
    """
    if op not in ('I', 'D'):
        raise ValueError('op must be "I" or "D", got %r' % (op,))
    if not web.net.validipaddr(ipaddr):
        raise ValueError('invalid IPv4 address: %r' % (ipaddr,))
    iface = get_iface()
    web.debug('DEBUG: public iface=%s ipaddr=%s' % (iface, ipaddr))
    ipt_cmd = 'iptables -t nat -%s PREROUTING -s %s/32 -i %s -j ACCEPT -v && iptables-save > /etc/iptables/rules.v4' % (op, ipaddr, iface)
    web.debug('DEBUG: ipaddr=%s, op=%s, ipt_cmd=%s' % (ipaddr, op, ipt_cmd))
    p = Popen(ipt_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
    output, err = p.communicate()
    rc = p.returncode
    return rc, err, output
def run_ipt6_cmd(ipaddr, op):
    """IPv6 twin of run_ipt_cmd: insert ('I') or delete ('D') an ip6tables NAT
    ACCEPT rule for a /128 address, then persist the ruleset.
    Returns (returncode, stderr, stdout).
    Raises ValueError on an invalid op or IPv6 address (both are interpolated
    into a shell=True command line).
    """
    if op not in ('I', 'D'):
        raise ValueError('op must be "I" or "D", got %r' % (op,))
    if not web.net.validip6addr(ipaddr):
        raise ValueError('invalid IPv6 address: %r' % (ipaddr,))
    iface = get_iface()
    web.debug('DEBUG: public iface=%s ipaddr=%s' % (iface, ipaddr))
    ipt_cmd = 'ip6tables -t nat -%s PREROUTING -s %s/128 -i %s -j ACCEPT -v && ip6tables-save > /etc/iptables/rules.v6' % (op, ipaddr, iface)
    web.debug('DEBUG: ipaddr=%s, op=%s, ipt_cmd=%s' % (ipaddr, op, ipt_cmd))
    p = Popen(ipt_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
    output, err = p.communicate()
    rc = p.returncode
    return rc, err, output
def get_client_public_ip():
    """Client's public IP: honor X-Forwarded-For (set by a fronting proxy),
    otherwise the peer address web.py recorded for the connection."""
    forwarded = web.ctx.env.get('HTTP_X_FORWARDED_FOR')
    return forwarded or web.ctx.get('ip', None)
def get_iface():
    """Return the name of the interface holding the default route, falling
    back to 'eth0' when the shell lookup fails."""
    cmd = "ip route | grep default | awk '{print $5}' | head -n 1"
    web.debug('DEBUG: getting public iface name cmd=%s' % cmd)
    p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
    output, err = p.communicate()
    rc = p.returncode
    web.debug('DEBUG: get_iface()=%s' % [rc, err, output])
    if rc == 0:
        iface = output.rstrip()
        # Popen yields bytes on Python 3 and str on Python 2; decode explicitly
        # instead of relying on a bare except to paper over the difference.
        if isinstance(iface, bytes):
            iface = iface.decode()
    else:
        iface = 'eth0'
        web.debug('WARNING: get_iface() failed, guessing iface=%s' % iface)
    return iface
def get_server_iface_ip():
    """Return the first IPv4 address bound to the default-route interface,
    falling back to the request's SERVER_NAME when the lookup fails.
    Bug fix: the original read output[0], which on Python 3 is the first BYTE
    (an int) of the command output, so both the decode attempt and its
    fallback raised AttributeError. Take the first output line instead.
    """
    iface = get_iface()
    cmd = """ip addr show dev %s | \
    grep inet | \
    grep -v inet6 | \
    awk '{print $2}' | \
    grep -Po '[0-9]{1,3}+\.[0-9]{1,3}+\.[0-9]{1,3}+\.[0-9]{1,3}+(?=\/)'""" % iface
    web.debug('DEBUG: getting ipaddr from iface=%s cmd=%s' % (iface, cmd))
    p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
    output, err = p.communicate()
    rc = p.returncode
    web.debug('DEBUG: get_server_iface_ip()=%s' % [rc, err, output])
    lines = output.splitlines() if rc == 0 else []
    if lines:
        ipaddr = lines[0].rstrip()
        if isinstance(ipaddr, bytes):
            ipaddr = ipaddr.decode()
    else:
        ipaddr = web.ctx.env['SERVER_NAME']
        web.debug('WARNING: get_server_iface_ip() failed, guessing ipaddr=%s' % ipaddr)
    return ipaddr
def get_server_external_ip():
    """Discover this host's public IPv4 by querying 'myip.opendns.com' through
    an OpenDNS resolver; fall back to the local interface address on any
    failure (no network, DNS blocked, etc.)."""
    try:
        reslvr = resolver.Resolver()
        reslvr.nameservers=[socket.gethostbyname('resolver1.opendns.com')]
        # NOTE(review): resolver.query is deprecated in dnspython>=2 (use
        # resolve); kept for compatibility with the pinned dependency.
        return str(reslvr.query('myip.opendns.com', 'A').rrset[0]).lower()
    except Exception as e:
        web.debug('DEBUG: get_server_external_ip(): %s' % repr(e))
        return get_server_iface_ip()
def get_server_public_fqdn():
    """Return the server's public hostname via a reverse (PTR) lookup of its
    external IP, falling back to the IP string itself.
    Bug fix: the original's except branch returned `ipaddr`, which at that
    point was either unbound (NameError if from_address raised) or the
    dnspython reverse-name object ('x.x.x.x.in-addr.arpa.'), not the IP.
    """
    external_ip = get_server_external_ip()
    try:
        reslvr = resolver.Resolver()
        rev_name = reversename.from_address(external_ip)
        return str(reslvr.query(rev_name, 'PTR')[0]).rstrip('.').lower()
    except Exception as e:
        web.debug('DEBUG: get_server_public_fqdn(): %s' % repr(e))
        return external_ip
def get_http_host():
    """Hostname portion of the HTTP Host header (port stripped); 'localhost'
    when the header is empty."""
    host_header = web.ctx.environ['HTTP_HOST']
    return host_header.split(':')[0] or 'localhost'
def is_redirected():
    """True when the request's Host header is neither the server's external IP
    nor its public FQDN (i.e. we were reached via a user-owned domain)."""
    ipaddr = get_server_external_ip()
    fqdn = get_server_public_fqdn()
    return get_http_host() not in (ipaddr, fqdn)
def csrf_token():
    """Return the session's CSRF token, minting a fresh one on first use."""
    if 'csrf_token' not in session:
        from uuid import uuid4
        session.csrf_token = uuid4().hex
    return session.csrf_token
def csrf_protected(f):
    """Decorator for POST handlers: reject the request with HTTP 400 unless
    the submitted csrf_token matches (and consumes) the one in the session.
    Fix: apply functools.wraps so the wrapped handler keeps its __name__ and
    docstring (useful for logging/introspection).
    """
    from functools import wraps
    @wraps(f)
    def decorated(*args, **kwargs):
        inp = web.input()
        # session.pop makes tokens single-use; a stale or replayed form fails.
        if not ('csrf_token' in inp and inp.csrf_token == session.pop('csrf_token', None)):
            raise web.HTTPError(
                "400 Bad request",
                {'content-type':'text/html'},
                """Cross-site request forgery (CSRF) attempt (or stale browser form). <a href="">Back to the form</a>.""")
        return f(*args, **kwargs)
    return decorated
def validate_user(username,password):
    """Return the user row for username/password when the credentials are
    valid and the account has not expired; otherwise None.
    Verification order: pbkdf2_sha256 hash first; on failure, a plaintext
    comparison (legacy rows created before hashing was introduced).
    """
    try:
        results = db.query("SELECT * FROM users WHERE username=$username ORDER BY ROWID ASC LIMIT 1",
                           vars={'username': username})
        user = results[0]  # IndexError -> unknown username, handled below
        try:
            valid_hash = pbkdf2_sha256.verify(password, user.password)
        except ValueError as e:
            # Stored password is not a recognizable hash (legacy plaintext row).
            web.debug('%s user=%s' % (str(e), user.username))
            valid_hash = None
            pass
        date_now = datetime.datetime.now()
        # Account is valid through the end-of-day boundary of `expires`.
        date_expires = datetime.datetime.combine(user.expires, datetime.time.min)
        if date_now <= date_expires:
            if valid_hash:
                web.debug('login_success_hash: user=%s' % user.username)
                return user
            else:
                web.debug('login_failed_hash: incorrect password user=%s, fallback to plaintext' % user.username)
                # NOTE(review): plaintext fallback uses a non-constant-time
                # compare; acceptable only for migrating legacy rows.
                if password == user.password:
                    web.debug('login_success_plaintext: user=%s' % user.username)
                    return user
                else:
                    web.debug('login_failed_plaintext: incorrect password user=%s' % user.username)
                    return None
        else:
            web.debug('login_failed: expired account user=%s' % user.username)
            return None
    except IndexError as e:
        web.debug('login_failed: not found user=%s' % username)
        return None
def get_ipaddrs():
    """Return the authorized IP list for the logged-in user and refresh the
    session's auth_ip_count / already_authorized bookkeeping."""
    rows = db.query('SELECT * FROM ipaddrs WHERE user_id=$user_id',
                    vars={'user_id': session.user['ID']})
    ipaddrs = [row['ipaddr'] for row in rows]
    session.auth_ip_count = len(ipaddrs)
    session.already_authorized = get_client_public_ip() in ipaddrs
    return ipaddrs
def get_form(name='add'):
    """Build the 'add' or 'delete' IP-authorization form for the current user.
    Admins (privilege == 1) get a free-text (or hidden) input pre-filled with
    the client IP and bypass per-user bookkeeping; regular users get a
    dropdown bound to their stored addresses. A regular user with no stored
    addresses gets an empty delete form (the template renders a hint instead).
    """
    if FORM_INPUTS_HIDDEN:
        ipaddr_input = web.form.Hidden('ipaddr')
    else:
        ipaddr_input = web.form.Textbox('ipaddr')
    if session.user['privilege'] == 1:
        if name == 'add':
            frm = web.form.Form(ipaddr_input,
                                web.form.Button('Add', type='submit', value='submit', id='submit'))
        if name == 'delete':
            frm = web.form.Form(ipaddr_input,
                                web.form.Button('Delete', type='submit', value='submit', id='submit'))
        frm.ipaddr.value = get_client_public_ip()
        # Admins are exempt from the per-user IP quota/bookkeeping.
        session.auth_ip_count = 0
        session.already_authorized = False
        frm.title = 'admin'
    else:
        ipaddrs = get_ipaddrs()
        if name == 'add':
            frm = web.form.Form(web.form.Dropdown('ipaddr', []),
                                web.form.Button('Add', type='submit', value='add', id='add'))
            frm.ipaddr.args = [get_client_public_ip()]
            frm.title = 'add'
        if name == 'delete':
            frm = web.form.Form(web.form.Dropdown('ipaddr', []),
                                web.form.Button('Delete', type='submit', value='delete', id='delete'))
            frm.ipaddr.args = get_ipaddrs()
            frm.title = 'delete'
            if not ipaddrs:
                frm = web.form.Form()
                frm.title = 'delete'
    return frm
def get_redirect_page():
    """Build the pseudo-form describing the post-authorization redirect."""
    page = web.form.Form()
    page.title = 'Redirect to Google'
    page.redirect_url = 'http://google.com/'
    return page
# Set a custom 404 not found error message
def notfound():
    """Custom 404 handler: render the project's not-found template."""
    web.ctx.status = '404 Not Found'
    body = str(render.__404())
    return web.notfound(body)
# Set a custom internal error message
def internalerror():
    """Custom 500 handler: render the project's internal-error template."""
    web.ctx.status = '500 Internal Server Error'
    body = str(render.__500())
    return web.internalerror(body)
def flash(group, message):
    """Queue a flash message under `group` for display on the next render."""
    session.flash[group].append(message)
def flash_messages(group=None):
    """Return pending flash messages — all groups, or one group's list —
    moving them from the session into the request context on first access."""
    if not hasattr(web.ctx, 'flash'):
        # First call this request: snapshot and reset the session queue.
        web.ctx.flash = session.flash
        session.flash = defaultdict(list)
    if not group:
        return web.ctx.flash
    return web.ctx.flash.get(group, [])
# --- module-level application wiring ---
web.config.debug = DEBUG
web.config.session_parameters['cookie_name'] = 'netflix-proxy-admin'
# URL routing: order matters; the catch-all '.*' must stay last.
urls = (
    r'/login', 'Login',
    r'/logout', 'Logout',
    r'/add', 'Add',
    r'/autoadd', 'AutoAdd',
    r'/delete', 'Delete',
    r'/ddns', 'DDNSIndex',
    r'/ddns/add', 'DDNSAdd',
    r'/ddns/delete', 'DDNSDelete',
    r'.*', 'Index'
)
app = web.application(urls, globals())
db = web.database(dbn='sqlite', db=SQLITE_DB)
# Setup the application's error handlers
app.internalerror = internalerror
app.notfound = notfound
# Allow session to be reloadable in development mode.
if web.config.get('_session') is None:
    session = web.session.Session(app, web.session.DiskStore('sessions'),
                                  initializer={'flash': defaultdict(list)})
    web.config._session = session
else:
    session = web.config._session
render = web.template.render('templates/',
                             base='base',
                             cache=False)
# Helpers exposed to all templates.
t_globals = web.template.Template.globals
t_globals['datestr'] = web.datestr
# NOTE(review): this concatenates VERSION with itself; possibly meant to pair
# VERSION with a build identifier — confirm against the templates.
t_globals['app_version'] = lambda: VERSION + ' - ' + VERSION
t_globals['flash_messages'] = flash_messages
t_globals['render'] = lambda t, *args: render._template(t)(*args)
t_globals['csrf_token'] = csrf_token
t_globals['context'] = session
class Index:
    """Catch-all route: in AUTO_AUTH mode, authorize the client IP on sight;
    otherwise show the logged-in user's authorized-IP list."""
    def GET(self):
        ipaddr = get_client_public_ip()
        is_ipv4 = web.net.validipaddr(ipaddr)
        is_ipv6 = web.net.validip6addr(ipaddr)
        if AUTO_AUTH:
            if ipaddr:
                web.debug('AUTO_AUTH: %s' % ipaddr)
                # NOTE(review): if ipaddr is neither valid IPv4 nor IPv6,
                # `result` is unbound below — confirm upstream guarantees.
                if is_ipv4: result = run_ipt_cmd(ipaddr, 'I')
                if is_ipv6: result = run_ipt6_cmd(ipaddr, 'I')
                web.debug('iptables_update: %s' % [result])
                if result[0] == 0:
                    flash('success', 'automatically authorized %s' % ipaddr)
                    return render.redirect(get_redirect_page())
                else:
                    flash('error', 'unable to automatically authorize %s' % ipaddr)
                    raise web.seeother('/add')
            else:
                flash('error', 'something went wrong, please login to authorize')
                raise web.seeother('/')
        else:
            try:
                # session.user raises on a fresh/anonymous session -> login.
                if session.user:
                    ipaddrs = get_ipaddrs()
                    if len(ipaddrs) == 0:
                        return web.seeother('/add')
                    return render.index(ipaddrs)
            except Exception as e:
                web.debug(traceback.print_exc())
                raise web.seeother('/login')
class Login:
    """Login form rendering (GET) and credential validation (POST)."""
    # Shared form definition: alpha-numeric username, printable password,
    # both bounded by the configured maximum lengths.
    loginform = web.form.Form(web.form.Textbox('username',
                                               web.form.notnull,
                                               web.form.regexp('^[a-zA-Z0-9]+$', 'Alpha-numeric characters only (maximum %s)' % USERNAME_MAX_LEN),
                                               web.form.Validator('Not more than %s characters.' % USERNAME_MAX_LEN, lambda x: len(x)<USERNAME_MAX_LEN)),
                              web.form.Password('password',
                                                web.form.notnull,
                                                web.form.regexp('[ -~]', 'Printable characters only (maximum %s)' % PASSWORD_MAX_LEN),
                                                web.form.Validator('Not more than %s characters.' % PASSWORD_MAX_LEN, lambda x: len(x)<PASSWORD_MAX_LEN)))
    def get_login_form(self):
        """Fresh titled instance of the login form."""
        login_form = Login.loginform()
        login_form.title = 'login'
        return login_form
    def GET(self):
        ipaddr = get_client_public_ip()
        web.config.session_parameters['cookie_domain'] = web.ctx.environ['HTTP_HOST']
        try:
            # Already logged in -> straight to the index.
            if session.user:
                raise web.seeother('/')
            else:
                flash('success', 'welcome, please login to authorize %s' % ipaddr)
                return render.login(self.get_login_form())
        except Exception as e:
            web.debug(traceback.print_exc())
            flash('success', 'welcome, please login to authorize %s' % ipaddr)
            return render.login(self.get_login_form())
    @csrf_protected # Verify this is not CSRF, or fail
    def POST(self):
        login_form = self.get_login_form()
        if not login_form.validates():
            flash('error', 'form validation failed')
            return render.login(login_form)
        username = login_form['username'].value
        password = login_form['password'].value
        user = validate_user(username,password)
        if user:
            session.user = user
            web.debug(web.config.session_parameters)
            flash('success', """you are now logged in, "Add" to authorize %s""" % get_client_public_ip())
            raise web.seeother('/')
        else:
            session.user = None
            flash('error', 'login failed for user %s' % username)
            raise web.seeother('/login')
        # NOTE(review): unreachable — both branches above raise.
        return render.login(login_form)
class Logout:
    """Clear the session state and bounce the client to the login page."""
    def GET(self):
        for attr in ('user', 'already_authorized', 'auth_ip_count'):
            setattr(session, attr, None)
        session.kill()
        raise web.seeother('/login')
class AutoAdd:
    """Script-friendly endpoint: authorize an IP via query-string credentials,
    e.g. /autoadd?username=u&password=p[&ip=x.x.x.x]. Returns 'OK' on success
    or an 'Error: ...' string."""
    def GET(self):
        try:
            params = web.input(ip=get_client_public_ip())
            user = validate_user(params.username, params.password)
            if user is None: return 'Error: login'
            ipadr = params.ip
            is_ipv4 = web.net.validipaddr(ipadr)
            is_ipv6 = web.net.validip6addr(ipadr)
            if is_ipv4 == False and is_ipv6 == False:
                return 'Error: IP not in right form'
            userid = user.ID
            results = db.query(
                'SELECT * FROM ipaddrs WHERE user_id=$user_id',
                vars={
                    'user_id': userid
                }
            )
            ipaddrs = [ip['ipaddr'] for ip in results]
            if ipadr in ipaddrs: return 'Error: already authorized.'
            db_result = db.insert('ipaddrs', user_id=userid, ipaddr=ipadr)
            web.debug('db_update: %s' % [db_result])
            if is_ipv4: result = run_ipt_cmd(ipadr, 'I')
            if is_ipv6: result = run_ipt6_cmd(ipadr, 'I')
            web.debug('iptables_update: %s' % [result])
            return 'OK'
        except Exception as e:
            web.debug(traceback.print_exc())
            # Bug fix: previously returned user.ID here, which leaked internal
            # state and raised NameError when validation itself failed.
            return 'Error'
class Add:
    """Authorize an IP address for the logged-in user (form GET + POST)."""
    def GET(self):
        try:
            if session.user:
                return render.form(get_form())
            else:
                raise web.seeother('/login')
        except Exception as e:
            web.debug(traceback.print_exc())
            raise web.seeother('/login')
    @csrf_protected # Verify this is not CSRF, or fail
    def POST(self):
        auth_form = get_form()
        if not auth_form.validates():
            flash('error', 'form validation failed')
            return render.form(get_form())
        is_ipv4 = web.net.validipaddr(auth_form['ipaddr'].value)
        is_ipv6 = web.net.validip6addr(auth_form['ipaddr'].value)
        if is_ipv4 == False and is_ipv6 == False:
            flash('error', '%s is not a valid ipv4/6 address' % auth_form['ipaddr'].value)
            return render.form(get_form())
        if session.already_authorized:
            flash('error', '%s is already authorized' % auth_form['ipaddr'].value)
            return render.form(get_form())
        # Quota check; admins (privilege == 1) are exempt.
        if session.auth_ip_count <= MAX_AUTH_IP_COUNT - 1 or session.user['privilege'] == 1:
            web.debug('Authorising ipaddr=%s' % auth_form['ipaddr'].value)
            web.header('Content-Type', 'text/html')
            # Update iptables first; only record in the DB if that succeeded.
            if is_ipv4: result = run_ipt_cmd(auth_form['ipaddr'].value, 'I')
            if is_ipv6: result = run_ipt6_cmd(auth_form['ipaddr'].value, 'I')
            web.debug('iptables_update: %s' % [result])
            if result[0] == 0:
                db_result = db.insert('ipaddrs',
                                      user_id=session.user['ID'],
                                      ipaddr=auth_form['ipaddr'].value)
                web.debug('db.insert: %s' % db_result)
                session.auth_ip_count += 1
                flash('success', 'succesfully authorized %s' % auth_form['ipaddr'].value)
                if is_redirected():
                    web.debug('is_redirected()=%s' % is_redirected())
                    return render.redirect(get_redirect_page())
                else:
                    return render.form(get_form())
            else:
                flash('error', 'error authorizing %s' % auth_form['ipaddr'].value)
                return render.form(get_form())
        else:
            flash('error', 'exceeded %s maximim authorized IPs' % MAX_AUTH_IP_COUNT)
            return render.form(get_form())
class Delete:
    """De-authorize one of the logged-in user's IP addresses."""
    def GET(self):
        try:
            if session.user:
                frm = get_form(name='delete')
                if not frm.inputs:flash('success', """all IP addresses de-authorized, please <a href="/add">authorize</a> one""")
                return render.form(frm)
            else:
                raise web.seeother('/login')
        except Exception as e:
            web.debug(traceback.print_exc())
            raise web.seeother('/login')
    @csrf_protected # Verify this is not CSRF, or fail
    def POST(self):
        # NOTE(review): validates against the 'add' form — appears to work
        # because both forms share the 'ipaddr' field; confirm intentional.
        auth_form = get_form()
        if not auth_form.validates():
            flash('error', 'form validation failed')
            return render.form(get_form(name='delete'))
        is_ipv4 = web.net.validipaddr(auth_form['ipaddr'].value)
        is_ipv6 = web.net.validip6addr(auth_form['ipaddr'].value)
        if is_ipv4 == False and is_ipv6 == False:
            flash('error', '%s is not a valid ipv4/6 address' % auth_form['ipaddr'].value)
            return render.form(get_form(name='delete'))
        web.debug('De-authorising ipaddr=%s' % auth_form['ipaddr'].value)
        web.header('Content-Type', 'text/html')
        # The interpolated WHERE is only safe because ipaddr was validated as
        # a literal IPv4/IPv6 address above; prefer parameterized vars.
        db_result = db.delete('ipaddrs', where="user_id=%s AND ipaddr='%s'" % (session.user['ID'],
                                                                              auth_form['ipaddr'].value))
        web.debug('db.delete: %s' % db_result)
        # Attempt at least one iptables delete even if no DB rows matched.
        if db_result == 0: db_result = 1
        for i in range(0, db_result):
            if is_ipv4: result = run_ipt_cmd(auth_form['ipaddr'].value, 'D')
            if is_ipv6: result = run_ipt6_cmd(auth_form['ipaddr'].value, 'D')
            web.debug('iptables_update: %s' % [result])
        session.auth_ip_count -= 1
        flash('success', '%s de-authorized' % auth_form['ipaddr'].value)
        return render.form(get_form(name='delete'))
class DDNSIndex:
    """List the logged-in user's dynamic-DNS domains and render the add form."""
    ddns_add_form = web.form.Form(web.form.Textbox('domain', web.form.notnull))
    def GET(self):
        try:
            if 'user' in session:
                domains = db.query('SELECT * FROM DDNS WHERE user_id=$user_id',
                                   vars={'user_id': session.user['ID']})
                return render.ddns(domains, DDNSIndex.ddns_add_form())
            # Bug fix: web.seeother(...) was not raised, so unauthenticated
            # users got a blank 200 response instead of a redirect.
            raise web.seeother('/login')
        except web.HTTPError:
            # Let redirects pass through the broad handler below.
            raise
        except Exception:
            flash('error', 'Please update the database schema. See README for details.')
            web.debug(traceback.print_exc())
            raise web.seeother('/')
class DDNSAdd:
    """Attach a DDNS domain to the logged-in user's account."""
    @csrf_protected # Verify this is not CSRF, or fail
    def POST(self):
        form = DDNSIndex.ddns_add_form()
        if not form.validates():
            flash('error', 'form validation failed')
            raise web.seeother('/ddns')
        web.debug('Adding domain=%s' % form['domain'].value)
        web.header('Content-Type', 'text/html')
        # NOTE(review): the domain is stored with only a notnull check;
        # consider validating it as a hostname before insertion.
        db_result = db.insert('DDNS',
                              user_id=session.user['ID'],
                              domain=form['domain'].value)
        web.debug('db.insert: %s' % db_result)
        flash('success', 'succesfully added %s' % form['domain'].value)
        return web.seeother('/ddns')
class DDNSDelete:
    """Remove a DDNS domain owned by the logged-in user."""
    @csrf_protected # Verify this is not CSRF, or fail
    def POST(self):
        form = DDNSIndex.ddns_add_form()
        if not form.validates():
            flash('error', 'form validation failed')
            raise web.seeother('/ddns')
        web.debug('Removing domain=%s' % form['domain'].value)
        web.header('Content-Type', 'text/html')
        # Security fix: the user-supplied domain was previously interpolated
        # directly into the WHERE clause (SQL injection). Use parameterized
        # vars instead.
        db_result = db.delete('DDNS',
                              where='user_id=$user_id AND domain=$domain',
                              vars={'user_id': session.user['ID'],
                                    'domain': form['domain'].value})
        web.debug('db.delete: %s' % db_result)
        flash('success', '%s removed' % form['domain'].value)
        return web.seeother('/ddns')
# Adds a wsgi callable for uwsgi
application = app.wsgifunc()
if __name__ == "__main__":
    # Run web.py's built-in development server when invoked directly.
    app.run()
| mit | 775479d6a4f85182793cfb5269f39313 | 35.214984 | 154 | 0.546951 | 3.777136 | false | false | false | false |
commaai/openpilot | common/ffi_wrapper.py | 1 | 1319 | import os
import sys
import fcntl
import hashlib
import platform
from cffi import FFI
def suffix():
  """Shared-library filename extension for the current OS."""
  return ".dylib" if platform.system() == "Darwin" else ".so"
def ffi_wrap(name, c_code, c_header, tmpdir="/tmp/ccache", cflags="", libraries=None):
  """Compile (or reuse from cache) a cffi module for c_code/c_header and
  return its (ffi, lib) pair.
  The cache key embeds a SHA-1 of the source so changed code recompiles; an
  exclusive flock on tmpdir serializes concurrent builders of the same cache.
  """
  if libraries is None:
    libraries = []
  cache = name + "_" + hashlib.sha1(c_code.encode('utf-8')).hexdigest()
  try:
    os.mkdir(tmpdir)
  except OSError:
    # Directory already exists (or is unwritable; os.open below surfaces that).
    pass
  fd = os.open(tmpdir, 0)
  fcntl.flock(fd, fcntl.LOCK_EX)
  try:
    sys.path.append(tmpdir)
    try:
      mod = __import__(cache)
    except Exception:
      # Any import failure (missing or stale build artifact) -> recompile.
      print(f"cache miss {cache}")
      compile_code(cache, c_code, c_header, tmpdir, cflags, libraries)
      mod = __import__(cache)
  finally:
    # Closing the descriptor releases the flock.
    os.close(fd)
  return mod.ffi, mod.lib
def compile_code(name, c_code, c_header, directory, cflags="", libraries=None):
  """Build a cffi extension module `name` (C++ source) into `directory`.
  NOTE: mutates os.environ (OPT/CFLAGS) for the distutils build; the change
  persists in this process after the call.
  """
  if libraries is None:
    libraries = []
  ffibuilder = FFI()
  ffibuilder.set_source(name, c_code, source_extension='.cpp', libraries=libraries)
  ffibuilder.cdef(c_header)
  os.environ['OPT'] = "-fwrapv -O2 -DNDEBUG -std=c++1z"
  os.environ['CFLAGS'] = cflags
  ffibuilder.compile(verbose=True, debug=False, tmpdir=directory)
def wrap_compiled(name, directory):
  """Import an already-compiled cffi module from `directory` and return its
  (ffi, lib) pair. Appends `directory` to sys.path as a side effect."""
  sys.path.append(directory)
  module = __import__(name)
  return module.ffi, module.lib
| mit | 9a373cb3cee593c5d38b7fef469c0ef8 | 22.981818 | 86 | 0.658074 | 3.25679 | false | false | false | false |
commaai/openpilot | selfdrive/car/honda/hondacan.py | 1 | 6243 | from common.conversions import Conversions as CV
from selfdrive.car.honda.values import HondaFlags, HONDA_BOSCH, HONDA_BOSCH_RADARLESS, CAR, CarControllerParams
# CAN bus layout with relay
# 0 = ACC-CAN - radar side
# 1 = F-CAN B - powertrain
# 2 = ACC-CAN - camera side
# 3 = F-CAN A - OBDII port
def get_pt_bus(car_fingerprint):
  """Powertrain bus: 1 (F-CAN B) on radar-equipped Bosch cars, else 0."""
  if car_fingerprint in (HONDA_BOSCH - HONDA_BOSCH_RADARLESS):
    return 1
  return 0
def get_lkas_cmd_bus(car_fingerprint, radar_disabled=False):
  """Bus for steering commands: directly the powertrain bus when the radar is
  disabled; otherwise bus 0, from which the radar forwards them onward."""
  return get_pt_bus(car_fingerprint) if radar_disabled else 0
def create_brake_command(packer, apply_brake, pump_on, pcm_override, pcm_cancel_cmd, fcw, car_fingerprint, stock_brake):
  """Build the BRAKE_COMMAND message on the powertrain bus; brake lights and
  the brake request mirror whether any brake is being applied."""
  # TODO: do we lose pressure if we keep pump off for long?
  brakelights = apply_brake > 0
  brake_rq = apply_brake > 0
  pcm_fault_cmd = False
  values = {
    "COMPUTER_BRAKE": apply_brake,
    "BRAKE_PUMP_REQUEST": pump_on,
    "CRUISE_OVERRIDE": pcm_override,
    "CRUISE_FAULT_CMD": pcm_fault_cmd,
    "CRUISE_CANCEL_CMD": pcm_cancel_cmd,
    "COMPUTER_BRAKE_REQUEST": brake_rq,
    "SET_ME_1": 1,
    "BRAKE_LIGHTS": brakelights,
    "CHIME": stock_brake["CHIME"] if fcw else 0,  # send the chime for stock fcw
    "FCW": fcw << 1,  # TODO: Why are there two bits for fcw?
    "AEB_REQ_1": 0,
    "AEB_REQ_2": 0,
    "AEB_STATUS": 0,
  }
  bus = get_pt_bus(car_fingerprint)
  return packer.make_can_msg("BRAKE_COMMAND", bus, values)
def create_acc_commands(packer, enabled, active, accel, gas, stopping, car_fingerprint):
  """Build the pair of Bosch ACC messages (ACC_CONTROL, ACC_CONTROL_ON) for
  openpilot longitudinal control; returns a list of two CAN messages."""
  commands = []
  bus = get_pt_bus(car_fingerprint)
  min_gas_accel = CarControllerParams.BOSCH_GAS_LOOKUP_BP[0]
  control_on = 5 if enabled else 0
  # Below the gas lookup's first breakpoint the car brakes instead of gassing.
  gas_command = gas if active and accel > min_gas_accel else -30000
  accel_command = accel if active else 0
  braking = 1 if active and accel < min_gas_accel else 0
  standstill = 1 if active and stopping else 0
  standstill_release = 1 if active and not stopping else 0
  acc_control_values = {
    # setting CONTROL_ON causes car to set POWERTRAIN_DATA->ACC_STATUS = 1
    "CONTROL_ON": control_on,
    "GAS_COMMAND": gas_command,  # used for gas
    "ACCEL_COMMAND": accel_command,  # used for brakes
    "BRAKE_LIGHTS": braking,
    "BRAKE_REQUEST": braking,
    "STANDSTILL": standstill,
    "STANDSTILL_RELEASE": standstill_release,
  }
  commands.append(packer.make_can_msg("ACC_CONTROL", bus, acc_control_values))
  acc_control_on_values = {
    "SET_TO_3": 0x03,
    "CONTROL_ON": enabled,
    "SET_TO_FF": 0xff,
    "SET_TO_75": 0x75,
    "SET_TO_30": 0x30,
  }
  commands.append(packer.make_can_msg("ACC_CONTROL_ON", bus, acc_control_on_values))
  return commands
def create_steering_control(packer, apply_steer, lkas_active, car_fingerprint, radar_disabled):
  """Build the STEERING_CONTROL message; torque is zeroed when LKAS is off."""
  bus = get_lkas_cmd_bus(car_fingerprint, radar_disabled)
  torque = apply_steer if lkas_active else 0
  values = {
    "STEER_TORQUE": torque,
    "STEER_TORQUE_REQUEST": lkas_active,
  }
  return packer.make_can_msg("STEERING_CONTROL", bus, values)
def create_bosch_supplemental_1(packer, car_fingerprint):
  """Static BOSCH_SUPPLEMENTAL_1 message (constant, non-active payload)."""
  bus = get_lkas_cmd_bus(car_fingerprint)
  payload = {
    "SET_ME_X04": 0x04,
    "SET_ME_X80": 0x80,
    "SET_ME_X10": 0x10,
  }
  return packer.make_can_msg("BOSCH_SUPPLEMENTAL_1", bus, payload)
def create_ui_commands(packer, CP, enabled, pcm_speed, hud, is_metric, acc_hud, lkas_hud):
commands = []
bus_pt = get_pt_bus(CP.carFingerprint)
radar_disabled = CP.carFingerprint in HONDA_BOSCH and CP.openpilotLongitudinalControl
bus_lkas = get_lkas_cmd_bus(CP.carFingerprint, radar_disabled)
if CP.openpilotLongitudinalControl:
acc_hud_values = {
'CRUISE_SPEED': hud.v_cruise,
'ENABLE_MINI_CAR': 1,
'HUD_DISTANCE': 0, # max distance setting on display
'IMPERIAL_UNIT': int(not is_metric),
'HUD_LEAD': 2 if enabled and hud.lead_visible else 1 if enabled else 0,
'SET_ME_X01_2': 1,
}
if CP.carFingerprint in HONDA_BOSCH:
acc_hud_values['ACC_ON'] = int(enabled)
acc_hud_values['FCM_OFF'] = 1
acc_hud_values['FCM_OFF_2'] = 1
else:
acc_hud_values['PCM_SPEED'] = pcm_speed * CV.MS_TO_KPH
acc_hud_values['PCM_GAS'] = hud.pcm_accel
acc_hud_values['SET_ME_X01'] = 1
acc_hud_values['FCM_OFF'] = acc_hud['FCM_OFF']
acc_hud_values['FCM_OFF_2'] = acc_hud['FCM_OFF_2']
acc_hud_values['FCM_PROBLEM'] = acc_hud['FCM_PROBLEM']
acc_hud_values['ICONS'] = acc_hud['ICONS']
commands.append(packer.make_can_msg("ACC_HUD", bus_pt, acc_hud_values))
lkas_hud_values = {
'SET_ME_X41': 0x41,
'STEERING_REQUIRED': hud.steer_required,
'SOLID_LANES': hud.lanes_visible,
'BEEP': 0,
}
if CP.carFingerprint in HONDA_BOSCH_RADARLESS:
lkas_hud_values['LANE_LINES'] = 3
lkas_hud_values['DASHED_LANES'] = hud.lanes_visible
# car likely needs to see LKAS_PROBLEM fall within a specific time frame, so forward from camera
lkas_hud_values['LKAS_PROBLEM'] = lkas_hud['LKAS_PROBLEM']
if not (CP.flags & HondaFlags.BOSCH_EXT_HUD):
lkas_hud_values['SET_ME_X48'] = 0x48
if CP.flags & HondaFlags.BOSCH_EXT_HUD and not CP.openpilotLongitudinalControl:
commands.append(packer.make_can_msg('LKAS_HUD_A', bus_lkas, lkas_hud_values))
commands.append(packer.make_can_msg('LKAS_HUD_B', bus_lkas, lkas_hud_values))
else:
commands.append(packer.make_can_msg('LKAS_HUD', bus_lkas, lkas_hud_values))
if radar_disabled and CP.carFingerprint in HONDA_BOSCH:
radar_hud_values = {
'CMBS_OFF': 0x01,
'SET_TO_1': 0x01,
}
commands.append(packer.make_can_msg('RADAR_HUD', bus_pt, radar_hud_values))
if CP.carFingerprint == CAR.CIVIC_BOSCH:
commands.append(packer.make_can_msg("LEGACY_BRAKE_COMMAND", bus_pt, {}))
return commands
def spam_buttons_command(packer, button_val, car_fingerprint):
values = {
'CRUISE_BUTTONS': button_val,
'CRUISE_SETTING': 0,
}
# send buttons to camera on radarless cars
bus = 2 if car_fingerprint in HONDA_BOSCH_RADARLESS else get_pt_bus(car_fingerprint)
return packer.make_can_msg("SCM_BUTTONS", bus, values)
| mit | 667a14f0f522ec588a4a61a52f8d1d1e | 34.471591 | 120 | 0.677078 | 2.674807 | false | false | false | false |
wbond/oscrypto | oscrypto/_win/_secur32_ctypes.py | 8 | 5305 | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import sys
import ctypes
from ctypes import windll, wintypes, POINTER, c_void_p, c_uint, Structure
from ctypes.wintypes import DWORD, ULONG
from .._ffi import FFIEngineError
from .._types import str_cls
from ..errors import LibraryNotFoundError
__all__ = [
'get_error',
'secur32',
]
try:
secur32 = windll.secur32
except (OSError) as e:
if str_cls(e).find('The specified module could not be found') != -1:
raise LibraryNotFoundError('secur32.dll could not be found')
raise
HCERTSTORE = wintypes.HANDLE
ALG_ID = c_uint
if sys.maxsize > 2 ** 32:
ULONG_PTR = ctypes.c_uint64
else:
ULONG_PTR = ctypes.c_ulong
SEC_GET_KEY_FN = c_void_p
LUID = c_void_p
SECURITY_STATUS = ctypes.c_ulong
SEC_WCHAR = wintypes.WCHAR
try:
class SecHandle(Structure):
_fields_ = [
('dwLower', ULONG_PTR),
('dwUpper', ULONG_PTR),
]
CredHandle = SecHandle
CtxtHandle = SecHandle
class SCHANNEL_CRED(Structure): # noqa
_fields_ = [
('dwVersion', DWORD),
('cCreds', DWORD),
('paCred', c_void_p),
('hRootStore', HCERTSTORE),
('cMappers', DWORD),
('aphMappers', POINTER(c_void_p)),
('cSupportedAlgs', DWORD),
('palgSupportedAlgs', POINTER(ALG_ID)),
('grbitEnabledProtocols', DWORD),
('dwMinimumCipherStrength', DWORD),
('dwMaximumCipherStrength', DWORD),
('dwSessionLifespan', DWORD),
('dwFlags', DWORD),
('dwCredFormat', DWORD),
]
class TimeStamp(Structure):
_fields_ = [
('dwLowDateTime', DWORD),
('dwHighDateTime', DWORD),
]
class SecBuffer(Structure):
_fields_ = [
('cbBuffer', ULONG),
('BufferType', ULONG),
('pvBuffer', POINTER(ctypes.c_byte)),
]
PSecBuffer = POINTER(SecBuffer)
class SecBufferDesc(Structure):
_fields_ = [
('ulVersion', ULONG),
('cBuffers', ULONG),
('pBuffers', PSecBuffer),
]
class SecPkgContext_StreamSizes(Structure): # noqa
_fields_ = [
('cbHeader', ULONG),
('cbTrailer', ULONG),
('cbMaximumMessage', ULONG),
('cBuffers', ULONG),
('cbBlockSize', ULONG),
]
class SecPkgContext_ConnectionInfo(Structure): # noqa
_fields_ = [
('dwProtocol', DWORD),
('aiCipher', ALG_ID),
('dwCipherStrength', DWORD),
('aiHash', ALG_ID),
('dwHashStrength', DWORD),
('aiExch', ALG_ID),
('dwExchStrength', DWORD),
]
secur32.AcquireCredentialsHandleW.argtypes = [
POINTER(SEC_WCHAR),
POINTER(SEC_WCHAR),
ULONG,
POINTER(LUID),
c_void_p,
SEC_GET_KEY_FN,
c_void_p,
POINTER(CredHandle),
POINTER(TimeStamp)
]
secur32.AcquireCredentialsHandleW.restype = SECURITY_STATUS
secur32.FreeCredentialsHandle.argtypes = [
POINTER(CredHandle)
]
secur32.FreeCredentialsHandle.restype = SECURITY_STATUS
secur32.InitializeSecurityContextW.argtypes = [
POINTER(CredHandle),
POINTER(CtxtHandle),
POINTER(SEC_WCHAR),
ULONG,
ULONG,
ULONG,
POINTER(SecBufferDesc),
ULONG,
POINTER(CtxtHandle),
POINTER(SecBufferDesc),
POINTER(ULONG),
POINTER(TimeStamp)
]
secur32.InitializeSecurityContextW.restype = SECURITY_STATUS
secur32.FreeContextBuffer.argtypes = [
c_void_p
]
secur32.FreeContextBuffer.restype = SECURITY_STATUS
secur32.ApplyControlToken.argtypes = [
POINTER(CtxtHandle),
POINTER(SecBufferDesc)
]
secur32.ApplyControlToken.restype = SECURITY_STATUS
secur32.DeleteSecurityContext.argtypes = [
POINTER(CtxtHandle)
]
secur32.DeleteSecurityContext.restype = SECURITY_STATUS
secur32.QueryContextAttributesW.argtypes = [
POINTER(CtxtHandle),
ULONG,
c_void_p
]
secur32.QueryContextAttributesW.restype = SECURITY_STATUS
secur32.EncryptMessage.argtypes = [
POINTER(CtxtHandle),
ULONG,
POINTER(SecBufferDesc),
ULONG
]
secur32.EncryptMessage.restype = SECURITY_STATUS
secur32.DecryptMessage.argtypes = [
POINTER(CtxtHandle),
POINTER(SecBufferDesc),
ULONG,
POINTER(ULONG)
]
secur32.DecryptMessage.restype = SECURITY_STATUS
except (AttributeError):
raise FFIEngineError('Error initializing ctypes')
setattr(secur32, 'ALG_ID', ALG_ID)
setattr(secur32, 'CredHandle', CredHandle)
setattr(secur32, 'CtxtHandle', CtxtHandle)
setattr(secur32, 'SecBuffer', SecBuffer)
setattr(secur32, 'SecBufferDesc', SecBufferDesc)
setattr(secur32, 'SecPkgContext_StreamSizes', SecPkgContext_StreamSizes)
setattr(secur32, 'SecPkgContext_ConnectionInfo', SecPkgContext_ConnectionInfo)
setattr(secur32, 'SCHANNEL_CRED', SCHANNEL_CRED)
def get_error():
error = ctypes.GetLastError()
return (error, ctypes.FormatError(error))
| mit | 3a3e82054d1f12e7583a674151a36a12 | 25.792929 | 82 | 0.605655 | 3.676369 | false | false | false | false |
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function

import errno
import numbers
import re
import select
import socket as socket_
import sys
from ._libssl import error_code_version_info, libssl, LibsslConst
from ._libcrypto import libcrypto, libcrypto_version_info, handle_openssl_error, peek_openssl_error
from .. import _backend_config
from .._asn1 import Certificate as Asn1Certificate
from .._errors import pretty_message
from .._ffi import null, bytes_from_buffer, buffer_from_bytes, is_null, buffer_pointer
from .._types import type_name, str_cls, byte_cls, int_types
from ..errors import TLSError, TLSDisconnectError, TLSGracefulDisconnectError
from .._tls import (
detect_client_auth_request,
extract_chain,
get_dh_params_length,
parse_session_info,
raise_client_auth,
raise_dh_params,
raise_disconnection,
raise_expired_not_yet_valid,
raise_handshake,
raise_hostname,
raise_no_issuer,
raise_protocol_error,
raise_protocol_version,
raise_self_signed,
raise_verification,
raise_weak_signature,
parse_tls_records,
parse_handshake_messages,
)
from .asymmetric import load_certificate, Certificate
from ..keys import parse_certificate
from ..trust_list import get_path
if sys.version_info < (3,):
range = xrange # noqa
if sys.version_info < (3, 7):
Pattern = re._pattern_type
else:
Pattern = re.Pattern
__all__ = [
'TLSSession',
'TLSSocket',
]
# Optional user-supplied CA bundle path; when None the OS default is used
_trust_list_path = _backend_config().get('trust_list_path')

# Matches any of the three common line endings, used by TLSSocket.read_line()
_line_regex = re.compile(b'(\r\n|\r|\n)')

# Maps a protocol name to the OpenSSL option flag that DISABLES that protocol
_PROTOCOL_MAP = {
    'SSLv2': LibsslConst.SSL_OP_NO_SSLv2,
    'SSLv3': LibsslConst.SSL_OP_NO_SSLv3,
    'TLSv1': LibsslConst.SSL_OP_NO_TLSv1,
    'TLSv1.1': LibsslConst.SSL_OP_NO_TLSv1_1,
    'TLSv1.2': LibsslConst.SSL_OP_NO_TLSv1_2,
}
def _homogenize_openssl3_error(error_tuple):
    """
    Normalizes a 3-element tuple from peek_openssl_error() so it can be
    compared consistently across OpenSSL versions. OpenSSL 3.0 removed the
    concept of an error function, so on 3.0+ the middle element is always
    forced to 0.

    :param error_tuple:
        A 3-element tuple of integers

    :return:
        A 3-element tuple of integers
    """

    lib_code, _, reason_code = error_tuple
    if libcrypto_version_info >= (3,):
        return (lib_code, 0, reason_code)
    return error_tuple
class TLSSession(object):
    """
    A TLS session object that multiple TLSSocket objects can share for the
    sake of session reuse
    """

    # Set of unicode protocol names that may be negotiated
    _protocols = None
    # Not set anywhere in the visible code
    _ciphers = None
    # Bool - if certificate/hostname validation is left to the caller
    _manual_validation = None
    # List of asn1crypto.x509.Certificate objects added to the trust store
    _extra_trust_roots = None
    # OpenSSL SSL_CTX struct pointer shared by sockets using this session
    _ssl_ctx = None
    # OpenSSL SSL_SESSION struct pointer used for session resumption
    _ssl_session = None

    def __init__(self, protocol=None, manual_validation=False, extra_trust_roots=None):
        """
        :param protocol:
            A unicode string or set of unicode strings representing allowable
            protocols to negotiate with the server:

             - "TLSv1.2"
             - "TLSv1.1"
             - "TLSv1"
             - "SSLv3"

            Default is: {"TLSv1", "TLSv1.1", "TLSv1.2"}

        :param manual_validation:
            If certificate and certificate path validation should be skipped
            and left to the developer to implement

        :param extra_trust_roots:
            A list containing one or more certificates to be treated as trust
            roots, in one of the following formats:
             - A byte string of the DER encoded certificate
             - A unicode string of the certificate filename
             - An asn1crypto.x509.Certificate object
             - An oscrypto.asymmetric.Certificate object

        :raises:
            ValueError - when any of the parameters contain an invalid value
            TypeError - when any of the parameters are of the wrong type
            OSError - when an error is returned by the OS crypto library
        """

        if not isinstance(manual_validation, bool):
            raise TypeError(pretty_message(
                '''
                manual_validation must be a boolean, not %s
                ''',
                type_name(manual_validation)
            ))
        self._manual_validation = manual_validation

        if protocol is None:
            protocol = set(['TLSv1', 'TLSv1.1', 'TLSv1.2'])

        # A single protocol name is accepted and normalized to a set
        if isinstance(protocol, str_cls):
            protocol = set([protocol])
        elif not isinstance(protocol, set):
            raise TypeError(pretty_message(
                '''
                protocol must be a unicode string or set of unicode strings,
                not %s
                ''',
                type_name(protocol)
            ))

        valid_protocols = set(['SSLv3', 'TLSv1', 'TLSv1.1', 'TLSv1.2'])
        unsupported_protocols = protocol - valid_protocols
        if unsupported_protocols:
            raise ValueError(pretty_message(
                '''
                protocol must contain only the unicode strings "SSLv3", "TLSv1",
                "TLSv1.1", "TLSv1.2", not %s
                ''',
                repr(unsupported_protocols)
            ))
        self._protocols = protocol

        # Normalize every extra trust root to an asn1crypto.x509.Certificate
        self._extra_trust_roots = []
        if extra_trust_roots:
            for extra_trust_root in extra_trust_roots:
                if isinstance(extra_trust_root, Certificate):
                    extra_trust_root = extra_trust_root.asn1
                elif isinstance(extra_trust_root, byte_cls):
                    extra_trust_root = parse_certificate(extra_trust_root)
                elif isinstance(extra_trust_root, str_cls):
                    with open(extra_trust_root, 'rb') as f:
                        extra_trust_root = parse_certificate(f.read())
                elif not isinstance(extra_trust_root, Asn1Certificate):
                    raise TypeError(pretty_message(
                        '''
                        extra_trust_roots must be a list of byte strings, unicode
                        strings, asn1crypto.x509.Certificate objects or
                        oscrypto.asymmetric.Certificate objects, not %s
                        ''',
                        type_name(extra_trust_root)
                    ))
                self._extra_trust_roots.append(extra_trust_root)

        ssl_ctx = None
        try:
            # OpenSSL 1.1+ renamed SSLv23_method() to TLS_method()
            if libcrypto_version_info < (1, 1):
                method = libssl.SSLv23_method()
            else:
                method = libssl.TLS_method()
            ssl_ctx = libssl.SSL_CTX_new(method)
            if is_null(ssl_ctx):
                handle_openssl_error(0)
            self._ssl_ctx = ssl_ctx

            libssl.SSL_CTX_set_timeout(ssl_ctx, 600)

            # Allow caching SSL sessions
            libssl.SSL_CTX_ctrl(
                ssl_ctx,
                LibsslConst.SSL_CTRL_SET_SESS_CACHE_MODE,
                LibsslConst.SSL_SESS_CACHE_CLIENT,
                null()
            )

            # On Windows and macOS a CA bundle exported from the OS trust
            # store is used; elsewhere OpenSSL's default paths are trusted
            if sys.platform in set(['win32', 'darwin']):
                trust_list_path = _trust_list_path
                if trust_list_path is None:
                    trust_list_path = get_path()

                if sys.platform == 'win32':
                    path_encoding = 'mbcs'
                else:
                    path_encoding = 'utf-8'
                result = libssl.SSL_CTX_load_verify_locations(
                    ssl_ctx,
                    trust_list_path.encode(path_encoding),
                    null()
                )

            else:
                result = libssl.SSL_CTX_set_default_verify_paths(ssl_ctx)
            handle_openssl_error(result)

            verify_mode = LibsslConst.SSL_VERIFY_NONE if manual_validation else LibsslConst.SSL_VERIFY_PEER
            libssl.SSL_CTX_set_verify(ssl_ctx, verify_mode, null())

            # Modern cipher suite list from https://wiki.mozilla.org/Security/Server_Side_TLS late August 2015
            result = libssl.SSL_CTX_set_cipher_list(
                ssl_ctx,
                (
                    b'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:'
                    b'ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:'
                    b'DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:'
                    b'kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:'
                    b'ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:'
                    b'ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:'
                    b'DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:'
                    b'DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:'
                    b'AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:'
                    b'AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:'
                    b'!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:'
                    b'!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA'
                )
            )
            handle_openssl_error(result)

            # SSLv2 is always disabled, plus any valid protocol the caller
            # did not explicitly allow
            disabled_protocols = set(['SSLv2'])
            disabled_protocols |= (valid_protocols - self._protocols)
            for disabled_protocol in disabled_protocols:
                libssl.SSL_CTX_ctrl(
                    ssl_ctx,
                    LibsslConst.SSL_CTRL_OPTIONS,
                    _PROTOCOL_MAP[disabled_protocol],
                    null()
                )

            if self._extra_trust_roots:
                x509_store = libssl.SSL_CTX_get_cert_store(ssl_ctx)
                for cert in self._extra_trust_roots:
                    oscrypto_cert = load_certificate(cert)
                    result = libssl.X509_STORE_add_cert(
                        x509_store,
                        oscrypto_cert.x509
                    )
                    handle_openssl_error(result)

        except (Exception):
            # Free the context on any setup failure to avoid leaking it
            if ssl_ctx:
                libssl.SSL_CTX_free(ssl_ctx)
            self._ssl_ctx = None
            raise

    def __del__(self):
        # Free the OpenSSL structures this object owns
        if self._ssl_ctx:
            libssl.SSL_CTX_free(self._ssl_ctx)
            self._ssl_ctx = None

        if self._ssl_session:
            libssl.SSL_SESSION_free(self._ssl_session)
            self._ssl_session = None
class TLSSocket(object):
    """
    A wrapper around a socket.socket that adds TLS
    """

    # The underlying socket.socket object the TLS layer runs over
    _socket = None

    # An oscrypto.tls.TLSSession object
    _session = None

    # An OpenSSL SSL struct pointer
    _ssl = None

    # OpenSSL memory bios used for reading/writing data to and
    # from the socket
    _rbio = None
    _wbio = None

    # Size of _bio_write_buffer and _read_buffer
    _buffer_size = 8192

    # A buffer used to pull bytes out of the _wbio memory bio to
    # be written to the socket
    _bio_write_buffer = None

    # A buffer used to push bytes into the _rbio memory bio to
    # be decrypted by OpenSSL
    _read_buffer = None

    # Raw ciphertext from the socket that hasn't been fed to OpenSSL yet
    _raw_bytes = None

    # Plaintext that has been decrypted, but not asked for yet
    _decrypted_bytes = None

    # Connection details captured during the handshake
    _hostname = None

    _certificate = None
    _intermediates = None

    _protocol = None
    _cipher_suite = None
    _compression = None
    _session_id = None
    _session_ticket = None

    # If we explicitly asked for the connection to be closed
    _local_closed = False
    # Set when OpenSSL reports SSL_ERROR_ZERO_RETURN (peer closed the TLS layer)
    _gracefully_closed = False
@classmethod
def wrap(cls, socket, hostname, session=None):
"""
Takes an existing socket and adds TLS
:param socket:
A socket.socket object to wrap with TLS
:param hostname:
A unicode string of the hostname or IP the socket is connected to
:param session:
An existing TLSSession object to allow for session reuse, specific
protocol or manual certificate validation
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
"""
if not isinstance(socket, socket_.socket):
raise TypeError(pretty_message(
'''
socket must be an instance of socket.socket, not %s
''',
type_name(socket)
))
if not isinstance(hostname, str_cls):
raise TypeError(pretty_message(
'''
hostname must be a unicode string, not %s
''',
type_name(hostname)
))
if session is not None and not isinstance(session, TLSSession):
raise TypeError(pretty_message(
'''
session must be an instance of oscrypto.tls.TLSSession, not %s
''',
type_name(session)
))
new_socket = cls(None, None, session=session)
new_socket._socket = socket
new_socket._hostname = hostname
new_socket._handshake()
return new_socket
def __init__(self, address, port, timeout=10, session=None):
"""
:param address:
A unicode string of the domain name or IP address to connect to
:param port:
An integer of the port number to connect to
:param timeout:
An integer timeout to use for the socket
:param session:
An oscrypto.tls.TLSSession object to allow for session reuse and
controlling the protocols and validation performed
"""
self._raw_bytes = b''
self._decrypted_bytes = b''
if address is None and port is None:
self._socket = None
else:
if not isinstance(address, str_cls):
raise TypeError(pretty_message(
'''
address must be a unicode string, not %s
''',
type_name(address)
))
if not isinstance(port, int_types):
raise TypeError(pretty_message(
'''
port must be an integer, not %s
''',
type_name(port)
))
if timeout is not None and not isinstance(timeout, numbers.Number):
raise TypeError(pretty_message(
'''
timeout must be a number, not %s
''',
type_name(timeout)
))
self._socket = socket_.create_connection((address, port), timeout)
self._socket.settimeout(timeout)
if session is None:
session = TLSSession()
elif not isinstance(session, TLSSession):
raise TypeError(pretty_message(
'''
session must be an instance of oscrypto.tls.TLSSession, not %s
''',
type_name(session)
))
self._session = session
if self._socket:
self._hostname = address
self._handshake()
    def _handshake(self):
        """
        Perform an initial TLS handshake
        """

        self._ssl = None
        self._rbio = None
        self._wbio = None

        try:
            self._ssl = libssl.SSL_new(self._session._ssl_ctx)
            if is_null(self._ssl):
                self._ssl = None
                handle_openssl_error(0)

            # Two memory BIOs are used so this code, not OpenSSL, performs
            # the actual socket I/O
            mem_bio = libssl.BIO_s_mem()

            self._rbio = libssl.BIO_new(mem_bio)
            if is_null(self._rbio):
                handle_openssl_error(0)

            self._wbio = libssl.BIO_new(mem_bio)
            if is_null(self._wbio):
                handle_openssl_error(0)

            libssl.SSL_set_bio(self._ssl, self._rbio, self._wbio)

            # Send the hostname via SNI
            utf8_domain = self._hostname.encode('utf-8')
            libssl.SSL_ctrl(
                self._ssl,
                LibsslConst.SSL_CTRL_SET_TLSEXT_HOSTNAME,
                LibsslConst.TLSEXT_NAMETYPE_host_name,
                utf8_domain
            )

            libssl.SSL_set_connect_state(self._ssl)

            # Attempt session resumption when the TLSSession has one cached
            if self._session._ssl_session:
                libssl.SSL_set_session(self._ssl, self._session._ssl_session)

            self._bio_write_buffer = buffer_from_bytes(self._buffer_size)
            self._read_buffer = buffer_from_bytes(self._buffer_size)

            # Raw handshake bytes are accumulated for our own parsing, which
            # produces better error messages than OpenSSL's error codes
            handshake_server_bytes = b''
            handshake_client_bytes = b''

            while True:
                result = libssl.SSL_do_handshake(self._ssl)
                handshake_client_bytes += self._raw_write()

                if result == 1:
                    break

                error = libssl.SSL_get_error(self._ssl, result)
                if error == LibsslConst.SSL_ERROR_WANT_READ:
                    chunk = self._raw_read()
                    if chunk == b'':
                        # The server hung up - figure out the most likely reason
                        if handshake_server_bytes == b'':
                            raise_disconnection()
                        if detect_client_auth_request(handshake_server_bytes):
                            raise_client_auth()
                        raise_protocol_error(handshake_server_bytes)
                    handshake_server_bytes += chunk

                elif error == LibsslConst.SSL_ERROR_WANT_WRITE:
                    handshake_client_bytes += self._raw_write()

                elif error == LibsslConst.SSL_ERROR_ZERO_RETURN:
                    self._gracefully_closed = True
                    self._shutdown(False)
                    self._raise_closed()

                else:
                    # Map specific OpenSSL error tuples to friendly exceptions
                    info = peek_openssl_error()

                    dh_key_info_1 = (
                        LibsslConst.ERR_LIB_SSL,
                        LibsslConst.SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM,
                        LibsslConst.SSL_R_DH_KEY_TOO_SMALL
                    )
                    dh_key_info_1 = _homogenize_openssl3_error(dh_key_info_1)

                    dh_key_info_2 = (
                        LibsslConst.ERR_LIB_SSL,
                        LibsslConst.SSL_F_TLS_PROCESS_SKE_DHE,
                        LibsslConst.SSL_R_DH_KEY_TOO_SMALL
                    )
                    dh_key_info_2 = _homogenize_openssl3_error(dh_key_info_2)

                    dh_key_info_3 = (
                        LibsslConst.ERR_LIB_SSL,
                        LibsslConst.SSL_F_SSL3_GET_KEY_EXCHANGE,
                        LibsslConst.SSL_R_BAD_DH_P_LENGTH
                    )
                    dh_key_info_3 = _homogenize_openssl3_error(dh_key_info_3)

                    if info == dh_key_info_1 or info == dh_key_info_2 or info == dh_key_info_3:
                        raise_dh_params()

                    # The error tuple for an unknown protocol changed in
                    # OpenSSL 1.1
                    if error_code_version_info < (1, 1):
                        unknown_protocol_info = (
                            LibsslConst.ERR_LIB_SSL,
                            LibsslConst.SSL_F_SSL23_GET_SERVER_HELLO,
                            LibsslConst.SSL_R_UNKNOWN_PROTOCOL
                        )
                    else:
                        unknown_protocol_info = (
                            LibsslConst.ERR_LIB_SSL,
                            LibsslConst.SSL_F_SSL3_GET_RECORD,
                            LibsslConst.SSL_R_WRONG_VERSION_NUMBER
                        )
                    unknown_protocol_info = _homogenize_openssl3_error(unknown_protocol_info)

                    if info == unknown_protocol_info:
                        raise_protocol_error(handshake_server_bytes)

                    tls_version_info_error = (
                        LibsslConst.ERR_LIB_SSL,
                        LibsslConst.SSL_F_SSL23_GET_SERVER_HELLO,
                        LibsslConst.SSL_R_TLSV1_ALERT_PROTOCOL_VERSION
                    )
                    tls_version_info_error = _homogenize_openssl3_error(tls_version_info_error)
                    if info == tls_version_info_error:
                        raise_protocol_version()

                    # There are multiple functions that can result in a handshake failure,
                    # but our custom handshake parsing code figures out what really happened,
                    # and what is more, OpenSSL 3 got rid of function codes. Because of this,
                    # we skip checking the function code.
                    handshake_error_info = (
                        LibsslConst.ERR_LIB_SSL,
                        LibsslConst.SSL_R_SSLV3_ALERT_HANDSHAKE_FAILURE
                    )
                    if (info[0], info[2]) == handshake_error_info:
                        # Look for a CertificateRequest (0x0d) handshake message
                        # inside the handshake (0x16) records the server sent
                        saw_client_auth = False
                        for record_type, _, record_data in parse_tls_records(handshake_server_bytes):
                            if record_type != b'\x16':
                                continue
                            for message_type, message_data in parse_handshake_messages(record_data):
                                if message_type == b'\x0d':
                                    saw_client_auth = True
                                    break
                        if saw_client_auth:
                            raise_client_auth()
                        raise_handshake()

                    if error_code_version_info < (1, 1):
                        cert_verify_failed_info = (
                            LibsslConst.ERR_LIB_SSL,
                            LibsslConst.SSL_F_SSL3_GET_SERVER_CERTIFICATE,
                            LibsslConst.SSL_R_CERTIFICATE_VERIFY_FAILED
                        )
                    else:
                        cert_verify_failed_info = (
                            LibsslConst.ERR_LIB_SSL,
                            LibsslConst.SSL_F_TLS_PROCESS_SERVER_CERTIFICATE,
                            LibsslConst.SSL_R_CERTIFICATE_VERIFY_FAILED
                        )
                    cert_verify_failed_info = _homogenize_openssl3_error(cert_verify_failed_info)

                    # It would appear that some versions of OpenSSL (such as on Fedora 30)
                    # don't even have the MD5 digest algorithm included any longer? To
                    # give a more useful error message we handle this specifically.
                    unknown_hash_algo_info = (
                        LibsslConst.ERR_LIB_ASN1,
                        LibsslConst.ASN1_F_ASN1_ITEM_VERIFY,
                        LibsslConst.ASN1_R_UNKNOWN_MESSAGE_DIGEST_ALGORITHM
                    )
                    unknown_hash_algo_info = _homogenize_openssl3_error(unknown_hash_algo_info)

                    if info == unknown_hash_algo_info:
                        chain = extract_chain(handshake_server_bytes)
                        if chain:
                            cert = chain[0]
                            oscrypto_cert = load_certificate(cert)
                            if oscrypto_cert.asn1.hash_algo in set(['md5', 'md2']):
                                raise_weak_signature(oscrypto_cert)

                    if info == cert_verify_failed_info:
                        verify_result = libssl.SSL_get_verify_result(self._ssl)
                        chain = extract_chain(handshake_server_bytes)

                        # Categorize the verification failure so the most
                        # specific exception can be raised
                        self_signed = False
                        time_invalid = False
                        no_issuer = False
                        cert = None
                        oscrypto_cert = None

                        if chain:
                            cert = chain[0]
                            oscrypto_cert = load_certificate(cert)
                            self_signed = oscrypto_cert.self_signed

                            issuer_error_codes = set([
                                LibsslConst.X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT,
                                LibsslConst.X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN,
                                LibsslConst.X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY
                            ])
                            if verify_result in issuer_error_codes:
                                no_issuer = not self_signed

                            time_error_codes = set([
                                LibsslConst.X509_V_ERR_CERT_HAS_EXPIRED,
                                LibsslConst.X509_V_ERR_CERT_NOT_YET_VALID
                            ])
                            time_invalid = verify_result in time_error_codes

                        if time_invalid:
                            raise_expired_not_yet_valid(cert)
                        if no_issuer:
                            raise_no_issuer(cert)
                        if self_signed:
                            raise_self_signed(cert)
                        if oscrypto_cert and oscrypto_cert.asn1.hash_algo in set(['md5', 'md2']):
                            raise_weak_signature(oscrypto_cert)
                        raise_verification(cert)

                    # Fall back to a generic TLSError from the OpenSSL queue
                    handle_openssl_error(0, TLSError)

            # Our own parser extracts the negotiated session parameters from
            # the raw handshake bytes
            session_info = parse_session_info(
                handshake_server_bytes,
                handshake_client_bytes
            )
            self._protocol = session_info['protocol']
            self._cipher_suite = session_info['cipher_suite']
            self._compression = session_info['compression']
            self._session_id = session_info['session_id']
            self._session_ticket = session_info['session_ticket']

            # Reject ephemeral DH key exchanges with params under 1024 bits
            if self._cipher_suite.find('_DHE_') != -1:
                dh_params_length = get_dh_params_length(handshake_server_bytes)
                if dh_params_length < 1024:
                    self.close()
                    raise_dh_params()

            # When saving the session for future requests, we use
            # SSL_get1_session() variant to increase the reference count. This
            # prevents the session from being freed when one connection closes
            # before another is opened. However, since we increase the ref
            # count, we also have to explicitly free any previous session.
            if self._session_id == 'new' or self._session_ticket == 'new':
                if self._session._ssl_session:
                    libssl.SSL_SESSION_free(self._session._ssl_session)
                self._session._ssl_session = libssl.SSL_get1_session(self._ssl)

            if not self._session._manual_validation:
                if self.certificate.hash_algo in set(['md5', 'md2']):
                    raise_weak_signature(self.certificate)

                # OpenSSL does not do hostname or IP address checking in the end
                # entity certificate, so we must perform that check
                if not self.certificate.is_valid_domain_ip(self._hostname):
                    raise_hostname(self.certificate, self._hostname)

        except (OSError, socket_.error):
            if self._ssl:
                libssl.SSL_free(self._ssl)
                self._ssl = None
                self._rbio = None
                self._wbio = None
            # The BIOs are freed by SSL_free(), so we only need to free
            # them if for some reason SSL_free() was not called
            else:
                if self._rbio:
                    libssl.BIO_free(self._rbio)
                    self._rbio = None
                if self._wbio:
                    libssl.BIO_free(self._wbio)
                    self._wbio = None
            self.close()
            raise
def _raw_read(self):
"""
Reads data from the socket and writes it to the memory bio
used by libssl to decrypt the data. Returns the unencrypted
data for the purpose of debugging handshakes.
:return:
A byte string of ciphertext from the socket. Used for
debugging the handshake only.
"""
data = self._raw_bytes
try:
data += self._socket.recv(8192)
except (socket_.error):
pass
output = data
written = libssl.BIO_write(self._rbio, data, len(data))
self._raw_bytes = data[written:]
return output
def _raw_write(self):
"""
Takes ciphertext from the memory bio and writes it to the
socket.
:return:
A byte string of ciphertext going to the socket. Used
for debugging the handshake only.
"""
data_available = libssl.BIO_ctrl_pending(self._wbio)
if data_available == 0:
return b''
to_read = min(self._buffer_size, data_available)
read = libssl.BIO_read(self._wbio, self._bio_write_buffer, to_read)
to_write = bytes_from_buffer(self._bio_write_buffer, read)
output = to_write
while len(to_write):
raise_disconnect = False
try:
sent = self._socket.send(to_write)
except (socket_.error) as e:
# Handle ECONNRESET and EPIPE
if e.errno == 104 or e.errno == 32:
raise_disconnect = True
# Handle EPROTOTYPE. Newer versions of macOS will return this
# if we try to call send() while the socket is being torn down
elif sys.platform == 'darwin' and e.errno == 41:
raise_disconnect = True
else:
raise
if raise_disconnect:
raise_disconnection()
to_write = to_write[sent:]
if len(to_write):
self.select_write()
return output
    def read(self, max_length):
        """
        Reads data from the TLS-wrapped socket

        :param max_length:
            The number of bytes to read - output may be less than this

        :raises:
            socket.socket - when a non-TLS socket error occurs
            oscrypto.errors.TLSError - when a TLS-related error occurs
            ValueError - when any of the parameters contain an invalid value
            TypeError - when any of the parameters are of the wrong type
            OSError - when an error is returned by the OS crypto library

        :return:
            A byte string of the data read
        """

        if not isinstance(max_length, int_types):
            raise TypeError(pretty_message(
                '''
                max_length must be an integer, not %s
                ''',
                type_name(max_length)
            ))

        buffered_length = len(self._decrypted_bytes)

        # If we already have enough buffered data, just use that
        if buffered_length >= max_length:
            output = self._decrypted_bytes[0:max_length]
            self._decrypted_bytes = self._decrypted_bytes[max_length:]
            return output

        if self._ssl is None:
            self._raise_closed()

        # Don't block if we have buffered data available, since it is ok to
        # return less than the max_length
        if buffered_length > 0 and not self.select_read(0):
            output = self._decrypted_bytes
            self._decrypted_bytes = b''
            return output

        # Only read enough to get the requested amount when
        # combined with buffered data
        to_read = min(self._buffer_size, max_length - buffered_length)

        output = self._decrypted_bytes

        # The SSL_read() loop handles renegotiations, so we need to handle
        # requests for both reads and writes
        again = True

        while again:
            again = False
            result = libssl.SSL_read(self._ssl, self._read_buffer, to_read)
            # Flush any renegotiation bytes OpenSSL queued for the server
            self._raw_write()
            if result <= 0:

                error = libssl.SSL_get_error(self._ssl, result)
                if error == LibsslConst.SSL_ERROR_WANT_READ:
                    if self._raw_read() != b'':
                        again = True
                        continue
                    raise_disconnection()

                elif error == LibsslConst.SSL_ERROR_WANT_WRITE:
                    self._raw_write()
                    again = True
                    continue

                elif error == LibsslConst.SSL_ERROR_ZERO_RETURN:
                    # Peer performed a clean TLS shutdown
                    self._gracefully_closed = True
                    self._shutdown(False)
                    break

                else:
                    handle_openssl_error(0, TLSError)

            output += bytes_from_buffer(self._read_buffer, result)

        if self._gracefully_closed and len(output) == 0:
            self._raise_closed()

        # Anything beyond max_length stays buffered for the next call
        self._decrypted_bytes = output[max_length:]
        return output[0:max_length]
def select_read(self, timeout=None):
"""
Blocks until the socket is ready to be read from, or the timeout is hit
:param timeout:
A float - the period of time to wait for data to be read. None for
no time limit.
:return:
A boolean - if data is ready to be read. Will only be False if
timeout is not None.
"""
# If we have buffered data, we consider a read possible
if len(self._decrypted_bytes) > 0:
return True
read_ready, _, _ = select.select([self._socket], [], [], timeout)
return len(read_ready) > 0
    def read_until(self, marker):
        """
        Reads data from the socket until a marker is found. Data read includes
        the marker.

        :param marker:
            A byte string or regex object from re.compile(). Used to determine
            when to stop reading. Regex objects are more inefficient since
            they must scan the entire byte string of read data each time data
            is read off the socket.

        :return:
            A byte string of the data read, including the marker
        """

        if not isinstance(marker, byte_cls) and not isinstance(marker, Pattern):
            raise TypeError(pretty_message(
                '''
                marker must be a byte string or compiled regex object, not %s
                ''',
                type_name(marker)
            ))

        output = b''

        is_regex = isinstance(marker, Pattern)

        while True:
            # Drain any already-decrypted data before hitting the socket
            if len(self._decrypted_bytes) > 0:
                chunk = self._decrypted_bytes
                self._decrypted_bytes = b''
            else:
                if self._ssl is None:
                    self._raise_closed()
                to_read = libssl.SSL_pending(self._ssl) or 8192
                chunk = self.read(to_read)

            offset = len(output)
            output += chunk

            if is_regex:
                match = marker.search(output)
                if match is not None:
                    end = match.end()
                    break
            else:
                # If the marker was not found last time, we have to start
                # at a position where the marker would have its final char
                # in the newly read chunk
                start = max(0, offset - len(marker) - 1)
                match = output.find(marker, start)
                if match != -1:
                    end = match + len(marker)
                    break

        # Data past the marker is re-buffered ahead of any existing buffer
        self._decrypted_bytes = output[end:] + self._decrypted_bytes
        return output[0:end]
def read_line(self):
r"""
Reads a line from the socket, including the line ending of "\r\n", "\r",
or "\n"
:return:
A byte string of the next line from the socket
"""
return self.read_until(_line_regex)
def read_exactly(self, num_bytes):
"""
Reads exactly the specified number of bytes from the socket
:param num_bytes:
An integer - the exact number of bytes to read
:return:
A byte string of the data that was read
"""
output = b''
remaining = num_bytes
while remaining > 0:
output += self.read(remaining)
remaining = num_bytes - len(output)
return output
def write(self, data):
    """
    Writes data to the TLS-wrapped socket

    :param data:
        A byte string to write to the socket

    :raises:
        socket.socket - when a non-TLS socket error occurs
        oscrypto.errors.TLSError - when a TLS-related error occurs
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library
    """

    data_len = len(data)
    while data_len:
        if self._ssl is None:
            self._raise_closed()
        result = libssl.SSL_write(self._ssl, data, data_len)
        # Flush whatever SSL_write placed into the memory BIO out to the
        # underlying socket
        self._raw_write()
        if result <= 0:
            error = libssl.SSL_get_error(self._ssl, result)
            if error == LibsslConst.SSL_ERROR_WANT_READ:
                # OpenSSL needs data from the peer (e.g. renegotiation)
                # before it can accept more plaintext
                if self._raw_read() != b'':
                    continue
                raise_disconnection()
            elif error == LibsslConst.SSL_ERROR_WANT_WRITE:
                self._raw_write()
                continue
            elif error == LibsslConst.SSL_ERROR_ZERO_RETURN:
                # The peer closed the TLS session cleanly
                self._gracefully_closed = True
                self._shutdown(False)
                self._raise_closed()
            else:
                handle_openssl_error(0, TLSError)
        # Drop the bytes OpenSSL accepted and loop for the remainder
        data = data[result:]
        data_len = len(data)
def select_write(self, timeout=None):
    """
    Blocks until the underlying socket can accept data for writing, or the
    timeout elapses

    :param timeout:
        A float - the period of time to wait for the socket to be ready to
        written to. None for no time limit.

    :return:
        A boolean - if the socket is ready for writing. Will only be False
        if timeout is not None.
    """

    writable = select.select([], [self._socket], [], timeout)[1]
    return bool(writable)
def _shutdown(self, manual):
    """
    Shuts down the TLS session and then shuts down the underlying socket

    :param manual:
        A boolean if the connection was manually shutdown
    """

    if self._ssl is None:
        return

    while True:
        result = libssl.SSL_shutdown(self._ssl)

        # Don't be noisy if the socket is already closed
        try:
            self._raw_write()
        except (TLSDisconnectError):
            pass

        # 0 means our close_notify was sent, 1 means both sides are done
        if result >= 0:
            break
        if result < 0:
            error = libssl.SSL_get_error(self._ssl, result)
            if error == LibsslConst.SSL_ERROR_WANT_READ:
                # Waiting on the peer's close_notify - stop when the
                # socket produces no more data
                if self._raw_read() != b'':
                    continue
                else:
                    break
            elif error == LibsslConst.SSL_ERROR_WANT_WRITE:
                self._raw_write()
                continue
            else:
                handle_openssl_error(0, TLSError)

    if manual:
        self._local_closed = True

    libssl.SSL_free(self._ssl)
    self._ssl = None
    # BIOs are freed by SSL_free()
    self._rbio = None
    self._wbio = None

    # Best-effort shutdown of the raw socket; it may already be gone
    try:
        self._socket.shutdown(socket_.SHUT_RDWR)
    except (socket_.error):
        pass
def shutdown(self):
    """
    Performs a manual shutdown of the TLS session, then shuts down the
    socket beneath it
    """

    self._shutdown(manual=True)
def close(self):
    """
    Tears down the TLS session and forcibly closes the underlying socket
    """

    try:
        self.shutdown()
    finally:
        sock = self._socket
        if sock:
            try:
                sock.close()
            except socket_.error:
                pass
            self._socket = None
def _read_certificates(self):
    """
    Reads end-entity and intermediate certificate information from the
    TLS session
    """

    stack_pointer = libssl.SSL_get_peer_cert_chain(self._ssl)
    if is_null(stack_pointer):
        handle_openssl_error(0, TLSError)

    # The sk_* stack functions were renamed OPENSSL_sk_* in OpenSSL 1.1
    if libcrypto_version_info < (1, 1):
        number_certs = libssl.sk_num(stack_pointer)
    else:
        number_certs = libssl.OPENSSL_sk_num(stack_pointer)

    self._intermediates = []

    for index in range(0, number_certs):
        if libcrypto_version_info < (1, 1):
            x509_ = libssl.sk_value(stack_pointer, index)
        else:
            x509_ = libssl.OPENSSL_sk_value(stack_pointer, index)
        # First i2d_X509 call (null output) returns the DER length, the
        # second writes the encoded bytes into the allocated buffer
        buffer_size = libcrypto.i2d_X509(x509_, null())
        cert_buffer = buffer_from_bytes(buffer_size)
        cert_pointer = buffer_pointer(cert_buffer)
        cert_length = libcrypto.i2d_X509(x509_, cert_pointer)
        handle_openssl_error(cert_length)
        cert_data = bytes_from_buffer(cert_buffer, cert_length)

        cert = Asn1Certificate.load(cert_data)

        # Index 0 in the stack is the server's own certificate
        if index == 0:
            self._certificate = cert
        else:
            self._intermediates.append(cert)
def _raise_closed(self):
    """
    Raises the disconnect exception that matches which side of the
    connection was responsible for closing it
    """

    if self._local_closed:
        raise TLSDisconnectError('The connection was already closed')
    if self._gracefully_closed:
        raise TLSGracefulDisconnectError('The remote end closed the connection')
    raise TLSDisconnectError('The connection was closed')
@property
def certificate(self):
    """
    An asn1crypto.x509.Certificate object of the end-entity certificate
    presented by the server
    """

    if self._ssl is None:
        self._raise_closed()
    # The peer chain is parsed lazily on first access
    if self._certificate is None:
        self._read_certificates()
    return self._certificate
@property
def intermediates(self):
    """
    A list of asn1crypto.x509.Certificate objects that were presented as
    intermediates by the server
    """

    if self._ssl is None:
        self._raise_closed()
    # _read_certificates() fills both _certificate and _intermediates,
    # so _certificate doubles as the "already parsed" flag
    if self._certificate is None:
        self._read_certificates()
    return self._intermediates
@property
def cipher_suite(self):
    """
    A unicode string of the IANA cipher suite name of the negotiated
    cipher suite
    """

    # Plain accessor of the value cached on the object
    return self._cipher_suite
@property
def protocol(self):
    """
    A unicode string of: "TLSv1.2", "TLSv1.1", "TLSv1", "SSLv3"
    """

    # Plain accessor of the value cached on the object
    return self._protocol
@property
def compression(self):
    """
    A boolean if compression is enabled
    """

    # Plain accessor of the value cached on the object
    return self._compression
@property
def session_id(self):
    """
    A unicode string of "new" or "reused" or None for no ticket
    """

    # NOTE(review): the docstring mirrors session_ticket's wording;
    # "no ticket" here presumably means "no session ID" - confirm upstream
    return self._session_id
@property
def session_ticket(self):
    """
    A unicode string of "new" or "reused" or None for no ticket
    """

    # Plain accessor of the value cached on the object
    return self._session_ticket
@property
def session(self):
    """
    The oscrypto.tls.TLSSession object used for this connection
    """

    # Plain accessor of the value cached on the object
    return self._session
@property
def hostname(self):
    """
    A unicode string of the TLS server domain name or IP address
    """

    # Plain accessor of the value cached on the object
    return self._hostname
@property
def port(self):
    """
    An integer of the port number the socket is connected to
    """

    # Queries the OS each time via the socket property, which raises a
    # TLSDisconnectError when the connection has been closed
    return self.socket.getpeername()[1]
@property
def socket(self):
    """
    The underlying socket.socket connection
    """

    if self._ssl is None:
        self._raise_closed()
    return self._socket
def __del__(self):
    # Ensure the TLS session and socket are released when the object is
    # garbage collected
    self.close()
| mit | d7365ea72fe2dc10c28c3911d7891a64 | 33.519969 | 110 | 0.520916 | 4.426348 | false | false | false | false |
wbond/oscrypto | oscrypto/_win/symmetric.py | 1 | 30875 | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
from .._errors import pretty_message
from .._ffi import (
buffer_from_bytes,
bytes_from_buffer,
deref,
new,
null,
pointer_set,
struct,
struct_bytes,
unwrap,
write_to_buffer,
)
from .util import rand_bytes
from .. import backend
from .._types import type_name, byte_cls
# Select the Windows crypto implementation once at import time:
# "winlegacy" uses CryptoAPI (advapi32), everything else uses CNG (bcrypt)
_backend = backend()

if _backend == 'winlegacy':
    from ._advapi32 import advapi32, Advapi32Const, handle_error, open_context_handle, close_context_handle
else:
    from ._cng import bcrypt, BcryptConst, handle_error, open_alg_handle, close_alg_handle

__all__ = [
    'aes_cbc_no_padding_decrypt',
    'aes_cbc_no_padding_encrypt',
    'aes_cbc_pkcs7_decrypt',
    'aes_cbc_pkcs7_encrypt',
    'des_cbc_pkcs5_decrypt',
    'des_cbc_pkcs5_encrypt',
    'rc2_cbc_pkcs5_decrypt',
    'rc2_cbc_pkcs5_encrypt',
    'rc4_decrypt',
    'rc4_encrypt',
    'tripledes_cbc_pkcs5_decrypt',
    'tripledes_cbc_pkcs5_encrypt',
]
def aes_cbc_no_padding_encrypt(key, data, iv):
    """
    Performs AES-CBC encryption with a 128, 192 or 256 bit key and no padding
    applied, which requires the plaintext length to be an exact multiple of
    the 16-byte block size.

    :param key:
        The encryption key - a byte string either 16, 24 or 32 bytes long

    :param data:
        The plaintext - a byte string whose length is a multiple of 16

    :param iv:
        The initialization vector - either a byte string 16-bytes long or None
        to generate an IV

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A tuple of two byte strings (iv, ciphertext)
    """

    if len(key) not in (16, 24, 32):
        raise ValueError(pretty_message(
            '''
            key must be either 16, 24 or 32 bytes (128, 192 or 256 bits)
            long - is %s
            ''',
            len(key)
        ))

    if iv:
        if len(iv) != 16:
            raise ValueError(pretty_message(
                '''
                iv must be 16 bytes long - is %s
                ''',
                len(iv)
            ))
    else:
        iv = rand_bytes(16)

    if len(data) % 16 != 0:
        raise ValueError(pretty_message(
            '''
            data must be a multiple of 16 bytes long - is %s
            ''',
            len(data)
        ))

    return (iv, _encrypt('aes', key, data, iv, False))
def aes_cbc_no_padding_decrypt(key, data, iv):
    """
    Performs AES-CBC decryption with a 128, 192 or 256 bit key, assuming the
    ciphertext was produced without any padding.

    :param key:
        The encryption key - a byte string either 16, 24 or 32 bytes long

    :param data:
        The ciphertext - a byte string

    :param iv:
        The initialization vector - a byte string 16-bytes long

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the plaintext
    """

    if len(key) not in (16, 24, 32):
        raise ValueError(pretty_message(
            '''
            key must be either 16, 24 or 32 bytes (128, 192 or 256 bits)
            long - is %s
            ''',
            len(key)
        ))

    if len(iv) != 16:
        raise ValueError(pretty_message(
            '''
            iv must be 16 bytes long - is %s
            ''',
            len(iv)
        ))

    return _decrypt('aes', key, data, iv, False)
def aes_cbc_pkcs7_encrypt(key, data, iv):
    """
    Performs AES-CBC encryption with a 128, 192 or 256 bit key, applying
    PKCS#7 padding to the plaintext.

    :param key:
        The encryption key - a byte string either 16, 24 or 32 bytes long

    :param data:
        The plaintext - a byte string

    :param iv:
        The initialization vector - either a byte string 16-bytes long or None
        to generate an IV

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A tuple of two byte strings (iv, ciphertext)
    """

    if len(key) not in (16, 24, 32):
        raise ValueError(pretty_message(
            '''
            key must be either 16, 24 or 32 bytes (128, 192 or 256 bits)
            long - is %s
            ''',
            len(key)
        ))

    if iv:
        if len(iv) != 16:
            raise ValueError(pretty_message(
                '''
                iv must be 16 bytes long - is %s
                ''',
                len(iv)
            ))
    else:
        iv = rand_bytes(16)

    return (iv, _encrypt('aes', key, data, iv, True))
def aes_cbc_pkcs7_decrypt(key, data, iv):
    """
    Performs AES-CBC decryption with a 128, 192 or 256 bit key, removing the
    PKCS#7 padding from the recovered plaintext.

    :param key:
        The encryption key - a byte string either 16, 24 or 32 bytes long

    :param data:
        The ciphertext - a byte string

    :param iv:
        The initialization vector - a byte string 16-bytes long

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the plaintext
    """

    if len(key) not in (16, 24, 32):
        raise ValueError(pretty_message(
            '''
            key must be either 16, 24 or 32 bytes (128, 192 or 256 bits)
            long - is %s
            ''',
            len(key)
        ))

    if len(iv) != 16:
        raise ValueError(pretty_message(
            '''
            iv must be 16 bytes long - is %s
            ''',
            len(iv)
        ))

    return _decrypt('aes', key, data, iv, True)
def rc4_encrypt(key, data):
    """
    Applies the RC4 stream cipher to plaintext using a 40-128 bit key

    :param key:
        The encryption key - a byte string 5-16 bytes long

    :param data:
        The plaintext - a byte string

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the ciphertext
    """

    if not 5 <= len(key) <= 16:
        raise ValueError(pretty_message(
            '''
            key must be 5 to 16 bytes (40 to 128 bits) long - is %s
            ''',
            len(key)
        ))

    return _encrypt('rc4', key, data, None, None)
def rc4_decrypt(key, data):
    """
    Applies the RC4 stream cipher to ciphertext using a 40-128 bit key

    :param key:
        The encryption key - a byte string 5-16 bytes long

    :param data:
        The ciphertext - a byte string

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the plaintext
    """

    if not 5 <= len(key) <= 16:
        raise ValueError(pretty_message(
            '''
            key must be 5 to 16 bytes (40 to 128 bits) long - is %s
            ''',
            len(key)
        ))

    return _decrypt('rc4', key, data, None, None)
def rc2_cbc_pkcs5_encrypt(key, data, iv):
    """
    Encrypts plaintext using RC2 in CBC mode with a 40-128 bit key and
    PKCS#5 padding

    :param key:
        The encryption key - a byte string 5-16 bytes long

    :param data:
        The plaintext - a byte string

    :param iv:
        The 8-byte initialization vector to use - a byte string - set as None
        to generate an appropriate one

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A tuple of two byte strings (iv, ciphertext)
    """

    if len(key) < 5 or len(key) > 16:
        raise ValueError(pretty_message(
            '''
            key must be 5 to 16 bytes (40 to 128 bits) long - is %s
            ''',
            len(key)
        ))

    if not iv:
        iv = rand_bytes(8)
    elif len(iv) != 8:
        raise ValueError(pretty_message(
            '''
            iv must be 8 bytes long - is %s
            ''',
            len(iv)
        ))

    return (iv, _encrypt('rc2', key, data, iv, True))
def rc2_cbc_pkcs5_decrypt(key, data, iv):
    """
    Decrypts RC2 ciphertext in CBC mode using a 40-128 bit key

    :param key:
        The encryption key - a byte string 5-16 bytes long

    :param data:
        The ciphertext - a byte string

    :param iv:
        The initialization vector used for encryption - a byte string

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the plaintext
    """

    if len(key) < 5 or len(key) > 16:
        raise ValueError(pretty_message(
            '''
            key must be 5 to 16 bytes (40 to 128 bits) long - is %s
            ''',
            len(key)
        ))

    if len(iv) != 8:
        raise ValueError(pretty_message(
            '''
            iv must be 8 bytes long - is %s
            ''',
            len(iv)
        ))

    return _decrypt('rc2', key, data, iv, True)
def tripledes_cbc_pkcs5_encrypt(key, data, iv):
    """
    Encrypts plaintext with 3DES in CBC mode, supporting both 2-key (16 byte)
    and 3-key (24 byte) operation

    :param key:
        The encryption key - a byte string 16 or 24 bytes long (2 or 3 key mode)

    :param data:
        The plaintext - a byte string

    :param iv:
        The 8-byte initialization vector to use - a byte string - set as None
        to generate an appropriate one

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A tuple of two byte strings (iv, ciphertext)
    """

    if len(key) not in (16, 24):
        raise ValueError(pretty_message(
            '''
            key must be 16 bytes (2 key) or 24 bytes (3 key) long - is %s
            ''',
            len(key)
        ))

    if iv:
        if len(iv) != 8:
            raise ValueError(pretty_message(
                '''
                iv must be 8 bytes long - is %s
                ''',
                len(iv)
            ))
    else:
        iv = rand_bytes(8)

    cipher = 'tripledes_2key' if len(key) == 16 else 'tripledes_3key'

    return (iv, _encrypt(cipher, key, data, iv, True))
def tripledes_cbc_pkcs5_decrypt(key, data, iv):
    """
    Decrypts 3DES ciphertext in CBC mode, supporting both 2-key (16 byte)
    and 3-key (24 byte) operation

    :param key:
        The encryption key - a byte string 16 or 24 bytes long (2 or 3 key mode)

    :param data:
        The ciphertext - a byte string

    :param iv:
        The initialization vector used for encryption - a byte string

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the plaintext
    """

    if len(key) not in (16, 24):
        raise ValueError(pretty_message(
            '''
            key must be 16 bytes (2 key) or 24 bytes (3 key) long - is %s
            ''',
            len(key)
        ))

    if len(iv) != 8:
        raise ValueError(pretty_message(
            '''
            iv must be 8 bytes long - is %s
            ''',
            len(iv)
        ))

    cipher = 'tripledes_2key' if len(key) == 16 else 'tripledes_3key'

    return _decrypt(cipher, key, data, iv, True)
def des_cbc_pkcs5_encrypt(key, data, iv):
    """
    Encrypts plaintext using single DES in CBC mode with PKCS#5 padding

    :param key:
        The encryption key - a byte string 8 bytes long (includes error
        correction bits)

    :param data:
        The plaintext - a byte string

    :param iv:
        The 8-byte initialization vector to use - a byte string - set as None
        to generate an appropriate one

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A tuple of two byte strings (iv, ciphertext)
    """

    if len(key) != 8:
        raise ValueError(pretty_message(
            '''
            key must be 8 bytes (56 bits + 8 parity bits) long - is %s
            ''',
            len(key)
        ))

    if iv:
        if len(iv) != 8:
            raise ValueError(pretty_message(
                '''
                iv must be 8 bytes long - is %s
                ''',
                len(iv)
            ))
    else:
        iv = rand_bytes(8)

    return (iv, _encrypt('des', key, data, iv, True))
def des_cbc_pkcs5_decrypt(key, data, iv):
    """
    Decrypts single DES ciphertext in CBC mode

    :param key:
        The encryption key - a byte string 8 bytes long (includes error
        correction bits)

    :param data:
        The ciphertext - a byte string

    :param iv:
        The initialization vector used for encryption - a byte string

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the plaintext
    """

    if len(key) != 8:
        raise ValueError(pretty_message(
            '''
            key must be 8 bytes (56 bits + 8 parity bits) long - is %s
            ''',
            len(key)
        ))

    if len(iv) != 8:
        raise ValueError(pretty_message(
            '''
            iv must be 8 bytes long - is %s
            ''',
            len(iv)
        ))

    return _decrypt('des', key, data, iv, True)
def _advapi32_create_handles(cipher, key, iv):
    """
    Creates an HCRYPTPROV and HCRYPTKEY for symmetric encryption/decryption. The
    HCRYPTPROV must be released by close_context_handle() and the
    HCRYPTKEY must be released by advapi32.CryptDestroyKey() when done.

    :param cipher:
        A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
        "rc2", "rc4"

    :param key:
        A byte string of the symmetric key

    :param iv:
        The initialization vector - a byte string - unused for RC4

    :return:
        A tuple of (HCRYPTPROV, HCRYPTKEY)
    """

    context_handle = None

    if cipher == 'aes':
        # For AES, the ALG_ID is determined by the key length
        algorithm_id = {
            16: Advapi32Const.CALG_AES_128,
            24: Advapi32Const.CALG_AES_192,
            32: Advapi32Const.CALG_AES_256,
        }[len(key)]
    else:
        algorithm_id = {
            'des': Advapi32Const.CALG_DES,
            'tripledes_2key': Advapi32Const.CALG_3DES_112,
            'tripledes_3key': Advapi32Const.CALG_3DES,
            'rc2': Advapi32Const.CALG_RC2,
            'rc4': Advapi32Const.CALG_RC4,
        }[cipher]

    provider = Advapi32Const.MS_ENH_RSA_AES_PROV
    context_handle = open_context_handle(provider, verify_only=False)

    # Raw key bytes are imported by wrapping them in a PLAINTEXTKEYBLOB
    blob_header_pointer = struct(advapi32, 'BLOBHEADER')
    blob_header = unwrap(blob_header_pointer)
    blob_header.bType = Advapi32Const.PLAINTEXTKEYBLOB
    blob_header.bVersion = Advapi32Const.CUR_BLOB_VERSION
    blob_header.reserved = 0
    blob_header.aiKeyAlg = algorithm_id

    blob_struct_pointer = struct(advapi32, 'PLAINTEXTKEYBLOB')
    blob_struct = unwrap(blob_struct_pointer)
    blob_struct.hdr = blob_header
    blob_struct.dwKeySize = len(key)

    blob = struct_bytes(blob_struct_pointer) + key

    flags = 0
    # CryptoAPI salts 40-bit RC2/RC4 keys by default; CRYPT_NO_SALT keeps
    # the output compatible with other implementations
    if cipher in set(['rc2', 'rc4']) and len(key) == 5:
        flags = Advapi32Const.CRYPT_NO_SALT

    key_handle_pointer = new(advapi32, 'HCRYPTKEY *')
    res = advapi32.CryptImportKey(
        context_handle,
        blob,
        len(blob),
        null(),
        flags,
        key_handle_pointer
    )
    handle_error(res)

    key_handle = unwrap(key_handle_pointer)

    if cipher == 'rc2':
        # Match the RC2 effective key length (in bits) to the actual key
        # length, overriding the CryptoAPI default of 40 bits
        buf = new(advapi32, 'DWORD *', len(key) * 8)
        res = advapi32.CryptSetKeyParam(
            key_handle,
            Advapi32Const.KP_EFFECTIVE_KEYLEN,
            buf,
            0
        )
        handle_error(res)

    # Block ciphers get an IV, CBC mode and PKCS#5 padding; RC4 is a
    # stream cipher and needs none of these
    if cipher != 'rc4':
        res = advapi32.CryptSetKeyParam(
            key_handle,
            Advapi32Const.KP_IV,
            iv,
            0
        )
        handle_error(res)

        buf = new(advapi32, 'DWORD *', Advapi32Const.CRYPT_MODE_CBC)
        res = advapi32.CryptSetKeyParam(
            key_handle,
            Advapi32Const.KP_MODE,
            buf,
            0
        )
        handle_error(res)

        buf = new(advapi32, 'DWORD *', Advapi32Const.PKCS5_PADDING)
        res = advapi32.CryptSetKeyParam(
            key_handle,
            Advapi32Const.KP_PADDING,
            buf,
            0
        )
        handle_error(res)

    return (context_handle, key_handle)
def _bcrypt_create_key_handle(cipher, key):
    """
    Creates a BCRYPT_KEY_HANDLE for symmetric encryption/decryption. The
    handle must be released by bcrypt.BCryptDestroyKey() when done.

    :param cipher:
        A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
        "rc2", "rc4"

    :param key:
        A byte string of the symmetric key

    :return:
        A BCRYPT_KEY_HANDLE
    """

    alg_handle = None

    alg_constant = {
        'aes': BcryptConst.BCRYPT_AES_ALGORITHM,
        'des': BcryptConst.BCRYPT_DES_ALGORITHM,
        'tripledes_2key': BcryptConst.BCRYPT_3DES_112_ALGORITHM,
        'tripledes_3key': BcryptConst.BCRYPT_3DES_ALGORITHM,
        'rc2': BcryptConst.BCRYPT_RC2_ALGORITHM,
        'rc4': BcryptConst.BCRYPT_RC4_ALGORITHM,
    }[cipher]

    try:
        alg_handle = open_alg_handle(alg_constant)

        # Raw key bytes are imported by wrapping them in a key data blob
        blob_type = BcryptConst.BCRYPT_KEY_DATA_BLOB
        blob_struct_pointer = struct(bcrypt, 'BCRYPT_KEY_DATA_BLOB_HEADER')
        blob_struct = unwrap(blob_struct_pointer)
        blob_struct.dwMagic = BcryptConst.BCRYPT_KEY_DATA_BLOB_MAGIC
        blob_struct.dwVersion = BcryptConst.BCRYPT_KEY_DATA_BLOB_VERSION1
        blob_struct.cbKeyData = len(key)

        blob = struct_bytes(blob_struct_pointer) + key

        if cipher == 'rc2':
            # Set the RC2 effective key length (in bits, as a 4-byte DWORD)
            # on the algorithm before importing the key
            buf = new(bcrypt, 'DWORD *', len(key) * 8)
            res = bcrypt.BCryptSetProperty(
                alg_handle,
                BcryptConst.BCRYPT_EFFECTIVE_KEY_LENGTH,
                buf,
                4,
                0
            )
            handle_error(res)

        key_handle_pointer = new(bcrypt, 'BCRYPT_KEY_HANDLE *')
        res = bcrypt.BCryptImportKey(
            alg_handle,
            null(),
            blob_type,
            key_handle_pointer,
            null(),
            0,
            blob,
            len(blob),
            0
        )
        handle_error(res)

        return unwrap(key_handle_pointer)

    finally:
        # The algorithm handle is only needed for the import itself
        if alg_handle:
            close_alg_handle(alg_handle)
def _encrypt(cipher, key, data, iv, padding):
    """
    Validates the common parameters and dispatches encryption to the
    CryptoAPI or CNG implementation, depending on the active backend

    :param cipher:
        A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
        "rc2", "rc4"

    :param key:
        The encryption key - a byte string 5-16 bytes long

    :param data:
        The plaintext - a byte string

    :param iv:
        The initialization vector - a byte string - unused for RC4

    :param padding:
        Boolean, if padding should be used - unused for RC4

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the ciphertext
    """

    if not isinstance(key, byte_cls):
        raise TypeError(pretty_message(
            '''
            key must be a byte string, not %s
            ''',
            type_name(key)
        ))

    if not isinstance(data, byte_cls):
        raise TypeError(pretty_message(
            '''
            data must be a byte string, not %s
            ''',
            type_name(data)
        ))

    is_rc4 = cipher == 'rc4'

    if not is_rc4 and not isinstance(iv, byte_cls):
        raise TypeError(pretty_message(
            '''
            iv must be a byte string, not %s
            ''',
            type_name(iv)
        ))

    if not is_rc4 and not padding:
        # AES in CBC mode can be allowed with no padding if
        # the data is an exact multiple of the block size
        if cipher != 'aes' or len(data) % 16 != 0:
            raise ValueError('padding must be specified')

    if _backend == 'winlegacy':
        return _advapi32_encrypt(cipher, key, data, iv, padding)
    return _bcrypt_encrypt(cipher, key, data, iv, padding)
def _advapi32_encrypt(cipher, key, data, iv, padding):
    """
    Encrypts plaintext via CryptoAPI

    :param cipher:
        A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
        "rc2", "rc4"

    :param key:
        The encryption key - a byte string 5-16 bytes long

    :param data:
        The plaintext - a byte string

    :param iv:
        The initialization vector - a byte string - unused for RC4

    :param padding:
        Boolean, if padding should be used - unused for RC4

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the ciphertext
    """

    context_handle = None
    key_handle = None

    try:
        context_handle, key_handle = _advapi32_create_handles(cipher, key, iv)

        # First call with a null data pointer asks CryptoAPI for the
        # required output buffer size
        out_len = new(advapi32, 'DWORD *', len(data))
        res = advapi32.CryptEncrypt(
            key_handle,
            null(),
            True,
            0,
            null(),
            out_len,
            0
        )
        handle_error(res)

        buffer_len = deref(out_len)
        buffer = buffer_from_bytes(buffer_len)
        write_to_buffer(buffer, data)

        # CryptEncrypt works in place - the plaintext in the buffer is
        # overwritten with the ciphertext
        pointer_set(out_len, len(data))
        res = advapi32.CryptEncrypt(
            key_handle,
            null(),
            True,
            0,
            buffer,
            out_len,
            buffer_len
        )
        handle_error(res)

        output = bytes_from_buffer(buffer, deref(out_len))

        # Remove padding when not required. CryptoAPI doesn't support this, so
        # we just manually remove it.
        if cipher == 'aes' and not padding and len(output) == len(data) + 16:
            output = output[:-16]

        return output

    finally:
        if key_handle:
            advapi32.CryptDestroyKey(key_handle)
        if context_handle:
            close_context_handle(context_handle)
def _bcrypt_encrypt(cipher, key, data, iv, padding):
    """
    Encrypts plaintext via CNG

    :param cipher:
        A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
        "rc2", "rc4"

    :param key:
        The encryption key - a byte string 5-16 bytes long

    :param data:
        The plaintext - a byte string

    :param iv:
        The initialization vector - a byte string - unused for RC4

    :param padding:
        Boolean, if padding should be used - unused for RC4

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the ciphertext
    """

    key_handle = None

    try:
        key_handle = _bcrypt_create_key_handle(cipher, key)

        if iv is None:
            iv_len = 0
        else:
            iv_len = len(iv)

        flags = 0
        if padding is True:
            flags = BcryptConst.BCRYPT_BLOCK_PADDING

        # First call with a null output buffer asks CNG for the size of
        # the ciphertext
        out_len = new(bcrypt, 'ULONG *')
        res = bcrypt.BCryptEncrypt(
            key_handle,
            data,
            len(data),
            null(),
            null(),
            0,
            null(),
            0,
            out_len,
            flags
        )
        handle_error(res)

        buffer_len = deref(out_len)
        buffer = buffer_from_bytes(buffer_len)
        # A writable copy of the IV is passed since BCryptEncrypt may
        # modify the IV buffer it is given
        iv_buffer = buffer_from_bytes(iv) if iv else null()

        res = bcrypt.BCryptEncrypt(
            key_handle,
            data,
            len(data),
            null(),
            iv_buffer,
            iv_len,
            buffer,
            buffer_len,
            out_len,
            flags
        )
        handle_error(res)

        return bytes_from_buffer(buffer, deref(out_len))

    finally:
        if key_handle:
            bcrypt.BCryptDestroyKey(key_handle)
def _decrypt(cipher, key, data, iv, padding):
    """
    Validates the common parameters and dispatches decryption to the
    CryptoAPI or CNG implementation, depending on the active backend

    :param cipher:
        A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
        "rc2", "rc4"

    :param key:
        The encryption key - a byte string 5-16 bytes long

    :param data:
        The ciphertext - a byte string

    :param iv:
        The initialization vector - a byte string - unused for RC4

    :param padding:
        Boolean, if padding should be used - unused for RC4

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the plaintext
    """

    if not isinstance(key, byte_cls):
        raise TypeError(pretty_message(
            '''
            key must be a byte string, not %s
            ''',
            type_name(key)
        ))

    if not isinstance(data, byte_cls):
        raise TypeError(pretty_message(
            '''
            data must be a byte string, not %s
            ''',
            type_name(data)
        ))

    if cipher != 'rc4' and not isinstance(iv, byte_cls):
        raise TypeError(pretty_message(
            '''
            iv must be a byte string, not %s
            ''',
            type_name(iv)
        ))

    # AES handles its own no-padding length check in the backend functions
    if cipher not in ('rc4', 'aes') and not padding:
        raise ValueError('padding must be specified')

    if _backend == 'winlegacy':
        return _advapi32_decrypt(cipher, key, data, iv, padding)
    return _bcrypt_decrypt(cipher, key, data, iv, padding)
def _advapi32_decrypt(cipher, key, data, iv, padding):
    """
    Decrypts AES/RC4/RC2/3DES/DES ciphertext via CryptoAPI

    :param cipher:
        A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
        "rc2", "rc4"

    :param key:
        The encryption key - a byte string 5-16 bytes long

    :param data:
        The ciphertext - a byte string

    :param iv:
        The initialization vector - a byte string - unused for RC4

    :param padding:
        Boolean, if padding should be used - unused for RC4

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the plaintext
    """

    context_handle = None
    key_handle = None

    try:
        context_handle, key_handle = _advapi32_create_handles(cipher, key, iv)

        # Without padding, the ciphertext must already be block-aligned
        if cipher == 'aes' and not padding and len(data) % 16 != 0:
            raise ValueError('Invalid data - ciphertext length must be a multiple of 16')

        # CryptDecrypt works in place - the ciphertext in the buffer is
        # overwritten with the plaintext
        buffer = buffer_from_bytes(data)
        out_len = new(advapi32, 'DWORD *', len(data))
        res = advapi32.CryptDecrypt(
            key_handle,
            null(),
            # To skip padding, we have to tell the API that this is not
            # the final block
            False if cipher == 'aes' and not padding else True,
            0,
            buffer,
            out_len
        )
        handle_error(res)

        return bytes_from_buffer(buffer, deref(out_len))

    finally:
        if key_handle:
            advapi32.CryptDestroyKey(key_handle)
        if context_handle:
            close_context_handle(context_handle)
def _bcrypt_decrypt(cipher, key, data, iv, padding):
    """
    Decrypts AES/RC4/RC2/3DES/DES ciphertext via CNG

    :param cipher:
        A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
        "rc2", "rc4"

    :param key:
        The encryption key - a byte string 5-16 bytes long

    :param data:
        The ciphertext - a byte string

    :param iv:
        The initialization vector - a byte string - unused for RC4

    :param padding:
        Boolean, if padding should be used - unused for RC4

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the plaintext
    """

    key_handle = None

    try:
        key_handle = _bcrypt_create_key_handle(cipher, key)

        if iv is None:
            iv_len = 0
        else:
            iv_len = len(iv)

        flags = 0
        if padding is True:
            flags = BcryptConst.BCRYPT_BLOCK_PADDING

        # First call with a null output buffer asks CNG for the size of
        # the plaintext
        out_len = new(bcrypt, 'ULONG *')
        res = bcrypt.BCryptDecrypt(
            key_handle,
            data,
            len(data),
            null(),
            null(),
            0,
            null(),
            0,
            out_len,
            flags
        )
        handle_error(res)

        buffer_len = deref(out_len)
        buffer = buffer_from_bytes(buffer_len)
        # A writable copy of the IV is passed since BCryptDecrypt may
        # modify the IV buffer it is given
        iv_buffer = buffer_from_bytes(iv) if iv else null()

        res = bcrypt.BCryptDecrypt(
            key_handle,
            data,
            len(data),
            null(),
            iv_buffer,
            iv_len,
            buffer,
            buffer_len,
            out_len,
            flags
        )
        handle_error(res)

        return bytes_from_buffer(buffer, deref(out_len))

    finally:
        if key_handle:
            bcrypt.BCryptDestroyKey(key_handle)
| mit | a51223e1597612dc19e85f9070c47fcd | 25.479417 | 107 | 0.560777 | 3.969019 | false | false | false | false |
wbond/oscrypto | dev/lint.py | 7 | 1030 | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import os
from . import package_name, package_root
import flake8
if not hasattr(flake8, '__version_info__') or flake8.__version_info__ < (3,):
from flake8.engine import get_style_guide
else:
from flake8.api.legacy import get_style_guide
def run():
    """
    Executes the flake8 lint checks over the package, dev and tests sources

    :return:
        A bool - if flake8 did not find any errors
    """

    print('Running flake8 %s' % flake8.__version__)

    flake8_style = get_style_guide(config_file=os.path.join(package_root, 'tox.ini'))

    paths = []
    for _dir in [package_name, 'dev', 'tests']:
        for root, _, filenames in os.walk(_dir):
            for filename in filenames:
                # Only Python sources are linted
                if filename.endswith('.py'):
                    paths.append(os.path.join(root, filename))

    report = flake8_style.check_files(paths)
    success = report.total_errors == 0
    if success:
        print('OK')
    return success
| mit | eea53e90e1a6dfb1791388486bdca215 | 26.105263 | 85 | 0.617476 | 3.678571 | false | false | false | false |
wbond/oscrypto | oscrypto/_tls.py | 1 | 17845 | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import re
from datetime import datetime
from ._asn1 import Certificate, int_from_bytes, timezone
from ._cipher_suites import CIPHER_SUITE_MAP
from .errors import TLSVerificationError, TLSDisconnectError, TLSError
# Names exported by `from oscrypto._tls import *`
__all__ = [
    'detect_client_auth_request',
    'extract_chain',
    'get_dh_params_length',
    'parse_alert',
    'parse_handshake_messages',
    'parse_session_info',
    'parse_tls_records',
    'raise_client_auth',
    'raise_dh_params',
    'raise_disconnection',
    'raise_expired_not_yet_valid',
    'raise_handshake',
    'raise_hostname',
    'raise_no_issuer',
    'raise_protocol_error',
    'raise_revoked',
    'raise_self_signed',
    'raise_verification',
    'raise_weak_signature',
]
def extract_chain(server_handshake_bytes):
    """
    Extracts the X.509 certificates from the server handshake bytes for use
    when debugging

    :param server_handshake_bytes:
        A byte string of the handshake data received from the server

    :return:
        A list of asn1crypto.x509.Certificate objects
    """

    certificates = []

    chain_bytes = None

    for rec_type, _, rec_data in parse_tls_records(server_handshake_bytes):
        # Only handshake records (0x16) can contain a Certificate message
        if rec_type != b'\x16':
            continue
        for msg_type, msg_data in parse_handshake_messages(rec_data):
            # 0x0b identifies the Certificate handshake message
            if msg_type == b'\x0b':
                chain_bytes = msg_data
                break
        if chain_bytes:
            break

    if chain_bytes:
        # The first 3 bytes are the total length of the certificate chain
        offset = 3
        chain_length = len(chain_bytes)
        while offset < chain_length:
            # Each entry is a 3-byte length followed by the DER certificate
            cert_length = int_from_bytes(chain_bytes[offset:offset + 3])
            start = offset + 3
            end = start + cert_length
            certificates.append(Certificate.load(chain_bytes[start:end]))
            offset = end

    return certificates
def detect_client_auth_request(server_handshake_bytes):
    """
    Determines if a CertificateRequest message is sent from the server asking
    the client for a certificate

    :param server_handshake_bytes:
        A byte string of the handshake data received from the server

    :return:
        A boolean - if a client certificate request was found
    """

    for rec_type, _, rec_data in parse_tls_records(server_handshake_bytes):
        if rec_type == b'\x16':
            # 0x0d identifies the CertificateRequest handshake message
            for msg_type, _ in parse_handshake_messages(rec_data):
                if msg_type == b'\x0d':
                    return True
    return False
def get_dh_params_length(server_handshake_bytes):
    """
    Determines the length of the DH params from the ServerKeyExchange

    :param server_handshake_bytes:
        A byte string of the handshake data received from the server

    :return:
        None or an integer of the bit size of the DH parameters
    """

    for rec_type, _, rec_data in parse_tls_records(server_handshake_bytes):
        if rec_type != b'\x16':
            continue
        # 0x0c identifies the ServerKeyExchange handshake message
        for msg_type, msg_data in parse_handshake_messages(rec_data):
            if msg_type == b'\x0c':
                # The first two bytes hold the byte length of p
                return int_from_bytes(msg_data[0:2]) * 8

    return None
def parse_alert(server_handshake_bytes):
    """
    Parses the handshake for protocol alerts

    :param server_handshake_bytes:
        A byte string of the handshake data received from the server

    :return:
        None or an 2-element tuple of integers:
         0: 1 (warning) or 2 (fatal)
         1: The alert description (see https://tools.ietf.org/html/rfc5246#section-7.2)
    """

    for rec_type, _, rec_data in parse_tls_records(server_handshake_bytes):
        # 0x15 identifies an alert record
        if rec_type != b'\x15':
            continue
        if len(rec_data) != 2:
            return None
        level = int_from_bytes(rec_data[0:1])
        description = int_from_bytes(rec_data[1:2])
        return (level, description)
    return None
def parse_session_info(server_handshake_bytes, client_handshake_bytes):
    """
    Parse the TLS handshake from the client to the server to extract information
    including the cipher suite selected, if compression is enabled, the
    session id and if a new or reused session ticket exists.

    :param server_handshake_bytes:
        A byte string of the handshake data received from the server

    :param client_handshake_bytes:
        A byte string of the handshake data sent to the server

    :return:
        A dict with the following keys:
         - "protocol": unicode string
         - "cipher_suite": unicode string
         - "compression": boolean
         - "session_id": "new", "reused" or None
         - "session_ticket: "new", "reused" or None
    """

    protocol = None
    cipher_suite = None
    compression = False
    session_id = None
    session_ticket = None

    server_session_id = None
    client_session_id = None

    for record_type, _, record_data in parse_tls_records(server_handshake_bytes):
        if record_type != b'\x16':
            continue
        for message_type, message_data in parse_handshake_messages(record_data):
            # Ensure we are working with a ServerHello message
            if message_type != b'\x02':
                continue
            # The first two bytes of the ServerHello are the protocol version
            protocol = {
                b'\x03\x00': "SSLv3",
                b'\x03\x01': "TLSv1",
                b'\x03\x02': "TLSv1.1",
                b'\x03\x03': "TLSv1.2",
                b'\x03\x04': "TLSv1.3",
            }[message_data[0:2]]

            # Offset 34 holds the one-byte session id length, followed by
            # the session id itself
            session_id_length = int_from_bytes(message_data[34:35])
            if session_id_length > 0:
                server_session_id = message_data[35:35 + session_id_length]

            cipher_suite_start = 35 + session_id_length
            cipher_suite_bytes = message_data[cipher_suite_start:cipher_suite_start + 2]
            cipher_suite = CIPHER_SUITE_MAP[cipher_suite_bytes]

            # A single non-zero byte after the cipher suite means a
            # compression method was negotiated
            compression_start = cipher_suite_start + 2
            compression = message_data[compression_start:compression_start + 1] != b'\x00'

            extensions_length_start = compression_start + 1
            extensions_data = message_data[extensions_length_start:]
            # Extension type 35 is the session_ticket extension
            for extension_type, extension_data in _parse_hello_extensions(extensions_data):
                if extension_type == 35:
                    session_ticket = "new"
                    break
            break

    for record_type, _, record_data in parse_tls_records(client_handshake_bytes):
        if record_type != b'\x16':
            continue
        for message_type, message_data in parse_handshake_messages(record_data):
            # Ensure we are working with a ClientHello message
            if message_type != b'\x01':
                continue
            session_id_length = int_from_bytes(message_data[34:35])
            if session_id_length > 0:
                client_session_id = message_data[35:35 + session_id_length]

            cipher_suite_start = 35 + session_id_length
            cipher_suite_length = int_from_bytes(message_data[cipher_suite_start:cipher_suite_start + 2])

            compression_start = cipher_suite_start + 2 + cipher_suite_length
            compression_length = int_from_bytes(message_data[compression_start:compression_start + 1])

            # On subsequent requests, the session ticket will only be seen
            # in the ClientHello message
            if server_session_id is None and session_ticket is None:
                extensions_length_start = compression_start + 1 + compression_length
                extensions_data = message_data[extensions_length_start:]
                for extension_type, extension_data in _parse_hello_extensions(extensions_data):
                    if extension_type == 35:
                        session_ticket = "reused"
                        break
            break

    # A session id is only "reused" when the client sent one and the server
    # echoed the exact same value back
    if server_session_id is not None:
        if client_session_id is None:
            session_id = "new"
        else:
            if client_session_id != server_session_id:
                session_id = "new"
            else:
                session_id = "reused"

    return {
        "protocol": protocol,
        "cipher_suite": cipher_suite,
        "compression": compression,
        "session_id": session_id,
        "session_ticket": session_ticket,
    }
def parse_tls_records(data):
    """
    Creates a generator returning tuples of information about each record
    in a byte string of data from a TLS client or server. Stops as soon as it
    find a ChangeCipherSpec message since all data from then on is encrypted.

    :param data:
        A byte string of TLS records

    :return:
        A generator that yields 3-element tuples:
         [0] Byte string of record type
         [1] Byte string of protocol version
         [2] Byte string of record data
    """

    offset = 0
    total = len(data)

    while offset < total:
        record_type = data[offset:offset + 1]
        # Everything after a ChangeCipherSpec (0x14) record is encrypted,
        # so there is no point in parsing further
        if record_type == b'\x14':
            break
        protocol_version = data[offset + 1:offset + 3]
        # Bytes 3-5 of the record header hold the payload length
        payload_length = int_from_bytes(data[offset + 3:offset + 5])
        payload = data[offset + 5:offset + 5 + payload_length]
        yield (record_type, protocol_version, payload)
        offset += 5 + payload_length
def parse_handshake_messages(data):
    """
    Creates a generator returning tuples of information about each message in
    a byte string of data from a TLS handshake record

    :param data:
        A byte string of a TLS handshake record data

    :return:
        A generator that yields 2-element tuples:
         [0] Byte string of message type
         [1] Byte string of message data
    """

    offset = 0
    total = len(data)

    while offset < total:
        message_type = data[offset:offset + 1]
        # Bytes 1-4 of each message hold a 24-bit big endian payload length
        payload_length = int_from_bytes(data[offset + 1:offset + 4])
        payload = data[offset + 4:offset + 4 + payload_length]
        yield (message_type, payload)
        offset += 4 + payload_length
def _parse_hello_extensions(data):
    """
    Creates a generator returning tuples of information about each extension
    from a byte string of extension data contained in a ServerHello or
    ClientHello message

    :param data:
        A byte string of a extension data from a TLS ServerHello or ClientHello
        message

    :return:
        A generator that yields 2-element tuples:
         [0] Byte string of extension type
         [1] Byte string of extension data
    """

    if data == b'':
        return

    # The extension block opens with a two-byte total length
    total_length = int_from_bytes(data[0:2])
    end = 2 + total_length

    offset = 2
    while offset < end:
        ext_type = int_from_bytes(data[offset:offset + 2])
        ext_length = int_from_bytes(data[offset + 2:offset + 4])
        yield (ext_type, data[offset + 4:offset + 4 + ext_length])
        offset += 4 + ext_length
def raise_hostname(certificate, hostname):
    """
    Raises a TLSVerificationError due to a hostname mismatch

    :param certificate:
        An asn1crypto.x509.Certificate object

    :param hostname:
        A unicode string of the hostname that was requested

    :raises:
        TLSVerificationError
    """

    # IPv4 dotted quads and anything containing a colon (IPv6) are treated
    # as IP addresses rather than domain names
    if re.match('^\\d+\\.\\d+\\.\\d+\\.\\d+$', hostname) or ':' in hostname:
        hostname_type = 'IP address %s' % hostname
    else:
        hostname_type = 'domain name %s' % hostname

    parts = ['Server certificate verification failed - %s does not match' % hostname_type]

    valid_ips = ', '.join(certificate.valid_ips)
    valid_domains = ', '.join(certificate.valid_domains)

    if valid_domains:
        parts.append(' valid domains: %s' % valid_domains)
    if valid_domains and valid_ips:
        parts.append(' or')
    if valid_ips:
        parts.append(' valid IP addresses: %s' % valid_ips)

    raise TLSVerificationError(''.join(parts), certificate)
def raise_verification(certificate):
    """
    Raises a TLSVerificationError with a generic failure message

    :param certificate:
        An asn1crypto.x509.Certificate object

    :raises:
        TLSVerificationError
    """

    raise TLSVerificationError('Server certificate verification failed', certificate)
def raise_weak_signature(certificate):
    """
    Raises a TLSVerificationError because the certificate is signed using a
    weak signature algorithm

    :param certificate:
        An asn1crypto.x509.Certificate object

    :raises:
        TLSVerificationError
    """

    raise TLSVerificationError(
        'Server certificate verification failed - weak certificate signature algorithm',
        certificate
    )
def raise_client_auth():
    """
    Raises a TLSError because the server requires a client certificate

    :raises:
        TLSError
    """

    raise TLSError('TLS handshake failed - client authentication required')
def raise_revoked(certificate):
    """
    Raises a TLSVerificationError because the certificate was revoked

    :param certificate:
        An asn1crypto.x509.Certificate object

    :raises:
        TLSVerificationError
    """

    raise TLSVerificationError(
        'Server certificate verification failed - certificate has been revoked',
        certificate
    )
def raise_no_issuer(certificate):
    """
    Raises a TLSVerificationError because the issuer of the certificate is
    not present in the trusted root certificate store

    :param certificate:
        An asn1crypto.x509.Certificate object

    :raises:
        TLSVerificationError
    """

    raise TLSVerificationError(
        'Server certificate verification failed - certificate issuer not found in trusted root certificate store',
        certificate
    )
def raise_self_signed(certificate):
    """
    Raises a TLSVerificationError because the certificate is self-signed

    :param certificate:
        An asn1crypto.x509.Certificate object

    :raises:
        TLSVerificationError
    """

    raise TLSVerificationError(
        'Server certificate verification failed - certificate is self-signed',
        certificate
    )
def raise_lifetime_too_long(certificate):
    """
    Raises a TLSVerificationError because the certificate lifetime exceeds
    the CAB forum certificate lifetime limit

    :param certificate:
        An asn1crypto.x509.Certificate object

    :raises:
        TLSVerificationError
    """

    raise TLSVerificationError(
        'Server certificate verification failed - certificate lifetime is too long',
        certificate
    )
def raise_expired_not_yet_valid(certificate):
    """
    Raises a TLSVerificationError due to certificate being expired, or not yet
    being valid

    :param certificate:
        An asn1crypto.x509.Certificate object

    :raises:
        TLSVerificationError
    """

    validity = certificate['tbs_certificate']['validity']
    not_after = validity['not_after'].native
    not_before = validity['not_before'].native

    now = datetime.now(timezone.utc)

    if not_before > now:
        formatted_before = not_before.strftime('%Y-%m-%d %H:%M:%SZ')
        message = 'Server certificate verification failed - certificate not valid until %s' % formatted_before
    elif not_after < now:
        formatted_after = not_after.strftime('%Y-%m-%d %H:%M:%SZ')
        message = 'Server certificate verification failed - certificate expired %s' % formatted_after
    else:
        # Defensive fallback: callers should only invoke this function when
        # the certificate is outside its validity window, but previously a
        # call with a currently-valid certificate raised UnboundLocalError
        # because "message" was never assigned
        message = 'Server certificate verification failed - certificate validity error'

    raise TLSVerificationError(message, certificate)
def raise_disconnection():
    """
    Raises a TLSDisconnectError because the remote end closed the connection

    :raises:
        TLSDisconnectError
    """

    raise TLSDisconnectError('The remote end closed the connection')
def raise_protocol_error(server_handshake_bytes):
    """
    Raises a TLSError because the server responded with something other
    than TLS

    :param server_handshake_bytes:
        A byte string of the handshake data received from the server

    :raises:
        TLSError
    """

    detected = detect_other_protocol(server_handshake_bytes)

    if detected:
        raise TLSError('TLS protocol error - server responded using %s' % detected)

    raise TLSError('TLS protocol error - server responded using a different protocol')
def raise_handshake():
    """
    Raises a generic TLSError for a failed handshake

    :raises:
        TLSError
    """

    raise TLSError('TLS handshake failed')
def raise_protocol_version():
    """
    Raises a TLSError because of a TLS protocol version incompatibility

    :raises:
        TLSError
    """

    raise TLSError('TLS handshake failed - protocol version error')
def raise_dh_params():
    """
    Raises a TLSError because the server used weak DH parameters

    :raises:
        TLSError
    """

    raise TLSError('TLS handshake failed - weak DH parameters')
def detect_other_protocol(server_handshake_bytes):
    """
    Looks at the server handshake bytes to try and detect a different protocol

    :param server_handshake_bytes:
        A byte string of the handshake data received from the server

    :return:
        None, or a unicode string of "FTP", "HTTP", "IMAP", "POP3", "SMTP"
    """

    if server_handshake_bytes[0:5] == b'HTTP/':
        return 'HTTP'

    if server_handshake_bytes[0:4] == b'220 ':
        # Both FTP and SMTP servers greet with "220 "; FTP banners normally
        # mention "ftp" somewhere in the first line
        if re.match(b'^[^\r\n]*ftp', server_handshake_bytes, re.I):
            return 'FTP'
        else:
            return 'SMTP'

    if server_handshake_bytes[0:4] == b'220-':
        return 'FTP'

    if server_handshake_bytes[0:4] == b'+OK ':
        return 'POP3'

    if server_handshake_bytes[0:4] == b'* OK' or server_handshake_bytes[0:9] == b'* PREAUTH':
        return 'IMAP'

    return None
| mit | 1c472ee99d3c702b812eb7836e48cf07 | 28.544702 | 119 | 0.629812 | 4.218676 | false | false | false | false |
wbond/oscrypto | oscrypto/_win/_secur32_cffi.py | 1 | 3967 | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import sys
from .._ffi import register_ffi
from .._types import str_cls
from ..errors import LibraryNotFoundError
import cffi
__all__ = [
'get_error',
'secur32',
]
# FFI instance used to declare and bind the secur32.dll (SSPI) API
ffi = cffi.FFI()
# set_unicode() enables wchar_t-based Windows types (WCHAR, LPWSTR); the
# method only exists in cffi 0.9 and newer
if cffi.__version_info__ >= (0, 9):
    ffi.set_unicode(True)
# ULONG_PTR is pointer-sized, so distinguish 64-bit from 32-bit Python
# via sys.maxsize
if sys.maxsize > 2 ** 32:
    ffi.cdef("typedef uint64_t ULONG_PTR;")
else:
    ffi.cdef("typedef unsigned long ULONG_PTR;")
ffi.cdef("""
typedef HANDLE HCERTSTORE;
typedef unsigned int ALG_ID;
typedef WCHAR SEC_WCHAR;
typedef unsigned long SECURITY_STATUS;
typedef void *LUID;
typedef void *SEC_GET_KEY_FN;
typedef struct _SecHandle {
ULONG_PTR dwLower;
ULONG_PTR dwUpper;
} SecHandle;
typedef SecHandle CredHandle;
typedef SecHandle CtxtHandle;
typedef struct _SCHANNEL_CRED {
DWORD dwVersion;
DWORD cCreds;
void *paCred;
HCERTSTORE hRootStore;
DWORD cMappers;
void **aphMappers;
DWORD cSupportedAlgs;
ALG_ID *palgSupportedAlgs;
DWORD grbitEnabledProtocols;
DWORD dwMinimumCipherStrength;
DWORD dwMaximumCipherStrength;
DWORD dwSessionLifespan;
DWORD dwFlags;
DWORD dwCredFormat;
} SCHANNEL_CRED;
typedef struct _TimeStamp {
DWORD dwLowDateTime;
DWORD dwHighDateTime;
} TimeStamp;
typedef struct _SecBuffer {
ULONG cbBuffer;
ULONG BufferType;
BYTE *pvBuffer;
} SecBuffer;
typedef struct _SecBufferDesc {
ULONG ulVersion;
ULONG cBuffers;
SecBuffer *pBuffers;
} SecBufferDesc;
typedef struct _SecPkgContext_StreamSizes {
ULONG cbHeader;
ULONG cbTrailer;
ULONG cbMaximumMessage;
ULONG cBuffers;
ULONG cbBlockSize;
} SecPkgContext_StreamSizes;
typedef struct _CERT_CONTEXT {
DWORD dwCertEncodingType;
BYTE *pbCertEncoded;
DWORD cbCertEncoded;
void *pCertInfo;
HCERTSTORE hCertStore;
} CERT_CONTEXT;
typedef struct _SecPkgContext_ConnectionInfo {
DWORD dwProtocol;
ALG_ID aiCipher;
DWORD dwCipherStrength;
ALG_ID aiHash;
DWORD dwHashStrength;
ALG_ID aiExch;
DWORD dwExchStrength;
} SecPkgContext_ConnectionInfo;
SECURITY_STATUS AcquireCredentialsHandleW(SEC_WCHAR *pszPrincipal, SEC_WCHAR *pszPackage, ULONG fCredentialUse,
LUID *pvLogonID, void *pAuthData, SEC_GET_KEY_FN pGetKeyFn, void *pvGetKeyArgument,
CredHandle *phCredential, TimeStamp *ptsExpiry);
SECURITY_STATUS FreeCredentialsHandle(CredHandle *phCredential);
SECURITY_STATUS InitializeSecurityContextW(CredHandle *phCredential, CtxtHandle *phContext,
SEC_WCHAR *pszTargetName, ULONG fContextReq, ULONG Reserved1, ULONG TargetDataRep,
SecBufferDesc *pInput, ULONG Reserved2, CtxtHandle *phNewContext, SecBufferDesc *pOutput,
ULONG *pfContextAttr, TimeStamp *ptsExpiry);
SECURITY_STATUS FreeContextBuffer(void *pvContextBuffer);
SECURITY_STATUS ApplyControlToken(CtxtHandle *phContext, SecBufferDesc *pInput);
SECURITY_STATUS DeleteSecurityContext(CtxtHandle *phContext);
SECURITY_STATUS QueryContextAttributesW(CtxtHandle *phContext, ULONG ulAttribute, void *pBuffer);
SECURITY_STATUS EncryptMessage(CtxtHandle *phContext, ULONG fQOP, SecBufferDesc *pMessage, ULONG MessageSeqNo);
SECURITY_STATUS DecryptMessage(CtxtHandle *phContext, SecBufferDesc *pMessage, ULONG MessageSeqNo, ULONG *pfQOP);
""")
# Load the Windows security provider DLL and register the ffi instance so
# other modules can resolve types declared above
try:
    secur32 = ffi.dlopen('secur32.dll')
    register_ffi(secur32, ffi)

except (OSError) as e:
    # Translate cffi's load failure into the package-level error type;
    # re-raise anything else untouched
    if str_cls(e).find('cannot load library') != -1:
        raise LibraryNotFoundError('secur32.dll could not be found')
    raise
def get_error():
    """
    Returns the last Windows error for this thread, as reported by cffi's
    ffi.getwinerror() - a 2-element tuple of (integer code, unicode message)
    """
    return ffi.getwinerror()
| mit | 2d1bd263e411afd89c0ea86d03f82ce1 | 29.751938 | 117 | 0.675826 | 3.676552 | false | false | false | false |
wbond/oscrypto | tests/test_init.py | 1 | 4724 | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import ast
import _ast
import unittest
import os
import sys
import oscrypto as module
# This handles situations where an import is importing a function from a
# dotted path, e.g. "from . import ident", and ident is a function, not a
# submodule
MOD_MAP = {
    'oscrypto._backend_config': 'oscrypto',
    'oscrypto.backend': 'oscrypto',
    'oscrypto.ffi': 'oscrypto'
}


def add_mod(mod_name, imports):
    """
    Records a module import, collapsing known function-style imports down
    to the module that actually defines them

    :param mod_name:
        A unicode string of a fully-qualified module name being imported

    :param imports:
        A set of unicode strings of the modules that are being imported
    """

    normalized = MOD_MAP.get(mod_name, mod_name)
    imports.add(normalized)
def walk_ast(parent_node, modname, imports):
    """
    Walks the AST for a module finding any imports and recording them

    :param parent_node:
        A node from the _ast module

    :param modname:
        A unicode string of the module we are walking the AST of

    :param imports:
        A set of unicode strings of the imports that have been found so far
    """

    for node in ast.iter_child_nodes(parent_node):
        if isinstance(node, _ast.Import):
            # Plain "import x.y" - only imports of this package are recorded
            if node.names[0].name.startswith(module.__name__):
                add_mod(node.names[0].name, imports)
        elif isinstance(node, _ast.ImportFrom):
            if node.level > 0:
                # Relative import - resolve the base module from modname by
                # stripping one trailing component per relative level
                if modname == module.__name__:
                    base_mod = module.__name__
                else:
                    base_mod = '.'.join(modname.split('.')[:-node.level])
                if node.module:
                    base_mod += '.' + node.module
            else:
                base_mod = node.module
            if not base_mod.startswith(module.__name__):
                continue
            if node.level > 0 and not node.module:
                # "from . import a, b" - each imported name may itself be a
                # submodule, so record each one individually
                for n in node.names:
                    add_mod(base_mod + '.' + n.name, imports)
            else:
                add_mod(base_mod, imports)
        elif isinstance(node, _ast.If):
            # Imports can be nested inside conditionals and try blocks,
            # so recurse into each branch
            for subast in node.body:
                walk_ast(subast, modname, imports)
            for subast in node.orelse:
                walk_ast(subast, modname, imports)
        elif sys.version_info >= (3, 3) and isinstance(node, _ast.Try):
            # Python 3.3+ merged TryExcept/TryFinally into a single Try node
            for subast in node.body:
                walk_ast(subast, modname, imports)
            for subast in node.orelse:
                walk_ast(subast, modname, imports)
            for subast in node.finalbody:
                walk_ast(subast, modname, imports)
        elif sys.version_info < (3, 3) and isinstance(node, _ast.TryFinally):
            for subast in node.body:
                walk_ast(subast, modname, imports)
            for subast in node.finalbody:
                walk_ast(subast, modname, imports)
        elif sys.version_info < (3, 3) and isinstance(node, _ast.TryExcept):
            for subast in node.body:
                walk_ast(subast, modname, imports)
            for subast in node.orelse:
                walk_ast(subast, modname, imports)
class InitTests(unittest.TestCase):
    """
    Checks that the package's declared load_order() is consistent with the
    actual import dependencies found by parsing every source file
    """

    def test_load_order(self):
        deps = {}

        # Collect (absolute path, package-relative path) for every .py file
        mod_root = os.path.abspath(os.path.dirname(module.__file__))
        files = []
        for root, dnames, fnames in os.walk(mod_root):
            for f in fnames:
                if f.endswith('.py'):
                    full_path = os.path.join(root, f)
                    rel_path = full_path.replace(mod_root + os.sep, '')
                    files.append((full_path, rel_path))

        # Build a map of module name -> set of package-internal imports by
        # walking the AST of each source file
        for full_path, rel_path in sorted(files):
            with open(full_path, 'rb') as f:
                full_code = f.read()
                if sys.version_info >= (3,):
                    full_code = full_code.decode('utf-8')

            modname = rel_path.replace('.py', '').replace(os.sep, '.')
            if modname == '__init__':
                modname = module.__name__
            else:
                modname = '%s.%s' % (module.__name__, modname)

            # Normalize Windows line endings before parsing on Python 2
            if sys.version_info < (3,) and sys.platform == 'win32' and b'\r\n' in full_code:
                full_code = full_code.replace(b'\r\n', b'\n')

            imports = set([])
            module_node = ast.parse(full_code, filename=full_path)
            walk_ast(module_node, modname, imports)

            deps[modname] = imports

        # Walking the declared load order, each module may only depend on
        # modules that appear before it
        load_order = module.load_order()
        prev = set([])
        for mod in load_order:
            self.assertEqual(True, mod in deps)
            self.assertEqual((mod, set([])), (mod, deps[mod] - prev))
            prev.add(mod)
| mit | 4bbe628a935111ead503d6be0655b6ef | 32.034965 | 92 | 0.547629 | 3.930116 | false | false | false | false |
wbond/oscrypto | oscrypto/_mac/_security_cffi.py | 1 | 11218 | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import platform
from .._ffi import register_ffi
from cffi import FFI
__all__ = [
'Security',
'version',
'version_info',
]
# Detect the macOS release, e.g. "10.15.7" -> (10, 15, 7)
version = platform.mac_ver()[0]
version_info = tuple(map(int, version.split('.')))

# The Security.framework API surface declared below requires 10.7+
if version_info < (10, 7):
    raise OSError('Only OS X 10.7 and newer are supported, not %s.%s' % (version_info[0], version_info[1]))
# FFI instance used to declare and bind the Security.framework API
ffi = FFI()

# It appears SSLCipherSuite is uint16_t on ARM64, but uint32_t on X86_64
if platform.machine() == 'arm64':
    ffi.cdef("""
        typedef uint16_t SSLCipherSuite;
    """)
else:
    ffi.cdef("""
        typedef uint32_t SSLCipherSuite;
    """)
ffi.cdef("""
typedef bool Boolean;
typedef long CFIndex;
typedef int32_t OSStatus;
typedef unsigned long CFTypeID;
typedef uint32_t SecTrustSettingsDomain;
typedef uint32_t SecPadding;
typedef uint32_t SecItemImportExportFlags;
typedef uint32_t SecKeyImportExportFlags;
typedef uint32_t SecExternalFormat;
typedef uint32_t SecExternalItemType;
typedef uint32_t CSSM_ALGORITHMS;
typedef uint64_t CSSM_CC_HANDLE;
typedef uint32_t CSSM_KEYUSE;
typedef uint32_t CSSM_CERT_TYPE;
typedef uint32_t SSLProtocol;
typedef uint32_t SecTrustResultType;
typedef void *CFTypeRef;
typedef CFTypeRef CFArrayRef;
typedef CFTypeRef CFDataRef;
typedef CFTypeRef CFStringRef;
typedef CFTypeRef CFDictionaryRef;
typedef CFTypeRef CFErrorRef;
typedef CFTypeRef CFAllocatorRef;
typedef ... *SecKeyRef;
typedef ... *SecCertificateRef;
typedef ... *SecTransformRef;
typedef ... *SecRandomRef;
typedef ... *SecPolicyRef;
typedef ... *SecPolicySearchRef;
typedef ... *SecAccessRef;
typedef struct
{
uint32_t version;
SecKeyImportExportFlags flags;
CFTypeRef passphrase;
CFStringRef alertTitle;
CFStringRef alertPrompt;
SecAccessRef accessRef;
CFArrayRef keyUsage;
CFArrayRef keyAttributes;
} SecItemImportExportKeyParameters;
typedef ... *SecKeychainRef;
typedef ... *SSLContextRef;
typedef ... *SecTrustRef;
typedef uint32_t SSLConnectionRef;
typedef struct {
uint32_t Length;
char *Data;
} CSSM_DATA, CSSM_OID;
typedef struct {
uint32_t Version;
uint32_t Flags;
CSSM_DATA *LocalResponder;
CSSM_DATA *LocalResponderCert;
} CSSM_APPLE_TP_OCSP_OPTIONS;
typedef struct {
uint32_t Version;
uint32_t CrlFlags;
void *crlStore;
} CSSM_APPLE_TP_CRL_OPTIONS;
OSStatus SecKeychainCreate(char *path, uint32_t pass_len, void *pass,
Boolean prompt, SecAccessRef initialAccess, SecKeychainRef *keychain);
OSStatus SecKeychainDelete(SecKeychainRef keychain);
int SecRandomCopyBytes(SecRandomRef rnd, size_t count, char *bytes);
SecKeyRef SecKeyCreateFromData(CFDictionaryRef parameters, CFDataRef keyData, CFErrorRef *error);
SecTransformRef SecEncryptTransformCreate(SecKeyRef keyRef, CFErrorRef *error);
SecTransformRef SecDecryptTransformCreate(SecKeyRef keyRef, CFErrorRef *error);
Boolean SecTransformSetAttribute(SecTransformRef transformRef, CFStringRef key, CFTypeRef value, CFErrorRef *error);
CFTypeRef SecTransformExecute(SecTransformRef transformRef, CFErrorRef *errorRef);
SecTransformRef SecVerifyTransformCreate(SecKeyRef key, CFDataRef signature, CFErrorRef *error);
SecTransformRef SecSignTransformCreate(SecKeyRef key, CFErrorRef *error);
SecCertificateRef SecCertificateCreateWithData(CFAllocatorRef allocator, CFDataRef data);
OSStatus SecCertificateCopyPublicKey(SecCertificateRef certificate, SecKeyRef *key);
SecKeyRef SecCertificateCopyKey(SecCertificateRef certificate);
CFStringRef SecCopyErrorMessageString(OSStatus status, void *reserved);
OSStatus SecTrustCopyAnchorCertificates(CFArrayRef *anchors);
CFDataRef SecCertificateCopyData(SecCertificateRef certificate);
OSStatus SecTrustSettingsCopyCertificates(SecTrustSettingsDomain domain, CFArrayRef *certArray);
OSStatus SecTrustSettingsCopyTrustSettings(SecCertificateRef certRef, SecTrustSettingsDomain domain,
CFArrayRef *trustSettings);
CFDictionaryRef SecPolicyCopyProperties(SecPolicyRef policyRef);
CFTypeID SecPolicyGetTypeID(void);
OSStatus SecKeyEncrypt(SecKeyRef key, SecPadding padding, const char *plainText, size_t plainTextLen,
char *cipherText, size_t *cipherTextLen);
OSStatus SecKeyDecrypt(SecKeyRef key, SecPadding padding, const char *cipherText, size_t cipherTextLen,
char *plainText, size_t *plainTextLen);
OSStatus SecKeyRawSign(SecKeyRef key, SecPadding padding, const char *dataToSign, size_t dataToSignLen,
char *sig, size_t * sigLen);
OSStatus SecKeyRawVerify(SecKeyRef key, SecPadding padding, const char *signedData, size_t signedDataLen,
const char *sig, size_t sigLen);
OSStatus SecItemImport(CFDataRef importedData, CFStringRef fileNameOrExtension,
SecExternalFormat *inputFormat, SecExternalItemType *itemType,
SecItemImportExportFlags flags, const SecItemImportExportKeyParameters *keyParams,
SecKeychainRef importKeychain, CFArrayRef *outItems);
OSStatus SecItemExport(CFTypeRef secItemOrArray, SecExternalFormat outputFormat, SecItemImportExportFlags flags,
const SecItemImportExportKeyParameters *keyParams, CFDataRef *exportedData);
OSStatus SecAccessCreate(CFStringRef descriptor, CFArrayRef trustedlist, SecAccessRef *accessRef);
OSStatus SecKeyCreatePair(SecKeychainRef keychainRef, CSSM_ALGORITHMS algorithm, uint32_t keySizeInBits,
CSSM_CC_HANDLE contextHandle, CSSM_KEYUSE publicKeyUsage, uint32_t publicKeyAttr,
CSSM_KEYUSE privateKeyUsage, uint32_t privateKeyAttr, SecAccessRef initialAccess,
SecKeyRef* publicKeyRef, SecKeyRef* privateKeyRef);
OSStatus SecKeychainItemDelete(SecKeyRef itemRef);
typedef OSStatus (*SSLReadFunc)(SSLConnectionRef connection, char *data, size_t *dataLength);
typedef OSStatus (*SSLWriteFunc)(SSLConnectionRef connection, const char *data, size_t *dataLength);
OSStatus SSLSetIOFuncs(SSLContextRef context, SSLReadFunc readFunc, SSLWriteFunc writeFunc);
OSStatus SSLSetPeerID(SSLContextRef context, const char *peerID, size_t peerIDLen);
OSStatus SSLSetConnection(SSLContextRef context, SSLConnectionRef connection);
OSStatus SSLSetPeerDomainName(SSLContextRef context, const char *peerName, size_t peerNameLen);
OSStatus SSLHandshake(SSLContextRef context);
OSStatus SSLGetBufferedReadSize(SSLContextRef context, size_t *bufSize);
OSStatus SSLRead(SSLContextRef context, char *data, size_t dataLength, size_t *processed);
OSStatus SSLWrite(SSLContextRef context, const char *data, size_t dataLength, size_t *processed);
OSStatus SSLClose(SSLContextRef context);
OSStatus SSLGetNumberSupportedCiphers(SSLContextRef context, size_t *numCiphers);
OSStatus SSLGetSupportedCiphers(SSLContextRef context, SSLCipherSuite *ciphers, size_t *numCiphers);
OSStatus SSLSetEnabledCiphers(SSLContextRef context, const SSLCipherSuite *ciphers, size_t numCiphers);
OSStatus SSLGetNumberEnabledCiphers(SSLContextRef context, size_t *numCiphers);
OSStatus SSLGetEnabledCiphers(SSLContextRef context, SSLCipherSuite *ciphers, size_t *numCiphers);
OSStatus SSLGetNegotiatedCipher(SSLContextRef context, SSLCipherSuite *cipherSuite);
OSStatus SSLGetNegotiatedProtocolVersion(SSLContextRef context, SSLProtocol *protocol);
OSStatus SSLCopyPeerTrust(SSLContextRef context, SecTrustRef *trust);
OSStatus SecTrustGetCssmResultCode(SecTrustRef trust, OSStatus *resultCode);
CFIndex SecTrustGetCertificateCount(SecTrustRef trust);
SecCertificateRef SecTrustGetCertificateAtIndex(SecTrustRef trust, CFIndex ix);
OSStatus SecTrustSetAnchorCertificates(SecTrustRef trust, CFArrayRef anchorCertificates);
OSStatus SecTrustSetAnchorCertificatesOnly(SecTrustRef trust, Boolean anchorCertificatesOnly);
OSStatus SecTrustSetPolicies(SecTrustRef trust, CFArrayRef policies);
SecPolicyRef SecPolicyCreateSSL(Boolean server, CFStringRef hostname);
OSStatus SecPolicySearchCreate(CSSM_CERT_TYPE certType, const CSSM_OID *policyOID, const CSSM_DATA *value,
SecPolicySearchRef *searchRef);
OSStatus SecPolicySearchCopyNext(SecPolicySearchRef searchRef, SecPolicyRef *policyRef);
OSStatus SecPolicySetValue(SecPolicyRef policyRef, const CSSM_DATA *value);
OSStatus SecTrustEvaluate(SecTrustRef trust, SecTrustResultType *result);
extern SecRandomRef kSecRandomDefault;
extern CFStringRef kSecPaddingKey;
extern CFStringRef kSecPaddingPKCS7Key;
extern CFStringRef kSecPaddingPKCS5Key;
extern CFStringRef kSecPaddingPKCS1Key;
extern CFStringRef kSecPaddingOAEPKey;
extern CFStringRef kSecPaddingNoneKey;
extern CFStringRef kSecModeCBCKey;
extern CFStringRef kSecTransformInputAttributeName;
extern CFStringRef kSecDigestTypeAttribute;
extern CFStringRef kSecDigestLengthAttribute;
extern CFStringRef kSecIVKey;
extern CFStringRef kSecAttrIsExtractable;
extern CFStringRef kSecDigestSHA1;
extern CFStringRef kSecDigestSHA2;
extern CFStringRef kSecDigestMD5;
extern CFStringRef kSecAttrKeyType;
extern CFTypeRef kSecAttrKeyTypeRSA;
extern CFTypeRef kSecAttrKeyTypeDSA;
extern CFTypeRef kSecAttrKeyTypeECDSA;
extern CFStringRef kSecAttrKeySizeInBits;
extern CFStringRef kSecAttrLabel;
extern CFTypeRef kSecAttrCanSign;
extern CFTypeRef kSecAttrCanVerify;
extern CFTypeRef kSecAttrKeyTypeAES;
extern CFTypeRef kSecAttrKeyTypeRC4;
extern CFTypeRef kSecAttrKeyTypeRC2;
extern CFTypeRef kSecAttrKeyType3DES;
extern CFTypeRef kSecAttrKeyTypeDES;
""")
if version_info < (10, 8):
ffi.cdef("""
OSStatus SSLNewContext(Boolean isServer, SSLContextRef *contextPtr);
OSStatus SSLDisposeContext(SSLContextRef context);
OSStatus SSLSetEnableCertVerify(SSLContextRef context, Boolean enableVerify);
OSStatus SSLSetProtocolVersionEnabled(SSLContextRef context, SSLProtocol protocol, Boolean enable);
""")
else:
ffi.cdef("""
typedef uint32_t SSLProtocolSide;
typedef uint32_t SSLConnectionType;
typedef uint32_t SSLSessionOption;
SSLContextRef SSLCreateContext(CFAllocatorRef alloc, SSLProtocolSide protocolSide,
SSLConnectionType connectionType);
OSStatus SSLSetSessionOption(SSLContextRef context, SSLSessionOption option, Boolean value);
OSStatus SSLSetProtocolVersionMin(SSLContextRef context, SSLProtocol minVersion);
OSStatus SSLSetProtocolVersionMax(SSLContextRef context, SSLProtocol maxVersion);
""")
# Load the Security framework binary and register the ffi instance so other
# modules can resolve the types declared above
security_path = '/System/Library/Frameworks/Security.framework/Security'

Security = ffi.dlopen(security_path)
register_ffi(Security, ffi)
| mit | ab078aa3e5d39ac24f5ff5eeebc9b5ff | 44.052209 | 120 | 0.755037 | 3.86561 | false | false | false | false |
ursgal/ursgal | ursgal/wrappers/msblender_09_2015.py | 2 | 18783 | #!/usr/bin/python
from __future__ import print_function
from __future__ import division
import os
import os.path
import sys
import csv
import math
import subprocess
from pprint import pprint
import ursgal
class msblender_09_2015(ursgal.UNode):
    """
    MSblender UNode

    Meta-engine that merges PSM lists from several search engines into a
    single, probabilistically combined result.

    Documentation at http://www.marcottelab.org/index.php/MSblender
    """

    # Engine metadata consumed by the ursgal framework (version, platform
    # executables, citation, ...). NOTE(review): "distributable" is False,
    # i.e. the msblender binary must be installed manually.
    META_INFO = {
        "edit_version": 1.00,
        "name": "MSblender",
        "version": "09.2015",
        "release_date": "2015-9-1",
        "engine_type": {
            "meta_engine": True,
        },
        "input_extensions": [".csv"],
        "output_extensions": [".csv"],
        "in_development": True,
        "create_own_folder": False,
        "include_in_git": False,
        "distributable": False,
        "utranslation_style": "msblender_style_1",
        "engine": {
            "linux": {
                "64bit": {
                    "exe": "msblender",
                    "url": "",
                    "zip_md5": None,
                    "additional_exe": [],
                },
            },
            "darwin": {
                "64bit": {
                    "exe": "msblender",
                    "url": "",
                    "zip_md5": None,
                    "additional_exe": [],
                },
            },
        },
        "citation": "Kwon T, Choi H, Vogel C, Nesvizhskii AI, Marcotte EM. (2011) "
        "MSblender: A Probabilistic Approach for Integrating Peptide "
        "Identifications from Multiple Database Search Engines.",
    }

    def __init__(self, *args, **kwargs):
        super(msblender_09_2015, self).__init__(*args, **kwargs)
        # Maps (sequence, basename, spectrum_id, charge) -> (retention time,
        # protein accession). Filled in convert_ursgal_csv_to_msblender_input()
        # and read back in convert_msblender_out() when the summary is written.
        self.RT_lookup = {}
        return
    def preflight(self):
        """
        Prepare the MSblender command line and all required input files.

        Steps:
        1. Assert that the helper script ``make-msblender_in.py`` sits next
           to the msblender executable.
        2. Collect input CSVs and per-engine score/e-value column names from
           ``self.params["input_file_dicts"]``.
        3. Convert each ident CSV into MSblender "hit list" format.
        4. Write the MSblender config file and run ``make-msblender_in.py``
           (via python2.7) to build the actual ``.msblender_in`` file.
        5. Store the final command list in ``self.params["command_list"]``.

        Temporary files are registered in self.created_tmp_files so the
        framework can delete them later.

        http://www.marcottelab.org/index.php/MSblender#Pre-processing
        """
        # first, make sure that the msblender input generation script is
        # in the same path as the executable :
        exe_dir = os.path.dirname(self.exe)
        make_msblender_in_py_path = os.path.join(exe_dir, "make-msblender_in.py")
        assert os.path.isfile(
            make_msblender_in_py_path
        ), """
        The MSblender python script 'make-msblender_in.py'
        was not found in {}""".format(
            exe_dir
        )
        self.params["make-msblender_in"] = make_msblender_in_py_path
        self.params["output_file_incl_path"] = os.path.join(
            self.params["output_dir_path"], self.params["output_file"]
        )
        # Per-input-file bookkeeping: engine name, its score column and its
        # e-value column are collected index-parallel to "input_files".
        self.params["input_files"] = []
        self.params["engine_names"] = []
        self.params["engine_column_names"] = []
        self.params["evalue_column_names"] = []
        for input_file_dict in self.params["input_file_dicts"]:
            search_engine = input_file_dict["last_search_engine"]
            score_colname = input_file_dict["last_search_engine_colname"]
            input_file_path = os.path.join(
                input_file_dict["dir"], input_file_dict["file"]
            )
            evalue_colname = self.meta_unodes[search_engine].DEFAULT_PARAMS[
                "evalue_field"
            ]
            self.params["engine_names"].append(search_engine)
            self.params["engine_column_names"].append(score_colname)
            self.params["evalue_column_names"].append(evalue_colname)
            self.params["input_files"].append(input_file_path)
        print('Converting ident CSVs to MSblender "logE hit list" format...')
        msbl_hit_files = []
        for i, ident_csv in enumerate(self.params["input_files"]):
            print(
                "Converting file {0} of {1}...".format(
                    i + 1, len(self.params["input_files"])
                )
            )
            f = self.convert_ursgal_csv_to_msblender_input(ident_csv)
            msbl_hit_files.append(f)
        print("Generating MSblender config file...")
        self.params["msblender_conf"] = self.make_msblender_config_file(msbl_hit_files)
        # NOTE(review): make-msblender_in.py is executed with python2.7 —
        # this hard requirement should be confirmed/documented for users.
        command_list_to_generate_input = [
            "python2.7",
            "{make-msblender_in}".format(**self.params),  # path to make-msblender_in.py
            "{msblender_conf}".format(**self.params),  # path to msblender_conf file
        ]
        print("Generating MSblender input file via command line call:")
        print("$ " + " ".join(command_list_to_generate_input))
        print()
        proc = subprocess.Popen(
            command_list_to_generate_input,
            stdout=subprocess.PIPE,
        )
        # stream the helper script's stdout to the user
        for line in proc.stdout:
            print(line.strip().decode("utf"))
        # make-msblender_in.py should have produced an input file with this name:
        msblender_input_file_path = self.params["msblender_conf"] + ".msblender_in"
        assert os.path.isfile(
            msblender_input_file_path
        ), """
        MSblender input file could not be found at that path:
        {}""".format(
            msblender_input_file_path
        )
        self.params["msblender_in"] = msblender_input_file_path
        self.params["command_list"] = [
            self.exe,  # path to msblender executable
            "{msblender_in}".format(**self.params),  # path to msblender_in file
        ]
def postflight(self):
# check if msblender output file was sucessfully created or not
self.params["msblender_out"] = self.params["msblender_in"] + ".msblender_out"
assert os.path.isfile(
self.params["msblender_out"]
), "MSblender output file was not found."
msg = "MSblender output file successfully created: {msblender_out}"
print(msg.format(**self.params))
print("\nFormatting MSblender output file...")
output_file = self.convert_msblender_out(self.params["msblender_out"])
print(
"MSblender Summary CSV created: {output_file_incl_path}".format(
**self.params
)
)
def make_msblender_config_file(self, msblender_input_files):
"""
MSblender needs a config file of this format:
MyriMatch: test.myrimatch.mvh_hit_list_best
X!Tandem: test.tandem_k.logE_hit_list_best
...
This function generates that file.
"""
config_file_path = self.params["output_file_incl_path"] + ".msblender_conf"
with open(config_file_path, "w") as cfg_file:
for i, msb_file in enumerate(msblender_input_files):
search_engine = self.params["engine_names"][i]
line = " ".join([search_engine, msb_file])
cfg_file.write(line + "\n")
self.created_tmp_files.append(config_file_path) # mark for deletion
return config_file_path
    def convert_ursgal_csv_to_msblender_input(self, csv_file):
        """
        Convert one unified Ursgal ident CSV into MSblender "hit_list" format.

        csv_file: search engine output file, already converted to unified CSV
        by ursgal.

        Generates a new tab-separated file in MSblender hit-list format
        (see http://www.marcottelab.org/index.php/MSblender#Pre-processing )
        with the same name as the input file, but with ".msblender_input"
        appended. As a side effect, fills self.RT_lookup with retention time
        and protein accession per PSM (needed again in convert_msblender_out,
        since MSblender's own output lacks this information).

        Returns the path of the converted file.
        """
        output_header = [
            "#Spectrum_id",
            "Charge",
            "PrecursorMz",
            "MassDiff",
            "Peptide",
            "Protein",
            "MissedCleavages",
            "Score(-log10[E-value])",
        ]
        msblender_decoy_tag = "xf_"  # msblender interprets a hit as
        # decoy if spectrum ID starts with this tag,
        # so we have to mark our decoys with this tag!
        converted_file = csv_file + ".msblender_input"
        with open(csv_file, "r") as in_f:
            # skip comment lines before handing the stream to DictReader
            rows = (line for line in in_f if not line.startswith("#"))
            reader = csv.DictReader(rows)
            with open(converted_file, "w") as out_f:
                writer = csv.DictWriter(out_f, output_header, delimiter="\t")
                writer.writeheader()
                for in_row in reader:
                    # only consider 'the best' PSM for each spectrum
                    # (same effect as 'select-best-PSM.py' from msblender)
                    if "rank" in in_row:
                        if int(in_row["rank"]) != 1:
                            continue
                    out_row = {}
                    sequence = in_row["Sequence"]
                    mods = in_row["Modifications"]
                    # spectrum titles look like "basename.start.stop.charge"
                    spec_title = in_row["Spectrum Title"]
                    basename = spec_title.split(".")[0]
                    spec_id_start = int(spec_title.split(".")[1])
                    spec_id_stop = int(spec_title.split(".")[2])
                    input_file_charge = int(spec_title.split(".")[-1])
                    charge = in_row["Charge"]
                    out_row["Peptide"] = "#".join([sequence, mods])
                    out_row["#Spectrum_id"] = spec_title
                    out_row["Charge"] = charge
                    out_row["PrecursorMz"] = in_row["Exp m/z"]
                    out_row["MassDiff"] = abs(
                        float(in_row["Exp m/z"]) - float(in_row["Calc m/z"])
                    )
                    prot_ID = in_row["proteinacc_start_stop_pre_post_;"]
                    if in_row["Is decoy"] == "true" or "decoy" in prot_ID:
                        out_row["Protein"] = msblender_decoy_tag + prot_ID
                    else:
                        out_row["Protein"] = prot_ID
                    # missed cleavages = internal R/K residues (C-term excluded)
                    RandK = [aa for aa in sequence[:-1] if aa in ["R", "K"]]
                    out_row["MissedCleavages"] = len(RandK)
                    for e in self.params["evalue_column_names"]:
                        if e in in_row:
                            evalue = float(in_row[e])
                            # no log can be calculated for evalues that are supposedly
                            # zero so they are replaced with evalues of a very small value
                            # (converters by the msblender team use the same fix)
                            if evalue == 0.0:
                                evalue = 1e-15
                            out_row["Score(-log10[E-value])"] = -math.log10(evalue)
                    # noting down some PSM information in a lookup dict, we need
                    # this info later when the final output file is generated
                    for spec_id in set([spec_id_start, spec_id_stop]):
                        # msblender output doesn't have start+stop, so we note down both, just in case
                        PSM = (sequence, basename, spec_id, input_file_charge)
                        info = (
                            in_row["Retention Time (s)"],
                            in_row["proteinacc_start_stop_pre_post_;"],
                        )
                        self.RT_lookup[PSM] = info
                    writer.writerow(out_row)
        self.created_tmp_files.append(converted_file)  # mark for deletion
        return converted_file
def convert_msblender_out(self, msblender_out):
"""
Format the MSblender output csv to match our standards.
It should look just like the combine_FDR output csv.
Also adds the estimated FDR for each hit!
"""
msblender_converted_out = self.params["output_file_incl_path"]
# mapping the msblender score column names to the ones that we want in our output csv,
# i.e. { 'xtandem_score' : 'X\!Tandem:MSblender_score' }
score_colname_map = {}
for i, engine in enumerate(self.params["engine_names"]):
msbl_score_colname = engine + "_score"
pretty_engine_name = self.params["engine_column_names"][i]
new_score_colname = "".join(
["MSblender:", pretty_engine_name, ":score"]
) # i.e. "MSblender:MyriMatch_score"
score_colname_map[msbl_score_colname] = new_score_colname
out_header = [
"Spectrum ID",
"Sequence",
"Modifications",
"MSblender:mvScore",
"Charge",
"Retention Time (s)",
"Engines",
"Is decoy",
"Estimated FDR",
]
out_header += list(score_colname_map.values())
out_header.append("proteinacc_start_stop_pre_post_;")
# these counters will be used for FDR calculation:
PSM_count = 0
decoy_count = 0
with open(msblender_out, "r") as msblout:
reader = csv.DictReader(msblout, delimiter="\t")
with open(msblender_converted_out, "w") as out:
writer = csv.DictWriter(out, out_header)
writer.writeheader()
# sorting the csv by score to allow FDR calculation while iterating
# sorting in reverse since a bigger mvScore is better
# (it's the "posterior probability of correct identification", see paper)
sorted_reader = sorted(
reader,
key=lambda x: float(x["mvScore"]),
reverse=True,
)
for in_row in sorted_reader:
msblender_score = in_row["mvScore"]
if float(msblender_score) > self.DEFAULT_PARAMS["FDR_cutoff"]:
continue
if in_row["Decoy"] == "D":
is_decoy = "true"
elif in_row["Decoy"] == "F":
is_decoy = "false"
else:
raise ValueError(
"""
"Decoy"-column info is missing from MSblender output row!
It should be either "D" or "R"!
"""
)
# desired output format:
# Spectrum ID Sequence Modifications Charge Retention Time (s)
# Engines Is decoy X\!Tandem:PEP OMSSA:PEP proteinacc_start_stop_pre_post_;
spec = in_row["Spectrum"]
spec_tokens = spec.split(".")
spec_id = int(spec_tokens[-3])
charge = spec_tokens[-2]
sequence, mods = spec_tokens[-1].split("#")
basename = spec_tokens[0]
out_row = {}
out_row["MSblender:mvScore"] = msblender_score
out_row["Spectrum ID"] = spec_id
out_row["Charge"] = charge
out_row["Sequence"] = sequence
out_row["Modifications"] = mods
out_row["Is decoy"] = is_decoy
PSM = (sequence, basename, spec_id, int(charge))
RT, proteinacc = self.RT_lookup[PSM]
out_row["Retention Time (s)"] = RT
out_row["proteinacc_start_stop_pre_post_;"] = proteinacc
engines_list = [] # list indicating which engines found the PSM
for u_score, m_score in score_colname_map.items():
out_row[m_score] = in_row[u_score]
if in_row[u_score] != "":
engines_list.append(
m_score.split(":")[1]
# MSblender:MyriMatch:score -> MyriMatch
)
out_row["Engines"] = ";".join(engines_list)
# estimating the FDR for each row:
PSM_count += 1
if is_decoy == "true":
decoy_count += 1
# calculate and store the estimated FDR for each identification
out_row["Estimated FDR"] = self.calc_FDR(PSM_count, decoy_count)
writer.writerow(out_row)
self.created_tmp_files.append(msblender_converted_out) # mark for deletion
return msblender_converted_out
def calc_FDR(self, PSM_count, false_positives):
"""
calculate false discovery rate according to FDR Method 2
(Käll et al. 2008) as explained by Jones et al. 2009
"""
true_positives = PSM_count - (2 * false_positives)
if true_positives <= 0: # prevent FDR above 1. Not sure if this is done?
return 1.0
FDR = false_positives / (false_positives + true_positives)
return FDR
def add_estimated_FDR(self, csv_rowlist):
"""
Adding the estimated FDR (based on # of decoys).
based on Jones et al. 2009: Algorithm 2, step 3
careful, input file must be sorted by average score (low->high) already!
"""
# In certain datasets, decoy hits are not
# observed at any score threshold for identifications made by all
# three search engines. To correct for the size of the result set, an
# artificial decoy hit is added at the end of each data series, such
# that no identification has a combined FDR/PEP Score = 0.
artificial_decoy = [{"Is decoy": "true"}]
PSM_count = 0
decoy_count = 0
# Traverse identifications from lowest AFS to highest:
for row in csv_rowlist + artificial_decoy:
PSM_count += 1
if row["Is decoy"] == "true":
decoy_count += 1
# calculate and store the estimated FDR (FDRest) for each identification according to FDR Method 2.
row["Estimated FDR"] = calc_FDR(PSM_count, decoy_count)
row["_decoy_percent"] = decoy_count / PSM_count
if __name__ == "__main__":
    # Minimal manual test drive: instantiate the UNode and run the full
    # preflight -> execute -> postflight cycle. The commented-out params
    # below document the expected structure of self.params for a real run.
    m = msblender_09_2015()
    # m.params = {
    #     "output_file" : "./msblender_final_output.csv",
    #     "folder" : ".",
    #     "basename" : "120813OTc1_NQL-AU-0314-LFQ-LCM-SG-04_048_14N",
    #     "input_files" : [ # merged ident csv files:
    #         "14N_msgfplus_v9979_merged.csv",
    #         "14N_myrimatch_2_1_138_merged.csv",
    #         "14N_omssa_2_1_9_merged.csv",
    #         "14N_xtandem_sledgehammer_merged.csv",
    #     ],
    #     "score_fields" : {
    #         "xtandem" : "X\!Tandem:whatever",
    #         "omssa" : "OMSSA:bla",
    #         "msgfplus" : "MS-GF+:whatever",
    #         "myrimatch" : "MyriMatch:doesnmatter",
    #     },
    #     "FDR_cutoff" : 0.01,
    # }
    m.exe = "./msblender"
    m.preflight()
    m._execute()
    m.postflight()
| mit | 81993d727d67d4f2d382ffc6b275ef59 | 37.248473 | 111 | 0.51475 | 3.875361 | false | false | false | false |
ursgal/ursgal | ursgal/resources/platform_independent/arc_independent/svm_1_0_0/svm_1_0_0.py | 2 | 27891 | #!/usr/bin/env python
"""
usage:
svm.py unified_input.csv engine_score_column_name
i.e. :
svm.py omssa_2_1_6_unified.csv 'OMSSA:pvalue'
Writes a new file with added column "SVMscore" which is the distance to
the separating hyperplane of a Percolator-like support vector machine.
"""
import numpy as np
import sklearn
from sklearn import svm
from sklearn.cross_validation import StratifiedKFold
from sklearn.preprocessing import Imputer
from collections import Counter, defaultdict
from random import random
import csv
import re
import os
import argparse
from misc import (
get_score_colname_and_order,
field_to_float,
unify_sequence,
calc_FDR,
scale_scores,
row_is_decoy,
field_to_bayes_float,
get_mz_values,
)
# Feature scaler applied to the whole feature matrix before training.
SCALER = (
    sklearn.preprocessing.RobustScaler()
)  # RobustScaler() seems to be most robust ;)
# Mass of a proton in Dalton; used to convert m/z + charge to neutral mass.
PROTON = 1.00727646677
class SVMWrapper(dict):
    """
    Percolator-like SVM post-processor for unified Ursgal result CSVs.

    The instance itself is a dict that holds all options/parameters
    (kernel, c, gamma, fdr_cutoff, ...), filled either by parse_options()
    (command line) or by the caller.
    """

    def __init__(self):
        # name of the score column added to the output CSV
        self._svm_score_name = "SVMscore"
        self.counter = {  # counting the # of possible training PSMs
            "target": 0,
            "decoy": 0,
            "positive": 0,
            "negative": 0,
            "unknown": 0,
            "parsed PSMs": 0,
        }
        # maps scaled feature tuple -> SVM decision-function score
        self.results = {}
        self.shitty_decoy_seqs = set()  # is overwritten by find_shitty_decoys()
        self.mgf_lookup = {}
        self.pep_to_mz = {}
        # only parse CLI options when this module is run as a script
        if __name__ == "__main__":
            self.parse_options()  # parse command line args and set options
            self.set_input_csv()
        self.observed_charges = set()
        self.used_extra_fields = set()
        self.decoy_train_prob = (
            None  # probability to include decoy PSMs as negative training examples
        )
        self.maximum_proteins_per_line = 0
        self.tryptic_aas = set(["R", "K", "-"])
        self.delim_regex = re.compile(
            r"<\|>|\;"
        )  # regex to split a line by both ";" and "<|>"
        return
    def parse_options(self):
        """
        Parse the command line args for options/parameters and merge them
        into self (this class is a dict).

        "gamma" is converted to float if possible; otherwise it must be the
        literal string "auto".
        """
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "-i",
            "--input_csv",
            type=str,
            help="Input CSV path(s)",
            required=True,
            nargs="+",
        )
        parser.add_argument(
            "-o", "--output_csv", type=str, help="Output CSV path", required=True
        )
        parser.add_argument(
            "-k",
            "--kernel",
            type=str,
            default="rbf",
            help='SVM kernel type ("rbf", "linear", "poly" or "sigmoid")',
        )
        parser.add_argument(
            "-c", type=float, default=1.0, help="Penalty parameter C of the error term"
        )
        parser.add_argument(
            "-g",
            "--gamma",
            type=str,
            default="auto",
            help="Gamma parameter of the SVM.",
        )
        parser.add_argument(
            "-r",
            "--mb_ram",
            type=float,
            default=4000,
            help="Available RAM in megabytes, for SVM calculation",
        )
        parser.add_argument(
            "-f",
            "--fdr_cutoff",
            type=float,
            default=0.01,
            help="Target PSMs with a lower FDR will be used as a "
            "positive training set",
        )
        parser.add_argument(
            "-x",
            "--columns_as_features",
            type=str,
            nargs="+",
            default=[
                "MS-GF:RawScore",
                "MS-GF:DeNovoScore",
                "MS-GF:SpecEValue",
                "MS-GF:EValue",
                "OMSSA:evalue",
                "OMSSA:pvalue",
                "X\!Tandem:expect",
                "X\!Tandem:hyperscore",
            ],
            help="Columns that should be used as a feature directly "
            "(e.g. secondary scores). Will be converted to float",
        )
        parser.add_argument(
            "-d",
            "--dump_svm_matrix",
            type=str,
            default=False,
            help="Dump SVM matrix in PIN (Percolator input) format "
            "to the specified path, mostly for debugging "
            "and benchmarking.",
        )
        arg_dict = vars(parser.parse_args())  # convert to dict
        self.update(arg_dict)
        try:
            self["gamma"] = float(self["gamma"])
        except ValueError:
            assert (
                self["gamma"] == "auto"
            ), "Invalid gamma param: " '"{0}", using "auto" instead.'.format(
                self["gamma"]
            )
def set_input_csv(self):
"""
distinguishes one vs. many unified input csv files and either
sets the single csv as input, or merges all csvs and sets
the merged csv as input.
"""
if len(self["input_csv"]) > 1:
raise Exception("You must only specify *one* unified CSV file!")
self.csv_path = self["input_csv"][0]
print("Using input file", self.csv_path)
    def find_shitty_decoys(self):
        """
        Finds and notes decoys that share their sequence with a target PSM.

        Also counts the number of targets and decoys to get a quick estimate
        of how many positive/negative training examples can be "claimed",
        and collects all observed charge states (self.observed_charges).
        """
        target_seqs = set()
        decoy_seqs = set()
        with open(self.csv_path, "r") as f:
            reader = csv.DictReader(f)
            # best scores first, according to the detected sort order
            sorted_reader = sorted(
                reader,
                reverse=self["bigger_scores_better"],
                key=lambda d: float(d[self.col_for_sorting]),
            )
            for row in sorted_reader:
                self.observed_charges.add(int(row["Charge"]))
                if row_is_decoy(row):
                    decoy_seqs.add(unify_sequence(row["Sequence"]))
                    self.counter["decoy"] += 1
                else:
                    target_seqs.add(unify_sequence(row["Sequence"]))
                    self.counter["target"] += 1
        # sequences that occur as both target and decoy are unusable for
        # training and are excluded downstream
        self.shitty_decoy_seqs = target_seqs.intersection(decoy_seqs)
        if len(self.shitty_decoy_seqs) > 0:
            print(
                "Warning! Found {0} sequences that are target AND decoy "
                "(immutable peptides?). These will not be used for training.\n".format(
                    len(self.shitty_decoy_seqs)
                )
            )
        return
    def determine_csv_sorting(self):
        """
        Detect which column of the input CSV to sort/score by, and whether
        bigger values are better (stored in self["bigger_scores_better"]).

        Also collects the user-specified extra feature columns that actually
        exist in the CSV (self.used_extra_fields).
        """
        with open(self.csv_path, "r") as in_file:
            reader = csv.DictReader(in_file)
            (
                self.col_for_sorting,
                self["bigger_scores_better"],
            ) = get_score_colname_and_order(reader.fieldnames)
        # avoid clashing with an already existing "SVMscore" column
        if self.col_for_sorting == self._svm_score_name:
            self._svm_score_name = self._svm_score_name + "2"
        print(
            "CSV will be sorted by column {0} (reverse={1}"
            ")".format(self.col_for_sorting, self["bigger_scores_better"])
        )
        for feat in self["columns_as_features"]:
            if feat in reader.fieldnames and feat != self.col_for_sorting:
                self.used_extra_fields.add(feat)
def sort_by_rank(self, rowdict):
score = float(rowdict[self.col_for_sorting])
spec_title = rowdict["Spectrum Title"]
return (spec_title, score)
@staticmethod
def parse_protein_ids(csv_field, sep="<|>"):
"""
Turns the unified CSV column "Protein ID"
into a set of all protein IDs.
"""
clean = csv_field.replace("decoy_", "").strip()
prot_id_set = set(clean.split(sep))
return prot_id_set
def count_intra_set_features(self):
"""
intra-set features as calculated by Percolator:
- num_pep: Number of PSMs for which this is the best scoring peptide.
- num_prot: Number of times the matched protein matches other PSMs.
- pep_site: Number of different peptides that match this protein.
own ideas:
- pep_charge_states: in how many charge states was the peptide found?
- seq_mods: in how many mod states was the AA-sequence found?
- num_spec: Number of times the matched spectrum matches other peptides.
"""
print("Counting intra-set features...")
self.num_pep = defaultdict(int)
self.num_prot = defaultdict(set)
self.pep_site = defaultdict(set)
self.score_list_dict = defaultdict(list)
self.pep_charge_states = defaultdict(set)
self.seq_mods = defaultdict(set)
self.num_spec = defaultdict(set)
with open(self.csv_path, "r") as f:
reader = csv.DictReader(f)
previous_spec_title = None
rows_of_spectrum = []
for row in sorted(
reader, reverse=self["bigger_scores_better"], key=self.sort_by_rank
):
if unify_sequence(row["Sequence"]) in self.shitty_decoy_seqs:
continue
current_spec_title = row["Spectrum Title"]
if current_spec_title != previous_spec_title:
# the next spectrum started, so let's process the info we
# collected for the previous spectrum:
score_list = [
field_to_bayes_float(r[self.col_for_sorting])
for r in rows_of_spectrum
]
self.score_list_dict[previous_spec_title] = score_list
for rank, line in enumerate(rows_of_spectrum):
# print("\t".join([
# str(rank), line['Spectrum Title'], line[self.col_for_sorting]
# ]))
uni_sequence = unify_sequence(line["Sequence"])
peptide = (uni_sequence, line["Modifications"])
# multiple proteins are separated by <|>
# ignore start_stop_pre_post part since it depends on the peptide
# and not the protein (i.e. _233_243_A_R)
proteins = set(
line["Protein ID"].replace("decoy_", "").split(";")
)
# old unify csv format:
# proteins = self.parse_protein_ids(
# line['proteinacc_start_stop_pre_post_;']
# )
if len(proteins) > self.maximum_proteins_per_line:
self.maximum_proteins_per_line = len(proteins)
if rank == 0:
# this is the 'best' peptide for that spectrum
self.num_pep[peptide] += 1
for protein in proteins:
self.num_prot[protein].add(
(
line["Spectrum Title"],
uni_sequence,
line["Modifications"],
)
)
self.pep_site[protein].add(peptide)
self.pep_charge_states[peptide].add(int(row["Charge"]))
self.seq_mods[uni_sequence].add(row["Modifications"])
self.num_spec[line["Spectrum Title"]].add(peptide)
rows_of_spectrum = []
rows_of_spectrum.append(row)
previous_spec_title = current_spec_title
    def row_to_features(self, row):
        """
        Converts a unified CSV row to a list of SVM features (numbers only!).

        Features include the engine score, rank/deltCn-style score context,
        charge, mass deviation, missed cleavages (one-hot, capped at 5),
        enzymatic termini, peptide length, the intra-set counts computed by
        count_intra_set_features(), per-charge-state indicator columns and
        any user-specified extra score columns.
        """
        sequence = unify_sequence(row["Sequence"])
        charge = field_to_float(row["Charge"])
        score = field_to_bayes_float(row[self.col_for_sorting])
        calc_mz, exp_mz, calc_mass, exp_mass = get_mz_values(row)
        # pre/post AA fields may list several values separated by ";" or "<|>"
        pre_aa_field = row["Sequence Pre AA"]
        post_aa_field = row["Sequence Post AA"]
        all_pre_aas = set(re.split(self.delim_regex, pre_aa_field))
        all_post_aas = set(re.split(self.delim_regex, post_aa_field))
        # enzN/enzC: 1 if ALL reported flanking residues are tryptic (R/K/-)
        if any(pre_aa not in self.tryptic_aas for pre_aa in all_pre_aas):
            enzN = 0
        else:
            enzN = 1
        if any(post_aa not in self.tryptic_aas for post_aa in all_post_aas):
            enzC = 0
        else:
            enzC = 1
        n_missed_cleavages = len(
            [aa for aa in sequence[:-1] if aa in ["R", "K"]]
        )  # / len(sequence)
        missed_cleavages = [0] * 6
        try:
            missed_cleavages[n_missed_cleavages] = 1
        except IndexError:  # if a peptide has more than 5 missed cleavages
            # NOTE(review): the overflow bucket is set to 2 (not 1) —
            # presumably intentional to mark "out of range"; confirm.
            missed_cleavages[-1] = 2
        spectrum = row["Spectrum Title"].strip()
        # neutral peptide mass from experimental m/z and charge
        mass = (exp_mz * charge) - (charge - 1) * PROTON
        pep_len = len(sequence)
        delta_mass = calc_mass - exp_mass
        peptide = (sequence, row["Modifications"])
        proteins = self.parse_protein_ids(row["Protein ID"])
        # intra-set features collected by count_intra_set_features()
        num_pep = self.num_pep[peptide]
        pep_charge_states = len(self.pep_charge_states[peptide])
        seq_mods = len(self.seq_mods[sequence])
        num_spec = len(self.num_spec[row["Spectrum Title"]])
        num_prot = sum((len(self.num_prot[protein]) for protein in proteins))
        pep_site = sum((len(self.pep_site[protein]) for protein in proteins))
        user_specified_features = []
        for feat in self.used_extra_fields:
            if feat != self.col_for_sorting:
                try:
                    user_specified_features.append(field_to_float(row[feat]))
                except ValueError:
                    pass
        charges = defaultdict(int)
        for charge_n in sorted(self.pep_charge_states[peptide]):
            charges[charge_n] = 1
        if sequence in self.shitty_decoy_seqs:
            is_shitty = 1
        else:
            is_shitty = 0
        score_list = sorted(
            list(set(self.score_list_dict[spectrum])),
            reverse=self["bigger_scores_better"],
        )
        try:
            score_list_scaled = scale_scores(score_list)
            rank = score_list.index(score)
            deltLCn = (
                score_list_scaled[rank] - score_list_scaled[1]
            )  # Fractional difference between current and second best XCorr
            deltCn = (
                score_list_scaled[rank] - score_list_scaled[-1]
            )  # Fractional difference between current and worst XCorr
        except (ValueError, IndexError, AssertionError):
            # NaN values will be replaced by the column mean later
            # NaN values are entered when there is no ranking
            # e.g. when only one peptide was matched to the spectrum.
            rank, deltLCn, deltCn = np.nan, np.nan, np.nan
        features = [
            score,
            rank,
            deltCn,
            deltLCn,
            charge,
            delta_mass,  # / pep_len,
            abs(delta_mass),  # / pep_len,
            n_missed_cleavages / pep_len,
            missed_cleavages[0],
            missed_cleavages[1],
            missed_cleavages[2],
            missed_cleavages[3],
            missed_cleavages[4],
            missed_cleavages[5],
            enzN,
            enzC,
            mass,
            pep_len,
            num_pep,
            num_prot,
            pep_site,
            is_shitty,
            pep_charge_states,
            num_spec,
            seq_mods,
        ]
        # one indicator column per charge state observed in the whole dataset
        for charge_n in self.observed_charges:
            features.append(charges[charge_n])
        return features + user_specified_features
    def collect_data(self):
        """
        Parse the unified csv file and collect features from each row.

        Builds the standardized feature matrix self.X (NaNs imputed with
        column means, then scaled with SCALER) and the parallel training
        category array self.categories (1 / 0 / -1, see get_psm_category).
        Duplicate feature vectors are skipped. Optionally dumps the raw
        matrix in PIN format.
        """
        categories = []
        list_of_feature_lists = []
        feature_sets = set()
        with open(self.csv_path, "r") as f:
            reader = csv.DictReader(f)
            # collecting some stats for FDR calculation:
            self.PSM_count = 0
            self.decoy_count = 0
            if self["dump_svm_matrix"]:
                self.init_svm_matrix_dump()
                additional_matrix_info = []
            for i, row in enumerate(
                sorted(
                    reader,
                    reverse=self["bigger_scores_better"],
                    key=lambda d: float(d[self.col_for_sorting]),
                )
            ):
                features = self.row_to_features(row)
                # skip exact duplicates of an already-seen feature vector
                if tuple(features) in feature_sets:
                    continue
                feature_sets.add(tuple(features))
                category, psm_FDR = self.get_psm_category(row)
                list_of_feature_lists.append(features)
                categories.append(category)
                if self["dump_svm_matrix"]:
                    label = -1 if row_is_decoy(row) else 1
                    sequence = "{0}.{1}#{2}.{3}".format(
                        row["Sequence Pre AA"].strip(),
                        row["Sequence"].strip(),
                        row["Modifications"].strip(),
                        row["Sequence Post AA"].strip(),
                    )
                    additional_matrix_info.append(
                        {
                            "psm_id": row["Spectrum Title"].strip(),
                            "label": label,
                            "scannr": row["Spectrum Title"].strip().split(".")[-2],
                            "peptide": sequence,
                            "proteins": self.parse_protein_ids(row["Protein ID"]),
                        }
                    )
                if i % 1000 == 0:
                    score_val = float(row[self.col_for_sorting])
                    msg = (
                        "Generating feature matrix from input csv "
                        "(line ~{0}) with score {1} and FDR "
                        "{2}".format(i, score_val, psm_FDR)
                    )
                    print(msg, end="\r")
        # All data points are collected in one big matrix, to make standardization possible
        print("\nConverting feature matrix to NumPy array...")
        X_raw = np.array(list_of_feature_lists, dtype=float)
        print("Replacing empty/NaN values with the mean of each column...")
        # NOTE(review): sklearn's Imputer is deprecated/removed in newer
        # versions (replaced by SimpleImputer) — pin sklearn or migrate.
        self.nan_replacer = Imputer()
        self.nan_replacer.fit(X_raw)
        X_raw = self.nan_replacer.transform(X_raw)
        # Standardize input matrix to ease machine learning! Scaled data has zero mean and unit variance
        print("Standardizing input matrix...")
        self.scaler = SCALER.fit(X_raw)
        self.X = self.scaler.transform(X_raw)
        self.categories = np.array(categories)
        print()
        if self["dump_svm_matrix"]:
            print("Dumping SVM matrix to", self["dump_svm_matrix"])
            for i, matrix_row in enumerate(self.X):
                matrix_row_info = additional_matrix_info[i]
                self.dump_svm_matrix_row(
                    row=list(matrix_row),
                    psm_id=matrix_row_info["psm_id"],
                    label=matrix_row_info["label"],
                    scannr=matrix_row_info["scannr"],
                    peptide=matrix_row_info["peptide"],
                    proteins=matrix_row_info["proteins"],
                )
            print("Dumped SVM matrix to", self["dump_svm_matrix"])
        return
def init_svm_matrix_dump(self):
from misc import FEATURE_NAMES
colnames = ["PSMId", "label", "scannr"] + FEATURE_NAMES
colnames += ["charge{0}".format(c) for c in self.observed_charges]
for extra_field in sorted(self.used_extra_fields):
colnames += [extra_field]
colnames += ["peptide"]
for n_proteins in range(self.maximum_proteins_per_line):
colnames.append("proteinId{0}".format(n_proteins + 1))
self.matrix_csv_path = self["dump_svm_matrix"]
print("Dumping raw SVM input matrix to", self.matrix_csv_path)
with open(self.matrix_csv_path, "w") as f:
f.write("\t".join(colnames) + "\n")
def dump_svm_matrix_row(
self,
row=None,
psm_id=None,
label=None,
scannr=None,
peptide=None,
proteins=None,
):
full_row = [psm_id, label, scannr] + row + [peptide] + list(proteins)
with open(self.matrix_csv_path, "a") as f:
row_str = "\t".join(str(x) for x in full_row) + "\n"
f.write(row_str)
    def get_psm_category(self, row):
        """
        Determines whether a PSM (csv row) should be used as a negative or
        positive training example.

        returns a (category, psm_FDR) tuple where category is
             1 - high-scoring target (positive training example)
             0 - not-high-scoring target (not usable for training)
            -1 - decoy (negative training example)
        """
        category = 0  # unknown (mix of true positives and false positives)
        self.PSM_count += 1  # for FDR calculation
        sequence = unify_sequence(row["Sequence"])
        psm_FDR = calc_FDR(self.PSM_count, self.decoy_count)
        if row_is_decoy(row):
            self.decoy_count += 1
            if psm_FDR <= 0.25 and sequence not in self.shitty_decoy_seqs:
                category = -1  # decoy (false positive hits)
                self.counter["negative"] += 1
            else:
                # beyond the 25% FDR cutoff, only sample decoys with a fixed
                # probability so negatives roughly stay at 2x the positives
                if not self.decoy_train_prob:
                    need_max = self.counter["positive"] * 2
                    have = self.counter["negative"]
                    still_there = self.counter["decoy"] - have
                    prob = need_max / still_there
                    if prob < 0.001:
                        prob = 0.001
                    self.decoy_train_prob = prob
                    print()
                    print(self.counter)
                    print("need max:", need_max)
                    print("have:", have)
                    print("still_there:", still_there)
                    print("probability:", self.decoy_train_prob)
                    print()
                if self.decoy_train_prob >= 1.0 or random() <= self.decoy_train_prob:
                    category = -1  # decoy (false positive hits)
                    self.counter["negative"] += 1
        else:  # row is target
            if psm_FDR <= self["fdr_cutoff"] and sequence not in self.shitty_decoy_seqs:
                category = 1  # high quality target (almost certainly true positives)
                self.counter["positive"] += 1
        if category == 0:
            self.counter["unknown"] += 1
        return (category, psm_FDR)
    def train(self, training_matrix, training_categories):
        """
        Train an SVC on the given feature matrix and category labels
        (1 = positive/target, -1 = negative/decoy) and return the fitted
        classifier.
        """
        counter = Counter(training_categories)
        msg = "Training {0} SVM on {1} target PSMs and {2} decoy PSMs" "...".format(
            self["kernel"], counter[1], counter[-1]
        )
        print(msg, end="\r")
        # specify the classification method (rbf and linear SVC seem to work best and are quite fast)
        classifier = svm.SVC(
            C=self["c"],
            kernel=self["kernel"],
            probability=False,  # we don't want to get probabilities later on -> faster
            cache_size=self["mb_ram"],  # available RAM in megabytes
            # decision_function_shape = 'ovr', # doesn't seem to matter
            # class_weight= 'balanced', # doesn't seem to matter
        )
        # train the SVC on our set of training data:
        classifier.fit(
            training_matrix,
            training_categories,
        )
        print(msg + " done!")
        return classifier
    def classify(self, classifier, psm_matrix):
        """
        Score every PSM feature vector with the trained classifier.

        The signed distance to the separating hyperplane
        (decision_function) is used as the SVM score and stored in
        self.results, keyed by the (scaled) feature tuple.
        """
        msg = "Classifying {0} PSMs...".format(len(psm_matrix))
        print(msg, end="\r")
        for i, row in enumerate(psm_matrix):
            # get the distance to the separating SVM hyperplane and use it as a score:
            svm_score = classifier.decision_function(np.array([row]))[0]
            features = tuple(row)
            if features not in self.results:
                self.results[features] = svm_score
            else:
                print(
                    "Warning! This combination of features already has a predicted probability! "
                    "Previous svm_score: {0:f} - Current svm_score: {1:f}"
                    "".format(self.results[tuple(row)], svm_score)
                )
                # take the mean value, no idea how to handle this better, but it never happened so far...
                self.results[features] = (self.results[features] + svm_score) / 2.0
        print(msg + " done!")
        return
    def add_scores_to_csv(self):
        """
        Write the output CSV: a copy of the input CSV with one extra column
        (self._svm_score_name) holding the SVM score of each row.

        Each row's features are re-computed, imputed and scaled exactly as
        in collect_data() so they can be looked up in self.results.
        """
        outfname = os.path.basename(self["output_csv"])
        print("Writing output csv {0} ...".format(outfname))
        msg = "Writing output csv {0} (line ~{1})..."
        with open(self["output_csv"], "w", newline="") as out_csv, open(
            self.csv_path, "r"
        ) as in_csv:
            reader = csv.DictReader(in_csv)
            writer = csv.DictWriter(out_csv, reader.fieldnames + [self._svm_score_name])
            writer.writeheader()
            for i, row in enumerate(reader):
                if i % 1000 == 0:
                    print(msg.format(outfname, i), end="\r")
                features = self.nan_replacer.transform(
                    np.array([self.row_to_features(row)])
                )
                features_scaled = tuple(list(self.scaler.transform(features)[0]))
                SVMScore = self.results[features_scaled]
                row[self._svm_score_name] = SVMScore
                writer.writerow(row)
        print("\n")
        return
def __str__(self):
out_str = ["\n\tpyPercolator Options:"]
for option, value in self.items():
out_str.append("{0:·<25}{1}".format(option, value))
return "\n".join(out_str)
if __name__ == "__main__":
    # End-to-end driver: train an SVM on high-confidence PSMs/decoys of one
    # half of the data and score the other half, then swap halves (2-fold CV),
    # finally writing the scores back into the CSV.
    s = SVMWrapper()
    print(s)  # print parameter/settings overview
    s.determine_csv_sorting()
    s.find_shitty_decoys()
    print("\nCounter:")
    print(s.counter)
    print()
    s.count_intra_set_features()
    s.collect_data()
    print(
        "Splitting data in half to avoid training and testing on the same features..."
    )
    # NOTE(review): StratifiedKFold(y, n_folds=...) is the pre-0.18
    # scikit-learn API -- confirm the pinned sklearn version.
    skfold = StratifiedKFold(s.categories, n_folds=2, shuffle=True)
    # use one half to score the other half, and vice versa:
    for i, (train_index, test_index) in enumerate(skfold):
        current_half = "1st" if i == 0 else "2nd"
        other_half = "2nd" if i == 0 else "1st"
        print(
            "\nUsing high-scoring PSMs and decoys of the {0} half to train...".format(
                current_half
            )
        )
        # category 0 rows are unlabeled; train only on labeled PSMs/decoys
        mask = s.categories[train_index] != 0
        train_categories = s.categories[train_index][mask]
        train_features = s.X[train_index][mask]
        svm_classifier = s.train(
            training_matrix=train_features,
            training_categories=train_categories,
        )
        print(
            "Using the trained SVM to classify all PSMs of the {0} half".format(
                other_half
            )
        )
        s.classify(
            svm_classifier,
            s.X[test_index],
        )
        if s["kernel"].lower() == "linear":
            print()  # print SVM coefficients (only works for linear kernel)
            print(svm_classifier.coef_)
            print()
    print("\nCounter:")
    print(s.counter)
    print()
    s.add_scores_to_csv()
| mit | 67e8f3bc7af13a5ee616ba75c56a3094 | 35.98939 | 105 | 0.514557 | 3.945395 | false | false | false | false |
ursgal/ursgal | example_scripts/grouped_search_example.py | 2 | 11223 | #!/usr/bin/env python3
# encoding: utf-8
import ursgal
import csv
from collections import defaultdict as ddict
import os
import glob
import math
from collections import defaultdict as ddict
# Shared UController parameters: the target/decoy FASTA database, the result
# filter (keep target hits with PEP <= 0.01), and the searched modifications.
params = {
    "database": os.path.join(
        os.pardir,
        "example_data",
        "Creinhardtii_281_v5_5_CP_MT_with_contaminants_target_decoy.fasta",
    ),
    "csv_filter_rules": [
        ["Is decoy", "equals", "false"],
        ["PEP", "lte", 0.01],
    ],
    # Modifications that should be included in the search
    "modifications": [
        "C,fix,any,Carbamidomethyl",
        "M,opt,any,Oxidation",
        "N,opt,any,Deamidated",
        "Q,opt,any,Deamidated",
        "E,opt,any,Methyl",
        "K,opt,any,Methyl",
        "R,opt,any,Methyl",
        "*,opt,Prot-N-term,Acetyl",
        "S,opt,any,Phospho",
        "T,opt,any,Phospho",
    ],
}
# We specify all search engines and validation engines that we want
# to use in a list (version numbers might differ on windows or mac):
search_engines = [
    "omssa",
    "xtandem_piledriver",
    "msgfplus_v9979",
    # 'myrimatch_2_1_138',
    "msamanda_1_0_0_5243",
]
validation_engines = [
    "percolator_2_08",
    "qvality",
]
# Groups that are evaluated seperately; "0" is the unmodified group, the
# others are keyed by the modification name used in the result CSVs.
groups = {
    "0": "",
    "1": "Oxidation",
    "2": "Deamidated",
    "3": "Methyl",
    "4": "Acetyl",
    "5": "Phospho",
}
mass_spectrometer = "LTQ XL low res"
# Download locations for the example data: spectra via PeptideAtlas FTP,
# database via HTTP (see get_files()).
get_params = {
    "ftp_url": "ftp.peptideatlas.org",
    "ftp_login": "PASS00269",
    "ftp_password": "FI4645a",
    "ftp_include_ext": [
        "JB_FASP_pH8_2-3_28122012.mzML",
        "JB_FASP_pH8_2-4_28122012.mzML",
        "JB_FASP_pH8_3-1_28122012.mzML",
        "JB_FASP_pH8_4-1_28122012.mzML",
    ],
    "ftp_output_folder": os.path.join(os.pardir, "example_data", "grouped_search"),
    "http_url": "https://www.sas.upenn.edu/~sschulze/Creinhardtii_281_v5_5_CP_MT_with_contaminants_target_decoy.fasta",
    "http_output_folder": os.path.join(os.pardir, "example_data"),
}
def get_files():
    """Fetch the example FASTA database (HTTP) and mzML spectra (FTP) when
    missing, and return the list of local mzML file paths."""
    uc = ursgal.UController(params=get_params)
    if os.path.exists(params["database"]) is False:
        uc.fetch_file(engine="get_http_files_1_0_0")
    if os.path.exists(get_params["ftp_output_folder"]) is False:
        os.makedirs(get_params["ftp_output_folder"])
        uc.fetch_file(engine="get_ftp_files_1_0_0")
    spec_files = []
    for mzML_file in glob.glob(os.path.join(get_params["ftp_output_folder"], "*.mzML")):
        spec_files.append(mzML_file)
    return spec_files
def search(validation_engine):
    """
    Executes a grouped search on four example files from the
    data from Barth et al.
    usage:
        ./grouped_search_example.py
    Searches for peptides including the following potential modifications:
    oxidation of M,
    deamidation of N/Q,
    methylation of E/K/R,
    N-terminal acetylation,
    phosphorylation of S/T.
    After the search, each type of modification is validated separately.

    NOTE(review): this function reads the module-level ``spec_files`` list
    that is created in the ``__main__`` block -- confirm call order.
    """
    # Initializing the ursgal UController class with
    # our specified modifications and mass spectrometer
    uc = ursgal.UController(
        profile=mass_spectrometer, params=params  # 'LTQ XL low res' profile!
    )
    # complete workflow:
    # every spectrum file is searched with every search engine,
    # results are seperated into groups and validated seperately,
    # validated results are merged and filtered for targets and PEP <= 0.01.
    # In the end, all filtered results from all spectrum files are merged
    # for validation_engine in validation_engines:
    result_files = []
    for n, spec_file in enumerate(spec_files):
        validated_results = []
        for search_engine in search_engines:
            unified_search_results = uc.search(
                input_file=spec_file,
                engine=search_engine,
            )
            # Calculate PEP for every group seperately, therefore need to split
            # the csv first
            group_list = sorted(groups.keys())
            for p, group in enumerate(group_list):
                if group == "0":
                    # unmodified group: exclude every tracked modification
                    uc.params["csv_filter_rules"] = [
                        ["Modifications", "contains_not", "{0}".format(groups["1"])],
                        ["Modifications", "contains_not", "{0}".format(groups["2"])],
                        ["Modifications", "contains_not", "{0}".format(groups["3"])],
                        ["Modifications", "contains_not", "{0}".format(groups["4"])],
                        ["Modifications", "contains_not", "{0}".format(groups["5"])],
                    ]
                else:
                    # keep only PSMs carrying exactly this group's modification
                    uc.params["csv_filter_rules"] = [
                        ["Modifications", "contains", "{0}".format(groups[group])]
                    ]
                    for other_group in group_list:
                        if other_group == "0" or other_group == group:
                            continue
                        uc.params["csv_filter_rules"].append(
                            [
                                "Modifications",
                                "contains_not",
                                "{0}".format(groups[other_group]),
                            ],
                        )
                uc.params["prefix"] = "grouped-{0}".format(group)
                filtered_results = uc.execute_misc_engine(
                    input_file=unified_search_results, engine="filter_csv"
                )
                uc.params["prefix"] = ""
                validated_search_results = uc.validate(
                    input_file=filtered_results,
                    engine=validation_engine,
                )
                validated_results.append(validated_search_results)
        uc.params["prefix"] = "file{0}".format(n)
        validated_results_from_all_engines = uc.execute_misc_engine(
            input_file=sorted(validated_results),
            engine="merge_csvs",
        )
        uc.params["prefix"] = ""
        uc.params["csv_filter_rules"] = [
            ["Is decoy", "equals", "false"],
            ["PEP", "lte", 0.01],
        ]
        filtered_validated_results = uc.execute_misc_engine(
            input_file=validated_results_from_all_engines, engine="filter_csv"
        )
        result_files.append(filtered_validated_results)
    # NOTE(review): this call passes ``input_files`` (plural) while every
    # other execute_misc_engine call uses ``input_file`` -- confirm against
    # the ursgal API.
    results_all_files = uc.execute_misc_engine(
        input_files=sorted(result_files),
        engine="merge_csvs",
    )
    return results_all_files
def analyze(collector):
    """
    Simple analysis script for the grouped search: counts the number of
    identified peptides (unique combination of peptide sequence and
    modifications) and PSMs (additionally including the spectrum ID) per
    modification class, and writes the counts to grouped_results.csv.
    """
    mod_list = ["Oxidation", "Deamidated", "Methyl", "Acetyl", "Phospho"]
    fieldnames = (
        ["approach", "count_type", "validation_engine", "unmodified", "multimodified"]
        + mod_list
        + ["total"]
    )
    # NOTE(review): the output file handle is never closed explicitly.
    csv_writer = csv.DictWriter(open("grouped_results.csv", "w"), fieldnames)
    csv_writer.writeheader()
    uc = ursgal.UController()
    uc.params["validation_score_field"] = "PEP"
    uc.params["bigger_scores_better"] = False
    # Count the number of identified peptides and PSMs for the different modifications
    # Spectra with multiple PSMs are sanitized, i.e. only the PSM with best PEP score is counted
    # and only if the best hit has a PEP that is at least two orders of
    # magnitude smaller than the others
    for validation_engine, result_file in collector.items():
        counter_dict = {"psm": ddict(set), "pep": ddict(set)}
        grouped_psms = uc._group_psms(
            result_file, validation_score_field="PEP", bigger_scores_better=False
        )
        for spec_title, grouped_psm_list in grouped_psms.items():
            best_score, best_line_dict = grouped_psm_list[0]
            if len(grouped_psm_list) > 1:
                # more than one PSM for this spectrum: decide whether the best
                # hit is unambiguous enough to be counted
                second_best_score, second_best_line_dict = grouped_psm_list[1]
                best_peptide_and_mod = (
                    best_line_dict["Sequence"] + best_line_dict["Modifications"]
                )
                second_best_peptide_and_mod = (
                    second_best_line_dict["Sequence"]
                    + second_best_line_dict["Modifications"]
                )
                if best_peptide_and_mod == second_best_peptide_and_mod:
                    # identical peptidoform reported twice -> unambiguous
                    line_dict = best_line_dict
                elif best_line_dict["Sequence"] == second_best_line_dict["Sequence"]:
                    if best_score == second_best_score:
                        line_dict = best_line_dict
                    else:
                        # require >= 2 orders of magnitude difference in PEP
                        if (-1 * math.log10(best_score)) - (
                            -1 * math.log10(second_best_score)
                        ) >= 2:
                            line_dict = best_line_dict
                        else:
                            continue
                else:
                    if (-1 * math.log10(best_score)) - (
                        -1 * math.log10(second_best_score)
                    ) >= 2:
                        line_dict = best_line_dict
                    else:
                        continue
            else:
                line_dict = best_line_dict
            # classify the PSM as unmodified / single-mod / multimodified
            count = 0
            for mod in mod_list:
                if mod in line_dict["Modifications"]:
                    count += 1
            key_2_add = ""
            if count == 0:
                key_2_add = "unmodified"
            elif count >= 2:
                key_2_add = "multimodified"
            elif count == 1:
                for mod in mod_list:
                    if mod in line_dict["Modifications"]:
                        key_2_add = mod
                        break
            # for peptide identification comparison
            counter_dict["pep"][key_2_add].add(
                line_dict["Sequence"] + line_dict["Modifications"]
            )
            # for PSM comparison
            counter_dict["psm"][key_2_add].add(
                line_dict["Spectrum Title"]
                + line_dict["Sequence"]
                + line_dict["Modifications"]
            )
        for counter_key, count_dict in counter_dict.items():
            dict_2_write = {
                "approach": "grouped",
                "count_type": counter_key,
                "validation_engine": validation_engine,
            }
            total_number = 0
            for key, obj_set in count_dict.items():
                dict_2_write[key] = len(obj_set)
                total_number += len(obj_set)
            dict_2_write["total"] = total_number
            csv_writer.writerow(dict_2_write)
    return
if __name__ == "__main__":
    # spec_files is consumed as a module-level global by search()
    spec_files = get_files()
    collector = {}
    # run the full grouped workflow once per validation engine
    for validation_engine in validation_engines:
        results_all_files = search(validation_engine)
        print(
            ">>> ",
            "final results for {0}".format(validation_engine),
            " were written into:",
        )
        print(">>> ", results_all_files)
        collector[validation_engine] = results_all_files
    analyze(collector)
    print(">>> ", "number of identified peptides and PSMs were written into:")
    print(">>> ", "grouped_results.csv")
| mit | 1f8be2c96c299cb54a1d162ebbb0756b | 35.796721 | 119 | 0.538893 | 3.744745 | false | false | false | false |
mitdbg/aurum-datadiscovery | knowledgerepr/lite_graph.py | 1 | 1487 | from bitarray import bitarray
from enum import Enum
from collections import defaultdict
# Width of each edge's type bitmask: one bit per EdgeType member below.
num_edge_types = 4
class EdgeType(Enum):
    """Relationship kinds an edge can encode.

    The numeric value doubles as the bit position inside each edge's
    type bitarray (see num_edge_types / LiteGraph).
    """

    SCHEMA_SIM = 0
    CONTENT_SIM = 1
    PKFK = 2
    SEMANTIC = 3
class LiteGraph:
    """Lightweight directed graph keyed by node id.

    Each node maps its neighbors to a fixed-size bitarray with one bit per
    EdgeType, so multiple edge types between the same pair of nodes share a
    single adjacency entry.
    """

    def __init__(self):
        self._node_count = 0
        self._edge_count = 0
        self._node_index = dict()

    def add_node(self, nid):
        """Register *nid* if unseen; its adjacency map starts empty."""
        if nid not in self._node_index:
            self._node_index[nid] = defaultdict(bitarray)
            self._node_count += 1

    def add_edge(self, source, target, type):
        """Add a directed edge of the given EdgeType; self-loops are ignored."""
        if source == target:
            return
        type = type.value
        self.add_node(source)
        self.add_node(target)
        if target not in self._node_index[source]:
            # first edge between this pair: allocate and set the type bitmask
            edge_type = bitarray(num_edge_types)
            edge_type.setall(False)
            edge_type[type] = True
            self._node_index[source][target] = edge_type
            self._edge_count += 1
        elif not self._node_index[source][target][type]:
            # BUGFIX: was `... is False`, an identity check against the False
            # singleton; a truthiness test is robust to the bitarray element
            # being any falsy value (e.g. 0).
            self._node_index[source][target][type] = True
            self._edge_count += 1

    def add_undirected_edge(self, source, target, type):
        """Add the edge in both directions (counted as two directed edges)."""
        self.add_edge(source, target, type)
        self.add_edge(target, source, type)

    def neighbors(self, nid, type):
        """Return the targets reachable from *nid* via an edge of *type*."""
        type = type.value
        return [
            target
            for target, edge_type in self._node_index[nid].items()
            if edge_type[type]
        ]
mitdbg/aurum-datadiscovery | ontomatch/process_utils.py | 1 | 1215 |
# Post-process ontology-matching output: keep only 'is_a' links from the
# links_massdata_dbpedia file, deduplicate table pairs regardless of order,
# and write one "table_a,table_b" line per unique pair.
# (BUGFIX: the final print statement was corrupted by fused metadata residue
# and has been restored.)
with open("/Users/ra-mit/development/discovery_proto/ontomatch/links_massdata_dbpedia", "r") as f:
    lines = f.readlines()
print("Total lines: " + str(len(lines)))
# 1) Keep only links whose relation token (second comma-separated token after
# the first closing parenthesis) is 'is_a'.
isalines = []
for l in lines:
    base = str(l).split(")")
    tokens = base[1].split(",")
    r = tokens[1].strip().replace("'", "")
    if r == 'is_a':
        isalines.append(l)
print("Filtered lines: " + str(len(isalines)))
# 2) Deduplicate pairs of tables, treating (a, b) and (b, a) as the same pair;
# both orderings are recorded in `seen` so the reversed pair is skipped too.
seen = set()
clean_lines = []
for l in isalines:
    l = l.replace("(", "")
    l = l.replace(")", "")
    tokens = l.split(",")
    table_a = tokens[1].strip().replace("'", "")
    table_b = tokens[5].strip().replace("'", "")
    if table_a + table_b not in seen or table_b + table_a not in seen:
        clean_line = table_a + "," + table_b
        clean_lines.append(clean_line)
        seen.add(table_a + table_b)
        seen.add(table_b + table_a)
print("Total unique pairs: " + str(len(clean_lines)))
# 3) Write the unique pairs out, one per line.
with open("/Users/ra-mit/development/discovery_proto/ontomatch/links_massdata_clean", "w") as g:
    for cl in clean_lines:
        g.write(cl + '\n')
print("Done")
mitdbg/aurum-datadiscovery | DoD/data_processing_utils.py | 1 | 28280 | import pandas as pd
from collections import defaultdict
import math
from DoD.utils import FilterType
import config as C
import os
import psutil
from tqdm import tqdm
import time
import pprint
pp = pprint.PrettyPrinter(indent=4)
# Cache reading and transformation of DFs
cache = dict()
# Absolute byte budget for a join's output: configured fraction of total RAM
memory_limit_join_processing = C.memory_limit_join_processing * psutil.virtual_memory().total
# CSV field separator; overridable via configure_csv_separator()
data_separator = C.separator
# On-disk scratch file where chunked joins append their partial results
tmp_spill_file = "./tmp_spill_file.tmp"
# tmp_df_chunk = "./chunk_df"
def configure_csv_separator(separator):
    """Set the module-wide CSV field separator used when reading relations."""
    global data_separator
    data_separator = separator
def estimate_output_row_size(a: pd.DataFrame, b: pd.DataFrame):
    """Estimate the byte width of one output row of joining *a* and *b*.

    Each input's per-row size is approximated as its shallow memory footprint
    divided by its row count; since the join output carries the columns of
    both inputs, the two estimates are summed.
    """
    bytes_per_row_a = float(sum(a.memory_usage(deep=False))) / len(a)
    bytes_per_row_b = float(sum(b.memory_usage(deep=False))) / len(b)
    return bytes_per_row_a + bytes_per_row_b
def does_join_fit_in_memory(chunk, ratio, o_row_size):
    """Extrapolate the full join size from a sampled chunk.

    *chunk* is the number of output rows produced by joining a sample,
    *ratio* the sampled fraction of the right relation, and *o_row_size*
    the estimated bytes per output row. Returns ``(fits, size_gb)`` where
    ``fits`` is False once the projected size reaches the module memory
    limit.
    """
    projected_rows = float(chunk) / ratio
    projected_bytes = projected_rows * o_row_size
    projected_gb = projected_bytes / (1024 ** 3)
    fits = projected_bytes < memory_limit_join_processing
    return fits, projected_gb
def join_ab_on_key_optimizer(a: pd.DataFrame, b: pd.DataFrame, a_key: str, b_key: str,
                             suffix_str=None, chunksize=C.join_chunksize, normalize=True):
    """Join *a* and *b* on the given keys with memory/time safeguards.

    Keys are lowercased and NaN/null-like key rows dropped, then the join
    size is estimated from the first chunk of a shuffled *b*:
    - if the projected output fits in memory, the join is done in one call;
    - otherwise this implementation returns False (the chunk-to-disk spill
      path below is effectively disabled);
    - a join whose estimated total time exceeds 3 minutes is also aborted
      with False.
    Returns the joined DataFrame, or False when the join was aborted.
    """
    # clean up temporal stuff -- i.e., in case there was a crash
    try:
        # os.remove(tmp_df_chunk)
        os.remove(tmp_spill_file)
    except FileNotFoundError:
        pass
    # if normalize:
    a[a_key] = a[a_key].apply(lambda x: str(x).lower())
    try:
        b[b_key] = b[b_key].apply(lambda x: str(x).lower())
    except KeyError:
        print("COLS: " + str(b.columns))
        print("KEY: " + str(b_key))
    # drop NaN/Null values (string 'nan'/'null' appear after the lowercasing above)
    a.dropna(subset=[a_key], inplace=True)
    b.dropna(subset=[b_key], inplace=True)
    a_drop_indices = [i for i, el in enumerate(a[a_key]) if el == 'nan' or el == 'null' or el is pd.NaT]
    b_drop_indices = [i for i, el in enumerate(b[b_key]) if el == 'nan' or el == 'null' or el is pd.NaT]
    a.drop(a_drop_indices, inplace=True)
    b.drop(b_drop_indices, inplace=True)
    # NOTE(review): reset_index without inplace/assignment is a no-op here --
    # presumably `a = a.reset_index(drop=True)` was intended.
    a.reset_index(drop=True)
    b.reset_index(drop=True)
    if len(a) == 0 or len(b) == 0:
        return False
    # Estimate output join row size
    o_row_size = estimate_output_row_size(a, b)
    # join by chunks
    def join_chunk(chunk_df, header=False):
        # print("First chunk? : " + str(header))
        # print("a: " + str(len(a)))
        # print("b: " + str(len(chunk_df)))
        # worst_case_estimated_join_size = chunksize * len(a) * o_row_size
        # if worst_case_estimated_join_size >= memory_limit_join_processing:
        #     print("Can't join sample. Size: " + str(worst_case_estimated_join_size))
        #     return False  # can't even join a sample
        # print(a[a_key].head(10))
        # print(chunk_df[b_key].head(10))
        target_chunk = pd.merge(a, chunk_df, left_on=a_key, right_on=b_key, sort=False, suffixes=('', suffix_str))
        if header:  # header is only activated the first time. We only want to do this check the first time
            # sjt = time.time()
            fits, estimated_join_size = does_join_fit_in_memory(len(target_chunk), (float)(chunksize/len(b)), o_row_size)
            # ejt = time.time()
            # join_time = (float)((ejt - sjt) * (float)(len(b)/chunksize))
            # print("Est. join time: " + str(join_time))
            print("Estimated join size: " + str(estimated_join_size))
            # if estimated_join_size < 0.01:
            # print("TC: " + str(len(target_chunk)))
            # print("Ratio: " + str((float)(chunksize/len(b))))
            # print("row size: " + str(o_row_size))
            # print("FITS? : " + str(fits))
            if fits:
                return True
            else:
                return False
        # non-first chunks are appended to the on-disk spill file
        target_chunk.to_csv(tmp_spill_file, mode="a", header=header, index=False)
        return False
    def chunk_reader(df):
        # yield df in consecutive slices of `chunksize` rows
        len_df = len(df)
        init_index = 0
        num_chunks = math.ceil(len_df / chunksize)
        for i in range(num_chunks):
            chunk_df = df[init_index:init_index + chunksize]
            init_index += chunksize
            yield chunk_df
    # swap row order of b to approximate uniform sampling
    b = b.sample(frac=1).reset_index(drop=True)
    first_chunk = True
    all_chunks = [chunk for chunk in chunk_reader(b)]
    # for chunk in tqdm(all_chunks):
    for chunk in all_chunks:
        scp = time.time()
        if first_chunk:
            fits_in_memory = join_chunk(chunk, header=True)
            first_chunk = False
            if fits_in_memory:  # join in memory and exit
                return join_ab_on_key(a, b, a_key, b_key, suffix_str=suffix_str, normalize=False)
            else:  # just ignore no-fit in memory chunks
                return False
        else:
            join_chunk(chunk)
        ecp = time.time()
        chunk_time = ecp - scp
        estimated_total_time = chunk_time * len(all_chunks)
        print("ETT: " + str(estimated_total_time))
        if estimated_total_time > 60 * 3:  # no more than 3 minutes
            return False  # cancel this join without breaking the whole pipeline
    print("Reading written down relation: ")
    # [join_chunk(chunk) for chunk in chunk_reader(b)]
    joined = pd.read_csv(tmp_spill_file, encoding='latin1', sep=data_separator)
    # clean up temporal stuff
    try:
        # os.remove(tmp_df_chunk)
        os.remove(tmp_spill_file)
    except FileNotFoundError:
        pass
    return joined
def join_ab_on_key_spill_disk(a: pd.DataFrame, b: pd.DataFrame, a_key: str, b_key: str, suffix_str=None, chunksize=C.join_chunksize):
    """Join *a* and *b* on the given keys by streaming *b* in chunks.

    Each chunk of *b* is merged against the whole of *a* and the partial
    result is appended to the on-disk spill file, which is then read back
    and returned as the joined DataFrame. Key columns are lowercased (as
    strings) before merging.
    """
    # clean up temporal stuff -- i.e., in case there was a crash
    try:
        # os.remove(tmp_df_chunk)
        os.remove(tmp_spill_file)
    except FileNotFoundError:
        pass
    a[a_key] = a[a_key].apply(lambda x: str(x).lower())
    try:
        b[b_key] = b[b_key].apply(lambda x: str(x).lower())
    except KeyError:
        print("COLS: " + str(b.columns))
        print("KEY: " + str(b_key))
    # Calculate target columns
    # a_columns = set(a.columns)
    # b_columns = pd.Index([column if column not in a_columns else column + suffix_str for column in b.columns])
    #
    # # Write to disk the skeleton of the target
    # df_target = pd.DataFrame(columns=(a.columns.append(b_columns)))
    # df_target.to_csv(tmp_spill_file, index_label=False)
    # join by chunks
    def join_chunk(chunk_df, header=False):
        # merge one chunk of b against the full a and append it to the spill file
        # chunk_df[b_key] = chunk_df[b_key].apply(lambda x: str(x).lower())  # transform to string for join
        target_chunk = pd.merge(a, chunk_df, left_on=a_key, right_on=b_key, sort=False, suffixes=('', suffix_str))
        target_chunk.to_csv(tmp_spill_file, mode="a", header=header, index=False)
    def chunk_reader(df):
        # yield df in consecutive slices of `chunksize` rows
        len_df = len(df)
        init_index = 0
        num_chunks = math.ceil(len_df / chunksize)
        for i in range(num_chunks):
            chunk_df = df[init_index:init_index + chunksize]
            init_index += chunksize
            yield chunk_df
    # b.to_csv(tmp_df_chunk, index_label=False)
    # chunk_reader = pd.read_csv(tmp_df_chunk, encoding='latin1', sep=data_separator, chunksize=chunksize)
    first_chunk = True
    for chunk in chunk_reader(b):
        if first_chunk:
            # only the first chunk writes the CSV header
            join_chunk(chunk, header=True)
            first_chunk = False
        else:
            join_chunk(chunk)
    # [join_chunk(chunk) for chunk in chunk_reader(b)]
    joined = pd.read_csv(tmp_spill_file, encoding='latin1', sep=data_separator)
    # clean up temporal stuff
    try:
        # os.remove(tmp_df_chunk)
        os.remove(tmp_spill_file)
    except FileNotFoundError:
        pass
    return joined
def join_ab_on_key(a: pd.DataFrame, b: pd.DataFrame, a_key: str, b_key: str, suffix_str=None, normalize=True):
    """Inner-join *a* and *b* on the given key columns.

    When *normalize* is True, both key columns are first lowercased (as
    strings) in place, so the match is case-insensitive. Overlapping
    non-key column names coming from *b* receive *suffix_str*.
    """
    if normalize:
        a[a_key] = a[a_key].apply(lambda value: str(value).lower())
        b[b_key] = b[b_key].apply(lambda value: str(value).lower())
    return pd.merge(
        a, b, how='inner', left_on=a_key, right_on=b_key, sort=False, suffixes=('', suffix_str)
    )
# def update_relation_cache(relation_path, df):
# if relation_path in cache:
# cache[relation_path] = df
def read_relation(relation_path):
    """Read the CSV at *relation_path*, memoizing the parsed DataFrame.

    NOTE: the cached object itself is returned, so callers share (and may
    mutate) one DataFrame instance; use read_relation_on_copy() for a
    private copy.
    """
    if relation_path in cache:
        df = cache[relation_path]
    else:
        df = pd.read_csv(relation_path, encoding='latin1', sep=data_separator)
        cache[relation_path] = df
    return df
def read_relation_on_copy(relation_path):
    """
    Like read_relation(), but returns a copy of the (possibly cached)
    DataFrame so the caller can mutate it freely.
    This is assuming that copying a DF is cheaper than reading it back from disk.
    :param relation_path: path to the CSV relation to read
    :return: a fresh copy of the parsed DataFrame
    """
    if relation_path in cache:
        df = cache[relation_path]
    else:
        df = pd.read_csv(relation_path, encoding='latin1', sep=data_separator)
        cache[relation_path] = df
    return df.copy()
def empty_relation_cache():
    """Drop every memoized DataFrame by rebinding the module-level cache."""
    global cache
    cache = {}
def get_dataframe(path):
    """Read the relation at *path* without caching it."""
    # TODO: only csv is supported
    df = pd.read_csv(path, encoding='latin1', sep=data_separator)
    return df
def _join_ab_on_key(a: pd.DataFrame, b: pd.DataFrame, a_key: str, b_key: str, suffix_str=None):
# First make sure to remove empty/nan values from join columns
# TODO: Generate data event if nan values are found
a_valid_index = (a[a_key].dropna()).index
b_valid_index = (b[b_key].dropna()).index
a = a.iloc[a_valid_index]
b = b.iloc[b_valid_index]
# Normalize join columns
# a_original = a[a_key].copy()
# b_original = b[b_key].copy()
a[a_key] = a[a_key].apply(lambda x: str(x).lower())
b[b_key] = b[b_key].apply(lambda x: str(x).lower())
joined = pd.merge(a, b, how='inner', left_on=a_key, right_on=b_key, sort=False, suffixes=('', suffix_str))
# # Recover format of original columns
# FIXME: would be great to do this, but it's broken
# joined[a_key] = a_original
# joined[b_key] = b_original
return joined
def apply_filter(relation_path, attribute, cell_value):
    """Return the rows of the relation at *relation_path* whose *attribute*
    equals *cell_value*; comparison is case-insensitive on stripped strings.
    Operates on a copy, so the cached relation is not mutated.
    """
    # if relation_path in cache:
    #     df = cache[relation_path]
    # else:
    #     df = pd.read_csv(relation_path, encoding='latin1', sep=data_separator)
    #     # store in cache
    #     cache[relation_path] = df
    df = read_relation_on_copy(relation_path)  # FIXME FIXE FIXME
    # df = get_dataframe(relation_path)
    df[attribute] = df[attribute].apply(lambda x: str(x).lower().strip())
    # update_relation_cache(relation_path, df)
    df = df[df[attribute] == str(cell_value).lower()]
    return df
def find_key_for(relation_path, key, attribute, value):
    """
    select key from relation where attribute = value;

    Returns the set of distinct *key* values; the attribute comparison is
    case-insensitive.
    NOTE(review): if the try block raises KeyError, ``key_value_df`` is
    never bound and the final return raises NameError.
    """
    # normalize this value
    value = str(value).lower()
    # Check if DF in cache
    if relation_path in cache:
        df = cache[relation_path]
    else:
        df = pd.read_csv(relation_path, encoding='latin1', sep=data_separator)
        #df = df.apply(lambda x: x.astype(str).str.lower())
        # cache[relation_path] = df  # cache for later
    try:
        key_value_df = df[df[attribute].map(lambda x: str(x).lower()) == value][[key]]
    except KeyError:
        print("!!!")
        print("Attempt to access attribute: '" + str(attribute) + "' from relation: " + str(df.columns))
        print("Attempt to project attribute: '" + str(key) + "' from relation: " + str(df.columns))
        print("!!!")
    return {x[0] for x in key_value_df.values}
def is_value_in_column(value, relation_path, column):
    """Return True when *value* (compared case-insensitively) appears in
    *column* of the relation at *relation_path*; the parsed DataFrame is
    cached for later calls."""
    # normalize this value
    value = str(value).lower()
    if relation_path in cache:
        df = cache[relation_path]
    else:
        df = pd.read_csv(relation_path, encoding='latin1', sep=data_separator)
        #df = df.apply(lambda x: x.astype(str).str.lower())
        cache[relation_path] = df  # cache for later
    return value in df[column].map(lambda x: str(x).lower()).unique()
def obtain_attributes_to_project(filters):
    """Collect the attribute names a list of filters refers to.

    An ATTR filter stores the attribute name at f[0][0]; a CELL filter
    stores it at f[0][1].
    """
    projected = set()
    for flt in filters:
        flt_kind = flt[1].value
        if flt_kind is FilterType.ATTR.value:
            projected.add(flt[0][0])
        elif flt_kind is FilterType.CELL.value:
            projected.add(flt[0][1])
    return projected
def _obtain_attributes_to_project(jp_with_filters):
    """Variant of obtain_attributes_to_project taking a (filters, join_path)
    pair; the join-path component is ignored."""
    filters, _jp = jp_with_filters
    projected = set()
    for flt in filters:
        flt_kind = flt[1].value
        if flt_kind is FilterType.ATTR.value:
            projected.add(flt[0][0])
        elif flt_kind is FilterType.CELL.value:
            projected.add(flt[0][1])
    return projected
def project(df, attributes_to_project):
    """Return *df* restricted to the given attributes (logs the projection)."""
    print("Project: " + str(attributes_to_project))
    return df[list(attributes_to_project)]
class InTreeNode:
    """Node of an in-tree (edges point from leaves toward the root).

    Wraps a table identifier plus an optional parent reference and a
    DataFrame payload. Hashing and equality are based on the wrapped
    identifier; a node also compares equal to the identifier string itself.
    """

    def __init__(self, node):
        self.node = node
        self.parent = None
        self.payload = None

    def add_parent(self, parent):
        self.parent = parent

    def set_payload(self, payload: pd.DataFrame):
        self.payload = payload

    def get_payload(self) -> pd.DataFrame:
        return self.payload

    def get_parent(self):
        return self.parent

    def __hash__(self):
        return hash(self.node)

    def __eq__(self, other):
        # compare with both strings and other nodes; any other type
        # implicitly yields None (falsy)
        if type(other) is str:
            return self.node == other
        if type(other) is InTreeNode:
            return self.node == other.node
def materialize_join_graph(jg, dod):
    """Materialize a join graph *jg* (list of (left, right) hop pairs) into a
    single DataFrame.

    The hops are first arranged into an in-tree whose nodes carry their
    relation's DataFrame as payload; leaves are then repeatedly joined into
    their parent (via join_ab_on_key_optimizer) until only the root remains,
    whose payload is the materialized view. Returns False when any join is
    aborted by the optimizer.
    """
    print("Materializing:")
    pp.pprint(jg)
    def build_tree(jg):
        # Build in-tree (leaves to root)
        intree = dict()  # keep reference to all nodes here
        leaves = []
        hops = jg
        while len(hops) > 0:
            pending_hops = []  # we use this variable to maintain the temporarily disconnected hops
            for l, r in hops:
                if len(intree) == 0:
                    # seed the tree with the first hop's left relation
                    node = InTreeNode(l.source_name)
                    node_path = dod.aurum_api.helper.get_path_nid(l.nid) + "/" + l.source_name
                    df = read_relation_on_copy(node_path)  # FIXME FIXME FIXME
                    # df = get_dataframe(node_path)
                    node.set_payload(df)
                    intree[l.source_name] = node
                    leaves.append(node)
                # now either l or r should be in intree
                if l.source_name in intree.keys():
                    rnode = InTreeNode(r.source_name)  # create node for r
                    node_path = dod.aurum_api.helper.get_path_nid(r.nid) + "/" + r.source_name
                    df = read_relation_on_copy(node_path)  # FIXME FIXME FIXME
                    # df = get_dataframe(node_path)
                    rnode.set_payload(df)
                    r_parent = intree[l.source_name]
                    rnode.add_parent(r_parent)  # add ref
                    # r becomes a leave, and l stops being one
                    if r_parent in leaves:
                        leaves.remove(r_parent)
                    leaves.append(rnode)
                    intree[r.source_name] = rnode
                elif r.source_name in intree.keys():
                    lnode = InTreeNode(l.source_name)  # create node for l
                    node_path = dod.aurum_api.helper.get_path_nid(l.nid) + "/" + l.source_name
                    df = read_relation_on_copy(node_path)  # FIXME FIXME FIXME
                    # df = get_dataframe(node_path)
                    lnode.set_payload(df)
                    l_parent = intree[r.source_name]
                    lnode.add_parent(l_parent)  # add ref
                    if l_parent in leaves:
                        leaves.remove(l_parent)
                    leaves.append(lnode)
                    intree[l.source_name] = lnode
                else:
                    # temporarily disjoint hop which we store for subsequent iteration
                    pending_hops.append((l, r))
            hops = pending_hops
        return intree, leaves
    def find_l_r_key(l_source_name, r_source_name, jg):
        # look up the join columns for this pair of relations, in either order
        # print(l_source_name + " -> " + r_source_name)
        for l, r in jg:
            if l.source_name == l_source_name and r.source_name == r_source_name:
                return l.field_name, r.field_name
            elif l.source_name == r_source_name and r.source_name == l_source_name:
                return r.field_name, l.field_name
    intree, leaves = build_tree(jg)
    # find groups of leaves with same common ancestor
    suffix_str = '_x'
    go_on = True
    while go_on:
        if len(leaves) == 1 and leaves[0].get_parent() is None:
            go_on = False
            continue  # we have now converged
        leave_ancestor = defaultdict(list)
        for leave in leaves:
            if leave.get_parent() is not None:  # never add the parent's parent, which does not exist
                leave_ancestor[leave.get_parent()].append(leave)
        # pick ancestor and find its join info with each children, then join, then add itself to leaves (remove others)
        for k, v in leave_ancestor.items():
            for child in v:
                l = k.get_payload()
                r = child.get_payload()
                l_key, r_key = find_l_r_key(k.node, child.node, jg)
                # print("L: " + str(k.node) + " - " + str(l_key) + " size: " + str(len(l)))
                # print("R: " + str(child.node) + " - " + str(r_key) + " size: " + str(len(r)))
                df = join_ab_on_key_optimizer(l, r, l_key, r_key, suffix_str=suffix_str)
                # df = join_ab_on_key(l, r, l_key, r_key, suffix_str=suffix_str)
                if df is False:  # happens when join is outlier - (causes run out of memory)
                    return False
                suffix_str += '_x'
                k.set_payload(df)  # update payload
                if child in leaves:
                    leaves.remove(child)  # removed merged children
            # joined all children, now we include joint df on leaves
            if k not in leaves:  # avoid re-adding parent element
                leaves.append(k)  # k becomes a new leave
    materialized_view = leaves[0].get_payload()  # the last leave is folded onto the in-tree root
    return materialized_view
def apply_consistent_sample(dfa, dfb, a_key, b_key, sample_size):
    """Deterministically sample up to *sample_size* shared key values and
    restrict both DataFrames to those keys (duplicate keys dropped).

    The side with more distinct keys drives the selection.
    NOTE(review): despite building hash -> id, the sort key is x[1] (the id
    value itself), so the selection is ordered by value, not by hash --
    confirm whether that was intended; also `hash` and `id` shadow builtins
    in the unpacking below.
    """
    # Normalize values
    dfa[a_key] = dfa[a_key].apply(lambda x: str(x).lower())
    dfb[b_key] = dfb[b_key].apply(lambda x: str(x).lower())
    # Chose consistently sample of IDs
    a_len = len(set(dfa[a_key]))
    b_len = len(set(dfb[b_key]))
    if a_len > b_len:
        sampling_side = dfa
        sampling_key = a_key
    else:
        sampling_side = dfb
        sampling_key = b_key
    id_to_hash = dict()
    for el in set(sampling_side[sampling_key]):  # make sure you don't draw repetitions
        h = hash(el)
        id_to_hash[h] = el
    sorted_hashes = sorted(id_to_hash.items(), key=lambda x: x[1], reverse=True)  # reverse or not does not matter
    chosen_ids = [id for hash, id in sorted_hashes[:sample_size]]
    # Apply selection on both DFs
    dfa = dfa[dfa[a_key].isin(chosen_ids)]
    dfb = dfb[dfb[b_key].isin(chosen_ids)]
    # Remove duplicate keys before returning
    dfa = dfa.drop_duplicates(subset=a_key)
    dfb = dfb.drop_duplicates(subset=b_key)
    # NOTE(review): reset_index without inplace/assignment is a no-op here.
    dfa.reset_index(drop=True)
    dfb.reset_index(drop=True)
    return dfa, dfb
def materialize_join_graph_sample(jg, dod, sample_size=100):
    """Sampled variant of materialize_join_graph().

    Builds the same in-tree, but before each parent/child join both payloads
    are reduced to a consistent key sample of at most *sample_size* rows via
    apply_consistent_sample(), so only an approximate (sampled) view is
    materialized. Returns False when any sampled join produces no rows.
    """
    print("Materializing:")
    pp.pprint(jg)
    def build_tree(jg):
        # Build in-tree (leaves to root)
        intree = dict()  # keep reference to all nodes here
        leaves = []
        hops = jg
        while len(hops) > 0:
            pending_hops = []  # we use this variable to maintain the temporarily disconnected hops
            for l, r in hops:
                if len(intree) == 0:
                    # seed the tree with the first hop's left relation
                    node = InTreeNode(l.source_name)
                    node_path = dod.aurum_api.helper.get_path_nid(l.nid) + "/" + l.source_name
                    df = read_relation_on_copy(node_path)  # FIXME FIXME FIXME
                    # df = get_dataframe(node_path)
                    node.set_payload(df)
                    intree[l.source_name] = node
                    leaves.append(node)
                # now either l or r should be in intree
                if l.source_name in intree.keys():
                    rnode = InTreeNode(r.source_name)  # create node for r
                    node_path = dod.aurum_api.helper.get_path_nid(r.nid) + "/" + r.source_name
                    df = read_relation_on_copy(node_path)  # FIXME FIXME FIXME
                    # df = get_dataframe(node_path)
                    rnode.set_payload(df)
                    r_parent = intree[l.source_name]
                    rnode.add_parent(r_parent)  # add ref
                    # r becomes a leave, and l stops being one
                    if r_parent in leaves:
                        leaves.remove(r_parent)
                    leaves.append(rnode)
                    intree[r.source_name] = rnode
                elif r.source_name in intree.keys():
                    lnode = InTreeNode(l.source_name)  # create node for l
                    node_path = dod.aurum_api.helper.get_path_nid(l.nid) + "/" + l.source_name
                    df = read_relation_on_copy(node_path)  # FIXME FIXME FIXME
                    # df = get_dataframe(node_path)
                    lnode.set_payload(df)
                    l_parent = intree[r.source_name]
                    lnode.add_parent(l_parent)  # add ref
                    if l_parent in leaves:
                        leaves.remove(l_parent)
                    leaves.append(lnode)
                    intree[l.source_name] = lnode
                else:
                    # temporarily disjoint hop which we store for subsequent iteration
                    pending_hops.append((l, r))
            hops = pending_hops
        return intree, leaves
    def find_l_r_key(l_source_name, r_source_name, jg):
        # look up the join columns for this pair of relations, in either order
        for l, r in jg:
            if l.source_name == l_source_name and r.source_name == r_source_name:
                return l.field_name, r.field_name
            elif l.source_name == r_source_name and r.source_name == l_source_name:
                return r.field_name, l.field_name
    intree, leaves = build_tree(jg)
    # find groups of leaves with same common ancestor
    suffix_str = '_x'
    go_on = True
    while go_on:
        if len(leaves) == 1 and leaves[0].get_parent() is None:
            go_on = False
            continue  # we have now converged
        leave_ancestor = defaultdict(list)
        for leave in leaves:
            if leave.get_parent() is not None:  # never add the parent's parent, which does not exist
                leave_ancestor[leave.get_parent()].append(leave)
        # pick ancestor and find its join info with each children, then join, then add itself to leaves (remove others)
        for k, v in leave_ancestor.items():
            for child in v:
                l = k.get_payload()
                r = child.get_payload()
                l_key, r_key = find_l_r_key(k.node, child.node, jg)
                # print("L: " + str(k.node) + " - " + str(l_key) + " size: " + str(len(l)))
                # print("R: " + str(child.node) + " - " + str(r_key) + " size: " + str(len(r)))
                l, r = apply_consistent_sample(l, r, l_key, r_key, sample_size)
                # NOTE(review): the comment below says normalize should be
                # False (apply_consistent_sample already lowercases), yet
                # True is passed -- confirm which is intended.
                # normalize false because I ensure it happens in the apply-consistent-sample function above
                df = join_ab_on_key(l, r, l_key, r_key, suffix_str=suffix_str, normalize=True)
                # df = join_ab_on_key_optimizer(l, r, l_key, r_key, suffix_str=suffix_str)
                # df = join_ab_on_key(l, r, l_key, r_key, suffix_str=suffix_str)
                if len(df) == 0:
                    df = False
                if df is False:  # happens when join is outlier - (causes run out of memory)
                    print("FALSE")
                    return False
                suffix_str += '_x'
                k.set_payload(df)  # update payload
                if child in leaves:
                    leaves.remove(child)  # removed merged children
            # joined all children, now we include joint df on leaves
            if k not in leaves:  # avoid re-adding parent element
                leaves.append(k)  # k becomes a new leave
    materialized_view = leaves[0].get_payload()  # the last leave is folded onto the in-tree root
    return materialized_view
def estimate_join_memory(a: pd.DataFrame, b: pd.DataFrame):
    """Decide whether the worst-case join of ``a`` and ``b`` fits in memory.

    The estimate is deliberately pessimistic: it assumes the join could
    degenerate into a cartesian product, prices each output row as one
    row of ``a`` plus one row of ``b``, and compares the result against
    the module-level byte budget ``memory_limit_join_processing``.

    Returns True when the estimated output fits, False otherwise.
    """
    a_len_rows = len(a)
    b_len_rows = len(b)
    # An empty input yields an empty join output, which trivially fits.
    # (This guard also prevents the ZeroDivisionError the original code
    # raised when either frame had no rows.)
    if a_len_rows == 0 or b_len_rows == 0:
        return True
    # 1. measured in-memory footprint of each input
    a_bytes = sum(a.memory_usage(deep=True))
    b_bytes = sum(b.memory_usage(deep=True))
    # 2. average bytes per row of each side
    a_row_size = float(a_bytes / a_len_rows)
    b_row_size = float(b_bytes / b_len_rows)
    # 3. an output row carries the columns of both sides (no projection)
    o_row_size = a_row_size + b_row_size
    # 4./5. worst case: cartesian product of the two row counts
    o_num_rows = a_len_rows * b_len_rows
    o_size_est = o_num_rows * o_row_size
    # 6. accept only if the estimate stays within the configured budget
    return o_size_est <= memory_limit_join_processing
def compute_table_cleanliness_profile(table_df: pd.DataFrame) -> dict:
    """Stub: profile the 'cleanliness' of a table's columns.

    NOTE(review): currently just returns ``table_df.columns`` (a pandas
    Index), not the per-column dict promised by the annotation — the
    TODO/FIXME notes below describe the intended metrics.
    """
    columns = table_df.columns
    # TODO: return a dict with a col profile. perhaps do some aggr at the end to return table-wide stats as well
    # unique / total
    # num null values
    # uniqueness column in the whole dataset -> information
    # FIXME: cardinality of the join - this is specific to a pair and not the invidivual
    #
    return columns
if __name__ == "__main__":
    # Ad-hoc driver kept for manual experiments with the join utilities
    # above; the commented-out sections are earlier test cases retained
    # for reference.
    # JOIN
    # a = pd.read_csv("/Users/ra-mit/data/mitdwhdata/Drupal_employee_directory.csv", encoding='latin1', sep=data_separator)
    # b = pd.read_csv("/Users/ra-mit/data/mitdwhdata/Employee_directory.csv", encoding='latin1', sep=data_separator)
    #
    # a_key = 'Mit Id'
    # b_key = 'Mit Id'
    # joined = join_ab_on_key(a, b, a_key, b_key)
    # JOIN causes trouble
    # a = pd.read_csv("/Users/ra-mit/data/mitdwhdata/Fclt_building_list.csv", encoding='latin1', sep=data_separator)
    # b = pd.read_csv("/Users/ra-mit/data/mitdwhdata/Fclt_building.csv", encoding='latin1', sep=data_separator)
    # a_key = 'Building Sort'
    # b_key = 'Building Number'
    # joined = join_ab_on_key(a, b, a_key, b_key)
    # joined = join_ab_on_key_optimizer(a, b, a_key, b_key)
    # exit()
    # Find KEY
    # path = "/Users/ra-mit/data/mitdwhdata/Warehouse_users.csv"
    # attribute_name = 'Unit Name'
    # attribute_value = 'Mechanical Engineering'
    # key_attribute = 'Krb Name Uppercase'
    #
    # keys = find_key_for(path, key_attribute, attribute_name, attribute_value)
    #
    # print(str(keys))
    # Find and remove nan values
    a = pd.read_csv("/Users/ra-mit/data/mitdwhdata/Fclt_organization.csv", encoding='latin1')
    a_key = 'Organization Number'
    # Normalize key values to lowercase strings; NaN turns into the
    # literal string 'nan', which is filtered out below.
    a[a_key] = a[a_key].apply(lambda x: str(x).lower())
    print("Original size: " + str(len(a)))
    a.dropna(subset=[a_key], inplace=True)
    print("After dropna: " + str(len(a)))
    # pp.pprint(a[a_key])
    # print(type(a[a_key].loc[4]))
    # NOTE(review): after the str() conversion above every value is a str,
    # so the ``el is pd.NaT`` identity test can never be true here; also,
    # enumerate() yields *positions* while DataFrame.drop() expects index
    # *labels*, and the two can disagree after dropna() — confirm intent.
    a_null_indices = [i for i, el in enumerate(a[a_key]) if el == 'null' or el == 'nan' or el is pd.NaT]
    a.drop(a_null_indices, inplace=True)
    print("after individual indices: " + str(len(a)))
| mit | 39dd7b06a4eb4c842338824fec99a3fb | 37.47619 | 133 | 0.581294 | 3.399856 | false | false | false | false |
pydata/conf_site | conf_site/reviews/migrations/0001_initial_squashed_0004_proposalnotification.py | 1 | 3755 | # Generated by Django 2.2.9 on 2020-01-19 05:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Squashed initial schema for the reviews app.

    Auto-generated by Django (``squashmigrations``); replaces migrations
    0001-0004.  Do not hand-edit: the definitions must stay in sync with
    the recorded migration history.
    """

    replaces = [('reviews', '0001_initial'), ('reviews', '0002_proposalfeedback_proposalresult'), ('reviews', '0003_proposalfeedback_proposal'), ('reviews', '0004_proposalnotification')]

    dependencies = [
        ('proposals', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='ProposalVote',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('score', models.SmallIntegerField(blank=True, choices=[(3, '+1 — Good proposal and I will argue for it to be accepted.'), (1, '+0 — OK proposal, but I will not argue for it to be accepted.'), (-1, '−0 — Weak proposal, but I will not argue against acceptance.'), (-3, '−1 — Serious issues and I will argue to reject this proposal.')])),
                ('comment', models.TextField(blank=True)),
                ('comment_html', models.TextField(blank=True, editable=False)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('date_modified', models.DateTimeField(auto_now=True)),
                ('proposal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review_votes', to='proposals.Proposal')),
                ('voter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review_votes', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='ProposalResult',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.CharField(choices=[('U', 'Undecided'), ('A', 'Accepted'), ('R', 'Rejected'), ('S', 'Standby')], default='U', max_length=1)),
                ('proposal', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='review_result', to='proposals.Proposal')),
            ],
        ),
        migrations.CreateModel(
            name='ProposalFeedback',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.TextField(blank=True)),
                ('comment_html', models.TextField(blank=True, editable=False)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('date_modified', models.DateTimeField(auto_now=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review_feedback', to=settings.AUTH_USER_MODEL)),
                ('proposal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review_feedback', to='proposals.Proposal')),
            ],
        ),
        migrations.CreateModel(
            name='ProposalNotification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('from_address', models.EmailField(max_length=254)),
                ('subject', models.CharField(max_length=254)),
                ('body', models.TextField()),
                ('date_sent', models.DateTimeField(auto_now_add=True, verbose_name='Date this notification was created and sent')),
                ('proposals', models.ManyToManyField(blank=True, related_name='review_notifications', to='proposals.Proposal')),
            ],
        ),
    ]
| mit | e7aa60b17d9b0daafe0044bab8499f4a | 59.370968 | 352 | 0.612343 | 4.09071 | false | false | false | false |
pydata/conf_site | conf_site/api/serializers.py | 1 | 1713 | from rest_framework import serializers
from rest_framework.reverse import reverse_lazy
from symposion.conference.models import Conference
from symposion.speakers.models import Speaker
from symposion.schedule.models import Presentation, Slot
class SpeakerSerializer(serializers.ModelSerializer):
    """API representation of a conference speaker."""

    # Expose the related auth user's string form (its username).
    username = serializers.StringRelatedField(source='user')
    absolute_url = serializers.SerializerMethodField()

    def get_absolute_url(self, obj):
        """Fully-qualified link to the speaker's public profile page."""
        return reverse_lazy(
            'speaker_profile',
            kwargs={"pk": obj.pk, "slug": obj.slug},
            request=self.context['request'],
        )

    class Meta:
        model = Speaker
        fields = (
            'username',
            'name',
            'email',
            'absolute_url',
        )
class ConferenceSerializer(serializers.ModelSerializer):
    """Serialize every Conference field except the internal primary key."""

    class Meta:
        model = Conference
        exclude = ('id',)
class SlotSerializer(serializers.ModelSerializer):
    """Schedule slot; day and kind are rendered as display strings."""

    day = serializers.StringRelatedField()
    kind = serializers.StringRelatedField()

    class Meta:
        model = Slot
        exclude = ('id', 'content_override', 'content_override_html')
class PresentationSerializer(serializers.ModelSerializer):
    """API representation of a scheduled presentation (nested speaker/slot)."""

    speaker = SpeakerSerializer()
    slot = SlotSerializer()
    section = serializers.StringRelatedField()
    absolute_url = serializers.SerializerMethodField()

    def get_absolute_url(self, obj):
        """Fully-qualified link to the presentation's detail page."""
        return reverse_lazy(
            'schedule_presentation_detail',
            args=[obj.pk, obj.slug],
            request=self.context['request'],
        )

    class Meta:
        model = Presentation
        exclude = ('id', 'description_html', 'abstract_html', 'proposal_base')
| mit | 4d85fb2b9c045efc070521dc09920bd7 | 27.081967 | 78 | 0.652072 | 4.381074 | false | false | false | false |
pydata/conf_site | symposion/speakers/migrations/0001_initial.py | 1 | 2144 | # Generated by Django 2.0.13 on 2019-02-17 18:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the symposion speakers app.

    Auto-generated by Django; do not hand-edit — the definitions must
    stay in sync with the recorded migration history.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Speaker',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='As you would like it to appear in the conference program.', max_length=100, verbose_name='Name')),
                ('biography', models.TextField(blank=True, help_text="A little bit about you.  Edit using <a href='http://warpedvisions.org/projects/markdown-cheat-sheet/' target='_blank'>Markdown</a>.", verbose_name='Biography')),
                ('biography_html', models.TextField(blank=True)),
                ('photo', models.ImageField(blank=True, help_text='Maximum file size: 10 MB', upload_to='speaker_photos', verbose_name='Photo')),
                ('twitter_username', models.CharField(blank=True, help_text='Your Twitter account', max_length=15)),
                ('annotation', models.TextField(blank=True, verbose_name='Annotation')),
                ('invite_email', models.CharField(blank=True, db_index=True, default='', max_length=200, verbose_name='Invite_email')),
                ('invite_token', models.CharField(blank=True, db_index=True, max_length=40, verbose_name='Invite token')),
                ('created', models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='Created')),
                ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='speaker_profile', to=settings.AUTH_USER_MODEL, verbose_name='User')),
            ],
            options={
                'verbose_name': 'Speaker',
                'ordering': ['name'],
                'verbose_name_plural': 'Speakers',
            },
        ),
    ]
| mit | 04a00a652428397bc59b2e9c0843e666 | 53.974359 | 231 | 0.630597 | 4.068311 | false | false | false | false |
pydata/conf_site | conf_site/reviews/tasks.py | 1 | 1799 | import csv
from django.contrib.auth.models import Group, User
from allauth.account.models import EmailAddress
from allauth.utils import generate_unique_username
def import_reviewer_csv(filename):
    """Create or update reviewer accounts from a CSV export.

    Expects rows of the form ``(?, first_name, last_name, email, ...)``
    after a header line.  Every listed person ends up in the "Reviewers"
    group; brand-new accounts also get a confirmed-style EmailAddress.

    Returns ``(num_created, num_existing)`` on success, or ``False`` when
    a row is too short to parse.
    """
    created_count = 0
    existing_count = 0
    with open(filename, "r", encoding="UTF-8-SIG") as csvfile:
        reader = csv.reader(csvfile)
        next(reader)  # discard the header row
        for row in reader:
            try:
                first_name = row[1].strip()
                last_name = row[2].strip()
                email = row[3].lower().strip()
            except IndexError:
                # Malformed row: abort the whole import.
                return False
            # Reuse an account with this email if one exists; otherwise
            # create one, deriving the username from the first name when
            # available, else from the email address.
            preferred_name = first_name if first_name else email
            user, was_created = User.objects.get_or_create(
                email=email,
                defaults={
                    "username": generate_unique_username([preferred_name])
                },
            )
            if was_created:
                created_count += 1
                EmailAddress.objects.create(user=user, email=email)
            else:
                existing_count += 1
            # Fill in missing name fields without clobbering existing ones.
            if not user.first_name:
                user.first_name = first_name
            if not user.last_name:
                user.last_name = last_name
            # Grant reviewing permissions via group membership.
            user.groups.add(Group.objects.get(name="Reviewers"))
            user.save()
    return (created_count, existing_count)
pydata/conf_site | conf_site/urls.py | 1 | 2735 | from django.conf import settings
from django.conf.urls import include, re_path
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.admin.views.decorators import staff_member_required
from django.urls import path
from django.views.generic.base import TemplateView
try:
import debug_toolbar
except ImportError:
pass
import symposion.views
from conf_site.core.views import csrf_failure, TimeZoneChangeView
from conf_site.schedule.views import (
ExportPresentationSpeakerView,
PresentationDetailView,
PresentationRedirectView,
)
from conf_site.speakers.views import (
ExportAcceptedSpeakerEmailView,
SpeakerDetailView,
SpeakerListView,
SpeakerRedirectView,
)
WIKI_SLUG = r"(([\w-]{2,})(/[\w-]{2,})*)"
if settings.DEBUG:
urlpatterns = [
re_path(r"^__debug__/", include(debug_toolbar.urls)),
]
else:
urlpatterns = []
urlpatterns += [
re_path(r"^admin/", admin.site.urls),
re_path(r"^accounts/", include("allauth.urls")),
re_path(r"^api/", include("conf_site.api.urls")),
re_path(r"^dashboard/", symposion.views.dashboard, name="dashboard"),
re_path(
r"^speaker/export/$",
staff_member_required(ExportAcceptedSpeakerEmailView.as_view()),
name="speaker_email_export",
),
re_path(
r"^speaker/list/$", SpeakerListView.as_view(), name="speaker_list"
),
path(
"speaker/profile/<int:pk>/",
SpeakerRedirectView.as_view(),
name="speaker_profile_redirect",
),
path(
"speaker/profile/<int:pk>/<slug:slug>/",
SpeakerDetailView.as_view(),
name="speaker_profile",
),
re_path(r"^speaker/", include("symposion.speakers.urls")),
re_path(r"^proposals/", include("conf_site.proposals.urls")),
re_path(r"^reviews/", include("conf_site.reviews.urls")),
path(
"schedule/presentation/<int:pk>/",
PresentationRedirectView.as_view(),
name="schedule_presentation_redirect",
),
path(
"schedule/presentation/<int:pk>/<slug:slug>/",
PresentationDetailView.as_view(),
name="schedule_presentation_detail",
),
re_path(
r"^schedule/presentation/export/$",
staff_member_required(ExportPresentationSpeakerView.as_view()),
name="presentation_speaker_export",
),
re_path(r"^schedule/", include("symposion.schedule.urls")),
re_path(
r"^time-zone/$", TimeZoneChangeView.as_view(), name="time_zone_change"
),
re_path(r"^403-csrf/", csrf_failure, name="403-csrf"),
re_path(r"^413/", TemplateView.as_view(template_name="413.html")),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| mit | 86109dd6bb3c705dbbb149815ad6bfc4 | 30.436782 | 78 | 0.660695 | 3.542746 | false | false | true | false |
pydata/conf_site | ansible/roles/web/templates/sensitive-settings.py | 1 | 3263 | # Passwords, API keys, and other sensitive information.
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from conf_site.settings.base import INSTALLED_APPS
DATABASES_DEFAULT = {
"ENGINE": "django.db.backends.postgresql",
"NAME": "{{ django_database }}",
"USER": "{{ database_user }}",
"PASSWORD": "{{ database_password }}",
"HOST": "{{ database_host }}",
"PORT": "",
}
SECRET_KEY = "{{ django_secret_key }}"
SESSION_COOKIE_PATH = "{{ subdirectory }}" or "/"
DATE_FORMAT = "{{ date_format }}"
TIME_FORMAT= "{{ time_format }}"
TIME_ZONE = "{{ timezone }}"
DATETIME_FORMAT= "{{ date_format }} {{ time_format }}"
DEFAULT_FROM_EMAIL = "{{ default_email }}"
SERVER_EMAIL = "{{ default_email }}"
EMAIL_USE_TLS = True
EMAIL_HOST = '{{ email_host_name }}'
EMAIL_HOST_USER = '{{ email_host_user }}'
EMAIL_HOST_PASSWORD = '{{ email_host_password }}'
EMAIL_PORT = '587'
# Determine which email backend to use. Note that previous variables
# are only relevant to the SMTP backend.
{% if postmark_api_token and environment_type != "development" %}
EMAIL_BACKEND = "anymail.backends.postmark.EmailBackend"
ANYMAIL = {
"POSTMARK_SEND_DEFAULTS": {
"esp_extra": {"MessageStream": "{{ conference_identifier }}"},
},
"POSTMARK_SERVER_TOKEN": "{{ postmark_api_token }}",
}
{% elif environment_type != "development" %}
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
{% else %}
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
{% endif %}
ALLOWED_HOSTS = ['*']
USE_X_FORWARDED_HOST = {% if subdirectory %}True{% else %}False{% endif %}
WEBSITE_URL = "{{ website_url }}"
FAVICON_URL = "{{ favicon_url }}"
LOGIN_URL = "{{ website_url }}/accounts/login/"
LOGO_URL = "{{ logo_url }}"
MEDIA_URL = "{{ website_url }}/media/"
STATIC_URL = "{{ website_url }}/static/"
SENTRY_PUBLIC_DSN = (
"https://{{ sentry_public_key }}@sentry.io/{{ sentry_project_id }}"
)
{% if environment_type != "development" %}
sentry_sdk.init(
dsn=SENTRY_PUBLIC_DSN,
environment="{{ environment_type }}",
integrations=[DjangoIntegration(), RedisIntegration()],
release="{{ git_status.stdout }}",
server_name="{{ conference_identifier }}",
traces_sample_rate=0.1,
)
{% endif %}
GOOGLE_ANALYTICS_PROPERTY_ID = "{{ google_analytics_id }}"
{% if github_oauth_client_id is defined %}
INSTALLED_APPS = INSTALLED_APPS + ["allauth.socialaccount.providers.github"]
{% endif %}
{% if google_oauth_client_id is defined %}
INSTALLED_APPS = INSTALLED_APPS + ["allauth.socialaccount.providers.google"]
{% endif %}
SOCIALACCOUNT_PROVIDERS = {
{% if github_oauth_client_id is defined %}"github": {
"APP": {
"client_id": "{{ github_oauth_client_id }}",
"secret": "{{ github_oauth_client_secret }}",
}
},{% endif %}
{% if google_oauth_client_id is defined %}"google": {
"APP": {
"client_id": "{{ google_oauth_client_id }}",
"secret": "{{ google_oauth_client_secret }}",
},
"SCOPE": [
"profile",
"email",
],
"AUTH_PARAMS": {
"access_type": "online",
}
},{% endif %}
}
| mit | fe9d8c2a4a6c117e75d5e096f7eff24e | 30.679612 | 76 | 0.619369 | 3.501073 | false | false | true | false |
pydata/conf_site | symposion/proposals/urls.py | 1 | 1453 | from django.conf.urls import re_path
from .views import (
proposal_submit,
proposal_submit_kind,
proposal_detail,
proposal_edit,
proposal_speaker_manage,
proposal_cancel,
proposal_pending_join,
proposal_pending_decline,
document_create,
document_delete,
document_download,
)
# Routes for the proposal lifecycle: submission, editing, speaker
# management, cancellation, pending-invite responses, and supporting
# documents.  The unnamed regex groups capture primary keys that are
# passed positionally to the views.
urlpatterns = [
    re_path(r"^submit/$", proposal_submit, name="proposal_submit"),
    re_path(
        r"^submit/([\w\-]+)/$",
        proposal_submit_kind,
        name="proposal_submit_kind",
    ),
    re_path(r"^(\d+)/$", proposal_detail, name="proposal_detail"),
    re_path(r"^(\d+)/edit/$", proposal_edit, name="proposal_edit"),
    re_path(
        r"^(\d+)/speakers/$",
        proposal_speaker_manage,
        name="proposal_speaker_manage",
    ),
    re_path(r"^(\d+)/cancel/$", proposal_cancel, name="proposal_cancel"),
    re_path(
        r"^(\d+)/join/$", proposal_pending_join, name="proposal_pending_join"
    ),
    re_path(
        r"^(\d+)/decline/$",
        proposal_pending_decline,
        name="proposal_pending_decline",
    ),
    re_path(
        r"^(\d+)/document/create/$",
        document_create,
        name="proposal_document_create",
    ),
    re_path(
        r"^document/(\d+)/delete/$",
        document_delete,
        name="proposal_document_delete",
    ),
    re_path(
        r"^document/(\d+)/([^/]+)$",
        document_download,
        name="proposal_document_download",
    ),
]
| mit | 31b99574ea52921eade49d47fab32450 | 25.418182 | 77 | 0.565726 | 3.578818 | false | false | true | false |
pydata/conf_site | conf_site/reviews/models.py | 1 | 5944 | from django.contrib.auth.models import User
from django.core.mail import send_mass_mail
from django.db import models
from django.template import Context, Template
from symposion.markdown_parser import parse
class ProposalVote(models.Model):
    """
    Model to track votes made on a proposal.

    This model replaces symposion.reviews.models.Comment,
    symposion.reviews.models.Review, and symposion.reviews.models.LatestVote.

    Fields:
    proposal - The proposal associated with this vote.
    voter - The user that made the vote.
    score - The user's vote, represented as two characters.
    comment - The Markdown-formatted text of the vote's comment (optional).
    comment_html - An autogenerated HTML version of the vote's comment.
    date_created - The date/time that the vote was created.
    date_modified - The date/time that the vote was last modified.
    """

    # These values are based on the historical ones found in
    # symposion.reviews.models.score_expression().
    # This allows us to use integers directly which makes
    # calculating scores easier.
    PLUS_ONE = 3
    PLUS_ZERO = 1
    MINUS_ZERO = -1
    MINUS_ONE = -3
    SCORES = [
        (
            PLUS_ONE,
            "+1 — Good proposal and I will argue for it to be accepted.",
        ),
        (
            PLUS_ZERO,
            "+0 — OK proposal, but I will not argue for it to be accepted.",
        ),
        (
            MINUS_ZERO,
            "−0 — Weak proposal, but I will not argue against acceptance.",
        ),
        (
            MINUS_ONE,
            "−1 — Serious issues and I will argue to reject this proposal.",
        ),
    ]

    proposal = models.ForeignKey(
        "proposals.Proposal",
        on_delete=models.CASCADE,
        related_name="review_votes",
    )
    voter = models.ForeignKey(
        User, on_delete=models.CASCADE, related_name="review_votes"
    )
    score = models.SmallIntegerField(blank=True, choices=SCORES)
    comment = models.TextField(blank=True)
    comment_html = models.TextField(blank=True, editable=False)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)

    def save(self, *args, **kwargs):
        # Re-render the Markdown comment to HTML on every save so the
        # two fields can never drift apart.
        self.comment_html = parse(self.comment)
        return super(ProposalVote, self).save(*args, **kwargs)

    def get_numeric_score_display(self):
        """Returns numeric value at beginning of score display string."""
        # The first two characters of each SCORES label are the sign and
        # digit, e.g. "+1" or "−0" (Unicode minus sign).
        return self.get_score_display()[0:2].strip()
def proposalvote_score_cache_key(proposal, voter):
    """Cache key for a ProposalVote's score, keyed on proposal and voter.

    Built from the two primary keys so every (proposal, voter) pair maps
    to a distinct cache entry.
    """
    return "proposalvote_%s_%s_score" % (proposal.pk, voter.pk)
class ProposalFeedback(models.Model):
    """Free-form feedback attached to a proposal by a user.

    ``comment`` holds Markdown source; ``comment_html`` is the rendered
    version, regenerated on every save.
    """

    proposal = models.ForeignKey(
        "proposals.Proposal",
        on_delete=models.CASCADE,
        related_name="review_feedback",
    )
    author = models.ForeignKey(
        User, on_delete=models.CASCADE, related_name="review_feedback"
    )
    comment = models.TextField(blank=True)
    comment_html = models.TextField(blank=True, editable=False)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)

    def save(self, *args, **kwargs):
        # Keep the rendered HTML in sync with the Markdown source.
        self.comment_html = parse(self.comment)
        return super(ProposalFeedback, self).save(*args, **kwargs)
class ProposalResult(models.Model):
    """
    Model to track whether a proposal's acceptance and notification status.

    This model replaces symposion.reviews.models.ProposalResult and
    symposion.reviews.models.ResultNotification.
    """

    # Single-character codes stored in the database for the status field.
    RESULT_ACCEPTED = "A"
    RESULT_REJECTED = "R"
    RESULT_STANDBY = "S"
    RESULT_UNDECIDED = "U"
    RESULT_STATUSES = [
        (RESULT_UNDECIDED, "Undecided"),
        (RESULT_ACCEPTED, "Accepted"),
        (RESULT_REJECTED, "Rejected"),
        (RESULT_STANDBY, "Standby"),
    ]

    # Exactly one result per proposal.
    proposal = models.OneToOneField(
        "proposals.Proposal",
        on_delete=models.CASCADE,
        related_name="review_result",
    )
    status = models.CharField(
        choices=RESULT_STATUSES, default=RESULT_UNDECIDED, max_length=1
    )
class ProposalNotification(models.Model):
    """Model to track notifications sent to proposal speakers."""

    from_address = models.EmailField()
    subject = models.CharField(max_length=254)
    body = models.TextField()
    proposals = models.ManyToManyField(
        "proposals.Proposal",
        blank=True,
        related_name="review_notifications",
    )
    date_sent = models.DateTimeField(
        verbose_name="Date this notification was created and sent",
        auto_now_add=True,
    )

    def __str__(self):
        return "{}".format(self.subject)

    def send_email(self):
        """Send this notification to every speaker of every attached proposal.

        Returns a list of speakers without email addresses.
        """
        email_messages = []
        unemailed = []
        # Create a message for each email address.
        # This is necessary because we are not using BCC.
        for proposal in self.proposals.all():
            # In order to support the "variable substitution"
            # supported by the previous reviews system, the
            # message needs to be templated anew for each
            # proposal.
            message_body = Template(self.body).render(
                Context({"proposal": proposal.notification_email_context()})
            )
            for speaker in proposal.speakers():
                if speaker.email:
                    datamessage_tuple = (
                        self.subject,
                        message_body,
                        self.from_address,
                        [speaker.email],
                    )
                    email_messages.append(datamessage_tuple)
                else:
                    # Track speakers we could not notify so the caller
                    # can surface them.
                    unemailed.append(speaker)
        send_mass_mail(email_messages)
        return unemailed
| mit | bd55c088ffcd67bff75998499b7e107a | 32.325843 | 77 | 0.626433 | 4.043626 | false | false | false | false |
pyspeckit/pyspeckit | examples/ammonia_vtau_fit_example.py | 8 | 3778 | from __future__ import print_function
import pyspeckit

# Worked example: fitting NH3 (1-1) ammonia hyperfine lines with pyspeckit,
# progressing from a simple Gaussian to one- and two-component ammonia fits.

# Grab a .fits spectrum with a legitimate header
sp = pyspeckit.Spectrum('G031.947+00.076_nh3_11_Tastar.fits')
""" HEADER:
SIMPLE = T / Written by IDL: Tue Aug 31 18:17:01 2010
BITPIX = -64
NAXIS = 1 / number of array dimensions
NAXIS1 = 8192 /Number of positions along axis 1
CDELT1 = -0.077230503
CRPIX1 = 4096.0000
CRVAL1 = 68.365635
CTYPE1 = 'VRAD'
CUNIT1 = 'km/s '
SPECSYS = 'LSRK'
RESTFRQ = 2.3694500e+10
VELOSYS = -43755.930
CDELT1F = 6103.5156
CRPIX1F = 4096.0000
CRVAL1F = 2.3692555e+10
CTYPE1F = 'FREQ'
CUNIT1F = 'Hz'
SPECSYSF= 'LSRK'
RESTFRQF= 2.3694500e+10
VELOSYSF= -43755.930
VDEF = 'RADI-LSR'
SRCVEL = 70.000000
ZSOURCE = 0.00023349487
BUNIT = 'K '
OBJECT = 'G031.947+00.076'
TELESCOP= 'GBT'
TSYS = 42.1655
ELEV = 34.904846
AIRMASS = 1.7475941
LINE = 'nh3_11'
FREQ = 23.692555
TARGLON = 31.947236
TARGLAT = 0.076291610
MJD-AVG = 54548.620
CONTINUU= 0.0477613
CONTERR = 0.226990
SMTHOFF = 0
COMMENT 1 blank line
END
"""
# Start by computing the error using a reasonably signal-free region
sp.error[:] = sp.stats((-100, 50))['std']
# Change the plot range to be a reasonable physical coverage (the default is to
# plot the whole 8192 channel spectrum)
sp.plotter(xmin=-100,xmax=300)
# There are many extra channels, so let's smooth.  Default is a Gaussian
# smooth.  Downsampling helps speed up the fitting (assuming the line is still
# Nyquist sampled, which it is)
sp.smooth(2)
# replot after smoothing
sp.plotter(xmin=-100,xmax=300)
# First, fit a gaussian to the whole spectrum as a "first guess" (good at
# picking up the centroid, bad at getting the width right)
# negamp=False forces the fitter to search for a positive peak, not the
# negatives created in this spectrum by frequency switching
sp.specfit.selectregion(xmin=60,xmax=120,xtype='wcs')
sp.specfit(negamp=False, guesses='moments')
# Save the fit...
sp.plotter.figure.savefig('nh3_gaussfit.png')
# and print some information to screen
print("Guesses: ", sp.specfit.guesses)
print("Best fit: ", sp.specfit.modelpars)
# Run the ammonia spec fitter with a reasonable guess
# Since we only have a single line (1-1), the kinetic temperature is
# unconstrained: we'll fix it at 7 K.  Similarly, the ortho fraction
# is fixed to 0.5
# Shorthand flags used in the per-parameter "fixed" masks below.
T=True; F=False
sp.specfit(fittype='ammonia_tau',
           guesses=[7,4.45,4.5,0.84,96.2,0.43],
           fixed=[T,F,F,F,F,T],
           quiet=False)
# plot up the residuals in a different window.  The residuals strongly suggest
# the presence of a second velocity component.
sp.specfit.plotresiduals()
sp.plotter.figure.savefig('nh3_ammonia_vtau_fit.png')
print("Guesses: ", sp.specfit.guesses)
print("Best fit: ", sp.specfit.modelpars)
# re-plot zoomed in
sp.plotter(xmin=70,xmax=125)
# replot the fit
sp.specfit.plot_fit()
sp.plotter.figure.savefig('nh3_ammonia_fit_vtau_zoom.png')
# refit with two components
sp.specfit(fittype='ammonia_tau',
           guesses=[7,3.5,4.5,0.68,97.3,0.5]+[7,4.2,4.5,0.52,95.8,0.5],
           fixed=[T,F,F,F,F,T]*2,
           quiet=False)
sp.specfit.plotresiduals()
sp.plotter.figure.savefig('nh3_ammonia_multifit_vtau_zoom.png')
# compare to the 'thin' version
# In the thin version, Tex = Tk by force
sp.specfit.Registry.add_fitter('ammonia_tau_thin',
                               pyspeckit.spectrum.models.ammonia.ammonia_model_vtau_thin(),
                               5)
# The thin model is registered with 5 parameters per component, hence the
# shorter guess/fixed lists below.
sp.specfit(fittype='ammonia_tau_thin',
           guesses=[7,4.5,0.68,97.3,0.5]+[7,4.5,0.52,95.8,0.5],
           fixed=[F,F,F,F,T]*2,
           quiet=False)
sp.specfit.plotresiduals()
sp.plotter.figure.savefig('nh3_ammonia_multifit_vtau_thin_zoom.png')
| mit | f306009f908693e27dccfc5cab99e498 | 31.290598 | 91 | 0.676019 | 2.605517 | false | false | false | false |
pyspeckit/pyspeckit | pyspeckit/mpfit/mpfitexpr.py | 11 | 2384 | """
Copyright (C) 2009 Sergey Koposov
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import mpfit
import re
import numpy
def mpfitexpr(func, x, y, err, start_params, check=True, full_output=False, **kw):
    """Fit the user defined expression to the data
    Input:
    - func: string with the function definition
    - x: x vector
    - y: y vector
    - err: vector with the errors of y
    - start_params: the starting parameters for the fit
    Output:
    - The tuple (params, yfit) with best-fit params and the values of func evaluated at x
    Keywords:
    - check: boolean parameter. If true(default) the function will be checked for sanity
    - full_output: boolean parameter. If True(default is False) then instead of best-fit parameters the mpfit object is returned
    Example:
    params,yfit=mpfitexpr('p[0]+p[2]*(x-p[1])',x,y,err,[0,10,1])

    If you need to use numpy functions in your function, then
    you must to use the full names of these functions, e.g.:
    numpy.sin, numpy.cos etc.

    This function is motivated by mpfitexpr() from wonderful MPFIT IDL package
    written by Craig Markwardt
    """

    def myfunc(p, fjac=None, x=None, y=None, err=None):
        # Residuals callback handed to mpfit: evaluates the user's
        # expression at the current parameter vector ``p``.
        return [0, eval('(y-(%s))/err' % func)]

    # Find the largest parameter index referenced in the expression.
    # The pattern is a raw string (the original "\[" escapes were
    # invalid-escape-sequence warnings), and the "(?:^|...)" alternative
    # also matches a parameter at the very start of the expression:
    # previously 'p[0]' as the first token was not counted, so e.g.
    # func='p[0]' was wrongly rejected as "wrong function format".
    param_re = re.compile(r"(?:^|[^a-zA-Z])p\[(\d+)\]")
    maxp = -1
    for m in param_re.finditer(func):
        curp = int(m.group(1))
        maxp = curp if curp > maxp else maxp
    if check:
        if maxp == -1:
            raise Exception("wrong function format")
        if maxp + 1 != len(start_params):
            raise Exception("the length of the start_params != the length of the parameter vector of the function")
    fa = {'x': x, 'y': y, 'err': err}
    res = mpfit.mpfit(myfunc, start_params, functkw=fa, **kw)
    # Evaluate the expression once more at the best-fit parameters.
    yfit = eval(func, globals(), {'x': x, 'p': res.params})
    if full_output:
        return (res, yfit)
    else:
        return (res.params, yfit)
| mit | 6140dd853ba3ac4e57e1060efd5a0a1e | 34.58209 | 125 | 0.698826 | 3.191432 | false | false | false | false |
pyspeckit/pyspeckit | docs/sphinxext/flickr.py | 11 | 2354 | from docutils import nodes
from docutils.parsers.rst import directives
CODE = """\
<object width="%(width)i" height="%(height)i">
<param name="flashvars" value="offsite=true&lang=en-us&page_show_url=%2Fphotos%2F75341362%40N04%2Fsets%2F72157629059614751%2Fshow%2F&page_show_back_url=%2Fphotos%2F75341362%40N04%2Fsets%2F72157629059614751%2F&set_id=72157629059614751&jump_to="> </param>
<param name="movie" value="http://www.flickr.com/apps/slideshow/show.swf?v=%(flickid)s"> </param>
<param name="allowFullScreen" value="true"></param>
<embed type="application/x-shockwave-flash" src="http://www.flickr.com/apps/slideshow/show.swf?v=%(flickid)s" allowFullScreen="true" flashvars="offsite=true&lang=en-us&page_show_url=%2Fphotos%2F75341362%40N04%2Fsets%2F72157629059614751%2Fshow%2F&page_show_back_url=%2Fphotos%2F75341362%40N04%2Fsets%2F72157629059614751%2F&set_id=72157629059614751&jump_to=" width="%(width)i" height="%(height)i"></embed>
</object>
"""
PARAM = """\n <param name="%s" value="%s"></param>"""
def flickr(name, args, options, content, lineno,
           contentOffset, blockText, state, stateMachine):
    """Restructured text extension for inserting flickr embedded slideshows.

    ``content`` is the directive body: the first line is the flickr
    slideshow id, and optional following lines are ``key=value`` options
    (``width`` and ``height`` are understood; any other key becomes an
    extra ``<param>`` element).
    """
    if len(content) == 0:
        # Directive handlers must return a list of nodes; an empty body
        # contributes nothing.  (The original returned None here, which
        # docutils cannot process.)
        return []
    string_vars = {
        'flickid': content[0],
        'width': 400,
        'height': 300,
        'extra': ''
    }
    extra_args = content[1:]  # Because content[0] is ID
    extra_args = [ea.strip().split("=") for ea in extra_args]  # key=value
    extra_args = [ea for ea in extra_args if len(ea) == 2]  # drop bad lines
    extra_args = dict(extra_args)
    if 'width' in extra_args:
        # CODE interpolates width/height with %i, so user-supplied values
        # must be converted from str to int (as plain strings they raised
        # a TypeError during the interpolation below).
        string_vars['width'] = int(extra_args.pop('width'))
    if 'height' in extra_args:
        string_vars['height'] = int(extra_args.pop('height'))
    if extra_args:
        # NOTE(review): CODE contains no %(extra)s placeholder, so these
        # extra params are currently never rendered — confirm intent.
        params = [PARAM % (key, extra_args[key]) for key in extra_args]
        string_vars['extra'] = "".join(params)
    return [nodes.raw('', CODE % (string_vars), format='html')]
flickr.content = True  # the directive accepts body content
directives.register_directive('flickr', flickr)

from sphinx.util.compat import Directive

class FlickrDirective(Directive):
    # NOTE(review): docutils invokes ``run()`` with no positional
    # arguments, while ``flickr()`` expects nine — as written this
    # wrapper cannot work.  Confirm whether the class-based path is ever
    # exercised (the function form is already registered above).
    def run(self, *args):
        return [flickr(*args)]

def setup(app):
    # Sphinx extension entry point: register config values and the directive.
    app.add_config_value('flickr',False,False)
    app.add_config_value('flickrID', None, 'html')
    app.add_directive('flickr',FlickrDirective)
| mit | fa448bdcc222bf8ab3539a61658daf1d | 44.269231 | 403 | 0.67842 | 2.895449 | false | false | false | false |
pytest-dev/py | py/_xmlgen.py | 89 | 8364 | """
module for generating and serializing xml and html structures
by using simple python objects.
(c) holger krekel, holger at merlinux eu. 2009
"""
import sys, re
if sys.version_info >= (3,0):
    # Python 3: all text is unicode, so u() is the identity and unicode()
    # emulates the py2 builtin (preferring __unicode__ when defined).
    def u(s):
        return s
    def unicode(x, errors=None):
        # 'errors' is accepted only for py2 signature compatibility; unused
        if hasattr(x, '__unicode__'):
            return x.__unicode__()
        return str(x)
else:
    # Python 2: u() promotes byte strings to the builtin unicode type
    def u(s):
        return unicode(s)
    unicode = unicode
class NamespaceMetaclass(type):
    # Metaclass that manufactures Tag subclasses on demand: accessing e.g.
    # ``html.div`` builds (and caches on the namespace) a class named 'div'
    # derived from the namespace's __tagclass__.
    def __getattr__(self, name):
        # underscore-prefixed names are never tag names
        if name[:1] == '_':
            raise AttributeError(name)
        if self == Namespace:
            raise ValueError("Namespace class is abstract")
        tagspec = self.__tagspec__
        # if the namespace restricts its tag vocabulary, reject unknown tags
        if tagspec is not None and name not in tagspec:
            raise AttributeError(name)
        classattr = {}
        if self.__stickyname__:
            # remember the original tag name even if the class gets renamed
            classattr['xmlname'] = name
        cls = type(name, (self.__tagclass__,), classattr)
        # cache the generated class so the next lookup skips __getattr__
        setattr(self, name, cls)
        return cls
class Tag(list):
    """A tag: a list of child nodes plus an ``attr`` attribute holder."""

    class Attr(object):
        """Simple namespace object storing the tag's attributes."""
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)

    def __init__(self, *args, **kwargs):
        # positional arguments become children, keywords become attributes
        super(Tag, self).__init__(args)
        self.attr = self.Attr(**kwargs)

    def __unicode__(self):
        return self.unicode(indent=0)
    __str__ = __unicode__

    def unicode(self, indent=2):
        """Serialize this tag (and all children) to a unicode string."""
        chunks = []
        serializer = SimpleUnicodeVisitor(chunks.append, indent)
        serializer.visit(self)
        return u("").join(chunks)

    def __repr__(self):
        return "<%r tag object %d>" % (self.__class__.__name__, id(self))
# Abstract root namespace: built through the metaclass so that attribute
# access generates Tag subclasses on the fly (see NamespaceMetaclass).
Namespace = NamespaceMetaclass('Namespace', (object, ), {
    '__tagspec__': None,      # None: any tag name is permitted
    '__tagclass__': Tag,
    '__stickyname__': False,
    })
class HtmlTag(Tag):
    # Tag variant serialized through HtmlVisitor: html singleton/inline
    # rules apply and empty tags render as <x></x> instead of <x/>.
    def unicode(self, indent=2):
        """Serialize this tag (and children) to a unicode HTML string."""
        l = []
        HtmlVisitor(l.append, indent, shortempty=False).visit(self)
        return u("").join(l)
# exported plain html namespace
class html(Namespace):
    # Namespace restricted to the known HTML tag vocabulary; tag classes
    # keep their original name via __stickyname__ (see NamespaceMetaclass).
    __tagclass__ = HtmlTag
    __stickyname__ = True
    __tagspec__ = dict([(x,1) for x in (
        'a,abbr,acronym,address,applet,area,article,aside,audio,b,'
        'base,basefont,bdi,bdo,big,blink,blockquote,body,br,button,'
        'canvas,caption,center,cite,code,col,colgroup,command,comment,'
        'datalist,dd,del,details,dfn,dir,div,dl,dt,em,embed,'
        'fieldset,figcaption,figure,footer,font,form,frame,frameset,h1,'
        'h2,h3,h4,h5,h6,head,header,hgroup,hr,html,i,iframe,img,input,'
        'ins,isindex,kbd,keygen,label,legend,li,link,listing,map,mark,'
        'marquee,menu,meta,meter,multicol,nav,nobr,noembed,noframes,'
        'noscript,object,ol,optgroup,option,output,p,param,pre,progress,'
        'q,rp,rt,ruby,s,samp,script,section,select,small,source,span,'
        'strike,strong,style,sub,summary,sup,table,tbody,td,textarea,'
        'tfoot,th,thead,time,title,tr,track,tt,u,ul,xmp,var,video,wbr'
        ).split(',') if x])
class Style(object):
    """CSS style container: keyword names have underscores mapped to
    hyphens (``font_size`` becomes the ``font-size`` attribute)."""
    def __init__(self, **kw):
        for prop_name, prop_value in kw.items():
            setattr(self, prop_name.replace('_', '-'), prop_value)
class raw(object):
    """just a box that can contain a unicode string that will be
    included directly in the output (i.e. without any xml-escaping)"""
    def __init__(self, uniobj):
        self.uniobj = uniobj  # the verbatim markup text
class SimpleUnicodeVisitor(object):
    """ recursive visitor to write unicode.

    Serializes a Tag tree by dispatching on the node's class name (see
    ``visit``) and streaming text fragments through the ``write`` callable.
    """
    def __init__(self, write, indent=0, curindent=0, shortempty=True):
        self.write = write
        self.cache = {}  # node class -> bound visit method
        self.visited = {} # for detection of recursion
        self.indent = indent      # per-level indent width
        self.curindent = curindent  # current indentation column
        self.parents = []         # stack of open tags
        self.shortempty = shortempty # short empty tags or not
    def visit(self, node):
        """ dispatcher on node's class/bases name. """
        cls = node.__class__
        try:
            visitmethod = self.cache[cls]
        except KeyError:
            # walk the MRO looking for a method named after the class;
            # e.g. a Tag subclass 'div' still finds self.Tag
            for subclass in cls.__mro__:
                visitmethod = getattr(self, subclass.__name__, None)
                if visitmethod is not None:
                    break
            else:
                visitmethod = self.__object
            self.cache[cls] = visitmethod
        visitmethod(node)
    # the default fallback handler is marked private
    # to avoid clashes with the tag name object
    def __object(self, obj):
        #self.write(obj)
        self.write(escape(unicode(obj)))
    def raw(self, obj):
        # raw nodes bypass escaping entirely
        self.write(obj.uniobj)
    def list(self, obj):
        assert id(obj) not in self.visited
        self.visited[id(obj)] = 1
        for elem in obj:
            self.visit(elem)
    def Tag(self, tag):
        assert id(tag) not in self.visited
        try:
            tag.parent = self.parents[-1]
        except IndexError:
            tag.parent = None
        self.visited[id(tag)] = 1
        tagname = getattr(tag, 'xmlname', tag.__class__.__name__)
        # only break the line for block-level tags
        if self.curindent and not self._isinline(tagname):
            self.write("\n" + u(' ') * self.curindent)
        if tag:
            # non-empty tag: open, recurse into children, close
            self.curindent += self.indent
            self.write(u('<%s%s>') % (tagname, self.attributes(tag)))
            self.parents.append(tag)
            for x in tag:
                self.visit(x)
            self.parents.pop()
            self.write(u('</%s>') % tagname)
            self.curindent -= self.indent
        else:
            # empty tag: either self-closing or <x></x> pair
            nameattr = tagname+self.attributes(tag)
            if self._issingleton(tagname):
                self.write(u('<%s/>') % (nameattr,))
            else:
                self.write(u('<%s></%s>') % (nameattr, tagname))
    def attributes(self, tag):
        # serialize attributes (sorted for deterministic output)
        attrlist = dir(tag.attr)
        attrlist.sort()
        l = []
        for name in attrlist:
            res = self.repr_attribute(tag.attr, name)
            if res is not None:
                l.append(res)
        l.extend(self.getstyle(tag))
        return u("").join(l)
    def repr_attribute(self, attrs, name):
        # returns ' name="value"' or None for dunder names
        if name[:2] != '__':
            value = getattr(attrs, name)
            if name.endswith('_'):
                # trailing underscore lets callers use reserved words
                # like class_ as attribute names
                name = name[:-1]
            if isinstance(value, raw):
                insert = value.uniobj
            else:
                insert = escape(unicode(value))
            return ' %s="%s"' % (name, insert)
    def getstyle(self, tag):
        """ return attribute list suitable for styling. """
        try:
            styledict = tag.style.__dict__
        except AttributeError:
            return []
        else:
            stylelist = [x+': ' + y for x,y in styledict.items()]
            return [u(' style="%s"') % u('; ').join(stylelist)]
    def _issingleton(self, tagname):
        """can (and will) be overridden in subclasses"""
        return self.shortempty
    def _isinline(self, tagname):
        """can (and will) be overridden in subclasses"""
        return False
class HtmlVisitor(SimpleUnicodeVisitor):
    # HTML serializer: knows which tags are void ("single") elements and
    # which are inline (no line break before them).
    single = dict([(x, 1) for x in
                ('br,img,area,param,col,hr,meta,link,base,'
                    'input,frame').split(',')])
    inline = dict([(x, 1) for x in
                ('a abbr acronym b basefont bdo big br cite code dfn em font '
                    'i img input kbd label q s samp select small span strike '
                    'strong sub sup textarea tt u var'.split(' '))])
    def repr_attribute(self, attrs, name):
        # suppress a class_ attribute explicitly set to None
        if name == 'class_':
            value = getattr(attrs, name)
            if value is None:
                return
        return super(HtmlVisitor, self).repr_attribute(attrs, name)
    def _issingleton(self, tagname):
        return tagname in self.single
    def _isinline(self, tagname):
        return tagname in self.inline
class _escape:
    """Callable that xml-escapes the five special XML characters.

    The visible mapping had been mangled to the identity (e.g. '"' -> '"',
    '<' -> '<'), which made escaping a no-op; this restores the standard
    predefined XML character entities.
    """
    def __init__(self):
        # map each special character to its XML entity reference
        self.escape = {
            u('"') : u('&quot;'), u('<') : u('&lt;'), u('>') : u('&gt;'),
            u('&') : u('&amp;'), u("'") : u('&apos;'),
            }
        # alternation of the keys; none of these characters are regex
        # metacharacters, so no quoting is needed
        self.charef_rex = re.compile(u("|").join(self.escape.keys()))
    def _replacer(self, match):
        return self.escape[match.group(0)]
    def __call__(self, ustring):
        """ xml-escape the given unicode string. """
        try:
            ustring = unicode(ustring)
        except UnicodeDecodeError:
            # py2 byte strings that fail implicit decoding: decode leniently
            ustring = unicode(ustring, 'utf-8', errors='replace')
        return self.charef_rex.sub(self._replacer, ustring)
escape = _escape()
| mit | 6143a1ba7412585727b09d986fe7d5ae | 31.8 | 78 | 0.548541 | 3.769265 | false | false | false | false |
pyspeckit/pyspeckit | examples/hcn_example.py | 8 | 4056 | from __future__ import print_function
import pyspeckit
import pylab as pl
import astropy.units as u
# Load the spectrum & properly identify the units
# The data is from http://adsabs.harvard.edu/abs/1999A%26A...348..600P
sp = pyspeckit.Spectrum('02232_plus_6138.txt')
sp.xarr.set_unit(u.km/u.s)
sp.xarr.refX = 88.63184666e9 * u.Hz
sp.xarr.velocity_convention = 'radio'
sp.xarr.xtype='velocity'
sp.unit='$T_A^*$'
# set the error array based on a signal-free part of the spectrum
sp.error[:] = sp.stats((-35,-25))['std']
# Register the fitter
# The HCN fitter is 'built-in' but is not registered by default; this example
# shows how to register a fitting procedure
# 'multi' indicates that it is possible to fit multiple components and a
# background will not automatically be fit
# 5 is the number of parameters in the model (line center,
# line width, and amplitude for the 0-1, 2-1, and 1-1 lines)
sp.Registry.add_fitter('hcn_varyhf',pyspeckit.models.hcn.hcn_varyhf_amp_fitter,5)
# This one is the same, but with fixed relative ampltidue hyperfine components
sp.Registry.add_fitter('hcn_fixedhf',pyspeckit.models.hcn.hcn_amp,3)
# Plot the results
sp.plotter()
# Run the fixed-ampltiude fitter and show the individual fit components
sp.specfit(fittype='hcn_fixedhf',
multifit=None,
guesses=[1,-48,0.6],
show_hyperfine_components=True)
# Now plot the residuals offset below the original
sp.specfit.plotresiduals(axis=sp.plotter.axis,clear=False,yoffset=-1,color='g',label=False)
sp.plotter.reset_limits(ymin=-2)
# Save the figure (this step is just so that an image can be included on the web page)
sp.plotter.savefig('hcn_fixedhf_fit.png')
# Run the variable-ampltiude fitter and show the individual fit components
# Note the different order of the arguments (velocity, width, then three amplitudes)
sp.specfit(fittype='hcn_varyhf',
multifit=None,
guesses=[-48,1,0.2,0.6,0.3],
show_hyperfine_components=True,
clear=True)
# Again plot the residuals
sp.specfit.plotresiduals(axis=sp.plotter.axis,clear=False,yoffset=-1,color='g',label=False)
sp.plotter.reset_limits(ymin=-2)
# Save the figure
sp.plotter.savefig('hcn_freehf_fit.png')
# now do the same thing, but allow the widths to vary too
# there are 7 parameters:
# 1. the centroid
# 2,3,4 - the amplitudes of the 0-1, 2-1, and 1-1 lines
# 5,6,7 - the widths of the 0-1, 2-1, and 1-1 lines
sp.Registry.add_fitter('hcn_varyhf_width',pyspeckit.models.hcn.hcn_varyhf_amp_width_fitter,7)
# Run the fitter
sp.specfit(fittype='hcn_varyhf_width',
multifit=None,
guesses=[-48,0.2,0.6,0.3,1,1,1],
show_hyperfine_components=True,
clear=True)
# print the fitted parameters:
print(sp.specfit.parinfo)
# Param #0 CENTER0 = -51.865 +/- 0.0525058
# Param #1 AMP10-010 = 1.83238 +/- 0.0773993 Range: [0,inf)
# Param #2 AMP12-010 = 5.26566 +/- 0.0835981 Range: [0,inf)
# Param #3 AMP11-010 = 3.02621 +/- 0.0909095 Range: [0,inf)
# Param #4 WIDTH10-010 = 2.16711 +/- 0.118651 Range: [0,inf)
# Param #5 WIDTH12-010 = 1.90987 +/- 0.0476163 Range: [0,inf)
# Param #6 WIDTH11-010 = 1.64409 +/- 0.076998 Range: [0,inf)
sp.specfit.plotresiduals(axis=sp.plotter.axis,clear=False,yoffset=-1,color='g',label=False)
sp.plotter.reset_limits(ymin=-2)
# Save the figure (this step is just so that an image can be included on the web page)
sp.plotter.savefig('hcn_freehf_ampandwidth_fit.png')
# Finally, how well does a 2-component fit work?
sp.specfit(fittype='hcn_fixedhf',
multifit=None,
guesses=[1,-48,0.6,0.1,-46,0.6],
show_hyperfine_components=True,
clear=True)
sp.specfit.plotresiduals(axis=sp.plotter.axis,clear=False,yoffset=-1,color='g',label=False)
sp.plotter.reset_limits(ymin=-2)
# Save the figure (this step is just so that an image can be included on the web page)
sp.plotter.savefig('hcn_fixedhf_fit_2components.png')
| mit | 57c92bd4697522b7121c8838c2868258 | 39.56 | 93 | 0.685897 | 2.801105 | false | false | false | false |
pyspeckit/pyspeckit | pyspeckit/spectrum/models/n2dp.py | 4 | 8358 | """
===========
N2D+ fitter
===========
Reference for line params:
Dore (priv. comm.) line frequencies in CDMS,
line strength can also be obtained from Splatalogue
L. Dore, P. Caselli, S. Beninati, T. Bourke, P. C. Myers and G. Cazzoli A&A 413, 1177-1181 (2004)
http://adsabs.harvard.edu/abs/2004A%26A...413.1177D
L. Pagani, F. Daniel, and M. L. Dubernet A\%A 494, 719-727 (2009)
DOI: 10.1051/0004-6361:200810570
In this version added N2D+(1-0). Data taken from the articles above.
Module API
^^^^^^^^^^
"""
from . import hyperfine
import astropy.units as u
# line_names = ['J1-0', 'J2-1', 'J3-2',]
# line_names = ['J2-1', 'J3-2',]
freq_dict_cen ={
'J1-0': 77109.6162e6,
'J2-1': 154217.1805e6,
'J3-2': 231321.9119e6,
}
# Velocity offsets (km/s) of each hyperfine component relative to the line
# center of its rotational transition.  The J3-2 block previously repeated
# the key 'J3-2_02' (shadowing the first entry) where 'J3-2_12' was
# intended, leaving the dict one component short of the 44 J3-2 hyperfines
# present in line_strength_dict.
voff_lines_dict={
    ####### J 1-0
    'J1-0_01': -9.6730,
    'J1-0_02': -9.6730,
    'J1-0_03': -9.6730,
    'J1-0_04': -0.7537,
    'J1-0_05': -0.7537,
    'J1-0_06': -0.7537,
    'J1-0_07': 0.0000,
    'J1-0_08': 1.1314,
    'J1-0_09': 1.1314,
    'J1-0_10': 6.6519,
    'J1-0_11': 6.6519,
    'J1-0_12': 6.6519,
    'J1-0_13': 7.1766,
    'J1-0_14': 7.1766,
    'J1-0_15': 8.3080,
    ####### J 2-1
    'J2-1_01': -5.6031,
    'J2-1_02': -5.5332,
    'J2-1_03': -5.3617,
    'J2-1_04': -5.0993,
    'J2-1_05': -4.9677,
    'J2-1_06': -4.7052,
    'J2-1_07': -3.8195,
    'J2-1_08': -3.5571,
    'J2-1_09': -2.8342,
    'J2-1_10': -2.3388,
    'J2-1_11': -1.9449,
    'J2-1_12': -1.9002,
    'J2-1_13': -1.7733,
    'J2-1_14': -1.3965,
    'J2-1_15': -1.0025,
    'J2-1_16': -0.7968,
    'J2-1_17': -0.5740,
    'J2-1_18': -0.2311,
    'J2-1_19': -0.0085,
    'J2-1_20': 0.0000,
    'J2-1_21': 0.1351,
    'J2-1_22': 0.1457,
    'J2-1_23': 0.1886,
    'J2-1_24': 0.2538,
    'J2-1_25': 0.6165,
    'J2-1_26': 0.7541,
    'J2-1_27': 0.8789,
    'J2-1_28': 2.5594,
    'J2-1_29': 3.0143,
    'J2-1_30': 3.0632,
    'J2-1_31': 3.1579,
    'J2-1_32': 3.4572,
    'J2-1_33': 3.6394,
    'J2-1_34': 3.7234,
    'J2-1_35': 3.9567,
    'J2-1_36': 4.2049,
    'J2-1_37': 4.5817,
    'J2-1_38': 4.6054,
    'J2-1_39': 8.4164,
    'J2-1_40': 9.0414,
    ####### J 3-2
    'J3-2_01': -3.7164,
    'J3-2_02': -3.5339,
    'J3-2_03': -3.2997,
    'J3-2_04': -3.2130,
    'J3-2_05': -3.0633,
    'J3-2_06': -2.8958,
    'J3-2_07': -2.7424,
    'J3-2_08': -2.6466,
    'J3-2_09': -2.5748,
    'J3-2_10': -1.9177,
    'J3-2_11': -1.2333,
    'J3-2_12': -0.7628,  # was mistyped as a second 'J3-2_02'
    'J3-2_13': -0.7590,
    'J3-2_14': -0.7306,
    'J3-2_15': -0.5953,
    'J3-2_16': -0.5765,
    'J3-2_17': -0.3419,
    'J3-2_18': -0.0925,
    'J3-2_19': -0.0210,
    'J3-2_20': 0.0000,
    'J3-2_21': 0.0065,
    'J3-2_22': 0.0616,
    'J3-2_23': 0.0618,
    'J3-2_24': 0.0675,
    'J3-2_25': 0.0748,
    'J3-2_26': 0.2212,
    'J3-2_27': 0.2691,
    'J3-2_28': 0.4515,
    'J3-2_29': 0.5422,
    'J3-2_30': 0.5647,
    'J3-2_31': 0.6050,
    'J3-2_32': 0.6596,
    'J3-2_33': 0.9222,
    'J3-2_34': 1.0897,
    'J3-2_35': 1.9586,
    'J3-2_36': 2.0471,
    'J3-2_37': 2.5218,
    'J3-2_38': 2.5500,
    'J3-2_39': 2.6156,
    'J3-2_40': 3.0245,
    'J3-2_41': 3.1786,
    'J3-2_42': 3.3810,
    'J3-2_43': 3.6436,
    'J3-2_44': 4.2066,
    }
line_strength_dict = {
####### J 1-0
'J1-0_01': 0.026018,
'J1-0_02': 0.065408,
'J1-0_03': 0.019683,
'J1-0_04': 0.004390,
'J1-0_05': 0.035006,
'J1-0_06': 0.071714,
'J1-0_07': 0.259259,
'J1-0_08': 0.156212,
'J1-0_09': 0.028973,
'J1-0_10': 0.041311,
'J1-0_11': 0.013379,
'J1-0_12': 0.056422,
'J1-0_13': 0.156214,
'J1-0_14': 0.028973,
'J1-0_15': 0.037038,
####### J 2-1
'J2-1_01': 0.008262,
'J2-1_02': 0.005907,
'J2-1_03': 0.031334,
'J2-1_04': 0.013833,
'J2-1_05': 0.013341,
'J2-1_06': 0.010384,
'J2-1_07': 0.000213,
'J2-1_08': 0.000675,
'J2-1_09': 0.000150,
'J2-1_10': 0.001202,
'J2-1_11': 0.000963,
'J2-1_12': 0.000878,
'J2-1_13': 0.002533,
'J2-1_14': 0.000362,
'J2-1_15': 0.000162,
'J2-1_16': 0.021268,
'J2-1_17': 0.031130,
'J2-1_18': 0.000578,
'J2-1_19': 0.001008,
'J2-1_20': 0.200000,
'J2-1_21': 0.111666,
'J2-1_22': 0.088138,
'J2-1_23': 0.142511,
'J2-1_24': 0.011550,
'J2-1_25': 0.027472,
'J2-1_26': 0.012894,
'J2-1_27': 0.066406,
'J2-1_28': 0.013082,
'J2-1_29': 0.003207,
'J2-1_30': 0.061847,
'J2-1_31': 0.004932,
'J2-1_32': 0.035910,
'J2-1_33': 0.011102,
'J2-1_34': 0.038958,
'J2-1_35': 0.019743,
'J2-1_36': 0.004297,
'J2-1_37': 0.001830,
'J2-1_38': 0.000240,
'J2-1_39': 0.000029,
'J2-1_40': 0.000004,
####### J 3-2
'J3-2_01': 0.001842,
'J3-2_02': 0.001819,
'J3-2_03': 0.003544,
'J3-2_04': 0.014100,
'J3-2_05': 0.011404,
'J3-2_06': 0.000088,
'J3-2_07': 0.002201,
'J3-2_08': 0.002153,
'J3-2_09': 0.000059,
'J3-2_10': 0.000058,
'J3-2_11': 0.000203,
'J3-2_12': 0.000259,
'J3-2_13': 0.000248,
'J3-2_14': 0.000437,
'J3-2_15': 0.010215,
'J3-2_16': 0.000073,
'J3-2_17': 0.007445,
'J3-2_18': 0.000155,
'J3-2_19': 0.000272,
'J3-2_20': 0.174603,
'J3-2_21': 0.018678,
'J3-2_22': 0.100524,
'J3-2_23': 0.135563,
'J3-2_24': 0.124910,
'J3-2_25': 0.060970,
'J3-2_26': 0.088513,
'J3-2_27': 0.001085,
'J3-2_28': 0.094480,
'J3-2_29': 0.013955,
'J3-2_30': 0.007236,
'J3-2_31': 0.022222,
'J3-2_32': 0.047921,
'J3-2_33': 0.015427,
'J3-2_34': 0.000070,
'J3-2_35': 0.000796,
'J3-2_36': 0.001373,
'J3-2_37': 0.007147,
'J3-2_38': 0.016574,
'J3-2_39': 0.009776,
'J3-2_40': 0.000995,
'J3-2_41': 0.000491,
'J3-2_42': 0.000067,
'J3-2_43': 0.000039,
'J3-2_44': 0.000010,
}
# freq_dict = {
# 'J2-1': (voff_lines_dict['J2-1']*u.km/u.s).to(u.GHz, equivalencies=u.doppler_radio(freq_dict_cen['J2-1']*u.Hz)).value,
# 'J3-2': (voff_lines_dict['J3-2']*u.km/u.s).to(u.GHz, equivalencies=u.doppler_radio(freq_dict_cen['J3-2']*u.Hz)).value,
# }
# Get frequency dictionary in Hz based on the offset velocity and rest frequency
conv_J10=u.doppler_radio(freq_dict_cen['J1-0']*u.Hz)
conv_J21=u.doppler_radio(freq_dict_cen['J2-1']*u.Hz)
conv_J32=u.doppler_radio(freq_dict_cen['J3-2']*u.Hz)
freq_dict = {
name: ((voff_lines_dict[name]*u.km/u.s).to(u.Hz, equivalencies=conv_J10).value) for name in voff_lines_dict.keys() if "J1-0" in name
}
freq_dict.update({
name: ((voff_lines_dict[name]*u.km/u.s).to(u.Hz, equivalencies=conv_J21).value) for name in voff_lines_dict.keys() if "J2-1" in name
})
freq_dict.update({
name: ((voff_lines_dict[name]*u.km/u.s).to(u.Hz, equivalencies=conv_J32).value) for name in voff_lines_dict.keys() if "J3-2" in name
})
# I don't know yet how to use this parameter... in CLASS it does not exist
# Note to Jaime: this is the sum of the degeneracy values for all hyperfines
# for a given line; it gives the relative weights between the J=2-1 and J=3-2
# lines, for example (the hyperfine weights are treated as normalized within
# one rotational transition)
w10 = sum(val for name,val in line_strength_dict.items() if 'J1-0' in name)
w21 = sum(val for name,val in line_strength_dict.items() if 'J2-1' in name)
w32 = sum(val for name,val in line_strength_dict.items() if 'J3-2' in name)
relative_strength_total_degeneracy = {
name : w10 for name in line_strength_dict.keys() if "J1-0" in name
}
relative_strength_total_degeneracy.update ({
name : w21 for name in line_strength_dict.keys() if "J2-1" in name
})
relative_strength_total_degeneracy.update({
name : w32 for name in line_strength_dict.keys() if "J3-2" in name
})
# Get the list of line names from the previous lists
line_names = [name for name in voff_lines_dict.keys()]
# 'J2-1': np.array([1]*len(voff_lines_dict['J2-1'])),
# 'J3-2': np.array([1]*len(voff_lines_dict['J3-2'])),
# }
# aval_dict = {
# # 'J1-0': 10**(-4.90770),
# 'J2-1': 10**(-3.92220),
# 'J3-2': 10**(-3.35866),
# }
n2dp_vtau = hyperfine.hyperfinemodel(line_names, voff_lines_dict, freq_dict,
line_strength_dict,
relative_strength_total_degeneracy)
n2dp_vtau_fitter = n2dp_vtau.fitter
n2dp_vtau_vheight_fitter = n2dp_vtau.vheight_fitter
n2dp_vtau_tbg_fitter = n2dp_vtau.background_fitter
| mit | 601d33c2b4778eb49a4ec4b3890f34c4 | 27.141414 | 136 | 0.527638 | 1.958294 | false | false | false | false |
pyspeckit/pyspeckit | pyspeckit/config.py | 7 | 4845 | """
==========================
PySpecKit config system
==========================
Create decorator used to modify inputs to various __call__ methods
to reflect preferences we set in ~/.pyspeckit/config.
To see what values exist in config file, do:
from config import mycfg
mycfg.keys()
To decorate a new __call__ method, do:
from config import ConfigDescriptor as cfgdec
@cfgdec
def __call__(self, **kwargs):
pass # do something!
.. moduleauthor:: Adam Ginsburg <adam.g.ginsburg@gmail.com>
.. moduleauthor:: Jordan Mirocha <mirochaj@gmail.com>
"""
import os
import inspect
import warnings
class dotdictify(dict):
    """
    Class grabbed from
    http://stackoverflow.com/questions/3031219/python-recursively-access-dict-via-attributes-as-well-as-index-access
    to allow "dot" access to dictionary keys
    """
    marker = object()  # sentinel distinguishing "missing" from stored None

    def __init__(self, value=None):
        if value is None:
            return
        if not isinstance(value, dict):
            raise TypeError('expected dict')
        for key, val in value.items():
            self[key] = val  # goes through __setitem__, wrapping sub-dicts

    def __setitem__(self, key, value):
        # plain dicts are converted so nested keys get dot access too
        needs_wrapping = (isinstance(value, dict)
                          and not isinstance(value, dotdictify))
        dict.__setitem__(self, key,
                         dotdictify(value) if needs_wrapping else value)

    def __getitem__(self, key):
        # auto-vivify: a missing key produces (and stores) an empty node
        entry = self.get(key, dotdictify.marker)
        if entry is dotdictify.marker:
            entry = dotdictify()
            dict.__setitem__(self, key, entry)
        return entry

    __setattr__ = __setitem__
    __getattr__ = __getitem__
cfgDefaults = dotdictify(dict(
color = 'k',
composite_fit_color = 'r',
component_fit_color = 'blue',
baseline_fit_color = 'orange',
lw = 0.5,
composite_lw = 0.75,
component_lw = 0.75,
show_components = False,
autoannotate = True,
interactive = False,
autorefresh = True,
silent = True,
debug = False,
WARN = True,
))
class ConfigParser:
    def __init__(self, fn = None):
        """
        Initialize the cfg dictionary, optionally overriding the defaults
        with values read from the config file ``fn``.

        Each non-blank, non-comment line is expected to look like
        ``key = value``; values are coerced to bool/None/str/float.

        Note: previously the file-reading branch aliased (and therefore
        mutated) the module-level ``cfgDefaults``; we now work on a copy,
        and the file handle is closed via a context manager.
        """
        return_dict = dotdictify(cfgDefaults)  # copy: leave module defaults intact
        if fn is not None:
            with open(fn) as f:
                for line in f:
                    if not line.strip(): continue
                    thisline = line.split()
                    if thisline[0][0] == '#': continue
                    if thisline[2] in ['True', 1]: return_dict[thisline[0]] = True
                    elif thisline[2] in ['False', 0]: return_dict[thisline[0]] = False
                    elif thisline[2] == 'None': return_dict[thisline[0]] = None
                    elif thisline[2].isalpha(): return_dict[thisline[0]] = str(thisline[2])
                    else: return_dict[thisline[0]] = float(thisline[2])
        self.cfg = return_dict
__fn = os.path.expanduser("~/.pyspeckit/config")
if os.path.exists(__fn):
mycfg = dotdictify(ConfigParser(__fn).cfg)
else:
mycfg = dotdictify(ConfigParser().cfg)
def ConfigDescriptor(f):
    def decorator(self, *args, **kwargs):
        """
        This is our decorator function, used to modify the inputs of __call__
        methods to reflect preferences we set in config file.
        Notes:
        inspect.getargspec will tell us the names of all arguments and their default values.
        Later we'll have to be more careful - all_defs only makes entries for arguments that actually
        have default values
        """
        with warnings.catch_warnings():
            # ignore deprecation warning: getargspec does what we want here and
            # we have to jump through hoops to get "signature" to work.
            # This is not python4-compatible, but I don't have internet access
            # now and can't figure out how to make it so.
            warnings.simplefilter('ignore')
            all_args, all_vars, all_keys, all_defs = inspect.getargspec(f)
        all_args.pop(0) # pop self
        # Construct dictionary containing all of f's keyword arguments
        # NOTE(review): this zips all_args against all_defs positionally,
        # which is only correct if every argument after self has a default
        # value -- confirm for each decorated __call__.
        argsdefs = {}
        for i, arg in enumerate(all_args):
            argsdefs[arg] = all_defs[i]
        # Include these in our new_kwargs dictionary
        new_kwargs = argsdefs
        # Read in config file and replace keyword arguments that have been defined in it
        for arg in new_kwargs:
            if arg in mycfg: new_kwargs[arg] = mycfg[arg]
        # If we've changed anything on call, reflect this in new_kwargs
        for arg in kwargs:
            new_kwargs[arg] = kwargs[arg]
        # NOTE(review): f's return value is intentionally(?) discarded here
        f(self, *args, **new_kwargs)
    # documentation should be passed on, else sphinx doesn't work and the user can't access the docs
    decorator.__doc__ = f.__doc__
    decorator.__defaults__ = f.__defaults__
    decorator.__repr__ = f.__repr__
    return decorator
| mit | 99b24a85748542597ef09817b70a7e46 | 31.086093 | 116 | 0.593602 | 3.984375 | false | true | false | false |
pyspeckit/pyspeckit | examples/ammonia_vtau_multitem_example.py | 5 | 5782 | import numpy as np
import pyspeckit
from astropy import units as u
from pyspeckit.spectrum.models import ammonia_constants, ammonia, ammonia_hf
from pyspeckit.spectrum.models.ammonia_constants import freq_dict
from pyspeckit.spectrum.units import SpectroscopicAxis, SpectroscopicAxes
# Step 1. Generate a synthetic spectrum. Already have a real spectrum? Skip
# to step 2!
# Generate a synthetic spectrum based off of 3 NH3 lines
# Note that they are converted to GHz first
xarr11 = SpectroscopicAxis(np.linspace(-30, 30, 100)*u.km/u.s,
velocity_convention='radio',
refX=freq_dict['oneone']).as_unit(u.GHz)
xarr22 = SpectroscopicAxis(np.linspace(-40, 40, 100)*u.km/u.s,
velocity_convention='radio',
refX=freq_dict['twotwo']).as_unit(u.GHz)
xarr33 = SpectroscopicAxis(np.linspace(-50, 50, 100)*u.km/u.s,
velocity_convention='radio',
refX=freq_dict['threethree']).as_unit(u.GHz)
# Merge the three X-axes into a single axis
xarr = SpectroscopicAxes([xarr11,xarr22,xarr33])
# Compute a synthetic model that is made of two temperature components with
# identical velocities
synthspec = (ammonia.ammonia(xarr, trot=20, ntot=15, fortho=0.5, xoff_v=0.0,
width=1.0) +
ammonia.ammonia(xarr, trot=50, ntot=14, fortho=0.5, xoff_v=0.0,
width=1.0))
# Create the Spectrum object
spectrum = pyspeckit.Spectrum(xarr=xarr, data=synthspec, header={})
# Step 2. You have a spectrum.
# plot it
spectrum.plotter()
# Use the multi-tex/multi-tau model generator to build up a model function
# You can use any set of oneone, twotwo, ..., eighteight (no 9-9 or higher)
# This sets the number of parameters to be fit: 2+2*(n_transitions)
fitter = ammonia_hf.nh3_vtau_multimodel_generator(['oneone', 'twotwo',
'threethree'])
# Register the fitter - i.e., tell pyspeckit where it is and how to use it
spectrum.specfit.Registry.add_fitter('nh3_vtau_123', fitter, fitter.npars)
# These are the parameter names, approximately:
# parnames=['center','width','Tex11','tau11','Tex22','tau22','Tex33','tau33'],
# Need to give some input guesses. We start with something wrong-ish: -5 km/s,
# 1.2 km/s width, and 15 K + 0.5 tau for all 3 lines
guesses = [-5, 1.2, 15, 0.5, 15, 0.5, 15, 0.5,]
# Plot up the guessed model
spectrum.plotter.axis.plot(spectrum.xarr,
fitter.n_modelfunc(guesses)(spectrum.xarr), 'b')
# Run the fit!
spectrum.specfit(fittype='nh3_vtau_123', guesses=guesses)
# display the correct and fitted answers
print("Low column version:")
def printthings(ammonia=ammonia.ammonia, xarr=xarr):
print("Real optical depths of component 1: ",[ammonia(xarr, trot=20,
ntot=15, fortho=0.5,
xoff_v=0.0,
width=1.0,
return_tau=True)[x]
for x in ['oneone', 'twotwo',
'threethree']])
print("Real optical depths of component 2: ",[ammonia(xarr, trot=50,
ntot=14, fortho=0.5,
xoff_v=0.0,
width=1.0,
return_tau=True)[x]
for x in ['oneone', 'twotwo',
'threethree']])
printthings()
print("Fitted parameters: ",spectrum.specfit.parinfo)
# It works, but the covariances between tex and tau are large.
# So, another example with higher tau (and therefore... less degenerate?)
synthspec = (ammonia.ammonia(xarr, trot=20, ntot=16, fortho=0.5, xoff_v=0.0,
width=1.0) +
ammonia.ammonia(xarr, trot=50, ntot=15, fortho=0.5, xoff_v=0.0,
width=1.0))
spectrum2 = pyspeckit.Spectrum(xarr=xarr, data=synthspec, header={})
spectrum2.plotter()
spectrum2.specfit.Registry.add_fitter('nh3_vtau_123', fitter, fitter.npars)
spectrum2.specfit(fittype='nh3_vtau_123', guesses=guesses)
# We can also examine what tau really should have been... kinda.
print("High column version:")
def printthings(ammonia=ammonia.ammonia, xarr=xarr):
print("Real optical depths of component 1: ",[ammonia(xarr, trot=20,
ntot=16, fortho=0.5,
xoff_v=0.0,
width=1.0,
return_tau=True)[x]
for x in ['oneone', 'twotwo',
'threethree']])
print("Real optical depths of component 2: ",[ammonia(xarr, trot=50,
ntot=15, fortho=0.5,
xoff_v=0.0,
width=1.0,
return_tau=True)[x]
for x in ['oneone', 'twotwo',
'threethree']])
printthings()
print("Fitted parameters: ",spectrum2.specfit.parinfo)
| mit | ecd11cf157e39e426d4c84dd9348660f | 50.625 | 79 | 0.508648 | 3.692209 | false | false | false | false |
pyspeckit/pyspeckit | pyspeckit/spectrum/speclines/optical.py | 8 | 2399 | """
Storage for optical spectral line information.
"""
from __future__ import print_function
import numpy as np
def hydrogen(nu, nl, vacuum=True):
    """
    Compute the rest wavelength of a Hydrogen recombination line, in
    Angstroms, for the transition from upper level ``nu`` to lower level
    ``nl`` via the Rydberg formula (with the reduced-mass correction for
    the finite proton mass).  If ``vacuum`` is False the vacuum wavelength
    is converted to an air wavelength (requires the ``ref_index`` module).
    """
    rydberg_constant = 10973731.6  # m^-1
    proton_to_electron = 1836.15266  # mass ratio
    inv_level_term = 1. / float(nl) ** 2 - 1. / float(nu) ** 2
    # 1e10 converts meters to Angstroms
    lam_vacuum = (1e10 / (rydberg_constant * inv_level_term)
                  * (1.0 + 1.0 / proton_to_electron))
    if vacuum:
        return lam_vacuum
    import ref_index
    return ref_index.vac2air(lam_vacuum / 10) * 10
# Format: name, units, vacuum?, display name
lines = {
"H_alpha": [6564.614, 'Angstrom', True, r'$\mathrm{H}\alpha$'],
"H_beta": [4862.721, 'Angstrom', True, r'$\mathrm{H}\beta$'],
"OIIIa": [4960.295, 'Angstrom', True, r'$[\mathrm{OIII}]\lambda 4959\AA$'],
"OIIIb": [5008.239, 'Angstrom', True, r'$[\mathrm{OIII}]\lambda 5007\AA$'],
"NIIa": [6549.860, 'Angstrom', True, r'$[\mathrm{NII}]\lambda 6549\AA$'],
"NIIb": [6585.270, 'Angstrom', True, r'$[\mathrm{NII}]\lambda 6585\AA$'],
"SIIa": [6718.290, 'Angstrom', True, r'$[\mathrm{SII}]\lambda 6718\AA$'],
"SIIb": [6732.680, 'Angstrom', True, r'$[\mathrm{SII}]\lambda 6732\AA$'],
"OI": [6300.304, 'Angstrom', True, r'$[\mathrm{OI}]\lambda 6300\AA$'],
"OII": [3727.319, 'Angstrom', True, r'$[\mathrm{OII}]\lambda 3727\AA$'],
"NeIII": [3868.760, 'Angstrom', True, r'$[\mathrm{OII}]\lambda 3869\AA$']
}
def get_optical_lines():
    """
    Build a dictionary of optical line data, sorted by rest wavelength.

    Returns arrays under the keys 'name', 'xarr' (wavelength), 'xunits',
    'xvac' (vacuum flag) and 'dname' (display name), index-aligned.
    Note: this extends the module-level ``lines`` table in place with the
    Balmer H(3..6)->2 transitions.
    """
    # add Balmer-series hydrogen lines to the shared table
    for n_upper in range(3, 7):
        label = 'H_%i-2' % n_upper
        lines[label] = [hydrogen(n_upper, 2), 'Angstrom', True, label]
    # order every entry by its rest wavelength
    ordered = sorted(lines.items(), key=lambda item: item[1][0])
    name = np.array([key for key, entry in ordered])
    xarr = np.array([entry[0] for key, entry in ordered])
    xunits = np.array([entry[1] for key, entry in ordered])
    xvac = np.array([entry[2] for key, entry in ordered])
    dname = np.array([entry[3] for key, entry in ordered])
    return {'name': name, 'xarr': xarr, 'xunits': xunits, 'xvac': xvac,
            'dname': dname}
| mit | 5845f9c4849c64f9449e576f999e7f8f | 32.319444 | 97 | 0.564402 | 2.647903 | false | false | false | false |
pyspeckit/pyspeckit | pyspeckit/spectrum/classes.py | 3 | 47345 | """
==========================
PySpecKit Spectrum classes
==========================
The spectrum module consists of the Spectrum class, with child classes ObsBlock
and Spectra for multi-spectrum analysis of different types.
The Spectrum class is the main functional code.
ObsBlocks are containers of multiple spectra of different objects
The Spectra class is a container of multiple spectra of the *same* object at
different wavelengths/frequencies
.. moduleauthor:: Adam Ginsburg <adam.g.ginsburg@gmail.com>
.. moduleauthor:: Jordan Mirocha <mirochaj@gmail.com>
"""
from __future__ import print_function
import numpy as np
from six import iteritems
try:
import astropy.io.fits as pyfits
except ImportError:
import pyfits
from . import smooth as sm
from . import readers
from . import plotters
from . import writers
from . import baseline
from . import units
from . import measurements
from . import speclines
from . import interpolation
from . import moments as moments_module
from . import fitters
from . import history
import copy
from astropy import log
from ..specwarnings import warn, PyspeckitWarning
try:
import atpy
atpyOK = True
except ImportError:
atpyOK = False
# specutils -> legacy specutils
# try:
# import specutils
# specutilsOK = True
# except ImportError:
# specutilsOK = False
try:
from Spectrum1D import Spectrum1D # inherit from astropy
except ImportError:
Spectrum1D = object
try:
import astropy.units as u
except ImportError:
u = None
class BaseSpectrum(object):
    """
    Base class for all pyspeckit spectrum containers.

    Holds the data, error, and spectroscopic X-axis arrays along with the
    plotter, fitter, and baseline tool instances that operate on them.
    """
    from .interpolation import interpnans

    # name used by __repr__ (subclasses override this)
    __class_name__ = 'BaseSpectrum'

    def __init__(self, filename_or_magic=None, filetype=None, xarr=None,
                 data=None, error=None, header=None, doplot=False,
                 maskdata=True, unit=None, plotkwargs=None, xarrkwargs=None,
                 model_registry=None, filename=None,
                 **kwargs):
        """
        Create a Spectrum object.

        Must either pass in a filename or ALL of xarr, data, and header, plus
        optionally error.

        kwargs are passed to the file reader

        Parameters
        ----------
        filename_or_magic : string or something else
            The filename or something with an hdu attribute. If data, xarr, and error are
            specified, leave filename blank.
        filetype : string
            Specify the file type (only needed if it cannot be automatically
            determined from the filename)
        xarr : `units.SpectroscopicAxis` or `np.ndarray`
            The X-axis of the data. If it is an np.ndarray, you must pass
            `xarrkwargs` or a valid header if you want to use any of the unit
            functionality.
        data : `np.ndarray`
            The data array (must have same length as xarr)
        error : `np.ndarray`
            The error array (must have same length as the data and xarr arrays)
        header : `pyfits.Header` or dict
            The header from which to read unit information. Needs to be a
            `pyfits.Header` instance or another dictionary-like object with the
            appropriate information
        maskdata : boolean
            turn the array into a masked array with all nan and inf values masked
        doplot : boolean
            Plot the spectrum after loading it?
        plotkwargs : dict
            keyword arguments to pass to the plotter
        xarrkwargs : dict
            keyword arguments to pass to the SpectroscopicAxis initialization
            (can be used in place of a header)
        unit : str
            The data unit
        filename : string
            The file to read the spectrum from. If data, xarr, and error are
            specified, leave filename blank.

        Examples
        --------

        >>> sp = pyspeckit.Spectrum(data=np.random.randn(100),
                    xarr=np.linspace(-50, 50, 100), error=np.ones(100)*0.1,
                    xarrkwargs={'unit':'km/s', 'refX':4.829, 'refX_unit':'GHz',
                        'xtype':'VLSR-RAD'}, header={})

        >>> xarr = pyspeckit.units.SpectroscopicAxis(np.linspace(-50,50,100),
                    units='km/s', refX=6562.83, refX_unit='angstroms')
        >>> data = np.random.randn(100)*5 + np.random.rand(100)*100
        >>> err = np.sqrt(data/5.)*5. # Poisson noise
        >>> sp = pyspeckit.Spectrum(data=data, error=err, xarr=xarr, header={})

        >>> # if you already have a simple fits file
        >>> sp = pyspeckit.Spectrum('test.fits')
        """
        # avoid mutable default arguments (shared dicts across calls)
        plotkwargs = {} if plotkwargs is None else plotkwargs
        xarrkwargs = {} if xarrkwargs is None else xarrkwargs

        if filename_or_magic is not None:
            if hasattr(filename_or_magic, 'hdu'):
                # Read the HDU contents in place.  The previous approach,
                # ``return self.from_hdu(...)``, was broken: returning a
                # non-None value from __init__ raises a TypeError at runtime.
                data, error, xarr, header = readers.open_1d_pyfits(
                    filename_or_magic.hdu)
            elif filename is None:
                filename = filename_or_magic
            else:
                raise ValueError("filename_or_magic was specified incorrectly")

        if filename:
            if error is not None:
                raise ValueError("When reading from a file, you cannot specify"
                                 "the error as an array. Instead, set it "
                                 "separately after reading the file, e.g.: \n"
                                 "sp = Spectrum(filename)\n"
                                 "sp.error[:] = rms")
            if xarr is not None:
                raise ValueError("Cannot specify xarr when reading from a "
                                 "file. If the xarr in the file is incorrect,"
                                 "change it after reading the file in, i.e., "
                                 "set sp.xarr on another line.")
            if filetype is None:
                suffix = filename.rsplit('.',1)[1]
                if suffix in readers.suffix_types:
                    # use the default reader for that suffix
                    filetype = readers.suffix_types[suffix][0]
                    reader = readers.readers[filetype]
                else:
                    raise TypeError("File with suffix %s is not recognized." % suffix)
            else:
                if filetype in readers.readers:
                    reader = readers.readers[filetype]
                else:
                    raise TypeError("Filetype %s not recognized" % filetype)
            self.data,self.error,self.xarr,self.header = reader(filename,**kwargs)

            # these should probably be replaced with registerable function s...
            if filetype in ('fits','tspec','pyfits','sdss'):
                self.parse_header(self.header)
            elif filetype == 'txt':
                self.parse_text_header(self.header)
            elif filetype in ('hdf5', 'h5'):
                self.parse_hdf5_header(self.header)

            if isinstance(filename,str):
                # Everything prior to .fits or .txt
                self.fileprefix = filename.rsplit('.', 1)[0]
        elif xarr is not None and data is not None:
            # technically, this is unpythonic. But I don't want to search for
            # all 10 attributes required.
            if issubclass(type(xarr),units.SpectroscopicAxis):
                self.xarr = xarr
            else:
                self.xarr = units.SpectroscopicAxis(xarr, **xarrkwargs)
            self.data = data
            if error is not None:
                self.error = error
            else:
                self.error = np.zeros_like(data)
            if hasattr(header,'get'):
                if not isinstance(header, pyfits.Header):
                    cards = [pyfits.Card(k, header[k]) for k in header]
                    self.header = pyfits.Header(cards)
                else:
                    self.header = header
            else: # set as blank
                warn("WARNING: No header given. Creating an empty one.",
                     PyspeckitWarning)
                self.header = pyfits.Header()
            self.parse_header(self.header)
        else:
            raise ValueError("Must either give a filename or xarr and data "
                             "keywords to instantiate a pyspeckit.Spectrum")

        if hasattr(self.data,'unit'):
            # TODO: use the quantity more appropriately
            self.unit = str(self.data.unit)
            self.data = self.data.value
        if hasattr(self.error, 'unit'):
            # errors have to have the same units as data, but then should be
            # converted to arrays.  Long term, we'd like to have everything
            # be treated internally as a Quantity, but... not yet.
            self.error = self.error.to(self.unit).value

        if maskdata:
            # mask the error wherever the *data* are invalid so the two arrays
            # always share a common mask
            if hasattr(self.data,'mask'):
                self.data.mask += np.isnan(self.data) + np.isinf(self.data)
                if hasattr(self.error,'mask'):
                    self.error.mask += np.isnan(self.data) + np.isinf(self.data)
            else:
                self.data = np.ma.masked_where(np.isnan(self.data) + np.isinf(self.data), self.data)
                self.error = np.ma.masked_where(np.isnan(self.data) + np.isinf(self.data), self.error)

        # it is very important that this be done BEFORE the spectofit is set!
        self._sort()

        self.plotter = plotters.Plotter(self)
        self._register_fitters(registry=model_registry)
        self.specfit = fitters.Specfit(self, Registry=self.Registry)
        self.baseline = baseline.Baseline(self)

        self.speclines = speclines

        # Special.  This needs to be modified to be more flexible; for now I need it to work for nh3
        self.plot_special = None
        self.plot_special_kwargs = {}

        if unit is not None:
            self._unit = unit
        elif not hasattr(self, '_unit'):
            self._unit = u.dimensionless_unscaled

        if doplot:
            self.plotter(**plotkwargs)

    @property
    def unit(self):
        """The flux/brightness unit of the data."""
        return self._unit

    @property
    def units(self):
        """Deprecated alias for `unit`."""
        # The old code called log.warning(msg, DeprecationWarning), which
        # treats the warning class as a %-format argument and triggers a
        # logging error; issue a real DeprecationWarning instead.
        warn("'units' is deprecated; please use 'unit'", DeprecationWarning)
        return self._unit

    @unit.setter
    def unit(self, value):
        self._unit = value

    @units.setter
    def units(self, value):
        warn("'units' is deprecated; please use 'unit'", DeprecationWarning)
        self._unit = value

    @property
    def data_quantity(self):
        """The data as an `astropy.units.Quantity`."""
        return u.Quantity(self.data, unit=self.unit)

    @data_quantity.setter
    def data_quantity(self, value):
        if not hasattr(value, 'unit'):
            raise ValueError("To set the data to a Quantity value, it must "
                             "have a unit.")
        if hasattr(self.data, 'mask') and not hasattr(value, 'mask'):
            raise ValueError("The original data had a mask.  You must use "
                             "a masked array to set the data value.")
        self.data = value.value
        self.unit = value.unit

    @property
    def error_quantity(self):
        """The error array as an `astropy.units.Quantity` (same unit as data)."""
        return u.Quantity(self.error, unit=self.unit)

    def _register_fitters(self, registry=None):
        """
        Register fitters independently for each spectrum instance

        This approach allows you to add fitters to a given Spectrum instance
        without modifying the default registry
        """
        self.Registry = fitters.Registry()
        if registry is None:
            registry = fitters.default_Registry
        elif not isinstance(registry, fitters.Registry):
            raise TypeError("registry must be an instance of the fitters.Registry class")

        for modelname, model in iteritems(registry.multifitters):
            self.Registry.add_fitter(modelname, model,
                                     registry.npars[modelname],
                                     key=registry.associated_keys.get(modelname))

    def _sort(self):
        """
        Make sure X axis is monotonic.
        """
        if self.xarr.dxarr.min() < 0:
            argsort = np.argsort(self.xarr)
            self.data = self.data[argsort]
            self.error = self.error[argsort]
            self.xarr = self.xarr[argsort]
            self.xarr.make_dxarr()

    def write(self,filename,type=None,**kwargs):
        """
        Write the spectrum to a file.  The available file types are listed
        in spectrum.writers.writers

        type - what type of file to write to?  If not specified, will attempt
        to determine type from suffix
        """
        if type:
            self.writer = writers.writers[type](self)
        else:
            suffix = filename.rsplit('.',1)[1]
            if suffix in writers.suffix_types:
                # use the default reader for that suffix
                filetype = writers.suffix_types[suffix][0]
                self.writer = writers.writers[filetype](self)
            else:
                raise TypeError("File with suffix %s is not recognized." % suffix)
        self.writer(filename=filename,**kwargs)

    def parse_text_header(self,Table):
        """
        Grab relevant parameters from a table header (xaxis type, etc)

        This function should only exist for Spectrum objects created from
        .txt or other atpy table type objects
        """
        self.Table = Table

        xtype = Table.data.dtype.names[Table.xaxcol]
        if xtype in units.xtype_dict.values():
            self.xarr.xtype = xtype
            unit = Table.columns[xtype].unit
            self.xarr.set_unit(unit)
        elif xtype in units.xtype_dict:
            self.xarr.xtype = units.xtype_dict[xtype]
            unit = Table.columns[xtype].unit
            self.xarr.set_unit(unit)
        else:
            warn("Warning: Invalid xtype in text header - this may mean no "
                 "text header was available.  X-axis units will be pixels "
                 "unless you set them manually "
                 "(e.g., sp.xarr=SpectroscopicAxis(sp.xarr.value, unit='angstroms')")
            self.xarr.xtype = 'pixels'
            self.xarr.set_unit(u.pixel)
        self.ytype = Table.data.dtype.names[Table.datacol]
        try:
            self.unit = Table.columns[self.ytype].unit
        except ValueError:
            self.unit = None
            pass # astropy 0.2.dev11 introduces an incompatibility here
        self.header = pyfits.Header()
        self._update_header()

    def _update_header(self):
        """Write the current axis/unit metadata into the FITS header."""
        self.header['CUNIT1'] = self.xarr.unit.to_string()
        self.header['CTYPE1'] = self.xarr.xtype
        self.header['BUNIT'] = self.unit
        self.header['BTYPE'] = self.ytype

    def parse_hdf5_header(self, hdr):
        """
        HDF5 reader will create a hdr dictionary from HDF5 dataset
        attributes if they exist.  This routine will convert that dict to a
        pyfits header instance.

        .. todo:: Move this to the hdf5 reader?
        """
        self.xarr.xtype = hdr['xtype']
        self.xarr.xunit = hdr['xunit']
        self.ytype = hdr['ytype']
        self.unit = hdr['yunit']
        self.header = pyfits.Header()
        self.header['CUNIT1'] = self.xarr.xunit
        self.header['CTYPE1'] = self.xarr.xtype
        # BUNIT is the data unit and BTYPE the data type, matching
        # _update_header; the two keywords were previously swapped here.
        self.header['BUNIT'] = self.unit
        self.header['BTYPE'] = self.ytype

    def parse_header(self,hdr,specname=None):
        """
        Parse parameters from a .fits header into required spectrum structure
        parameters

        .. todo:: This should be moved to the FITSSpectrum subclass when that is available
        """
        if hdr.get('BUNIT'):
            try:
                self.unit = u.Unit(hdr.get('BUNIT').strip())
            except (u.UnitsError, ValueError):
                self.unit = hdr.get('BUNIT').strip()
        elif not hasattr(self, 'unit') or (hasattr(self,'unit') and self.unit
                                           is None):
            self.unit = 'undefined'

        if hdr.get('BTYPE'):
            self.ytype = hdr.get('BTYPE').strip()
        else:
            self.ytype = 'data'

        if specname is not None:
            self.specname = specname
        elif hdr.get('OBJECT'):
            self.specname = hdr.get('OBJECT')
        elif hdr.get('OBJNAME'):
            self.specname = hdr.get('OBJNAME')
        else:
            self.specname = ''

    def measure(self, z=None, d=None, fluxnorm=None, miscline=None,
                misctol=10.0, ignore=None, derive=True, **kwargs):
        """
        Initialize the measurements class - only do this after you have run a
        fitter otherwise pyspeckit will be angry!
        """
        self.measurements = measurements.Measurements(self, z=z, d=d,
                                                      fluxnorm=fluxnorm,
                                                      miscline=miscline,
                                                      misctol=misctol,
                                                      ignore=ignore,
                                                      derive=derive, **kwargs)

    def crop(self, x1, x2, unit=None, **kwargs):
        """
        Replace the current spectrum with a subset from x1 to x2 in current
        units

        Fixes CRPIX1 and baseline and model spectra to match cropped data spectrum
        """
        # do slice (this code is redundant... need to figure out how to fix that)
        if not hasattr(x1, 'unit') and unit is not None:
            x1pix = np.argmin(np.abs(x1-self.xarr.as_unit(unit).value))
            x2pix = np.argmin(np.abs(x2-self.xarr.as_unit(unit).value))
        elif hasattr(x1, 'unit') and unit is not None:
            raise ValueError("If you give x1,x2 as quantities, don't specify "
                             "the X-axis unit (it must be equivalent, though).")
        elif not hasattr(x1, 'unit') and unit is None:
            x1pix = x1
            x2pix = x2
        else:
            # Hack: something about the inheritance of xarr prevents equivalent
            # unit arithmetic
            x1pix = np.argmin(np.abs(x1-u.Quantity(self.xarr)))
            x2pix = np.argmin(np.abs(x2-u.Quantity(self.xarr)))
            x1 = x1.value
            x2 = x2.value

        if x1pix > x2pix:
            x1pix,x2pix = x2pix,x1pix
        elif x1pix == x2pix:
            raise IndexError("ERROR: Trying to crop to zero size.")

        # copy=False -> self.slice modifies this spectrum in place
        self = self.slice(x1pix, x2pix, unit='pixels', copy=False, xcopy=True,
                          **kwargs)
        # a baseline spectrum is always defined, even if it is all zeros
        # this is needed to prevent size mismatches.  There may be a more
        # elegant way to do this...
        # this is taken care of in Slice now self.baseline.crop(x1pix,x2pix)
        # this is taken care of in Slice now self.specfit.crop(x1pix,x2pix)

        if hasattr(self.specfit, 'fitter'):
            self.specfit._full_model()

        if hasattr(self,'header'):
            history.write_history(self.header,
                                  "CROP: Cropped from %g to %g (pixel %i to %i)"
                                  % (x1,x2,x1pix,x2pix))

            if self.header.get('CRPIX1'):
                self.header['CRPIX1'] = self.header.get('CRPIX1') - x1pix
                history.write_history(self.header,
                                      "CROP: Changed CRPIX1 from %f to %f"
                                      % (self.header.get('CRPIX1')+x1pix,self.header.get('CRPIX1')))

    def slice(self, start=None, stop=None, unit='pixel', copy=True, xcopy=True,
              preserve_fits=False):
        """
        Slicing the spectrum

        .. WARNING:: this is the same as cropping right now, but it returns a
            copy instead of cropping inplace

        Parameters
        ----------
        start : numpy.float or int or astropy quantity
            start of slice
        stop : numpy.float or int or astropy quantity
            stop of slice
        unit : str
            allowed values are any supported physical unit, 'pixel'
        copy : bool
            Return a 'view' of the data or a copy?
        preserve_fits : bool
            Save the fitted parameters from self.fitter?
        """
        if hasattr(start, 'unit'):
            start_ind = self.xarr.x_to_pix(start)
        elif unit in ('pixel','pixels'):
            start_ind = start
        else:
            start_ind = self.xarr.x_to_pix(start, xval_unit=unit)
        if hasattr(stop, 'unit'):
            stop_ind = self.xarr.x_to_pix(stop)
        elif unit in ('pixel','pixels'):
            stop_ind = stop
        else:
            stop_ind = self.xarr.x_to_pix(stop, xval_unit=unit)

        if start_ind > stop_ind:
            start_ind,stop_ind = stop_ind,start_ind
        spectrum_slice = slice(start_ind,stop_ind)

        log.debug("Slicing from {start} to {stop} with unit {unit} and copy="
                  "{copy}, xcopy={xcopy}, preserve_fits={preserve_fits}."
                  "start_ind = {start_ind}, stop_ind= {stop_ind}"
                  .format(start=start, stop=stop, unit=unit, copy=copy,
                          xcopy=xcopy, preserve_fits=preserve_fits,
                          start_ind=start_ind, stop_ind=stop_ind))

        if copy:
            sp = self.copy()
        else:
            sp = self
        sp.data = sp.data[spectrum_slice]
        if sp.error is not None:
            sp.error = sp.error[spectrum_slice]
        if copy or xcopy:
            sp.xarr = sp.xarr[spectrum_slice].copy()
        else:
            sp.xarr = sp.xarr[spectrum_slice]

        if copy:
            # create new specfit / baseline instances (otherwise they'll be the wrong length)
            sp._register_fitters(registry=self.Registry)
            sp.baseline = baseline.Baseline(sp)
            sp.specfit = fitters.Specfit(sp,Registry=sp.Registry)
        else:
            # inplace modification
            sp.baseline.crop(start_ind, stop_ind)
            sp.specfit.crop(start_ind, stop_ind)

        if preserve_fits:
            sp.specfit.modelpars = self.specfit.modelpars
            sp.specfit.parinfo = self.specfit.parinfo
            sp.baseline.baselinepars = self.baseline.baselinepars
            sp.baseline.order = self.baseline.order

        return sp

    # For Spectrum1D compatibility, flux = data
    @property
    def flux(self):
        """
        The data in the spectrum (flux = data, for compatibility with astropy's
        Spectrum1D object).
        """
        return self.data

    @flux.setter
    def flux(self,value):
        self.data = value

    def __getitem__(self, indx):
        """
        Slice the data using pixel units (not quite the same as self.slice
        because indx can be a slice object or numbers)
        """
        sp = copy.copy(self)
        sp.data = sp.data.__getitem__(indx)
        if sp.error is not None:
            sp.error = sp.error.__getitem__(indx)
        sp.xarr = sp.xarr.__getitem__(indx)

        # this should be done by deepcopy, but deepcopy fails with current pyfits
        sp.plotter = self.plotter.copy(parent=sp)
        sp.plotter.Spectrum = sp
        sp.specfit = self.specfit.copy(parent=sp, registry=sp.Registry)
        sp.specfit.Spectrum = sp
        sp.specfit.Spectrum.plotter = sp.plotter
        sp.baseline = self.baseline.copy(parent=sp)
        sp.baseline.Spectrum = sp
        sp.baseline.Spectrum.plotter = sp.plotter

        return sp

    def downsample(self, dsfactor):
        """
        Downsample the spectrum (and all of its subsidiaries) without smoothing

        Parameters
        ----------
        dsfactor : int
            Downsampling Factor
        """
        dsfactor = round(dsfactor) # ensure int
        self.data = self.data[::dsfactor]
        self.xarr = self.xarr[::dsfactor]
        if len(self.xarr) != len(self.data):
            raise ValueError("Convolution resulted in different X and Y array lengths.  Convmode should be 'same'.")
        if self.error is not None:
            self.error = self.error[::dsfactor]
        self.baseline.downsample(dsfactor)
        self.specfit.downsample(dsfactor)

    def smooth(self,smooth,downsample=True,**kwargs):
        """
        Smooth the spectrum by factor `smooth`.

        Documentation from the :mod:`smooth` module:

        """
        smooth = int(round(smooth))
        self.data = sm.smooth(self.data,smooth,downsample=downsample,**kwargs)

        if downsample:
            self.xarr = self.xarr[::smooth]
            if len(self.xarr) != len(self.data):
                raise ValueError("Convolution resulted in different X and Y array lengths.  Convmode should be 'same'.")
            if self.error is not None:
                self.error = sm.smooth(self.error,smooth,**kwargs)
            self.baseline.downsample(smooth)
            self.specfit.downsample(smooth)

            self._smooth_header(smooth)
    smooth.__doc__ += "sm.smooth doc: \n" + sm.smooth.__doc__

    def _smooth_header(self,smooth):
        """
        Internal - correct the FITS header parameters when smoothing
        """
        if self.header.get('CDELT1') is not None and self.header.get('CRPIX1') is not None:
            self.header['CDELT1'] = self.header.get('CDELT1') * float(smooth)
            self.header['CRPIX1'] = self.header.get('CRPIX1') / float(smooth)

            history.write_history(self.header,"SMOOTH: Smoothed and downsampled spectrum by factor %i" % (smooth))
            history.write_history(self.header,"SMOOTH: Changed CRPIX1 from %f to %f" % (self.header.get('CRPIX1')*float(smooth),self.header.get('CRPIX1')))
            # BUGFIX: this history entry previously interpolated CRPIX1 values
            # into the CDELT1 message
            history.write_history(self.header,"SMOOTH: Changed CDELT1 from %f to %f" % (self.header.get('CDELT1')/float(smooth),self.header.get('CDELT1')))

    def _shape(self):
        """
        Return the data shape (a property of the Spectrum)
        """
        return self.data.shape
    shape = property(_shape)

    def __len__(self):
        return len(self.data)

    def __repr__(self):
        if hasattr(self,'specname'):
            name = " named %s" % self.specname
        else:
            name = ""
        return (r'<%s object%s over spectral range %6.5g : %6.5g %s and flux range = [%2.1f, %2.1f] %s at %s>' %
                (self.__class_name__, name, self.xarr.min().value, self.xarr.max().value,
                 self.xarr.unit, self.data.min(), self.data.max(), self.unit,
                 str(hex(self.__hash__()))))

    def copy(self,deep=True):
        """
        Create a copy of the spectrum with its own plotter, fitter, etc.
        Useful for, e.g., comparing smoothed to unsmoothed data
        """
        newspec = copy.copy(self)
        if deep:
            newspec.xarr = copy.copy(self.xarr)
            newspec.data = copy.copy(self.data)
            if self.error is not None:
                newspec.error = copy.copy(self.error)
            newspec.header = copy.copy(self.header)

        newspec.plotter = self.plotter.copy(parent=newspec)
        newspec._register_fitters(registry=self.Registry)
        newspec.specfit = self.specfit.copy(parent=newspec, registry=newspec.Registry)
        newspec.specfit.Spectrum.plotter = newspec.plotter
        newspec.baseline = self.baseline.copy(parent=newspec)
        newspec.baseline.Spectrum.plotter = newspec.plotter

        return newspec

    def stats(self, statrange=(), interactive=False):
        """
        Return some statistical measures in a dictionary (somewhat self-explanatory)

        Parameters
        ----------
        statrange : 2-element tuple
            X-range over which to perform measures
        interactive : bool
            specify range interactively in plotter
        """
        if len(statrange) == 2:
            pix1 = self.xarr.x_to_pix(statrange[0])
            pix2 = self.xarr.x_to_pix(statrange[1])
            if pix1 > pix2:
                pix1,pix2 = pix2,pix1
            elif pix1 == pix2:
                raise ValueError("Invalid statistics range - includes 0 pixels")
            data = self.data[pix1:pix2]
        elif interactive:
            raise NotImplementedError('Not implemented yet.  Probably need to move the stats command into a different module first')
        else:
            data = self.data

        stats = {
            "npts": data.shape[0],
            "std": data.std(),
            "mean": data.mean(),
            "median": np.median(data),
            "min": data.min(),
            "max": data.max(),}
        return stats

    def getlines(self, linetype='radio', **kwargs):
        """
        Access a registered database of spectral lines.  Will add an attribute
        with the name linetype, which then has properties defined by the
        speclines module (most likely, a table and a "show" function to display
        the lines)
        """
        # this is somewhat unreadable, but in short:
        # self.__dict__ is a dictionary that defines the class attributes
        # so, for example, if linetype is radio, this reads:
        # self.radio = speclines.radio.radio_lines(self)
        # or optical:
        # self.optical = speclines.optical.optical_lines(self)
        if linetype not in self.__dict__: # don't replace it if it already exists
            self.__dict__[linetype] = speclines.__dict__[linetype].__dict__[linetype+"_lines"](self,**kwargs)

    def moments(self, unit='km/s', **kwargs):
        """
        Return the moments of the spectrum.  In order to assure that the 1st
        and 2nd moments are meaningful, a 'default' unit is set.  If unit is
        not set, will use current unit.

        *Documentation imported from the moments module:*

        """
        if unit is False or unit is None:
            return moments_module.moments(self.xarr, self.data, **kwargs)
        else:
            return moments_module.moments(self.xarr.as_unit(unit), self.data,
                                          **kwargs)
    moments.__doc__ += moments_module.moments.__doc__

    def _operation_wrapper(operation):
        """
        Perform an operation (addition, subtraction, mutiplication, division,
        etc.) after checking for shape matching
        """

        def ofunc(self, other):
            if np.isscalar(other):
                newspec = self.copy()
                newspec.data = operation(newspec.data, other)
                return newspec
            elif hasattr(self,'xarr') and hasattr(other,'xarr'): # purely for readability

                if self._arithmetic_threshold == 'exact':
                    xarrcheck = np.all((self.xarr == other.xarr))
                else:
                    if self._arithmetic_threshold_units is None:
                        # not sure this should ever be allowed
                        xarrcheck = np.all(np.abs(self.xarr-other.xarr).value < self._arithmetic_threshold)
                    else:
                        xarr_u = self.xarr.as_unit(self._arithmetic_threshold_units)
                        other_xarr_u = other.xarr.as_unit(self._arithmetic_threshold_units)
                        xarrcheck = np.all(np.abs(xarr_u - other_xarr_u).value <
                                           self._arithmetic_threshold)

                if self.shape == other.shape and xarrcheck:
                    newspec = self.copy()
                    newspec.data = operation(newspec.data, other.data)
                    return newspec
                elif self.shape != other.shape:
                    raise ValueError("Shape mismatch in data")
                elif not xarrcheck:
                    raise ValueError("X-axes do not match.")
                else:
                    raise Exception("Unexpected Error")
            elif hasattr(self,'shape') and hasattr(other,'shape'):
                # allow array subtraction
                if self.shape != other.shape:
                    raise ValueError("Shape mismatch in data")
                elif hasattr(self, 'xarr'):
                    newspec = self.copy()
                    newspec.data = operation(newspec.data, other)
                    return newspec
                elif hasattr(other, 'xarr'): # is this even possible?
                    newspec = other.copy()
                    newspec.data = operation(self, other.data)
                    return newspec
            else:
                raise ValueError("Data types are incompatible")

        return ofunc

    @property
    def _arithmetic_threshold(self):
        return self._arithmetic_threshold_value

    @_arithmetic_threshold.setter
    def _arithmetic_threshold(self, value, unit=None):
        # NOTE(review): the extra ``unit`` argument can never be supplied
        # through attribute assignment; it is always None in practice.
        self._arithmetic_threshold_value = value
        if unit is None:
            self._arithmetic_threshold_units = self.xarr.unit
        else:
            self._arithmetic_threshold_units = unit

    _arithmetic_threshold_value = 'exact'
    _arithmetic_threshold_units = None

    __add__ = _operation_wrapper(np.add)
    __radd__ = _operation_wrapper(np.add)
    __sub__ = _operation_wrapper(np.subtract)
    __mul__ = _operation_wrapper(np.multiply)
    __div__ = _operation_wrapper(np.divide)      # Python 2 division
    # BUGFIX: __truediv__ was missing, so the '/' operator did not work on
    # Python 3 even though the module otherwise supports it.
    __truediv__ = _operation_wrapper(np.divide)
class SingleSpectrum(BaseSpectrum):
    """
    A spectrum loaded from a single source (file, HDU, or a specutils
    ``Spectrum1D`` object).
    """
    # NOTE(review): a stale duplicate assignment ``__class_name__ =
    # 'BaseSpectrum'`` used to follow from_hdu below; it silently overrode
    # the correct name here (class attributes defined later win) and has
    # been removed so __repr__ reports 'SingleSpectrum'.
    __class_name__ = 'SingleSpectrum'

    @classmethod
    def from_spectrum1d(cls, spec1d):
        """
        Tool to load a pyspeckit Spectrum from a specutils object

        Examples
        --------
        >>> # grab many spectra from a multiextension FITS file
        >>> spectra = specutils.io.fits.read_fits_spectrum1d('AAO.fits')
        >>> sp = pyspeckit.Spectrum.from_spectrum1d(spectra[0])
        >>> # open a single spectrum that could have been opened directly with pyspeckit
        >>> spectrum = specutils.io.fits.read_fits_spectrum1d('gbt_1d.fits')
        >>> sp = pyspeckit.Spectrum.from_spectrum1d(spectrum)
        """
        xarr = units.SpectroscopicAxis(spec1d.spectral_axis)
        data = spec1d.flux
        error = spec1d.uncertainty
        return cls(data=data, error=error, xarr=xarr,
                   unit=spec1d._unit, header=pyfits.Header())

    @classmethod
    def from_hdu(cls, hdu):
        """
        Create a pyspeckit Spectrum object from an HDU
        """
        spec,errspec,XAxis,hdr = readers.open_1d_pyfits(hdu)
        return cls(data=spec, error=errspec, xarr=XAxis, header=hdr)
class Spectrum(SingleSpectrum):
    """
    The core class of the spectroscopic toolkit.

    A Spectrum bundles the data and error arrays with an X-axis carrying
    wavelength / frequency / velocity information in various formats.  All
    behavior is inherited from `BaseSpectrum` and `SingleSpectrum`; this
    subclass only sets its display name.
    """
    __class_name__ = 'Spectrum'
class Spectra(BaseSpectrum):
    """
    A list of individual Spectrum objects.  Intended to be used for
    concatenating different wavelength observations of the SAME OBJECT.  Can
    be operated on just like any Spectrum object, incuding fitting.  Useful
    for fitting multiple lines on non-continguous axes simultaneously.  Be
    wary of plotting these though...

    Can be indexed like python lists.

    X array is forcibly sorted in increasing order
    """
    __class_name__ = 'Spectra'

    def __init__(self, speclist, xunit=None, model_registry=None, **kwargs):
        """
        Build a composite spectrum by concatenating a list of Spectrum
        objects (or filenames from which to load them).

        Parameters
        ----------
        speclist : iterable
            Spectrum objects, or filename strings to be loaded as Spectra
        xunit : str or astropy unit, optional
            Common X-axis unit; defaults to the first spectrum's unit
        model_registry : fitters.Registry, optional
            Fitter registry to use instead of the default
        """
        log.info("Creating spectra")
        speclist = list(speclist)
        for ii,spec in enumerate(speclist):
            if type(spec) is str:
                spec = Spectrum(spec)
                speclist[ii] = spec

        self.speclist = speclist

        if xunit is None:
            xunit = speclist[0].xarr.unit
        else:
            xunit = u.Unit(xunit)

        if xunit is not None and xunit.is_equivalent(u.km/u.s):
            # velocity axes are only combinable with a shared rest frequency
            refX = speclist[0].xarr.refX
            if refX is None:
                warn("Combining spectra with velocity coordinates, "
                     "but refX is None")
            for spec in speclist[1:]:
                if spec.xarr.refX != refX:
                    raise ValueError("When combining spectra in velocity coordinates, "
                                     "they must have the same reference frequency.")

        log.info("Concatenating data")
        self.xarr = units.SpectroscopicAxes([sp.xarr.as_unit(xunit) for sp in speclist])
        self.xarr.set_unit(u.Unit(xunit))
        # NOTE(review): xtype is assigned a Unit instance here rather than a
        # string type name; looks suspicious, but preserved as-is.
        self.xarr.xtype = u.Unit(xunit)
        self.data = np.ma.concatenate([sp.data for sp in speclist])
        self.error = np.ma.concatenate([sp.error for sp in speclist])
        self._sort()

        self.header = pyfits.Header()
        for spec in speclist:
            for key,value in spec.header.items():
                try:
                    self.header[key] = value
                except (ValueError, KeyError):
                    warn("Could not update header KEY=%s to VALUE=%s" % (key,value))

        self.plotter = plotters.Plotter(self)
        self._register_fitters(registry=model_registry)
        self.specfit = fitters.Specfit(self,Registry=self.Registry)
        self.baseline = baseline.Baseline(self)

        self.unit = speclist[0].unit
        for spec in speclist:
            if spec.unit != self.unit:
                raise ValueError("Mismatched unit")

        # Special.  This needs to be modified to be more flexible; for now I need it to work for nh3
        self.plot_special = None
        self.plot_special_kwargs = {}

    @classmethod
    def from_spectrum1d_list(cls, lst):
        """
        Tool to load a collection of pyspeckit Spectra from a specutils list of
        Spectrum1D objects

        Examples
        --------
        >>> # grab many spectra from a multiextension FITS file
        >>> spectra = specutils.io.fits.read_fits_spectrum1d('AAO.fits')
        >>> sp = pyspeckit.Spectrum.from_spectrum1d_list(spectra)
        """
        return cls([Spectrum.from_spectrum1d(spec) for spec in lst])

    def __add__(self,other):
        """
        Adding "Spectra" together will concatenate them

        * WARNING * this will probably fail now that I've implemented direct __add__...
        """
        # BUGFIX: the original test was ``type(other) is Spectra or Spectrum``,
        # which is always True (``or Spectrum`` is truthy), so a bare Spectrum
        # took the Spectra branch and crashed on ``other.speclist``.
        if isinstance(other, Spectra):
            self.speclist += other.speclist
        elif isinstance(other, Spectrum):
            # a single Spectrum has no speclist; append the spectrum itself
            self.speclist += [other]

        if other.unit != self.unit:
            raise ValueError("Mismatched unit")

        if other.xarr.unit != self.xarr.unit:
            # convert all inputs to same unit
            other.xarr.convert_to_unit(self.xarr.unit)
        self.xarr = units.SpectroscopicAxes([self.xarr,other.xarr])
        self.data = np.concatenate([self.data,other.data])
        self.error = np.concatenate([self.error,other.error])
        self._sort()
        # concatenation is in-place; return self so ``a + b`` is usable
        return self

    def __getitem__(self,index):
        """
        Can index Spectra to get the component Spectrum objects
        """
        return self.speclist[index]

    def __len__(self):
        """
        len(spectra) != len(spectrum) !
        """
        return len(self.speclist)

    def smooth(self,smooth,**kwargs):
        """
        Smooth the spectrum by factor "smooth".  Options are defined in
        sm.smooth

        because 'Spectra' does not have a header attribute, don't do anything
        to it...
        """
        # int() for consistency with BaseSpectrum.smooth (round() returns a
        # float on Python 2, which would break the slicing below)
        smooth = int(round(smooth))
        self.data = sm.smooth(self.data,smooth,**kwargs)
        self.xarr = self.xarr[::smooth]
        if len(self.xarr) != len(self.data):
            raise ValueError("Convolution resulted in different X and Y array lengths.  Convmode should be 'same'.")
        self.error = sm.smooth(self.error,smooth,**kwargs)
        self.baseline.downsample(smooth)
        self.specfit.downsample(smooth)

    def fiteach(self,**kwargs):
        """
        Fit each spectrum within the Spectra object
        """
        for sp in self.speclist:
            sp.specfit(**kwargs)

        if atpyOK:
            self.fittable = atpy.Table()
            self.fittable.add_column('name',[sp.specname for sp in self.speclist])
            self.fittable.add_column('amplitude',[sp.specfit.modelpars[0] for sp in self.speclist],unit=self.unit)
            self.fittable.add_column('center',[sp.specfit.modelpars[1] for sp in self.speclist],unit=self.xarr.unit)
            self.fittable.add_column('width',[sp.specfit.modelpars[2] for sp in self.speclist],unit=self.xarr.unit)
            self.fittable.add_column('amplitudeerr',[sp.specfit.modelerrs[0] for sp in self.speclist],unit=self.unit)
            self.fittable.add_column('centererr',[sp.specfit.modelerrs[1] for sp in self.speclist],unit=self.xarr.unit)
            self.fittable.add_column('widtherr',[sp.specfit.modelerrs[2] for sp in self.speclist],unit=self.xarr.unit)

    def ploteach(self, xunit=None, inherit_fit=False, plot_fit=True, plotfitkwargs={}, **plotkwargs):
        """
        Plot each spectrum in its own window

        inherit_fit - if specified, will grab the fitter & fitter properties
        from Spectra
        """
        for sp in self.speclist:
            if xunit is not None:
                sp.xarr.convert_to_unit(xunit,quiet=True)
            if inherit_fit:
                sp.specfit.fitter = self.specfit.fitter
                sp.specfit.modelpars = self.specfit.modelpars
                sp.specfit.model = np.interp(sp.xarr.as_unit(self.xarr.unit),
                                             self.xarr,self.specfit.fullmodel)

            sp.plotter(**plotkwargs)

            if plot_fit and self.specfit.model is not None:
                sp.specfit.plot_fit(**plotfitkwargs)
class ObsBlock(Spectra):
    """
    An Observation Block

    Consists of multiple spectra with a shared X-axis.  Intended to hold groups
    of observations of the same object in the same setup for later averaging.

    ObsBlocks can be indexed like python lists.
    """

    def __init__(self, speclist, xtype='frequency', xarr=None, force=False,
                 model_registry=None, **kwargs):
        # Adopt the first spectrum's X-axis unless one is given explicitly
        if xarr is None:
            self.xarr = speclist[0].xarr
        else:
            self.xarr = xarr

        # Unit and header are taken from the first spectrum; parse_header is
        # inherited (defined outside this class)
        self.unit = speclist[0].unit
        self.header = speclist[0].header
        self.parse_header(self.header)

        for spec in speclist:
            if not isinstance(spec, Spectrum):
                raise TypeError("Must create an ObsBlock with a list of spectra.")
            if not np.array_equal(spec.xarr, self.xarr):
                # Mismatched X axes are fatal unless force=True, in which case
                # the spectra are interpolated onto self.xarr below
                if not force:
                    raise ValueError("Mismatch between X axes in ObsBlock")
            if spec.unit != self.unit:
                raise ValueError("Mismatched units")

        if force:
            # Resample each spectrum onto the shared X-axis
            self.speclist = [interpolation.interp(spec, self) for spec in speclist]
        else:
            self.speclist = speclist
        # number of observations in the block
        self.nobs = len(self.speclist)

        # Create a 2-dimensional array of the data; after the swapaxes the
        # layout is [spectral channel, spectrum index]
        self.data = np.array([sp.data for sp in self.speclist]).swapaxes(0, 1).squeeze()
        self.error = np.array([sp.error for sp in self.speclist]).swapaxes(0, 1).squeeze()

        # Attach the standard pyspeckit tool objects (plotting, fitting,
        # baselining) to this block
        self.plotter = plotters.Plotter(self)
        self._register_fitters(registry=model_registry)
        self.specfit = fitters.Specfit(self, Registry=self.Registry)
        self.baseline = baseline.Baseline(self)

    def average(self, weight=None, inverse_weight=False, error='erravgrtn', debug=False):
        """
        Average all scans in an ObsBlock.  Returns a single Spectrum object

        Parameters
        ----------
        weight : string
            a header keyword to weight by.   If not specified, the spectra will be
            averaged without weighting
        inverse_weight : bool
            Is the header keyword an inverse-weight (e.g., a variance?)
        error : ['scanrms','erravg','erravgrtn']
            estimate the error spectrum by one of three methods.
            'scanrms'   : the standard deviation of each pixel across all scans
            'erravg'    : the average of all input error spectra
            'erravgrtn' : the average of all input error spectra divided by sqrt(n_obs)
        """

        # Start from a finite-data mask so NaN pixels carry zero weight
        wtarr = np.isfinite(self.data)
        if weight is not None:
            # Scale each spectrum's weight column by its header keyword value
            # (or its reciprocal if the keyword is an inverse weight)
            # NOTE(review): in-place multiply of a boolean array by a float may
            # fail under modern numpy casting rules -- confirm against usage
            if inverse_weight:
                for ii, sp in enumerate(self.speclist):
                    wtarr[:, ii] *= 1.0 / sp.header.get(weight)
            else:
                for ii, sp in enumerate(self.speclist):
                    wtarr[:, ii] *= sp.header.get(weight)

        # The averaged spectrum's exposure is the sum of individual exposures
        if self.header.get('EXPOSURE'):
            self.header['EXPOSURE'] = np.sum([sp.header['EXPOSURE'] for sp in self.speclist])

        # Zero out NaNs; they contribute nothing since their weight is zero
        data_nonan = np.nan_to_num(self.data)
        weighted_data = (data_nonan * wtarr)
        weighted_data_axsum = weighted_data.sum(axis=1)
        weight_axsum = wtarr.sum(axis=1)
        # weighted mean across spectra (axis 1 = spectrum index)
        avgdata = weighted_data_axsum / weight_axsum

        if error == 'scanrms':
            # axis swapping is for projection... avgdata = 0'th axis
            errspec = np.sqrt((((data_nonan.swapaxes(0, 1) - avgdata) *
                                wtarr.swapaxes(0, 1))**2 /
                               wtarr.swapaxes(0, 1)**2).swapaxes(0, 1).sum(axis=1)
                              )
        elif error == 'erravg':
            errspec = self.error.mean(axis=1)
        elif error == 'erravgrtn':
            errspec = self.error.mean(axis=1) / np.sqrt(self.error.shape[1])
        # NOTE(review): any other `error` value leaves errspec undefined and
        # raises NameError on the next line

        spec = Spectrum(data=avgdata, error=errspec, xarr=self.xarr.copy(), header=self.header)
        spec._arithmetic_threshold = self._arithmetic_threshold

        if debug:
            # this statement, and much of the text above, is to test an absolutely insane error:
            # wtarr.sum(axis=1) randomly - say, one out of every 10-100 occurrences - fills with
            # nonsense values (1e-20, 1e-55, whatever).  There is no pattern to this; it occurs in
            # while loops, but ONLY IN THIS FUNCTION.  This is unreproduceable anywhere else.
            print("selfdata min: %10g max: %10g" % (self.data.min(), self.data.max()))
            print("nonandata min: %10g max: %10g" % (data_nonan.min(), data_nonan.max()))
            print("avgdata min: %10g max: %10g" % (avgdata.min(), avgdata.max()))
            print("weight sum: %10g" % (wtarr.sum()))
            print("data*weight sum: %10g" % ((data_nonan * wtarr).sum()))
            if np.abs(data_nonan.min() / avgdata.min()) > 1e10:
                import pdb
                pdb.set_trace()

        return spec

    def __len__(self):
        # number of spectra in the block
        return len(self.speclist)

    def __getitem__(self, index):
        """
        Can index Spectra to get the component Spectrum objects
        """
        return self.speclist[index]

    def smooth(self, smooth, **kwargs):
        """
        Smooth the spectrum by factor "smooth".  Options are defined in sm.smooth
        """
        # NOTE(review): round() returns float on Python 2; slicing by a float
        # below would then fail -- confirm the supported Python versions
        smooth = round(smooth)
        self.data = sm.smooth_multispec(self.data, smooth, **kwargs)
        # Downsample the X-axis by the same integer factor
        self.xarr = self.xarr[::smooth]
        if len(self.xarr) != len(self.data):
            raise ValueError("Convolution resulted in different X and Y array lengths.  Convmode should be 'same'.")
        self.error = sm.smooth_multispec(self.error, smooth, **kwargs)
        # Keep the derived products (baseline, fit, header) consistent with
        # the downsampled axes
        self.baseline.downsample(smooth)
        self.specfit.downsample(smooth)
        self._smooth_header(smooth)
class XCorrSpectrum(Spectrum):
    """ extraordinarily thin spectrum; just a name right now """
    # Placeholder subclass: exists only to give cross-correlation results a
    # distinct type; no behavior is added
    pass
if __name__ == "__main__":
    # Run this module's doctests when executed as a script
    import doctest
    doctest.testmod()
| mit | 035e915798c42706fdaf300b1f11d4e8 | 38.160463 | 155 | 0.575816 | 3.984934 | false | false | false | false |
pytest-dev/py | py/__init__.py | 16 | 6022 | """
pylib: rapid testing and development utils
this module uses apipkg.py for lazy-loading sub modules
and classes. The initpkg-dictionary below specifies
name->value mappings where value can be another namespace
dictionary or an import path.
(c) Holger Krekel and others, 2004-2014
"""
from py._error import error
try:
from py._vendored_packages import apipkg
lib_not_mangled_by_packagers = True
vendor_prefix = '._vendored_packages.'
except ImportError:
import apipkg
lib_not_mangled_by_packagers = False
vendor_prefix = ''
try:
from ._version import version as __version__
except ImportError:
# broken installation, we don't even try
__version__ = "unknown"
# Lazy-loading export table: apipkg.initpkg replaces this module with an
# ApiModule whose attributes import their targets only on first access.
# Keys are attribute paths; values are "module:name" import specs (or nested
# namespace dicts).
apipkg.initpkg(__name__, attr={'_apipkg': apipkg, 'error': error}, exportdefs={
    # access to all standard lib modules
    'std': '._std:std',
    '_pydir' : '.__metainfo:pydir',
    'version': 'py:__version__', # backward compatibility

    # pytest-2.0 has a flat namespace, we use alias modules
    # to keep old references compatible
    'test' : 'pytest',

    # hook into the top-level standard library
    'process' : {
        '__doc__' : '._process:__doc__',
        'cmdexec' : '._process.cmdexec:cmdexec',
        'kill' : '._process.killproc:kill',
        'ForkedFunc' : '._process.forkedfunc:ForkedFunc',
    },

    # vendor_prefix points at the bundled copies unless a packager has
    # unbundled them (see the try/except at the top of this file)
    'apipkg' : {
        'initpkg' : vendor_prefix + 'apipkg:initpkg',
        'ApiModule' : vendor_prefix + 'apipkg:ApiModule',
    },

    'iniconfig' : {
        'IniConfig' : vendor_prefix + 'iniconfig:IniConfig',
        'ParseError' : vendor_prefix + 'iniconfig:ParseError',
    },

    'path' : {
        '__doc__' : '._path:__doc__',
        'svnwc' : '._path.svnwc:SvnWCCommandPath',
        'svnurl' : '._path.svnurl:SvnCommandPath',
        'local' : '._path.local:LocalPath',
        'SvnAuth' : '._path.svnwc:SvnAuth',
    },

    # python inspection/code-generation API
    'code' : {
        '__doc__' : '._code:__doc__',
        'compile' : '._code.source:compile_',
        'Source' : '._code.source:Source',
        'Code' : '._code.code:Code',
        'Frame' : '._code.code:Frame',
        'ExceptionInfo' : '._code.code:ExceptionInfo',
        'Traceback' : '._code.code:Traceback',
        'getfslineno' : '._code.source:getfslineno',
        'getrawcode' : '._code.code:getrawcode',
        'patch_builtins' : '._code.code:patch_builtins',
        'unpatch_builtins' : '._code.code:unpatch_builtins',
        '_AssertionError' : '._code.assertion:AssertionError',
        '_reinterpret_old' : '._code.assertion:reinterpret_old',
        '_reinterpret' : '._code.assertion:reinterpret',
        '_reprcompare' : '._code.assertion:_reprcompare',
        '_format_explanation' : '._code.assertion:_format_explanation',
    },

    # backports and additions of builtins
    'builtin' : {
        '__doc__' : '._builtin:__doc__',
        'enumerate' : '._builtin:enumerate',
        'reversed' : '._builtin:reversed',
        'sorted' : '._builtin:sorted',
        'any' : '._builtin:any',
        'all' : '._builtin:all',
        'set' : '._builtin:set',
        'frozenset' : '._builtin:frozenset',
        'BaseException' : '._builtin:BaseException',
        'GeneratorExit' : '._builtin:GeneratorExit',
        '_sysex' : '._builtin:_sysex',
        'print_' : '._builtin:print_',
        '_reraise' : '._builtin:_reraise',
        '_tryimport' : '._builtin:_tryimport',
        'exec_' : '._builtin:exec_',
        '_basestring' : '._builtin:_basestring',
        '_totext' : '._builtin:_totext',
        '_isbytes' : '._builtin:_isbytes',
        '_istext' : '._builtin:_istext',
        '_getimself' : '._builtin:_getimself',
        '_getfuncdict' : '._builtin:_getfuncdict',
        '_getcode' : '._builtin:_getcode',
        'builtins' : '._builtin:builtins',
        'execfile' : '._builtin:execfile',
        'callable' : '._builtin:callable',
        'bytes' : '._builtin:bytes',
        'text' : '._builtin:text',
    },

    # input-output helping
    'io' : {
        '__doc__' : '._io:__doc__',
        'dupfile' : '._io.capture:dupfile',
        'TextIO' : '._io.capture:TextIO',
        'BytesIO' : '._io.capture:BytesIO',
        'FDCapture' : '._io.capture:FDCapture',
        'StdCapture' : '._io.capture:StdCapture',
        'StdCaptureFD' : '._io.capture:StdCaptureFD',
        'TerminalWriter' : '._io.terminalwriter:TerminalWriter',
        'ansi_print' : '._io.terminalwriter:ansi_print',
        'get_terminal_width' : '._io.terminalwriter:get_terminal_width',
        'saferepr' : '._io.saferepr:saferepr',
    },

    # small and mean xml/html generation
    'xml' : {
        '__doc__' : '._xmlgen:__doc__',
        'html' : '._xmlgen:html',
        'Tag' : '._xmlgen:Tag',
        'raw' : '._xmlgen:raw',
        'Namespace' : '._xmlgen:Namespace',
        'escape' : '._xmlgen:escape',
    },

    'log' : {
        # logging API ('producers' and 'consumers' connected via keywords)
        '__doc__' : '._log:__doc__',
        '_apiwarn' : '._log.warning:_apiwarn',
        'Producer' : '._log.log:Producer',
        'setconsumer' : '._log.log:setconsumer',
        '_setstate' : '._log.log:setstate',
        '_getstate' : '._log.log:getstate',
        'Path' : '._log.log:Path',
        'STDOUT' : '._log.log:STDOUT',
        'STDERR' : '._log.log:STDERR',
        'Syslog' : '._log.log:Syslog',
    },

})
| mit | 8e88bd8c59e740b489c871908ac7d93b | 37.602564 | 79 | 0.497343 | 3.586659 | false | false | false | false |
pyspeckit/pyspeckit | pyspeckit/spectrum/__init__.py | 8 | 2600 | """
"""
from .classes import Spectrum,Spectra,ObsBlock
from .OpticalSpectrum import OpticalSpectrum
from . import fitters,plotters,baseline,units
from . import smooth
from . import correlate
from . import headers
from . import moments
from . import units
from . import utils
from . import readers
from . import writers
from . import logger
from .. import config
def register_reader(filetype, function, suffix, default=False):
    '''
    Register a reader function.

    Parameters
    ----------
    filetype: str
        The file type name
    function: function
        The reader function.  Should take a filename as input and return an
        X-axis object (see units.py), a spectrum, an error spectrum (initialize
        it to 0's if empty), and a pyfits header instance
    suffix: str
        File suffix to associate with this reader
    default: bool
        If True, make this the preferred reader for the suffix
    '''
    readers.readers[filetype] = function

    suffix_map = readers.suffix_types
    if suffix not in suffix_map:
        # The first reader registered for a suffix is automatically the default
        suffix_map[suffix] = [filetype]
    elif default:
        suffix_map[suffix].insert(0, filetype)
    else:
        suffix_map[suffix].append(filetype)
# Register the built-in readers; for each suffix the first entry in the
# suffix list (the one registered with default=True, or the first registered)
# is preferred.
register_reader('fits',readers.open_1d_fits,'fits',default=True)
register_reader('fits',readers.open_1d_fits,'fit')
register_reader('sdss',readers.read_sdss,'fits')
register_reader('pyfits',readers.open_1d_pyfits,'')
register_reader('txt',readers.open_1d_txt,'txt')
register_reader('txt',readers.open_1d_txt,'dat')
register_reader('tspec',readers.tspec_reader,'fits')
register_reader('hdf5',readers.open_hdf5,'hdf5')
def register_writer(filetype, function, suffix, default=False):
    '''
    Register a writer function.

    Parameters
    ----------
    filetype: string
        The file type name
    function: function
        The writer function.  Will be an attribute of Spectrum
        object, and called as spectrum.Spectrum.write_hdf5(),
        for example.
    suffix: str
        File suffix to associate with this writer
    default: bool
        If True, make this the preferred writer for the suffix
    '''
    writers.writers[filetype] = function

    suffix_map = writers.suffix_types
    if suffix not in suffix_map:
        # The first writer registered for a suffix is automatically the default
        suffix_map[suffix] = [filetype]
    elif default:
        suffix_map[suffix].insert(0, filetype)
    else:
        suffix_map[suffix].append(filetype)
# Register the built-in writers; FITS is the default output format.
register_writer('hdf5',writers.write_hdf5,'hdf5',default=False)
register_writer('fits',writers.write_fits,'fits',default=True)
#register_writer('txt',writers.write_ascii,'txt')
| mit | 2f85c4ce537610ebff0f40274c73acb1 | 30.707317 | 79 | 0.681154 | 3.79562 | false | false | false | false |
pyspeckit/pyspeckit | pyspeckit/parallel_map/parallel_map.py | 8 | 5273 | """
Parellel Map snippet by Brian Refsdal
http://www.astropython.org/snippet/2010/3/Parallel-map-using-multiprocessing
"""
from __future__ import print_function
import numpy
import warnings
from astropy import log
_multi=False
_ncpus=1
try:
# May raise ImportError
import multiprocessing
_multi=True
# May raise NotImplementedError
_ncpus = multiprocessing.cpu_count()
except Exception as ex:
pmap_exception = ex
_multi=False
__all__ = ('parallel_map',)
def worker(f, ii, chunk, out_q, err_q, lock):
    """
    Map an input function over a slice of the input iterable and push
    ``(ii, results)`` onto the output queue.

    :param f    : callable function that accepts argument from iterable
    :param ii   : process ID (doubles as the index of this chunk's results)
    :param chunk: slice of input iterable
    :param out_q: thread-safe output queue
    :param err_q: thread-safe queue to populate on exception
    :param lock : thread-safe lock to protect a resource
                  ( useful in extending parallel_map() )
    """
    results = []
    for item in chunk:
        try:
            results.append(f(item))
        except Exception as exc:
            # Abort this worker on the first failure; the parent checks err_q
            err_q.put(exc)
            return

    # output the result and task ID to output queue
    out_q.put((ii, results))
def run_tasks(procs, err_q, out_q, num):
    """
    Execute the populated processes and assemble the resultant array,
    checking the error queue for any exceptions.

    :param procs: list of Process objects
    :param out_q: thread-safe output queue
    :param err_q: thread-safe queue to populate on exception
    :param num  : length of resultant array
    """
    def _terminate_running(processes):
        # Kill any process that has not yet finished (exitcode is None)
        for process in processes:
            if process.exitcode is None:
                process.terminate()

    try:
        for proc in procs:
            proc.start()
        for proc in procs:
            proc.join()
    except Exception as e:
        # kill all slave processes on ctrl-C
        _terminate_running(procs)
        raise e

    if not err_q.empty():
        # kill all on any exception from any one slave
        _terminate_running(procs)
        raise err_q.get()

    # Processes finish in arbitrary order.  Process IDs double
    # as index in the resultant array.
    results = [None] * num
    while not out_q.empty():
        idx, chunk_result = out_q.get()
        results[idx] = chunk_result

    try:
        # Remove extra dimension added by array_split
        return list(numpy.concatenate(results))
    except ValueError:
        return list(results)
def parallel_map(function, sequence, numcores=None):
    """
    A parallelized version of the native Python map function that
    utilizes the Python multiprocessing module to divide and
    conquer sequence.

    parallel_map does not yet support multiple argument sequences.

    :param function: callable function that accepts argument from iterable
    :param sequence: iterable sequence
    :param numcores: number of cores to use (defaults to all available CPUs)

    :returns: list of results, in the same order as `sequence`
    """
    if not callable(function):
        raise TypeError("input function '%s' is not callable" %
                        repr(function))

    if not numpy.iterable(sequence):
        raise TypeError("input '%s' is not iterable" %
                        repr(sequence))

    size = len(sequence)

    if not _multi or size == 1 or numcores == 1:
        # Serial fallback.  Wrap in list() so the return type matches the
        # parallel code path on Python 3, where map() is a lazy iterator.
        return list(map(function, sequence))

    if numcores is not None and numcores > _ncpus:
        # Allowed (oversubscription), but warn the user
        warnings.warn("Number of requested cores is greater than the "
                      "number of available CPUs.")
    elif numcores is None:
        numcores = _ncpus

    # Returns a started SyncManager object which can be used for sharing
    # objects between processes. The returned manager object corresponds
    # to a spawned child process and has methods which will create shared
    # objects and return corresponding proxies.
    manager = multiprocessing.Manager()

    # Create FIFO queue and lock shared objects and return proxies to them.
    # The managers handles a server process that manages shared objects that
    # each slave process has access to.  Bottom line -- thread-safe.
    out_q = manager.Queue()
    err_q = manager.Queue()
    lock = manager.Lock()

    # if sequence is less than numcores, only use len sequence number of
    # processes
    if size < numcores:
        log.info("Reduced number of cores to {0}".format(size))
        numcores = size

    # group sequence into numcores-worth of chunks
    sequence = numpy.array_split(sequence, numcores)

    procs = [multiprocessing.Process(target=worker,
                                     args=(function, ii, chunk, out_q, err_q, lock))
             for ii, chunk in enumerate(sequence)]

    return run_tasks(procs, err_q, out_q, numcores)
if __name__ == "__main__":
    """
    Unit test of parallel_map()

    Create an arbitrary length list of references to a single
    matrix containing random floats and compute the eigenvals
    in serial and parallel.  Compare the results and timings.
    """
    import time

    numtasks = 5
    #size = (1024,1024)
    size = (512, 512)

    vals = numpy.random.rand(*size)
    f = numpy.linalg.eigvals

    iterable = [vals] * numtasks

    print('Running numpy.linalg.eigvals %iX on matrix size [%i,%i]' %
          (numtasks, size[0], size[1]))

    tt = time.time()
    presult = parallel_map(f, iterable)
    print('parallel map in %g secs' % (time.time() - tt))

    tt = time.time()
    # list() so the comparison below sees concrete results on Python 3,
    # where map() returns a lazy iterator (numpy.asarray of a map object
    # would produce a useless 0-d object array)
    result = list(map(f, iterable))
    print('serial map in %g secs' % (time.time() - tt))

    assert (numpy.asarray(result) == numpy.asarray(presult)).all()
| mit | 73dcb2b05c9e0f10a503f1978f048472 | 26.180412 | 76 | 0.683103 | 3.891513 | false | false | false | false |
pyspeckit/pyspeckit | pyspeckit/spectrum/velocity_frames.py | 7 | 5736 | from __future__ import print_function
import numpy as np
import units
def header_to_vlsr(header,**kwargs):
    """
    Attempt to determine the vlsr offset using default header keywords

    Reads the position (RA/DEC, or CRVAL1/CRVAL2 when CTYPE1 is a celestial
    RA axis), LATITUDE, a UT date string (UT1 or DATE-OBS), an equinox
    (EQUINOX or EPOCH, defaulting to 2000), and LST, then delegates to
    topo_to_lsr.
    """
    # NOTE(review): if neither branch below matches, `ra`/`dec` are never
    # assigned and the final call raises NameError
    if 'RA' in header:
        ra = sexagesimal_to_decimal(header['RA'],ra=True)
        dec = sexagesimal_to_decimal(header['DEC'],ra=False)
    elif 'CRVAL1' in header and 'RA' in header['CTYPE1']:
        ra = header['CRVAL1']
        dec = header['CRVAL2']
    latitude = header['LATITUDE']
    if 'UT1' in header:
        datestr = header['UT1']
    elif 'DATE-OBS' in header:
        datestr = header['DATE-OBS']
    else:
        # NOTE(review): `datestr` is undefined on this path; date_to_JD below
        # will raise NameError
        print("Not sure what date keywords exist...")
    jd = date_to_JD(datestr)
    if 'EQUINOX' in header:
        epoch = header['EQUINOX']
    elif 'EPOCH' in header:
        epoch = header['EPOCH']
    else:
        epoch = 2000.
    lst = sexagesimal_to_decimal(header['LST'])
    # Debug output of all inputs to the frame conversion
    print(ra,dec,latitude,lst,jd,epoch)
    return topo_to_lsr(ra,dec,latitude,lst,jd,epoch)
def date_to_JD(datestr, **kwargs):
    """
    Parse a date string and convert it to a (modified) Julian date using
    SLALIB's CLDJ routine.

    :param datestr: any date string understood by dateutil.parser.parse
    :param kwargs: passed through to dateutil.parser.parse
    """
    import pyslalib
    import dateutil
    date = dateutil.parser.parse(datestr, **kwargs)
    # Fractional day: integer day plus hours, minutes, seconds in day units
    day = date.day + (date.hour/24.) + (date.minute/60.)/24. + (date.second/3600.)/24.
    # NOTE(review): check pyslalib's sla_cldj return convention -- if it
    # returns a (date, status) pair, this propagates a tuple, not a scalar
    JD = pyslalib.slalib.sla_cldj(date.year,date.month,day)
    return JD
def sexagesimal_to_decimal(string, delimiter=":", ra=False):
    """
    Convert a sexagesimal string ("HH:MM:SS" or "DD:MM:SS") to a decimal
    value (degrees for ra=True, otherwise the leading unit).

    :param string: sexagesimal value, optionally signed (e.g. "-05:30:00")
    :param delimiter: field separator (default ":")
    :param ra: if True, treat the value as hours and multiply by 15 to
        return degrees
    """
    hh, mm, ss = [float(x) for x in string.split(delimiter)]
    # BUGFIX: apply the leading sign to all three fields.  Previously
    # "-05:30:00" evaluated to -5 + 0.5 = -4.5 instead of -5.5, and
    # "-00:30:00" lost its sign entirely.
    sign = -1.0 if string.strip().startswith('-') else 1.0
    decim = sign * (abs(hh) + mm/60. + ss/3600.)
    if ra:
        decim *= 15.
    return decim
def topo_to_geo(ra,dec,latitude,lst,height=None):
    """
    Topocentric -> geocentric radial velocity correction via SLALIB's RVEROT.

    :ra: Decimal right ascension (degrees)
    :dec: Decimal declination (degrees)
    :latitude: Observatory latitude in degrees
    :lst: Local sidereal time (hours)

    .. warning: this option was implemented but returns inconsistent results
        and has therefore been disabled

    :height: (optional) Height above the reference spheroid in meters
        (if height is specified, a more precise version of the code is used)
    """
    import pyslalib
    ra_rad = ra*np.pi/180.
    dec_rad = dec*np.pi/180.
    latitude_rad = latitude*np.pi/180.
    lst_rad = lst*15.*np.pi/180.
    if height is None:
        return pyslalib.slalib.sla_rverot(ra_rad,dec_rad,latitude_rad,lst_rad)
    else:
        # Deliberately returns the same RVEROT value as above (see warning):
        # the precise height-aware path below is dead code, kept for reference
        return pyslalib.slalib.sla_rverot(ra_rad,dec_rad,latitude_rad,lst_rad)
        # --- unreachable: disabled height-aware implementation ---
        vearth_6d = pyslalib.slalib.sla_pvobs(latitude_rad, height, lst_rad)
        vradec_3d = pyslalib.slalib.sla_dcs2c(ra_rad, dec_rad)
        projected_vearth = -1*pyslalib.slalib.sla_dvdv(vearth_6d[3:],vradec_3d)
        return projected_vearth * units.astronomical_unit_cm*units.length_dict['cm']/units.length_dict['km']
def geo_to_bary(ra,dec,jd,epoch):
    """
    For a given ra/dec, return the conversion from geocentric velocity to
    barycentric (note: uses DVB, the barycentric Earth velocity from
    sla_evp, unlike geo_to_helio which uses DVH)

    :ra: Decimal right ascension (degrees)
    :dec: Decimal declination (degrees)
    :jd: Modified (2000 = 51544) julian date
    :epoch: Epoch of observations (e.g., 2000, 2008.202)

    follows instructions given here:
    http://star-www.rl.ac.uk/docs/sun67.htx/node230.html

    *  Star vector, J2000
          CALL sla_DCS2C(RM,DM,V)

    *  Earth/Sun velocity and position, J2000
          CALL sla_EVP(TDB,2000D0,DVB,DPB,DVH,DPH)

    *  Radial velocity correction due to Earth orbit (km/s)
          VCORB = -sla_DVDV(V,DVH)*149.597870D6
    """
    import pyslalib
    # Unit vector toward the source
    velocity_3d = pyslalib.slalib.sla_dcs2c(ra*np.pi/180.,dec*np.pi/180.)
    dvb, dpb, dvh, dph = pyslalib.slalib.sla_evp(jd,epoch)
    # Project the barycentric Earth velocity (dvb) onto the line of sight;
    # 149.597870e6 converts AU/s-based units to km/s
    vcorb = -pyslalib.slalib.sla_dvdv(velocity_3d,dvb)*149.597870e6
    return vcorb
def geo_to_helio(ra,dec,jd,epoch):
    """
    For a given ra/dec, return the conversion from geocentric velocity to heliocentric

    :ra: Decimal right ascension (degrees)
    :dec: Decimal declination (degrees)
    :jd: Modified (2000 = 51544) julian date
    :epoch: Epoch of observations (e.g., 2000, 2008.202)

    follows instructions given here:
    http://star-www.rl.ac.uk/docs/sun67.htx/node230.html

    *  Star vector, J2000
          CALL sla_DCS2C(RM,DM,V)

    *  Earth/Sun velocity and position, J2000
          CALL sla_EVP(TDB,2000D0,DVB,DPB,DVH,DPH)

    *  Radial velocity correction due to Earth orbit (km/s)
          VCORB = -sla_DVDV(V,DVH)*149.597870D6
    """
    import pyslalib
    # Unit vector toward the source
    velocity_3d = pyslalib.slalib.sla_dcs2c(ra*np.pi/180.,dec*np.pi/180.)
    dvb, dpb, dvh, dph = pyslalib.slalib.sla_evp(jd,epoch)
    # Project the heliocentric Earth velocity (dvh) onto the line of sight;
    # 149.597870e6 converts AU/s-based units to km/s
    vcorb = -pyslalib.slalib.sla_dvdv(velocity_3d,dvh)*149.597870e6
    return vcorb
def helio_to_lsr(ra, dec):
    """
    Heliocentric -> kinematic LSR radial velocity correction for the given
    position, via SLALIB's RVLSRK.

    :ra: Decimal right ascension (degrees)
    :dec: Decimal declination (degrees)
    """
    import pyslalib
    return pyslalib.slalib.sla_rvlsrk(ra/180.*np.pi, dec/180.*np.pi)
def topo_to_lsr(ra, dec, latitude, lst, jd, epoch, height=None):
    """
    Total topocentric -> LSR radial velocity correction: the sum of the
    topo->geo, geo->helio, and helio->LSR components.
    """
    # Debug output showing each component of the correction
    print("helio->lsr: ",helio_to_lsr(ra,dec))
    print("geo->helio: ",geo_to_helio(ra,dec,jd,epoch))
    print("topo->geo: ",topo_to_geo(ra,dec,latitude,lst,height=height))
    return helio_to_lsr(ra,dec) + geo_to_helio(ra,dec,jd,epoch) + topo_to_geo(ra,dec,latitude,lst,height=height)
def frame_grid(ra=0.0,dec=0.0,latitude=0.0,jd=51544,lst=0,vtopo=0.0,epoch=2000,):
    """
    Build a lookup table of velocity-frame conversion offsets.

    Returns a nested dict mapping frame1 -> frame2 -> velocity offset.
    Diagonal entries are 0.0; currently only the TOPO->GEO off-diagonal
    conversion is implemented, and all other entries remain None.
    """
    frame_names = ['TOPO','GEO','BARY','HELI','LSRK']
    frame1_to_frame2 = dict([(n1,None) for n1 in frame_names])
    for frame1 in frame_names:
        frame1_to_frame2[frame1] = dict([(n2,None) for n2 in frame_names])
        for frame2 in frame_names:
            if frame1 == frame2:
                frame1_to_frame2[frame1][frame2] = 0.0
            elif frame1 == 'TOPO' and frame2 == 'GEO':
                frame1_to_frame2[frame1][frame2] = topo_to_geo(ra,dec,latitude,lst)
    # BUGFIX: the table was previously built and then discarded (the function
    # implicitly returned None); return it so callers can use it
    return frame1_to_frame2
if __name__ == "__main__":
    import pytest

    # NOTE(review): placeholder test scaffold.  @pytest.mark.parametrize is
    # applied here without an argument list, which is not valid pytest usage,
    # and the test body is empty -- this has never been a working test.
    @pytest.mark.parametrize
    def test_vlsr(ra, dec, utyr, utmon, utday, epoch, latitude, elevation):
        pass
| mit | b89121816f58954752de4e70dba6d48c | 33.142857 | 112 | 0.645572 | 2.827008 | false | false | false | false |
pyspeckit/pyspeckit | pyspeckit/spectrum/smooth.py | 7 | 3104 | from __future__ import print_function
import numpy as np
from . import interpolation
def smoothed(spectrum, **kwargs):
    """
    Return a smoothed copy of *spectrum*, leaving the original untouched.

    Keyword arguments are forwarded to the copy's ``smooth`` method.
    """
    duplicate = spectrum.copy()
    duplicate.smooth(**kwargs)
    return duplicate
def smooth(data, smooth, smoothtype='gaussian', downsample=True,
           downsample_factor=None, convmode='same'):
    """
    Smooth and downsample the data array.  NaN data points will be replaced
    with interpolated values.

    Parameters
    ----------
    data : np.ndarray
        1D data array (may be a masked array)
    smooth : float
        Number of pixels to smooth by
    smoothtype : [ 'gaussian','hanning', or 'boxcar' ]
        type of smoothing kernel to use
    downsample : bool
        Downsample the data?
    downsample_factor : int
        Downsample by the smoothing factor, or something else?
    convmode : [ 'full','valid','same' ]
        see :mod:`numpy.convolve`.  'same' returns an array of the same length as
        'data' (assuming data is larger than the kernel)

    Returns
    -------
    The smoothed (and possibly downsampled) array
    """
    roundsmooth = int(round(smooth))  # can only downsample by integers

    if downsample_factor is None and downsample:
        downsample_factor = roundsmooth
    elif downsample_factor is None:
        downsample_factor = 1

    if smooth > len(data) or downsample_factor > len(data):
        raise ValueError("Error: trying to smooth by more than the spectral length.")

    if smoothtype == 'hanning':
        kernel = np.hanning(2 + roundsmooth) / np.hanning(2 + roundsmooth).sum()
    elif smoothtype == 'gaussian':
        # 11 samples per smoothing length; the sample count must be an int
        # (a float `num` breaks np.linspace on Python 3 / modern numpy)
        xkern = np.linspace(-5 * smooth, 5 * smooth, int(round(smooth * 11)))
        # Gaussian with FWHM = `smooth` pixels (sigma = FWHM/sqrt(8 ln 2))
        kernel = np.exp(-xkern**2 / (2 * (smooth / np.sqrt(8 * np.log(2)))**2))
        kernel /= kernel.sum()
        if len(kernel) > len(data):
            lengthdiff = len(kernel) - len(data)
            # BUGFIX: use floor division; true division (`/`) yields float
            # slice indices, a TypeError on Python 3
            if lengthdiff % 2 == 0:  # make kernel same size as data
                kernel = kernel[lengthdiff // 2:-lengthdiff // 2]
            else:  # make kernel smaller than data but still symmetric
                kernel = kernel[lengthdiff // 2 + 1:-lengthdiff // 2 - 1]
    elif smoothtype == 'boxcar':
        kernel = np.ones(roundsmooth) / float(roundsmooth)

    # deal with NANs or masked values by interpolating across them
    if hasattr(data, 'mask'):
        if type(data.mask) is np.ndarray:
            OK = ~data.mask
            if OK.sum() > 0:
                data = interpolation._interp(np.arange(len(data)),
                                             np.arange(len(data))[OK], data[OK])
            else:
                data = OK
    if np.any(~np.isfinite(data)):
        OK = np.isfinite(data)
        if OK.sum() > 0:
            data = interpolation._interp(np.arange(len(data)),
                                         np.arange(len(data))[OK], data[OK])
        else:
            data = OK
    if np.any(~np.isfinite(data)):
        raise ValueError("NANs in data array after they have been forcibly removed.")

    smdata = np.convolve(data, kernel, convmode)[::downsample_factor]

    return smdata
def smooth_multispec(data, smoothfactor, **kwargs):
    """
    Smooth multiple spectra as from an ObsBlock (shape should be [speclen, nspec])
    """
    # Smooth one spectrum (column) at a time, then restore the original
    # [speclen, nspec] axis order
    smoothed_columns = []
    for column in data.swapaxes(0, 1):
        smoothed_columns.append(smooth(column, smoothfactor, **kwargs))
    return np.array(smoothed_columns).swapaxes(0, 1)
| mit | 217acc5e4b120131cebbdfca5484af9a | 35.093023 | 100 | 0.615013 | 3.704057 | false | false | false | false |
pyspeckit/pyspeckit | pyspeckit/spectrum/headers.py | 8 | 1312 | from __future__ import print_function
try:
import astropy.io.fits as pyfits
except ImportError:
import pyfits
def intersection(header1, header2, if_conflict=None):
    """
    Return a pyfits Header containing the intersection of two pyfits Headers

    Parameters
    ----------
    header1, header2 :
        pyfits Header instances to intersect
    if_conflict : [ '1'/1/'Header1' | '2'/2/'Header2' | None ]
        Defines behavior if a keyword conflict is found (same key, different
        values).  Default (None) is to drop the conflicting key entirely.
    """
    newheader = pyfits.Header()
    for key, value in header1.items():
        if key in header2:
            try:
                if value == header2[key]:
                    # Values agree: keep the shared keyword
                    newheader[key] = value
                elif if_conflict in ('1', 1, 'Header1'):
                    newheader[key] = value
                elif if_conflict in ('2', 2, 'Header2'):
                    # BUGFIX: was `Header2[key]` (capital H, undefined name),
                    # which raised NameError whenever this branch was taken
                    newheader[key] = header2[key]
            except KeyError:
                """ Assume pyfits doesn't want you to have that keyword
                (because it shouldn't be possible to get here otherwise) """
                pass
        else:
            # Keyword only in header1: keep it unconditionally
            try:
                newheader[key] = value
            except KeyError:
                """ Assume pyfits doesn't want you to have that keyword
                (because it shouldn't be possible to get here otherwise) """
                pass
    return newheader
| mit | f76b452a7055a4448136485d4bc13157 | 32.641026 | 86 | 0.551067 | 4.555556 | false | false | false | false |
softlayer/softlayer-python | SoftLayer/managers/autoscale.py | 2 | 5854 | """
SoftLayer.autoscale
~~~~~~~~~~~~~~~~~~~
Autoscale manager
:license: MIT, see LICENSE for more details.
"""
class AutoScaleManager(object):
    """Manager for interacting with Autoscale instances."""

    def __init__(self, client):
        # SoftLayer API client used for every service call
        self.client = client

    def list(self, mask=None):
        """Calls `SoftLayer_Account::getScaleGroups()`_

        :param mask: optional SoftLayer_Scale_Group objectMask

        .. _SoftLayer_Account::getScaleGroups():
            https://sldn.softlayer.com/reference/services/SoftLayer_Account/getScaleGroups/
        """
        if not mask:
            mask = "mask[status,virtualGuestMemberCount]"

        # iter=True: page through all results rather than a single batch
        return self.client.call('SoftLayer_Account', 'getScaleGroups', mask=mask, iter=True)

    def details(self, identifier, mask=None):
        """Calls `SoftLayer_Scale_Group::getObject()`_

        :param identifier: SoftLayer_Scale_Group id
        :param mask: optional SoftLayer_Scale_Group objectMask

        .. _SoftLayer_Scale_Group::getObject():
            https://sldn.softlayer.com/reference/services/SoftLayer_Scale_Group/getObject/
        """
        if not mask:
            mask = """mask[virtualGuestMembers[id,virtualGuest[id,hostname,domain,provisionDate]], terminationPolicy,
                  virtualGuestMemberCount, virtualGuestMemberTemplate[sshKeys],
                  policies[id,name,createDate,cooldown,actions,triggers,scaleActions],
                  networkVlans[networkVlanId,networkVlan[networkSpace,primaryRouter[hostname]]],
                  loadBalancers, regionalGroup[locations]]"""

        return self.client.call('SoftLayer_Scale_Group', 'getObject', id=identifier, mask=mask)

    def get_policy(self, identifier, mask=None):
        """Calls `SoftLayer_Scale_Policy::getObject()`_

        :param identifier: SoftLayer_Scale_Policy id
        :param mask: optional SoftLayer_Scale_Policy objectMask

        .. _SoftLayer_Scale_Policy::getObject():
            https://sldn.softlayer.com/reference/services/SoftLayer_Scale_Policy/getObject/
        """
        if not mask:
            mask = """mask[cooldown, createDate, id, name, actions, triggers[type]
                   ]"""

        return self.client.call('SoftLayer_Scale_Policy', 'getObject', id=identifier, mask=mask)

    def scale(self, identifier, amount):
        """Calls `SoftLayer_Scale_Group::scale()`_

        :param identifier: SoftLayer_Scale_Group Id
        :param amount: positive or negative number to scale the group by

        .. _SoftLayer_Scale_Group::scale():
            https://sldn.softlayer.com/reference/services/SoftLayer_Scale_Group/scale/
        """
        return self.client.call('SoftLayer_Scale_Group', 'scale', amount, id=identifier)

    def scale_to(self, identifier, amount):
        """Calls `SoftLayer_Scale_Group::scaleTo()`_

        :param identifier: SoftLayer_Scale_Group Id
        :param amount: number to scale the group to.

        .. _SoftLayer_Scale_Group::scaleTo():
            https://sldn.softlayer.com/reference/services/SoftLayer_Scale_Group/scaleTo/
        """
        return self.client.call('SoftLayer_Scale_Group', 'scaleTo', amount, id=identifier)

    def get_logs(self, identifier, mask=None, object_filter=None):
        """Calls `SoftLayer_Scale_Group::getLogs()`_

        :param identifier: SoftLayer_Scale_Group Id
        :param mask: optional SoftLayer_Scale_Group_Log objectMask
        :param object_filter: optional SoftLayer_Scale_Group_Log objectFilter

        .. _SoftLayer_Scale_Group::getLogs():
            https://sldn.softlayer.com/reference/services/SoftLayer_Scale_Group/getLogs/
        """
        return self.client.call('SoftLayer_Scale_Group', 'getLogs', id=identifier, mask=mask, filter=object_filter,
                                iter=True)

    def get_virtual_guests(self, identifier, mask=None):
        """Calls `SoftLayer_Scale_Group::getVirtualGuestMembers()`_

        :param identifier: SoftLayer_Scale_Group Id
        :param mask: optional SoftLayer_Scale_Member objectMask

        .. _SoftLayer_Scale_Group::getVirtualGuestMembers():
            https://sldn.softlayer.com/reference/services/SoftLayer_Scale_Group/getVirtualGuestMembers/
        """
        return self.client.call('SoftLayer_Scale_Group', 'getVirtualGuestMembers', id=identifier, mask=mask, iter=True)

    def edit(self, identifier, template):
        """Calls `SoftLayer_Scale_Group::editObject()`_

        :param identifier: SoftLayer_Scale_Group id
        :param template: `SoftLayer_Scale_Group`_

        .. _SoftLayer_Scale_Group::editObject():
            https://sldn.softlayer.com/reference/services/SoftLayer_Scale_Group/editObject/

        .. _SoftLayer_Scale_Group: https://sldn.softlayer.com/reference/datatypes/SoftLayer_Scale_Group/
        """
        return self.client.call('SoftLayer_Scale_Group', 'editObject', template, id=identifier)

    def create(self, template):
        """Calls `SoftLayer_Scale_Group::createObject()`_

        :param template: `SoftLayer_Scale_Group`_

        .. _SoftLayer_Scale_Group::createObject():
            https://sldn.softlayer.com/reference/services/SoftLayer_Scale_Group/createObject/

        .. _SoftLayer_Scale_Group: https://sldn.softlayer.com/reference/datatypes/SoftLayer_Scale_Group/
        """
        return self.client.call('SoftLayer_Scale_Group', 'createObject', template)

    def delete(self, identifier):
        """Calls `SoftLayer_Scale_Group::forceDeleteObject()`_

        :param identifier: SoftLayer_Scale_Group id

        .. _SoftLayer_Scale_Group::forceDeleteObject():
            https://sldn.softlayer.com/reference/services/SoftLayer_Scale_Group/forceDeleteObject/
        """
        return self.client.call('SoftLayer_Scale_Group', 'forceDeleteObject', id=identifier)
| mit | add43b486cbccd48e0fde1d26a39341d | 41.115108 | 119 | 0.652545 | 3.985024 | false | false | false | false |
softlayer/softlayer-python | SoftLayer/CLI/loadbal/health.py | 2 | 3182 | """Manage LBaaS health checks."""
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.exceptions import SoftLayerAPIError
from SoftLayer import utils
@click.command(cls=SoftLayer.CLI.command.SLCommand, )
@click.argument('identifier')
@click.option('--uuid', required=True, help="Health check UUID to modify.")
@click.option('--interval', '-i', type=click.IntRange(2, 60), help="Seconds between checks. [2-60]")
@click.option('--retry', '-r', type=click.IntRange(1, 10), help="Number of times before marking as DOWN. [1-10]")
@click.option('--timeout', '-t', type=click.IntRange(1, 59), help="Seconds to wait for a connection. [1-59]")
@click.option('--url', '-u', help="Url path for HTTP/HTTPS checks.")
@environment.pass_env
def cli(env, identifier, uuid, interval, retry, timeout, url):
    """Manage LBaaS health checks."""
    if not any([interval, retry, timeout, url]):
        raise exceptions.ArgumentError("Specify either interval, retry, timeout, url")

    # Map CLI parameters to the API's expected field names, dropping
    # anything the user did not supply
    template = {'healthMonitorUuid': uuid, 'interval': interval,
                'maxRetries': retry, 'timeout': timeout, 'urlPath': url}
    clean_template = {k: v for k, v in template.items() if v is not None}

    mgr = SoftLayer.LoadBalancerManager(env.client)
    # Need to get the LBaaS uuid if it wasn't supplied
    lb_uuid, lb_id = mgr.get_lbaas_uuid_id(identifier)
    print("UUID: {}, ID: {}".format(lb_uuid, lb_id))

    # Get the current health checks, and find the one we are updating.
    mask = "mask[healthMonitors, listeners[uuid,defaultPool[healthMonitor]]]"
    lbaas = mgr.get_lb(lb_id, mask=mask)

    check = {}
    # Seed the update with the current values: the API expects the full check
    # definition, not just the changed fields.
    for listener in lbaas.get('listeners', []):
        if utils.lookup(listener, 'defaultPool', 'healthMonitor', 'uuid') == uuid:
            check['backendProtocol'] = utils.lookup(listener, 'defaultPool', 'protocol')
            check['backendPort'] = utils.lookup(listener, 'defaultPool', 'protocolPort')
            check['healthMonitorUuid'] = uuid
            check['interval'] = utils.lookup(listener, 'defaultPool', 'healthMonitor', 'interval')
            check['maxRetries'] = utils.lookup(listener, 'defaultPool', 'healthMonitor', 'maxRetries')
            check['timeout'] = utils.lookup(listener, 'defaultPool', 'healthMonitor', 'timeout')
            check['urlPath'] = utils.lookup(listener, 'defaultPool', 'healthMonitor', 'urlPath')

    # BUGFIX: previously a UUID matching no listener fell through to an
    # opaque KeyError (or sent a partial check to the API); fail fast with
    # a clear message instead.
    if not check:
        raise exceptions.ArgumentError(
            "Health check {} not found on load balancer {}".format(uuid, identifier))

    if url and check['backendProtocol'] == 'TCP':
        raise exceptions.ArgumentError('--url cannot be used with TCP checks')

    # Overlay the user-supplied values on the current definition
    check.update(clean_template)

    try:
        mgr.update_lb_health_monitors(lb_uuid, [check])
        click.secho('Health Check {} updated successfully'.format(uuid), fg='green')
    except SoftLayerAPIError as exception:
        click.secho('Failed to update {}'.format(uuid), fg='red')
        click.secho("ERROR: {}".format(exception.faultString), fg='red')
| mit | e774e706cf78c0932eab511272cccb20 | 48.71875 | 113 | 0.675361 | 3.79261 | false | false | false | false |
softlayer/softlayer-python | SoftLayer/shell/cmd_help.py | 3 | 1103 | """Print help text."""
# :license: MIT, see LICENSE for more details.
import click
from click import formatting
from SoftLayer.CLI import core as cli_core
from SoftLayer.CLI import environment
from SoftLayer.shell import routes
@click.command()
@environment.pass_env
@click.pass_context
def cli(ctx, env):
"""Print shell help text."""
env.out("Welcome to the SoftLayer shell.")
env.out("")
formatter = formatting.HelpFormatter()
commands = []
shell_commands = []
for name in cli_core.cli.list_commands(ctx):
command = cli_core.cli.get_command(ctx, name)
if command.short_help is None:
command.short_help = command.help
details = (name, command.short_help)
if name in dict(routes.ALL_ROUTES):
shell_commands.append(details)
else:
commands.append(details)
with formatter.section('Shell Commands'):
formatter.write_dl(shell_commands)
with formatter.section('Commands'):
formatter.write_dl(commands)
for line in formatter.buffer:
env.out(line, newline=False)
| mit | 73f9bf4702a9661bb5a19e0dfc456048 | 26.575 | 53 | 0.663645 | 3.925267 | false | false | false | false |
softlayer/softlayer-python | SoftLayer/managers/block.py | 2 | 8588 | """
SoftLayer.block
~~~~~~~~~~~~~~~
Block Storage Manager
:license: MIT, see LICENSE for more details.
"""
from SoftLayer.managers.storage import StorageManager
from SoftLayer.managers import storage_utils
from SoftLayer import utils
# pylint: disable=too-many-public-methods
class BlockStorageManager(StorageManager):
    """Manages SoftLayer Block Storage volumes.

    Thin wrapper over the SoftLayer_Network_Storage family of API services.

    See product information here: https://www.ibm.com/cloud/block-storage
    """

    def list_block_volume_limit(self):
        """Returns a list of block volume count limits.

        :return: Returns a list of block volume count limits.
        """
        return self.get_volume_count_limits()

    def list_block_volumes(self, datacenter=None, username=None, storage_type=None, order=None, **kwargs):
        """Returns a list of block volumes.

        :param datacenter: Datacenter short name (e.g.: dal09)
        :param username: Name of volume.
        :param storage_type: Type of volume: Endurance or Performance
        :param order: Volume order id.
        :param kwargs: Optional 'mask' and 'filter' overrides for the API call.
        :return: Returns a list of block volumes.
        """
        # Default object mask, used only when the caller did not supply one.
        if 'mask' not in kwargs:
            items = [
                'id',
                'username',
                'lunId',
                'capacityGb',
                'bytesUsed',
                'serviceResource.datacenter[name]',
                'serviceResourceBackendIpAddress',
                'activeTransactionCount',
                'replicationPartnerCount'
            ]
            kwargs['mask'] = ','.join(items)

        _filter = utils.NestedDict(kwargs.get('filter') or {})

        # Exclude legacy ISCSI service resources and limit the listing to
        # block storage types.
        _filter['iscsiNetworkStorage']['serviceResource']['type']['type'] = \
            (utils.query_filter('!~ ISCSI'))

        _filter['iscsiNetworkStorage']['storageType']['keyName'] = (
            utils.query_filter('*BLOCK_STORAGE*'))
        if storage_type:
            _filter['iscsiNetworkStorage']['storageType']['keyName'] = (
                utils.query_filter('%s_BLOCK_STORAGE*' % storage_type.upper()))

        if datacenter:
            _filter['iscsiNetworkStorage']['serviceResource']['datacenter'][
                'name'] = (utils.query_filter(datacenter))

        if username:
            _filter['iscsiNetworkStorage']['username'] = \
                (utils.query_filter(username))

        if order:
            _filter['iscsiNetworkStorage']['billingItem']['orderItem'][
                'order']['id'] = (utils.query_filter(order))

        kwargs['filter'] = _filter.to_dict()
        return self.client.call('Account', 'getIscsiNetworkStorage', iter=True, **kwargs)

    def get_block_volume_details(self, volume_id, **kwargs):
        """Returns details about the specified volume.

        :param volume_id: ID of volume.
        :param kwargs: Optional mask/filter overrides.
        :return: Returns details about the specified volume.
        """
        return self.get_volume_details(volume_id, **kwargs)

    def get_block_volume_access_list(self, volume_id, **kwargs):
        """Returns a list of authorized hosts for a specified volume.

        :param volume_id: ID of volume.
        :param kwargs: Optional mask/filter overrides.
        :return: Returns a list of authorized hosts for a specified volume.
        """
        return self.get_volume_access_list(volume_id, **kwargs)

    def get_block_volume_snapshot_list(self, volume_id, **kwargs):
        """Returns a list of snapshots for the specified volume.

        :param volume_id: ID of volume.
        :param kwargs: Optional mask/filter overrides.
        :return: Returns a list of snapshots for the specified volume.
        """
        return self.get_volume_snapshot_list(volume_id, **kwargs)

    def assign_subnets_to_acl(self, access_id, subnet_ids):
        """Assigns subnet records to ACL for the access host.

        access_id is the host_id obtained by: slcli block access-list <volume_id>

        :param integer access_id: id of the access host
        :param list subnet_ids: The ids of the subnets to be assigned
        :return: Returns int array of assigned subnet ids
        """
        return self.client.call('Network_Storage_Allowed_Host', 'assignSubnetsToAcl', subnet_ids, id=access_id)

    def remove_subnets_from_acl(self, access_id, subnet_ids):
        """Removes subnet records from ACL for the access host.

        access_id is the host_id obtained by: slcli block access-list <volume_id>

        :param integer access_id: id of the access host
        :param list subnet_ids: The ids of the subnets to be removed
        :return: Returns int array of removed subnet ids
        """
        return self.client.call('Network_Storage_Allowed_Host', 'removeSubnetsFromAcl', subnet_ids, id=access_id)

    def get_subnets_in_acl(self, access_id):
        """Returns a list of subnet records for the access host.

        access_id is the host_id obtained by: slcli block access-list <volume_id>

        :param integer access_id: id of the access host
        :return: Returns an array of SoftLayer_Network_Subnet objects
        """
        return self.client.call('Network_Storage_Allowed_Host', 'getSubnetsInAcl', id=access_id)

    def order_block_volume(self, storage_type, location, size, os_type,
                           iops=None, tier_level=None, snapshot_size=None,
                           service_offering='storage_as_a_service',
                           hourly_billing_flag=False):
        """Places an order for a block volume.

        :param storage_type: 'performance' or 'endurance'
        :param location: Datacenter in which to order iSCSI volume
        :param size: Size of the desired volume, in GB
        :param os_type: OS Type to use for volume alignment, see help for list
        :param iops: Number of IOPs for a "Performance" order
        :param tier_level: Tier level to use for an "Endurance" order
        :param snapshot_size: The size of optional snapshot space,
            if snapshot space should also be ordered (None if not ordered)
        :param service_offering: Requested offering package to use in the order
            ('storage_as_a_service', 'enterprise', or 'performance')
        :param hourly_billing_flag: Billing type, monthly (False)
            or hourly (True), default to monthly.
        :return: The result of the Product_Order placeOrder call.
        """
        # Shared order-building logic lives in storage_utils; only the
        # OS format type is block-specific.
        order = storage_utils.prepare_volume_order_object(
            self, storage_type, location, size, iops, tier_level,
            snapshot_size, service_offering, 'block', hourly_billing_flag
        )

        order['osFormatType'] = {'keyName': os_type}

        return self.client.call('Product_Order', 'placeOrder', order)

    def cancel_block_volume(self, volume_id, reason='No longer needed', immediate=False):
        """Cancels the given block storage volume.

        :param integer volume_id: The volume ID
        :param string reason: The reason for cancellation
        :param boolean immediate: Cancel immediately (True) or on the
            anniversary date (False)
        """
        return self.cancel_volume(volume_id, reason, immediate)

    def set_credential_password(self, access_id, password):
        """Sets the password for an access host.

        :param integer access_id: id of the access host
        :param string password: password to set
        """
        return self.client.call('Network_Storage_Allowed_Host', 'setCredentialPassword',
                                password, id=access_id)

    def create_or_update_lun_id(self, volume_id, lun_id):
        """Set the LUN ID on a volume.

        :param integer volume_id: The id of the volume
        :param integer lun_id: LUN ID to set on the volume
        :return: a SoftLayer_Network_Storage_Property object
        """
        return self.client.call('Network_Storage', 'createOrUpdateLunId', lun_id, id=volume_id)

    def _get_ids_from_username(self, username):
        # Resolve a volume username to the matching volume id(s); returns an
        # empty list when nothing matches.
        object_mask = "mask[id]"
        results = self.list_block_volumes(username=username, mask=object_mask)
        if results:
            return [result['id'] for result in results]
        return []

    def get_cloud_list(self):
        """Returns a list of cloud object storage accounts.

        :return: Returns a list of cloud object storage accounts.
        """
        mask = 'mask[id,username,billingItem,storageType, notes]'
        return self.client.call('Account', 'getHubNetworkStorage', mask=mask)

    def get_buckets(self, object_id):
        """Returns bucket data for the given cloud object storage account.

        :param object_id: Cloud object storage identifier
        :return: Returns the account's buckets
        """
        return self.client.call('SoftLayer_Network_Storage_Hub_Cleversafe_Account', 'getBuckets', id=object_id)
| mit | 2adab59fbacb25978e4a57f082dae5b8 | 38.214612 | 113 | 0.625058 | 4.185185 | false | false | false | false |
softlayer/softlayer-python | SoftLayer/CLI/file/snapshot/list.py | 2 | 1756 | """List file storage snapshots."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import columns as column_helper
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.CLI import helpers
# Columns available to the snapshot listing command; each Column's `mask`
# names the SoftLayer API property the column is populated from.
COLUMNS = [
    column_helper.Column('id', ('id',), mask='id'),
    column_helper.Column('name', ('notes',), mask='notes'),
    column_helper.Column('created', ('snapshotCreationTimestamp',),
                         mask='snapshotCreationTimestamp'),
    column_helper.Column('size_bytes', ('snapshotSizeBytes',),
                         mask='snapshotSizeBytes'),
]

# Columns shown when the user does not pass --columns explicitly.
DEFAULT_COLUMNS = [
    'id',
    'name',
    'created',
    'size_bytes'
]
@click.command(cls=SoftLayer.CLI.command.SLCommand, )
@click.argument('volume_id')
@click.option('--sortby', help='Column to sort by',
              default='created')
@click.option('--columns',
              callback=column_helper.get_formatter(COLUMNS),
              help='Columns to display. Options: {0}'.format(
                  ', '.join(column.name for column in COLUMNS)),
              default=','.join(DEFAULT_COLUMNS))
@environment.pass_env
def cli(env, volume_id, sortby, columns):
    """List file storage snapshots."""
    file_manager = SoftLayer.FileStorageManager(env.client)
    resolved_id = helpers.resolve_id(file_manager.resolve_ids, volume_id, 'Volume Id')
    # Only request the properties the selected columns actually need.
    snapshot_records = file_manager.get_file_volume_snapshot_list(
        resolved_id,
        mask=columns.mask()
    )

    table = formatting.Table(columns.columns)
    table.sortby = sortby
    for record in snapshot_records:
        row = [cell or formatting.blank() for cell in columns.row(record)]
        table.add_row(row)
    env.fout(table)
| mit | dc1a6aaf6d99173af9bc441a33863af9 | 31.518519 | 86 | 0.644077 | 3.972851 | false | false | false | false |
softlayer/softlayer-python | SoftLayer/managers/storage_utils.py | 2 | 42407 | """
SoftLayer.storage_utils
~~~~~~~~~~~~~~~
Utility functions used by File and Block Storage Managers
:license: MIT, see LICENSE for more details.
"""
from SoftLayer import exceptions
from SoftLayer import utils
# pylint: disable=too-many-lines
# Maps an Endurance tier, expressed as IOPS per GB, to the internal
# "storage tier level" capacity value used by the product catalog.
ENDURANCE_TIERS = {
    0.25: 100,
    2: 200,
    4: 300,
    10: 1000,
}
def populate_host_templates(hardware_ids=None,
                            virtual_guest_ids=None,
                            ip_address_ids=None,
                            subnet_ids=None):
    """Returns a populated array with the IDs provided

    :param hardware_ids: A List of SoftLayer_Hardware ids
    :param virtual_guest_ids: A List of SoftLayer_Virtual_Guest ids
    :param ip_address_ids: A List of SoftLayer_Network_Subnet_IpAddress ids
    :param subnet_ids: A List of SoftLayer_Network_Subnet ids
    :return: array of objects formatted for allowAccessFromHostList
    """
    # Each id list maps to exactly one SoftLayer object type; iterating the
    # pairs replaces four copies of the same append loop. The ordering
    # (hardware, guest, ip, subnet) matches the original behavior.
    id_groups = (
        ('SoftLayer_Hardware', hardware_ids),
        ('SoftLayer_Virtual_Guest', virtual_guest_ids),
        ('SoftLayer_Network_Subnet_IpAddress', ip_address_ids),
        ('SoftLayer_Network_Subnet', subnet_ids),
    )

    host_templates = []
    for object_type, ids in id_groups:
        if ids is not None:
            host_templates.extend(
                {'objectType': object_type, 'id': object_id}
                for object_id in ids)
    return host_templates
def get_package(manager, category_code):
    """Returns a product package based on type of storage.

    :param manager: The storage manager which calls this function.
    :param category_code: Category code of product package.
    :return: Returns a package based on type of storage.
    """
    # Restrict the lookup to active packages carrying the requested category.
    _filter = utils.NestedDict({})
    _filter['categories']['categoryCode'] = (
        utils.query_filter(category_code))
    _filter['statusCode'] = (utils.query_filter('ACTIVE'))

    packages = manager.client.call(
        'Product_Package', 'getAllObjects',
        filter=_filter.to_dict(),
        mask='id,name,items[prices[categories],attributes]'
    )

    # Exactly one package must match; none or several is an error.
    if not packages:
        raise ValueError('No packages were found for %s' % category_code)
    if len(packages) > 1:
        raise ValueError('More than one package was found for %s'
                         % category_code)

    return packages[0]
def get_location_id(manager, location):
    """Returns location id

    :param manager: The storage manager which calls this function.
    :param location: Datacenter short name
    :return: Returns location id
    """
    loc_svc = manager.client['Location_Datacenter']
    # Scan all datacenters for a short-name match and return its id.
    for datacenter in loc_svc.getDatacenters(mask='mask[longName,id,name]'):
        if datacenter['name'] == location:
            return datacenter['id']
    raise ValueError('Invalid datacenter name specified.')
def find_price_by_category(package, price_category):
    """Find the price in the given package that has the specified category

    :param package: The AsAService, Enterprise, or Performance product package
    :param price_category: The price category code to search for
    :return: Returns the price for the given category, or an error if not found
    """
    # Return the first item whose price list carries the requested category.
    for item in package['items']:
        candidate = _find_price_id(item['prices'], price_category)
        if candidate:
            return candidate

    raise ValueError("Could not find price with the category, %s" % price_category)
def find_ent_space_price(package, category, size, tier_level):
    """Find the space price for the given category, size, and tier

    :param package: The Enterprise (Endurance) product package
    :param category: The category of space (endurance, replication, snapshot)
    :param size: The size for which a price is desired
    :param tier_level: The endurance tier for which a price is desired
    :return: Returns the matching price, or an error if not found
    """
    # Anything other than snapshot/replication is treated as endurance space.
    category_codes = {
        'snapshot': 'storage_snapshot_space',
        'replication': 'performance_storage_replication',
    }
    category_code = category_codes.get(category, 'performance_storage_space')

    level = ENDURANCE_TIERS.get(tier_level)

    for item in package['items']:
        if int(item['capacity']) == size:
            matched = _find_price_id(item['prices'], category_code,
                                     'STORAGE_TIER_LEVEL', level)
            if matched:
                return matched

    raise ValueError("Could not find price for %s storage space" % category)
def find_ent_endurance_tier_price(package, tier_level):
    """Find the price in the given package with the specified tier level

    :param package: The Enterprise (Endurance) product package
    :param tier_level: The endurance tier for which a price is desired
    :return: Returns the price for the given tier, or an error if not found
    """
    target_level = ENDURANCE_TIERS.get(tier_level)

    for item in package['items']:
        # Only items carrying an attribute whose value equals the tier's
        # internal level qualify for the price lookup.
        if not any(int(attribute['value']) == target_level
                   for attribute in item.get('attributes', [])):
            continue

        matched = _find_price_id(item['prices'], 'storage_tier_level')
        if matched:
            return matched

    raise ValueError("Could not find price for endurance tier level")
def find_endurance_tier_iops_per_gb(volume):
    """Find the tier for the given endurance volume (IOPS per GB)

    :param volume: The volume for which the tier level is desired
    :return: Returns a float value indicating the IOPS per GB for the volume
    :raises ValueError: If the volume's storageTierLevel is not recognized
    """
    # Map the API tier keynames to their IOPS-per-GB values. A dict lookup
    # replaces the original if/elif chain, which also carried a dead
    # `iops_per_gb = 0.25` default that was always overwritten or unreachable.
    tier_iops_per_gb = {
        "LOW_INTENSITY_TIER": 0.25,
        "READHEAVY_TIER": 2,
        "WRITEHEAVY_TIER": 4,
        "10_IOPS_PER_GB": 10,
    }

    tier = volume['storageTierLevel']
    try:
        return tier_iops_per_gb[tier]
    except KeyError:
        raise ValueError("Could not find tier IOPS per GB for this volume") from None
def find_perf_space_price(package, size):
    """Find the price in the given package with the specified size

    :param package: The Performance product package
    :param size: The storage space size for which a price is desired
    :return: Returns the price for the given size, or an error if not found
    """
    for item in package['items']:
        if int(item['capacity']) == size:
            matched = _find_price_id(item['prices'], 'performance_storage_space')
            if matched:
                return matched

    raise ValueError("Could not find performance space price for this volume")
def find_perf_iops_price(package, size, iops):
    """Find the price in the given package with the specified size and iops

    :param package: The Performance product package
    :param size: The size of storage space for which an IOPS price is desired
    :param iops: The number of IOPS for which a price is desired
    :return: Returns the price for the size and IOPS, or an error if not found
    """
    for item in package['items']:
        # Item capacity here represents the IOPS count, not the space size.
        if int(item['capacity']) == int(iops):
            matched = _find_price_id(item['prices'], 'performance_storage_iops',
                                     'STORAGE_SPACE', size)
            if matched:
                return matched

    raise ValueError("Could not find price for iops for the given volume")
def find_saas_endurance_space_price(package, size, tier_level):
    """Find the SaaS endurance storage space price for the size and tier

    :param package: The Storage As A Service product package
    :param size: The volume size for which a price is desired
    :param tier_level: The endurance tier for which a price is desired
    :return: Returns the price for the size and tier, or an error if not found
    """
    # 0.25 keeps its fractional form (rendered "0_25"); whole tiers drop
    # the decimal entirely, matching catalog key names.
    tier_token = tier_level if tier_level == 0.25 else int(tier_level)
    key_name = 'STORAGE_SPACE_FOR_{0}_IOPS_PER_GB'.format(tier_token).replace(".", "_")

    for item in package['items']:
        if key_name not in item['keyName']:
            continue
        if 'capacityMinimum' not in item or 'capacityMaximum' not in item:
            continue
        if not int(item['capacityMinimum']) <= size <= int(item['capacityMaximum']):
            continue

        matched = _find_price_id(item['prices'], 'performance_storage_space')
        if matched:
            return matched

    raise ValueError("Could not find price for endurance storage space")
def find_saas_endurance_tier_price(package, tier_level):
    """Find the SaaS storage tier level price for the specified tier level

    :param package: The Storage As A Service product package
    :param tier_level: The endurance tier for which a price is desired
    :return: Returns the price for the given tier, or an error if not found
    """
    target_capacity = ENDURANCE_TIERS.get(tier_level)

    for item in package['items']:
        # Skip items that are not storage-tier-level entries.
        if item.get('itemCategory', {}).get('categoryCode') \
                != 'storage_tier_level':
            continue
        if int(item['capacity']) != target_capacity:
            continue

        matched = _find_price_id(item['prices'], 'storage_tier_level')
        if matched:
            return matched

    raise ValueError("Could not find price for endurance tier level")
def find_saas_perform_space_price(package, size):
    """Find the SaaS performance storage space price for the given size

    :param package: The Storage As A Service product package
    :param size: The volume size for which a price is desired
    :return: Returns the price for the size and tier, or an error if not found
    """
    for item in package['items']:
        if item.get('itemCategory', {}).get('categoryCode') \
                != 'performance_storage_space':
            continue
        if 'capacityMinimum' not in item or 'capacityMaximum' not in item:
            continue

        capacity_minimum = int(item['capacityMinimum'])
        capacity_maximum = int(item['capacityMaximum'])
        if not capacity_minimum <= size <= capacity_maximum:
            continue

        # The catalog key encodes the item's size range, e.g. "100_199_GBS".
        if item['keyName'] != '{0}_{1}_GBS'.format(capacity_minimum, capacity_maximum):
            continue

        matched = _find_price_id(item['prices'], 'performance_storage_space')
        if matched:
            return matched

    raise ValueError("Could not find price for performance storage space")
def find_saas_perform_iops_price(package, size, iops):
    """Find the SaaS IOPS price for the specified size and iops

    :param package: The Storage As A Service product package
    :param size: The volume size for which a price is desired
    :param iops: The number of IOPS for which a price is desired
    :return: Returns the price for the size and IOPS, or an error if not found
    """
    for item in package['items']:
        if item.get('itemCategory', {}).get('categoryCode') \
                != 'performance_storage_iops':
            continue
        if 'capacityMinimum' not in item or 'capacityMaximum' not in item:
            continue

        # The capacity range here bounds the IOPS count, not the space size.
        if not int(item['capacityMinimum']) <= iops <= int(item['capacityMaximum']):
            continue

        matched = _find_price_id(item['prices'], 'performance_storage_iops',
                                 'STORAGE_SPACE', size)
        if matched:
            return matched

    raise ValueError("Could not find price for iops for the given volume")
def find_saas_snapshot_space_price(package, size, tier=None, iops=None):
    """Find the price in the SaaS package for the desired snapshot space size

    :param package: The product package of the endurance storage type
    :param size: The snapshot space size for which a price is desired
    :param tier: The tier of the volume for which space is being ordered
    :param iops: The IOPS of the volume for which space is being ordered
    :return: Returns the price for the given size, or an error if not found
    """
    # Endurance volumes restrict the price by tier level; performance
    # volumes restrict it by IOPS.
    if tier is not None:
        restriction_type = 'STORAGE_TIER_LEVEL'
        restriction_value = ENDURANCE_TIERS.get(tier)
    else:
        restriction_type = 'IOPS'
        restriction_value = iops

    for item in package['items']:
        if int(item['capacity']) == size:
            matched = _find_price_id(item['prices'], 'storage_snapshot_space',
                                     restriction_type, restriction_value)
            if matched:
                return matched

    raise ValueError("Could not find price for snapshot space")
def find_saas_replication_price(package, tier=None, iops=None):
    """Find the price in the given package for the desired replicant volume

    :param package: The product package of the endurance storage type
    :param tier: The tier of the primary storage volume
    :param iops: The IOPS of the primary storage volume
    :return: Returns the replication price, or an error if not found
    """
    # Tier-based and IOPS-based primaries use different catalog items and
    # price restrictions.
    if tier is not None:
        item_keyname = 'REPLICATION_FOR_TIERBASED_PERFORMANCE'
        restriction_type = 'STORAGE_TIER_LEVEL'
        restriction_value = ENDURANCE_TIERS.get(tier)
    else:
        item_keyname = 'REPLICATION_FOR_IOPSBASED_PERFORMANCE'
        restriction_type = 'IOPS'
        restriction_value = iops

    for item in package['items']:
        if item['keyName'] != item_keyname:
            continue

        matched = _find_price_id(
            item['prices'],
            'performance_storage_replication',
            restriction_type,
            restriction_value
        )
        if matched:
            return matched

    raise ValueError("Could not find price for replicant volume")
def find_snapshot_schedule_id(volume, snapshot_schedule_keyname):
    """Find the snapshot schedule ID for the given volume and keyname

    :param volume: The volume for which the snapshot ID is desired
    :param snapshot_schedule_keyname: The keyname of the snapshot schedule
    :return: Returns an int value indicating the volume's snapshot schedule ID
    """
    for schedule in volume['schedules']:
        schedule_type = schedule.get('type', {})
        # Schedules without a typed keyname cannot match.
        if 'keyname' not in schedule_type:
            continue
        if schedule_type['keyname'] == snapshot_schedule_keyname:
            return schedule['id']

    raise ValueError("The given snapshot schedule ID was not found for "
                     "the given storage volume")
def prepare_snapshot_order_object(manager, volume, capacity, tier, upgrade):
    """Prepare the snapshot space order object for the placeOrder() method

    :param manager: The File or Block manager calling this function
    :param integer volume: The volume for which snapshot space is ordered
    :param integer capacity: The snapshot space size to order, in GB
    :param float tier: The tier level of the volume, in IOPS per GB (optional)
    :param boolean upgrade: Flag to indicate if this order is an upgrade
    :return: Returns the order object for the
             Product_Order service's placeOrder() method
    :raises SoftLayerError: If the volume is cancelled, has an unsupported
             billing category, or lacks a usable storage type keyName
    """
    # Ensure the storage volume has not been cancelled
    if 'billingItem' not in volume:
        raise exceptions.SoftLayerError(
            'This volume has been cancelled; unable to order snapshot space')

    # Determine and validate the storage volume's billing item category
    billing_item_category_code = volume['billingItem']['categoryCode']
    if billing_item_category_code == 'storage_as_a_service':
        order_type_is_saas = True
    elif billing_item_category_code == 'storage_service_enterprise':
        order_type_is_saas = False
    else:
        raise exceptions.SoftLayerError(
            "Snapshot space cannot be ordered for a primary volume with a "
            "billing item category code of '%s'" % billing_item_category_code)

    # Use the volume's billing item category code to get the product package
    package = get_package(manager, billing_item_category_code)

    # Find prices based on the volume's type and billing item category
    if order_type_is_saas:  # 'storage_as_a_service' package
        volume_storage_type = volume['storageType']['keyName']
        if 'ENDURANCE' in volume_storage_type:
            # Endurance volumes price snapshot space by tier (IOPS per GB);
            # derive the tier from the volume when the caller omitted it.
            if tier is None:
                tier = find_endurance_tier_iops_per_gb(volume)
            prices = [find_saas_snapshot_space_price(
                package, capacity, tier=tier)]
        elif 'PERFORMANCE' in volume_storage_type:
            # Performance volumes price snapshot space by provisioned IOPS;
            # pre-v2 STaaS volumes (no Encryption at Rest) are not eligible.
            if not _staas_version_is_v2_or_above(volume):
                raise exceptions.SoftLayerError(
                    "Snapshot space cannot be ordered for this performance "
                    "volume since it does not support Encryption at Rest.")
            iops = int(volume['provisionedIops'])
            prices = [find_saas_snapshot_space_price(
                package, capacity, iops=iops)]
        else:
            raise exceptions.SoftLayerError(
                "Storage volume does not have a valid storage type "
                "(with an appropriate keyName to indicate the "
                "volume is a PERFORMANCE or an ENDURANCE volume)")
    else:  # 'storage_service_enterprise' package
        if tier is None:
            tier = find_endurance_tier_iops_per_gb(volume)
        prices = [find_ent_space_price(package, 'snapshot', capacity, tier)]

    # Currently, these types are valid for snapshot space orders, whether
    # the base volume's order container was Enterprise or AsAService
    if upgrade:
        complex_type = 'SoftLayer_Container_Product_Order_'\
                       'Network_Storage_Enterprise_SnapshotSpace_Upgrade'
    else:
        complex_type = 'SoftLayer_Container_Product_Order_'\
                       'Network_Storage_Enterprise_SnapshotSpace'

    # Determine if hourly billing should be used; default to monthly when
    # the billing item does not carry the flag.
    hourly_billing_flag = utils.lookup(volume, 'billingItem', 'hourlyFlag')
    if hourly_billing_flag is None:
        hourly_billing_flag = False

    # Build and return the order object
    snapshot_space_order = {
        'complexType': complex_type,
        'packageId': package['id'],
        'prices': prices,
        'quantity': 1,
        'location': volume['billingItem']['location']['id'],
        'volumeId': volume['id'],
        'useHourlyPricing': hourly_billing_flag
    }

    return snapshot_space_order
def prepare_volume_order_object(manager, storage_type, location, size,
                                iops, tier, snapshot_size, service_offering,
                                volume_type, hourly_billing_flag=False):
    """Prepare the order object which is submitted to the placeOrder() method

    :param manager: The File or Block manager calling this function
    :param storage_type: "performance" or "endurance"
    :param location: Requested datacenter location name for the ordered volume
    :param size: Desired size of the volume, in GB
    :param iops: Number of IOPs for a "Performance" volume order
    :param tier: Tier level to use for an "Endurance" volume order
    :param snapshot_size: The size of snapshot space for the volume (optional)
    :param service_offering: Requested offering package to use for the order
    :param volume_type: The type of the volume to order ('file' or 'block')
    :param hourly_billing_flag: Billing type, monthly (False) or hourly (True)
    :return: Returns the order object for the
             Product_Order service's placeOrder() method
    :raises SoftLayerError: For an invalid storage type, datacenter name, or
             unsupported offering/storage-type combination
    """
    # Ensure the volume storage type is valid
    if storage_type != 'performance' and storage_type != 'endurance':
        raise exceptions.SoftLayerError(
            "Volume storage type must be either performance or endurance")

    # Find the ID for the requested location
    try:
        location_id = get_location_id(manager, location)
    except ValueError as ex:
        message = "Invalid datacenter name specified. Please provide the lower case short name (e.g.: dal09)"
        raise exceptions.SoftLayerError(message) from ex

    # Determine the category code to use for the order (and product package)
    order_type_is_saas, order_category_code = _get_order_type_and_category(
        service_offering,
        storage_type,
        volume_type
    )

    # Get the product package for the given category code
    package = get_package(manager, order_category_code)

    # Based on the storage type and product package, build up the complex type
    # and array of price codes to include in the order object
    base_type_name = 'SoftLayer_Container_Product_Order_Network_'
    if order_type_is_saas:
        complex_type = base_type_name + 'Storage_AsAService'
        if storage_type == 'performance':
            prices = [
                find_price_by_category(package, order_category_code),
                find_price_by_category(package, 'storage_' + volume_type),
                find_saas_perform_space_price(package, size),
                find_saas_perform_iops_price(package, size, iops)
            ]
            # Snapshot space is optional and priced against the volume's IOPS
            if snapshot_size is not None:
                prices.append(find_saas_snapshot_space_price(
                    package, snapshot_size, iops=iops))
        else:  # storage_type == 'endurance'
            prices = [
                find_price_by_category(package, order_category_code),
                find_price_by_category(package, 'storage_' + volume_type),
                find_saas_endurance_space_price(package, size, tier),
                find_saas_endurance_tier_price(package, tier)
            ]
            # Snapshot space is optional and priced against the volume's tier
            if snapshot_size is not None:
                prices.append(find_saas_snapshot_space_price(
                    package, snapshot_size, tier=tier))

    else:  # offering package is enterprise or performance
        if storage_type == 'performance':
            if volume_type == 'block':
                complex_type = base_type_name + 'PerformanceStorage_Iscsi'
            else:
                complex_type = base_type_name + 'PerformanceStorage_Nfs'
            prices = [
                find_price_by_category(package, order_category_code),
                find_perf_space_price(package, size),
                find_perf_iops_price(package, size, iops),
            ]
        else:  # storage_type == 'endurance'
            complex_type = base_type_name + 'Storage_Enterprise'
            prices = [
                find_price_by_category(package, order_category_code),
                find_price_by_category(package, 'storage_' + volume_type),
                find_ent_space_price(package, 'endurance', size, tier),
                find_ent_endurance_tier_price(package, tier),
            ]
            if snapshot_size is not None:
                prices.append(find_ent_space_price(
                    package, 'snapshot', snapshot_size, tier))

    # Build and return the order object
    order = {
        'complexType': complex_type,
        'packageId': package['id'],
        'prices': prices,
        'quantity': 1,
        'location': location_id,
        'useHourlyPricing': hourly_billing_flag
    }

    # SaaS containers additionally carry the size (and IOPS for performance)
    if order_type_is_saas:
        order['volumeSize'] = size
        if storage_type == 'performance':
            order['iops'] = iops

    return order
def _get_order_type_and_category(service_offering, storage_type, volume_type):
    """Resolve the order flavor for a storage order.

    :param service_offering: 'storage_as_a_service', 'enterprise' or 'performance'
    :param storage_type: 'endurance' or 'performance'
    :param volume_type: 'block' or 'file'
    :return: tuple of (order_type_is_saas, order_category_code)
    :raises SoftLayerError: if the offering/storage-type combination is invalid
    """
    # The modern STaaS offering covers every storage type with one category.
    if service_offering == 'storage_as_a_service':
        return True, 'storage_as_a_service'
    if service_offering == 'enterprise':
        # Legacy enterprise packages only exist for endurance storage.
        if storage_type != 'endurance':
            raise exceptions.SoftLayerError(
                "The requested offering package, '%s', is not available for "
                "the '%s' storage type." % (service_offering, storage_type))
        return False, 'storage_service_enterprise'
    if service_offering == 'performance':
        # Legacy performance packages only exist for performance storage,
        # with a separate category per protocol (iSCSI block vs NFS file).
        if storage_type != 'performance':
            raise exceptions.SoftLayerError(
                "The requested offering package, '%s', is not available for "
                "the '%s' storage type." % (service_offering, storage_type))
        if volume_type == 'block':
            return False, 'performance_storage_iscsi'
        return False, 'performance_storage_nfs'
    raise exceptions.SoftLayerError(
        "The requested service offering package is not valid. "
        "Please check the available options and try again.")
def prepare_replicant_order_object(manager, snapshot_schedule, location,
                                   tier, volume, volume_type):
    """Prepare the order object which is submitted to the placeOrder() method

    :param manager: The File or Block manager calling this function
    :param snapshot_schedule: The primary volume's snapshot schedule to use
        for replication (keyname without the 'SNAPSHOT_' prefix)
    :param location: The location for the ordered replicant volume
    :param tier: The tier (IOPS per GB) of the primary volume; if None it is
        looked up from the volume for endurance/enterprise orders
    :param volume: The primary volume as a SoftLayer_Network_Storage object
    :param volume_type: The type of the primary volume ('file' or 'block')
    :return: Returns the order object for the
        Product_Order service's placeOrder() method
    :raises SoftLayerError: if the volume or its snapshot space is pending
        cancellation, the datacenter is unknown, the billing category does not
        support replication, or snapshot capacity is missing
    """
    # Ensure the primary volume and snapshot space are not set for cancellation
    if 'billingItem' not in volume\
            or volume['billingItem'].get('cancellationDate'):
        raise exceptions.SoftLayerError(
            'This volume is set for cancellation; '
            'unable to order replicant volume')
    for child in volume['billingItem']['activeChildren']:
        if child['categoryCode'] == 'storage_snapshot_space'\
                and child.get('cancellationDate'):
            raise exceptions.SoftLayerError(
                'The snapshot space for this volume is set for '
                'cancellation; unable to order replicant volume')
    # Find the ID for the requested location
    try:
        location_id = get_location_id(manager, location)
    except ValueError as ex:
        message = "Invalid datacenter name specified. Please provide the lower case short name (e.g.: dal09)"
        raise exceptions.SoftLayerError(message) from ex
    # Get sizes and properties needed for the order
    volume_size = int(volume['capacityGb'])
    # The replica is ordered under the same billing category as the primary;
    # only SaaS and enterprise primaries can be replicated.
    billing_item_category_code = volume['billingItem']['categoryCode']
    if billing_item_category_code == 'storage_as_a_service':
        order_type_is_saas = True
    elif billing_item_category_code == 'storage_service_enterprise':
        order_type_is_saas = False
    else:
        raise exceptions.SoftLayerError(
            "A replicant volume cannot be ordered for a primary volume with a "
            "billing item category code of '%s'" % billing_item_category_code)
    if 'snapshotCapacityGb' in volume:
        snapshot_size = int(volume['snapshotCapacityGb'])
    else:
        raise exceptions.SoftLayerError(
            "Snapshot capacity not found for the given primary volume")
    # Resolve the schedule id from the full schedule keyname
    # (the caller passes the name without the 'SNAPSHOT_' prefix).
    snapshot_schedule_id = find_snapshot_schedule_id(
        volume,
        'SNAPSHOT_' + snapshot_schedule
    )
    # Use the volume's billing item category code to get the product package
    package = get_package(manager, billing_item_category_code)
    # Find prices based on the primary volume's type and billing item category
    if order_type_is_saas:  # 'storage_as_a_service' package
        complex_type = 'SoftLayer_Container_Product_Order_'\
                       'Network_Storage_AsAService'
        volume_storage_type = volume['storageType']['keyName']
        if 'ENDURANCE' in volume_storage_type:
            volume_is_performance = False
            if tier is None:
                tier = find_endurance_tier_iops_per_gb(volume)
            prices = [
                find_price_by_category(package, billing_item_category_code),
                find_price_by_category(package, 'storage_' + volume_type),
                find_saas_endurance_space_price(package, volume_size, tier),
                find_saas_endurance_tier_price(package, tier),
                find_saas_snapshot_space_price(
                    package, snapshot_size, tier=tier),
                find_saas_replication_price(package, tier=tier)
            ]
        elif 'PERFORMANCE' in volume_storage_type:
            # Performance replication requires Encryption at Rest support.
            if not _staas_version_is_v2_or_above(volume):
                raise exceptions.SoftLayerError(
                    "A replica volume cannot be ordered for this performance "
                    "volume since it does not support Encryption at Rest.")
            volume_is_performance = True
            iops = int(volume['provisionedIops'])
            prices = [
                find_price_by_category(package, billing_item_category_code),
                find_price_by_category(package, 'storage_' + volume_type),
                find_saas_perform_space_price(package, volume_size),
                find_saas_perform_iops_price(package, volume_size, iops),
                find_saas_snapshot_space_price(
                    package, snapshot_size, iops=iops),
                find_saas_replication_price(package, iops=iops)
            ]
        else:
            raise exceptions.SoftLayerError(
                "Storage volume does not have a valid storage type "
                "(with an appropriate keyName to indicate the "
                "volume is a PERFORMANCE or an ENDURANCE volume)")
    else:  # 'storage_service_enterprise' package
        complex_type = 'SoftLayer_Container_Product_Order_'\
                       'Network_Storage_Enterprise'
        volume_is_performance = False
        if tier is None:
            tier = find_endurance_tier_iops_per_gb(volume)
        prices = [
            find_price_by_category(package, billing_item_category_code),
            find_price_by_category(package, 'storage_' + volume_type),
            find_ent_space_price(package, 'endurance', volume_size, tier),
            find_ent_endurance_tier_price(package, tier),
            find_ent_space_price(package, 'snapshot', snapshot_size, tier),
            find_ent_space_price(package, 'replication', volume_size, tier)
        ]
    # Determine if hourly billing should be used; default to monthly when the
    # billing item carries no hourly flag.
    hourly_billing_flag = utils.lookup(volume, 'billingItem', 'hourlyFlag')
    if hourly_billing_flag is None:
        hourly_billing_flag = False
    # Build and return the order object
    replicant_order = {
        'complexType': complex_type,
        'packageId': package['id'],
        'prices': prices,
        'quantity': 1,
        'location': location_id,
        'originVolumeId': volume['id'],
        'originVolumeScheduleId': snapshot_schedule_id,
        'useHourlyPricing': hourly_billing_flag
    }
    # SaaS orders additionally carry the size (and IOPS for performance).
    if order_type_is_saas:
        replicant_order['volumeSize'] = volume_size
        if volume_is_performance:
            replicant_order['iops'] = iops
    return replicant_order
def prepare_duplicate_order_object(manager, origin_volume, iops, tier,
                                   duplicate_size, duplicate_snapshot_size,
                                   volume_type, hourly_billing_flag=False,
                                   dependent_duplicate=False):
    """Prepare the duplicate order to submit to SoftLayer_Product::placeOrder()

    :param manager: The File or Block manager calling this function
    :param origin_volume: The origin volume which is being duplicated
    :param iops: The IOPS for the duplicate volume (performance); if None the
        origin volume's provisioned IOPS are used
    :param tier: The tier level for the duplicate volume (endurance); if None
        the origin volume's tier is used
    :param duplicate_size: The requested size for the duplicate volume; if
        None the origin volume's size is used
    :param duplicate_snapshot_size: The size for the duplicate snapshot space;
        if None the origin snapshot space size is used
    :param volume_type: The type of the origin volume ('file' or 'block')
    :param hourly_billing_flag: Billing type, monthly (False) or hourly (True)
    :param dependent_duplicate: Duplicate type, normal (False) or dependent
        duplicate (True)
    :return: Returns the order object to be passed to the
        placeOrder() method of the Product_Order service
    :raises SoftLayerError: if the origin volume is cancelled, has no snapshot
        space, has no resolvable location, lacks Encryption at Rest support,
        or has an unrecognized storage type
    """
    # Verify that the origin volume has not been cancelled
    if 'billingItem' not in origin_volume:
        raise exceptions.SoftLayerError(
            "The origin volume has been cancelled; "
            "unable to order duplicate volume")
    # Verify that the origin volume has snapshot space (needed for duplication)
    if isinstance(utils.lookup(origin_volume, 'snapshotCapacityGb'), str):
        origin_snapshot_size = int(origin_volume['snapshotCapacityGb'])
    else:
        raise exceptions.SoftLayerError(
            "Snapshot space not found for the origin volume. "
            "Origin snapshot space is needed for duplication.")
    # Obtain the datacenter location ID for the duplicate
    if isinstance(utils.lookup(origin_volume, 'billingItem',
                               'location', 'id'), int):
        location_id = origin_volume['billingItem']['location']['id']
    else:
        raise exceptions.SoftLayerError(
            "Cannot find origin volume's location")
    # Ensure the origin volume is STaaS v2 or higher
    # and supports Encryption at Rest
    if not _staas_version_is_v2_or_above(origin_volume):
        raise exceptions.SoftLayerError(
            "This volume cannot be duplicated since it "
            "does not support Encryption at Rest.")
    # If no specific snapshot space was requested for the duplicate,
    # use the origin snapshot space size
    if duplicate_snapshot_size is None:
        duplicate_snapshot_size = origin_snapshot_size
    # Use the origin volume size if no size was specified for the duplicate
    if duplicate_size is None:
        duplicate_size = origin_volume['capacityGb']
    # Get the appropriate package for the order
    # ('storage_as_a_service' is currently used for duplicate volumes)
    package = get_package(manager, 'storage_as_a_service')
    # Determine the IOPS or tier level for the duplicate volume, along with
    # the type and prices for the order
    origin_storage_type = origin_volume['storageType']['keyName']
    if 'PERFORMANCE' in origin_storage_type:
        volume_is_performance = True
        if iops is None:
            iops = int(origin_volume.get('provisionedIops', 0))
            if iops <= 0:
                raise exceptions.SoftLayerError("Cannot find origin volume's provisioned IOPS")
        # Set up the price array for the order
        prices = [
            find_price_by_category(package, 'storage_as_a_service'),
            find_price_by_category(package, 'storage_' + volume_type),
            find_saas_perform_space_price(package, duplicate_size),
            find_saas_perform_iops_price(package, duplicate_size, iops),
        ]
        # Add the price code for snapshot space as well, unless 0 GB was given
        if duplicate_snapshot_size > 0:
            prices.append(find_saas_snapshot_space_price(
                package, duplicate_snapshot_size, iops=iops))
    elif 'ENDURANCE' in origin_storage_type:
        volume_is_performance = False
        if tier is None:
            tier = find_endurance_tier_iops_per_gb(origin_volume)
        # Set up the price array for the order
        prices = [
            find_price_by_category(package, 'storage_as_a_service'),
            find_price_by_category(package, 'storage_' + volume_type),
            find_saas_endurance_space_price(package, duplicate_size, tier),
            find_saas_endurance_tier_price(package, tier),
        ]
        # Add the price code for snapshot space as well, unless 0 GB was given
        if duplicate_snapshot_size > 0:
            prices.append(find_saas_snapshot_space_price(
                package, duplicate_snapshot_size, tier=tier))
    else:
        raise exceptions.SoftLayerError(
            "Origin volume does not have a valid storage type "
            "(with an appropriate keyName to indicate the "
            "volume is a PERFORMANCE or an ENDURANCE volume)")
    duplicate_order = {
        'complexType': 'SoftLayer_Container_Product_Order_'
                       'Network_Storage_AsAService',
        'packageId': package['id'],
        'prices': prices,
        'volumeSize': duplicate_size,
        'quantity': 1,
        'location': location_id,
        'duplicateOriginVolumeId': origin_volume['id'],
        'useHourlyPricing': hourly_billing_flag
    }
    # IOPS only apply to performance duplicates.
    if volume_is_performance:
        duplicate_order['iops'] = iops
    if dependent_duplicate:
        duplicate_order['isDependentDuplicateFlag'] = 1
    return duplicate_order
def prepare_modify_order_object(manager, volume, new_iops, new_tier, new_size):
    """Prepare the modification order to submit to SoftLayer_Product::placeOrder()

    :param manager: The File or Block manager calling this function
    :param volume: The volume which is being modified
    :param new_iops: The new IOPS for the volume (performance)
    :param new_tier: The new tier level for the volume (endurance)
    :param new_size: The requested new size for the volume
    :return: Returns the order object to be passed to the placeOrder() method of the Product_Order service
    :raises SoftLayerError: if the volume is cancelled, lacks Encryption at
        Rest support, has no property being modified, or has an unrecognized
        storage type
    """
    # Verify that the origin volume has not been cancelled
    if 'billingItem' not in volume:
        raise exceptions.SoftLayerError("The volume has been cancelled; unable to modify volume.")
    # Ensure the origin volume is STaaS v2 or higher and supports Encryption at Rest
    if not _staas_version_is_v2_or_above(volume):
        raise exceptions.SoftLayerError("This volume cannot be modified since it does not support Encryption at Rest.")
    # Get the appropriate package for the order ('storage_as_a_service' is currently used for modifying volumes)
    package = get_package(manager, 'storage_as_a_service')
    # Based on volume storage type, ensure at least one volume property is being modified,
    # use current values if some are not specified, and lookup price codes for the order
    volume_storage_type = volume['storageType']['keyName']
    if 'PERFORMANCE' in volume_storage_type:
        volume_is_performance = True
        if new_size is None and new_iops is None:
            raise exceptions.SoftLayerError("A size or IOPS value must be given to modify this performance volume.")
        # When only one of size/IOPS was supplied, fill the other from the
        # volume's current value so the order carries both properties.
        if new_size is None:
            new_size = volume['capacityGb']
        elif new_iops is None:
            new_iops = int(volume.get('provisionedIops', 0))
            if new_iops <= 0:
                raise exceptions.SoftLayerError("Cannot find volume's provisioned IOPS.")
        # Set up the prices array for the order
        prices = [
            find_price_by_category(package, 'storage_as_a_service'),
            find_saas_perform_space_price(package, new_size),
            find_saas_perform_iops_price(package, new_size, new_iops),
        ]
    elif 'ENDURANCE' in volume_storage_type:
        volume_is_performance = False
        if new_size is None and new_tier is None:
            raise exceptions.SoftLayerError("A size or tier value must be given to modify this endurance volume.")
        # Same back-fill logic as above, but for the endurance tier.
        if new_size is None:
            new_size = volume['capacityGb']
        elif new_tier is None:
            new_tier = find_endurance_tier_iops_per_gb(volume)
        # Set up the prices array for the order
        prices = [
            find_price_by_category(package, 'storage_as_a_service'),
            find_saas_endurance_space_price(package, new_size, new_tier),
            find_saas_endurance_tier_price(package, new_tier),
        ]
    else:
        raise exceptions.SoftLayerError("Volume does not have a valid storage type (with an appropriate "
                                        "keyName to indicate the volume is a PERFORMANCE or an ENDURANCE volume).")
    modify_order = {
        'complexType': 'SoftLayer_Container_Product_Order_Network_Storage_AsAService_Upgrade',
        'packageId': package['id'],
        'prices': prices,
        'volume': {'id': volume['id']},
        'volumeSize': new_size
    }
    # IOPS only apply to performance volumes.
    if volume_is_performance:
        modify_order['iops'] = new_iops
    return modify_order
def block_or_file(storage_type_keyname):
    """Classify a storage type keyname as 'block' or 'file'.

    :param storage_type_keyname: the Network_Storage['storageType']['keyName']
    :returns: 'block' when the keyname contains 'BLOCK_STORAGE', else 'file'
    """
    if 'BLOCK_STORAGE' in storage_type_keyname:
        return 'block'
    return 'file'
def _has_category(categories, category_code):
    """Return True if any category dict carries the given categoryCode."""
    for category in categories:
        if category['categoryCode'] == category_code:
            return True
    return False
def _staas_version_is_v2_or_above(volume):
    """Check that a volume is STaaS v2+ and supports Encryption at Rest.

    Returns False for STaaS v1 volumes; otherwise returns the volume's
    'hasEncryptionAtRest' value.
    """
    if int(volume['staasVersion']) <= 1:
        return False
    return volume['hasEncryptionAtRest']
def _find_price_id(prices, category, restriction_type=None, restriction_value=None):
    """Return {'id': <price id>} for the first standard price matching the category.

    Location-group-specific prices are skipped. When both restriction
    arguments are given, the price must also match the restriction type and
    contain the value within its capacity restriction bounds. Returns None
    when no price matches.
    """
    for price in prices:
        # Only collect prices from valid location groups.
        if price['locationGroupId']:
            continue
        if restriction_type is not None and restriction_value is not None:
            if restriction_type != price['capacityRestrictionType']:
                continue
            if restriction_value < int(price['capacityRestrictionMinimum']):
                continue
            if restriction_value > int(price['capacityRestrictionMaximum']):
                continue
        if not _has_category(price['categories'], category):
            continue
        return {'id': price['id']}
| mit | 4ae5ee2b8d22d8d357162d0372e0467c | 40.171845 | 119 | 0.637607 | 4.17803 | false | false | false | false |
softlayer/softlayer-python | SoftLayer/CLI/block/object_storage_permission.py | 2 | 1662 | """Display permission details for a cloud object storage."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
@click.command(cls=SoftLayer.CLI.command.SLCommand, )
@click.argument('object_id')
@environment.pass_env
def cli(env, object_id):
    """Display permission details for a cloud object storage."""
    block_manager = SoftLayer.BlockStorageManager(env.client)
    cloud = block_manager.get_network_message_delivery_accounts(object_id)
    end_points = block_manager.get_end_points(object_id)
    # Outer two-column table; the credential and endpoint tables nest inside it.
    table = formatting.Table(['Name', 'Value'])
    table_credentials = formatting.Table(['Id', 'Access Key ID', 'Secret Access Key', 'Description'])
    # Guard against a missing 'credentials' key (cloud.get() may return None).
    for credential in cloud.get('credentials') or []:
        table_credentials.add_row([credential.get('id'),
                                   credential.get('username'),
                                   credential.get('password'),
                                   credential['type']['description']])
    table_url = formatting.Table(['Region',
                                  'Location',
                                  'Type',
                                  'URL'])
    for end_point in end_points:
        table_url.add_row([end_point.get('region') or '',
                           end_point.get('location') or '',
                           end_point.get('type'),
                           end_point.get('url'), ])
    table.add_row(['UUID', cloud.get('uuid')])
    table.add_row(['Credentials', table_credentials])
    # Fixed label: previously rendered with a mis-encoded apostrophe ("URL´s").
    table.add_row(['EndPoint URLs', table_url])
    env.fout(table)
softlayer/softlayer-python | SoftLayer/CLI/hardware/list.py | 2 | 3126 | """List hardware servers."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import columns as column_helper
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.CLI import helpers
# pylint: disable=unnecessary-lambda
# Optional columns the user can request by name via the --columns option.
# Each Column maps a display name to a path (or callable) into the hardware
# record; a 'mask' keyword supplies the object-mask fragment needed to fetch
# the data for that column.
COLUMNS = [
    column_helper.Column('guid', ('globalIdentifier',)),
    column_helper.Column('primary_ip', ('primaryIpAddress',)),
    column_helper.Column('backend_ip', ('primaryBackendIpAddress',)),
    column_helper.Column('datacenter', ('datacenter', 'name')),
    column_helper.Column(
        'action',
        lambda server: formatting.active_txn(server),
        mask='activeTransaction[id, transactionStatus[name, friendlyName]]'),
    column_helper.Column(
        'created_by',
        ('billingItem', 'orderItem', 'order', 'userRecord', 'username')),
    column_helper.Column(
        'tags',
        lambda server: formatting.tags(server.get('tagReferences')),
        mask="tagReferences.tag.name"),
]
# Columns shown when the user does not pass --columns.
DEFAULT_COLUMNS = [
    'id',
    'hostname',
    'primary_ip',
    'backend_ip',
    'datacenter',
    'action',
]
@click.command(cls=SoftLayer.CLI.command.SLCommand, )
@click.option('--cpu', '-c', help='Filter by number of CPU cores')
@click.option('--domain', '-D', help='Filter by domain')
@click.option('--datacenter', '-d', help='Filter by datacenter')
@click.option('--hostname', '-H', help='Filter by hostname')
@click.option('--memory', '-m', help='Filter by memory in gigabytes')
@click.option('--network', '-n', help='Filter by network port speed in Mbps')
@helpers.multi_option('--tag', help='Filter by tags')
@click.option('--sortby', help='Column to sort by', default='hostname', show_default=True)
@click.option('--columns',
              callback=column_helper.get_formatter(COLUMNS),
              help='Columns to display. [options: %s]' % ', '.join(column.name for column in COLUMNS),
              default=','.join(DEFAULT_COLUMNS),
              show_default=True)
@click.option('--limit', '-l',
              help='How many results to get in one api call, default is 100',
              default=100,
              show_default=True)
@environment.pass_env
def cli(env, sortby, cpu, domain, datacenter, hostname, memory, network, tag, columns, limit):
    """List hardware servers."""
    hardware_manager = SoftLayer.HardwareManager(env.client)
    # Restrict the object mask to exactly the properties the chosen columns need.
    object_mask = "mask(SoftLayer_Hardware_Server)[%s]" % columns.mask()
    matching_servers = hardware_manager.list_hardware(hostname=hostname,
                                                      domain=domain,
                                                      cpus=cpu,
                                                      memory=memory,
                                                      datacenter=datacenter,
                                                      nic_speed=network,
                                                      tags=tag,
                                                      mask=object_mask,
                                                      limit=limit)
    results = formatting.Table(columns.columns)
    results.sortby = sortby
    for hw_server in matching_servers:
        # Substitute a placeholder for any empty cell value.
        row = [cell or formatting.blank() for cell in columns.row(hw_server)]
        results.add_row(row)
    env.fout(results)
| mit | bb1b7ffa2540fd347db4ca198d385d39 | 36.662651 | 102 | 0.599488 | 4.190349 | false | false | false | false |
softlayer/softlayer-python | SoftLayer/CLI/report/dc_closures.py | 2 | 5519 | """Report on Resources in closing datacenters"""
import click
from SoftLayer.CLI.command import SLCommand as SLCommand
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer import utils
@click.command(cls=SLCommand, short_help="""Report on Resources in closing datacenters""")
@environment.pass_env
def cli(env):
    """Report on Resources in closing datacenters

    Displays a list of Datacenters soon to be shut down, and any resources on
    the account in those locations.
    """
    # Only fetch pods whose capabilities include the closure announcement,
    # sorted by name descending.
    closing_filter = {
        'capabilities': {
            'operation': 'in',
            'options': [{'name': 'data', 'value': ['CLOSURE_ANNOUNCED']}]
        },
        'name': {
            'operation': 'orderBy',
            'options': [{'name': 'sort', 'value': ['DESC']}]
        }
    }
    mask = """mask[name, datacenterLongName, frontendRouterId, capabilities, datacenterId, backendRouterId,
            backendRouterName, frontendRouterName]"""
    closing_pods = env.client.call('SoftLayer_Network_Pod', 'getAllObjects', mask=mask, filter=closing_filter)
    # Find all VLANs in the POD that is going to close.
    search = "_objectType:SoftLayer_Network_Vlan primaryRouter.hostname: \"{}\" || primaryRouter.hostname: \"{}\""
    # Object mask for the search results: pull the VLAN details plus every
    # guest, hardware, firewall and gateway attached to the VLAN.
    resource_mask = """mask[
        resource(SoftLayer_Network_Vlan)[
            id,fullyQualifiedName,name,note,vlanNumber,networkSpace,
            virtualGuests[id,fullyQualifiedDomainName,billingItem[cancellationDate]],
            hardware[id,fullyQualifiedDomainName,billingItem[cancellationDate]],
            networkVlanFirewall[id,primaryIpAddress,billingItem[cancellationDate]],
            privateNetworkGateways[id,name,networkSpace],
            publicNetworkGateways[id,name,networkSpace]
        ]
    ]
    """
    table_title = "Resources in closing datacenters"
    resource_table = formatting.Table(["Id", "Name", "Public VLAN", "Private VLAN", "Type", "Datacenter",
                                      "POD", "Cancellation Date"], title=table_title)
    resource_table.align = 'l'
    for pod in closing_pods:
        resources = {'hardware': {}, 'virtual': {}, 'firewall': {}, 'gateway': {}}
        # Search VLANs homed on either of the pod's routers.
        vlans = env.client.call('SoftLayer_Search', 'advancedSearch',
                                search.format(pod.get('backendRouterName'), pod.get('frontendRouterName')),
                                iter=True, mask=resource_mask)
        # Go through the vlans and collate the resources into a data structure that is easy to print out
        for vlan in vlans:
            resources = process_vlan(vlan.get('resource', {}), resources)
        # Go through each resource and add it to the table
        for resource_type, resource_values in resources.items():
            for resource_id, resource_object in resource_values.items():
                resource_table.add_row([
                    resource_id,
                    resource_object['name'],
                    resource_object['vlan'].get('PUBLIC', '-'),
                    resource_object['vlan'].get('PRIVATE', '-'),
                    resource_type,
                    pod.get('datacenterLongName'),
                    pod.get('backendRouterName'),
                    resource_object['cancelDate']
                ])
    env.fout(resource_table)
# Helpers below collate the VLAN search results into the report structure.
def process_vlan(vlan, resources=None):
    """Collect the resources attached to a VLAN into the report structure.

    :param vlan: a SoftLayer_Network_Vlan record (as a dict) whose attached
        virtual guests, hardware, firewalls and gateways should be collected
    :param resources: an existing structure to accumulate into; a fresh one
        is created when None
    :return: dict keyed by resource type ('hardware', 'virtual', 'firewall',
        'gateway'), each mapping resource id -> report entry
    """
    if resources is None:
        resources = {'hardware': {}, 'virtual': {}, 'firewall': {}, 'gateway': {}}
    # (resource type, VLAN property holding the resources, property used as
    # the display name) — one row per kind of attached resource. The five
    # near-identical loops of the original are folded into this table.
    sections = [
        ('virtual', 'virtualGuests', 'fullyQualifiedDomainName'),
        ('hardware', 'hardware', 'fullyQualifiedDomainName'),
        ('firewall', 'networkVlanFirewall', 'primaryIpAddress'),
        ('gateway', 'privateNetworkGateways', 'name'),
        ('gateway', 'publicNetworkGateways', 'name'),
    ]
    for resource_type, vlan_property, name_property in sections:
        for obj_x in vlan.get(vlan_property, {}):
            existing = resources[resource_type].get(obj_x.get('id'))
            resources[resource_type][obj_x['id']] = build_resource_object(
                name_property, vlan, obj_x, existing)
    return resources
def build_resource_object(name_property, vlan, resource, entry):
    """Build (or merge into) a report entry for a resource found on a VLAN.

    :param name_property: which property of ``resource`` supplies the display name
    :param vlan: the vlan object the resource was found on
    :param resource: the resource record carrying the data to report
    :param entry: a previously-built entry for the same resource, if any
    :return: the new entry, or ``entry`` updated with this vlan's number
    """
    fresh_entry = {
        'id': resource.get('id'),
        'name': resource.get(name_property),
        'vlan': {vlan.get('networkSpace'): vlan.get('vlanNumber')},
        'cancelDate': utils.clean_time(utils.lookup(resource, 'billingItem', 'cancellationDate'))
    }
    if not entry:
        return fresh_entry
    # Resource already seen on another vlan: just record this vlan's number
    # under its network space (PUBLIC/PRIVATE) on the existing entry.
    entry['vlan'][vlan.get('networkSpace')] = vlan.get('vlanNumber')
    return entry
| mit | de4a75213f29bf452d7a5857313444ae | 42.801587 | 115 | 0.621671 | 3.9906 | false | false | false | false |
softlayer/softlayer-python | SoftLayer/fixtures/SoftLayer_Network_Storage_Hub_Cleversafe_Account.py | 2 | 2658 | credentialCreate = {
"accountId": "12345",
"createDate": "2019-04-05T13:25:25-06:00",
"id": 11111,
"password": "nwUEUsx6PiEoN0B1Xe9z9hUCyXMkAFhDOjHqYJva",
"username": "XfHhBNBPlPdlWyaPPJAI",
"type": {
"description": "A credential for generating S3 Compatible Signatures.",
"keyName": "S3_COMPATIBLE_SIGNATURE",
"name": "S3 Compatible Signature"
}
}
# Fixture data returned for getCredentials in tests: two identical credential
# records, each carrying an S3-compatible access key pair.
getCredentials = [
    {
        "accountId": "12345",
        "createDate": "2019-04-05T13:25:25-06:00",
        "id": 11111,
        "password": "nwUEUsx6PiEoN0B1Xe9z9hUCyXMkAFhDOjHqYJva",
        "username": "XfHhBNBPlPdlWyaPPJAI",
        "type": {
            "description": "A credential for generating S3 Compatible Signatures.",
            "keyName": "S3_COMPATIBLE_SIGNATURE",
            "name": "S3 Compatible Signature"
        }
    },
    {
        "accountId": "12345",
        "createDate": "2019-04-05T13:25:25-06:00",
        "id": 11111,
        "password": "nwUEUsx6PiEoN0B1Xe9z9hUCyXMkAFhDOjHqYJva",
        "username": "XfHhBNBPlPdlWyaPPJAI",
        "type": {
            "description": "A credential for generating S3 Compatible Signatures.",
            "keyName": "S3_COMPATIBLE_SIGNATURE",
            "name": "S3 Compatible Signature"
        }
    }
]
# Fixture data for getBuckets: a single bucket with usage statistics.
getBuckets = [
    {
        "bytesUsed": 40540117,
        "name": "normal-bucket",
        "objectCount": 4,
        "storageLocation": "us-standard"
    }
]
# Fixture data for getEndpoints: one public and one private endpoint.
getEndpoints = [
    {
        'legacy': False,
        'region': 'us-geo',
        'type': 'public',
        'url': 's3.us.cloud-object-storage.appdomain.cloud'
    },
    {
        'legacy': False,
        'region': 'us-geo',
        'type': 'private',
        'url': 's3.private.us.cloud-object-storage.appdomain.cloud'
    }
]
# Fixture value for getCredentialLimit.
getCredentialLimit = 2
# Fixture value for a successful credentialDelete call.
credentialDelete = True
# Fixture data for getObject: account record with two stored credentials.
getObject = {
    'id': 123456,
    'username': 'TEST307608-1',
    'credentials': [
        {
            'id': 1933496,
            'password': 'Um1Bp420FIFNvAg2QHjn5Sci2c2x4RNDXpVDDvnfsdsd1010',
            'username': 'Kv9aNIhtNa7ZRceabecs',
            'type': {
                'description': 'A credential for generating S3 Compatible Signatures.',
                'keyName': 'S3_COMPATIBLE_SIGNATURE'
            }
        },
        {
            'id': 1732820,
            'password': 'q6NtwqeuXDaRqGc0Jrugg2sDgbatyNsoN9sPEmjo',
            'username': '252r9BN8ibuDSQAXLOeL',
            'type': {
                'description': 'A credential for generating S3 Compatible Signatures.',
                'keyName': 'S3_COMPATIBLE_SIGNATURE',
            }
        }
    ],
    'uuid': '01c449c484ae4a58a42d9b79d4c5e4ed'
}
| mit | 01816d9c1008e9d56217cdda5047c5af | 27.580645 | 87 | 0.553047 | 3.010193 | false | false | false | false |
softlayer/softlayer-python | docs/conf.py | 3 | 8516 | # -*- coding: utf-8 -*-
#
# SoftLayer API Python Client documentation build configuration file, created
# by sphinx-quickstart on Fri Mar 22 11:08:48 2013.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.coverage',
              'sphinx.ext.viewcode',
              'sphinx_click.ext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SoftLayer API Python Client'
# Hack to avoid the "Redefining built-in 'copyright'" error from static
# analysis tools
globals()['copyright'] = u'2019, SoftLayer Technologies, Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version. The docs are always published as 'latest' rather
# than tracking the package version number.
version = 'latest'
# The full version, including alpha/beta/rc tags.
release = 'latest'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Read the Docs injects READTHEDOCS=True into the build environment; use its
# default theme there and the 'nature' theme for local builds.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    html_theme = 'default'
else:
    html_theme = 'nature'
    html_style = "style.css"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SoftLayerAPIPythonClientdoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
    ('index',
     'SoftLayerAPIPythonClient.tex',
     u'SoftLayer API Python Client Documentation',
     u'SoftLayer Technologies, Inc.',
     'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index',
     'softlayerapipythonclient',
     u'SoftLayer API Python Client Documentation',
     [u'SoftLayer Technologies, Inc.'],
     1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index',
     'SoftLayerAPIPythonClient',
     u'SoftLayer API Python Client Documentation',
     u'SoftLayer Technologies, Inc.',
     'SoftLayerAPIPythonClient',
     'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
softlayer/softlayer-python | SoftLayer/CLI/loadbal/ns_list.py | 2 | 1458 | """List active Netscaler devices."""
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer import utils
@click.command(cls=SoftLayer.CLI.command.SLCommand, )
@environment.pass_env
def cli(env):
    """List active Netscaler devices."""
    adc_manager = SoftLayer.LoadBalancerManager(env.client)
    devices = adc_manager.get_adcs()
    # Guard clause: nothing on the account, report and bail out.
    if not devices:
        env.fout("No Netscalers")
        return
    env.fout(generate_netscaler_table(devices))
def location_sort(location):
    """Return the datacenter longName of *location*; used as a sort key."""
    datacenter_name = utils.lookup(location, 'datacenter', 'longName')
    return datacenter_name
def generate_netscaler_table(netscalers):
    """Build a table from a list of SoftLayer_Network_Application_Delivery_Controller records."""
    columns = ['Id', 'Location', 'Name', 'Description', 'IP Address',
               'Management Ip', 'Bandwidth', 'Create Date']
    table = formatting.Table(columns, title="Netscalers")
    # Rows are ordered by datacenter long name.
    for device in sorted(netscalers, key=location_sort):
        row = [
            device.get('id'),
            utils.lookup(device, 'datacenter', 'longName'),
            device.get('name'),
            device.get('description'),
            device.get('primaryIpAddress'),
            device.get('managementIpAddress'),
            device.get('outboundPublicBandwidthUsage', 0),
            utils.clean_time(device.get('createDate')),
        ]
        table.add_row(row)
    return table
| mit | c587773eec8d0cc165e73b66b0981ecf | 31.4 | 106 | 0.657064 | 3.919355 | false | false | false | false |
softlayer/softlayer-python | SoftLayer/CLI/file/cancel.py | 2 | 1406 | """Cancel an existing file storage volume."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import formatting
@click.command(cls=SoftLayer.CLI.command.SLCommand, )
@click.argument('volume-id')
@click.option('--reason', help="An optional reason for cancellation")
@click.option('--immediate',
              is_flag=True,
              help="Cancels the file storage volume immediately instead "
                   "of on the billing anniversary")
@environment.pass_env
def cli(env, volume_id, reason, immediate):
    """Cancel an existing file storage volume."""
    file_storage_manager = SoftLayer.FileStorageManager(env.client)

    # Require interactive confirmation unless the user opted out.
    if not (env.skip_confirmations or formatting.no_going_back(volume_id)):
        raise exceptions.CLIAbort('Aborted')

    cancelled = file_storage_manager.cancel_file_volume(volume_id,
                                                        reason, immediate)

    if cancelled:
        if immediate:
            click.echo('File volume with id %s has been marked'
                       ' for immediate cancellation' % volume_id)
        else:
            click.echo('File volume with id %s has been marked'
                       ' for cancellation' % volume_id)
    else:
        # Fixed user-facing typo: message previously read "cancle".
        click.echo('Unable to cancel file volume %s' % volume_id)
| mit | e96c541b2f69f3e1e5459b409eff1518 | 35.051282 | 75 | 0.642959 | 4.23494 | false | false | false | false |
softlayer/softlayer-python | SoftLayer/CLI/file/access/authorize.py | 2 | 2239 | """Authorizes hosts on a specific file volume."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
MULTIPLE = '(Multiple allowed)'
@click.command(cls=SoftLayer.CLI.command.SLCommand, )
@click.argument('volume_id')
@click.option('--hardware-id', '-h', multiple=True,
              help='The id of one SoftLayer_Hardware to authorize ' + MULTIPLE)
@click.option('--virtual-id', '-v', multiple=True,
              help='The id of one SoftLayer_Virtual_Guest to authorize ' + MULTIPLE)
@click.option('--ip-address-id', '-i', multiple=True,
              help='The id of one SoftLayer_Network_Subnet_IpAddress'
                   ' to authorize ' + MULTIPLE)
@click.option('--ip-address', multiple=True,
              help='An IP address to authorize ' + MULTIPLE)
@click.option('--subnet-id', '-s', multiple=True,
              help='The id of one SoftLayer_Network_Subnet to authorize ' + MULTIPLE)
@environment.pass_env
def cli(env, volume_id, hardware_id, virtual_id, ip_address_id,
        ip_address, subnet_id):
    """Authorizes hosts to access a given volume"""
    file_manager = SoftLayer.FileStorageManager(env.client)
    ip_address_id_list = list(ip_address_id)

    # Convert literal IP addresses to their SoftLayer ids.
    # ``ip_address`` comes from a click ``multiple=True`` option, so it is
    # always a (possibly empty) tuple and never None.  Testing truthiness
    # instead of ``is not None`` also avoids constructing a NetworkManager
    # when no addresses were supplied.
    if ip_address:
        network_manager = SoftLayer.NetworkManager(env.client)
        for ip_address_value in ip_address:
            ip_address_object = network_manager.ip_lookup(ip_address_value)
            if ip_address_object == "":
                click.echo("IP Address not found on your account. Please confirm IP and try again.")
                raise exceptions.ArgumentError('Incorrect IP Address')
            ip_address_id_list.append(ip_address_object['id'])

    file_manager.authorize_host_to_volume(volume_id,
                                          hardware_id,
                                          virtual_id,
                                          ip_address_id_list,
                                          subnet_id)

    # If no exception was raised, the command succeeded
    click.echo('The specified hosts were authorized to access %s' % volume_id)
| mit | 799895fe6df627845fb149c3fa768d8d | 44.693878 | 100 | 0.624833 | 4.019749 | false | false | false | false |
softlayer/softlayer-python | SoftLayer/CLI/sshkey/add.py | 2 | 1321 | """Add a new SSH key."""
# :license: MIT, see LICENSE for more details.
from os import path
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
@click.command(cls=SoftLayer.CLI.command.SLCommand, )
@click.argument('label')
@click.option('--in-file', '-f',
              type=click.Path(exists=True),
              help="The id_rsa.pub file to import for this key")
@click.option('--key', '-k', help="The actual SSH key")
@click.option('--note', help="Extra note that will be associated with key")
@environment.pass_env
def cli(env, label, in_file, key, note):
    """Add a new SSH key."""
    # Exactly one source of key material must be given: a file or a literal.
    if in_file is None and key is None:
        raise exceptions.ArgumentError(
            'Either [-f | --in-file] or [-k | --key] arguments are required to add a key'
        )
    if in_file and key:
        raise exceptions.ArgumentError(
            '[-f | --in-file] is not allowed with [-k | --key]'
        )
    if key:
        key_text = key
    else:
        # The context manager closes the file on exit; the original code
        # also called key_file.close() redundantly inside the with-block.
        with open(path.expanduser(in_file), encoding="utf-8") as key_file:
            key_text = key_file.read().strip()
    mgr = SoftLayer.SshKeyManager(env.client)
    result = mgr.add_key(key_text, label, note)
    env.fout("SSH key added: %s" % result.get('fingerprint'))
| mit | 8ba82592c63b983c7ad383155c440700 | 29.72093 | 89 | 0.619228 | 3.503979 | false | false | false | false |
softlayer/softlayer-python | SoftLayer/CLI/ticket/summary.py | 2 | 1234 | """Summary info about tickets."""
# :license: MIT, see LICENSE for more details.
import click
from SoftLayer.CLI.command import SLCommand as SLCommand
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
@click.command(cls=SLCommand)
@environment.pass_env
def cli(env):
    """Summary info about tickets."""
    mask = ('openTicketCount, closedTicketCount, '
            'openBillingTicketCount, openOtherTicketCount, '
            'openSalesTicketCount, openSupportTicketCount, '
            'openAccountingTicketCount')
    account = env.client['Account'].getObject(mask=mask)

    # Breakdown of open tickets by type, rendered as a nested table.
    nested = formatting.Table(['Type', 'count'])
    breakdown = (
        ('Accounting', 'openAccountingTicketCount'),
        ('Billing', 'openBillingTicketCount'),
        ('Sales', 'openSalesTicketCount'),
        ('Support', 'openSupportTicketCount'),
        ('Other', 'openOtherTicketCount'),
        ('Total', 'openTicketCount'),
    )
    for label, count_key in breakdown:
        nested.add_row([label, account[count_key]])

    table = formatting.Table(['Status', 'count'])
    table.add_row(['Open', nested])
    table.add_row(['Closed', account['closedTicketCount']])
    env.fout(table)
| mit | 294b73e894fceb4faf8c976a6c15d035 | 35.294118 | 66 | 0.691248 | 3.955128 | false | false | false | false |
nugget/python-insteonplm | insteonplm/states/x10.py | 1 | 10877 | """X10 states."""
import logging
from insteonplm.messages.x10send import X10Send
from insteonplm.messages.x10received import X10Received
from insteonplm.states import State
from insteonplm.constants import (
X10_COMMAND_ALL_UNITS_OFF,
X10_COMMAND_ALL_LIGHTS_ON,
X10_COMMAND_ALL_LIGHTS_OFF,
X10_COMMAND_ON,
X10_COMMAND_OFF,
X10_COMMAND_DIM,
X10_COMMAND_BRIGHT,
)
_LOGGER = logging.getLogger(__name__)
class X10OnOffSwitch(State):
    """On / Off state for an X10 device.

    Outbound commands follow the two-message X10 pattern: an address
    message (house code + unit code) followed by a function message
    (house code + command).  Inbound ON/OFF/ALL-LIGHTS/ALL-UNITS commands
    for the same house code update the state value (0xFF = on, 0x00 = off).
    """

    def __init__(
        self,
        address,
        statename,
        group,
        send_message_method,
        message_callbacks,
        defaultvalue=None,
    ):
        """Init the X10OnOff state."""
        super().__init__(
            address,
            statename,
            group,
            send_message_method,
            message_callbacks,
            defaultvalue,
        )
        # Subscribe to the inbound X10 commands this state reacts to.
        self._register_messages()

    def on(self):
        """Send the On command to an X10 device."""
        # First message selects the unit (house code + unit code)...
        msg = X10Send.unit_code_msg(
            self.address.x10_housecode, self.address.x10_unitcode
        )
        self._send_method(msg)
        # ...second message carries the ON function for that house code.
        # NOTE(review): the second argument False presumably suppresses
        # waiting for an ACK on the function message -- confirm against
        # the _send_method implementation.
        msg = X10Send.command_msg(self.address.x10_housecode, X10_COMMAND_ON)
        self._send_method(msg, False)
        self._update_subscribers(0xFF)

    def off(self):
        """Send the Off command to an X10 device."""
        # Same two-message pattern as on(): address message, then OFF.
        msg = X10Send.unit_code_msg(
            self.address.x10_housecode, self.address.x10_unitcode
        )
        self._send_method(msg)
        msg = X10Send.command_msg(self.address.x10_housecode, X10_COMMAND_OFF)
        self._send_method(msg, False)
        self._update_subscribers(0x00)

    # pylint: disable=unused-argument
    def _on_message_received(self, msg):
        """Receive an ON message."""
        self._update_subscribers(0xFF)

    # pylint: disable=unused-argument
    def _off_message_received(self, msg):
        """Receive an OFF message."""
        self._update_subscribers(0x00)

    def _register_messages(self):
        """Register handlers for inbound X10 commands on this house code.

        ON and ALL LIGHTS ON map to the on-handler; OFF, ALL LIGHTS OFF and
        ALL UNITS OFF map to the off-handler.
        """
        on_msg = X10Received.command_msg(self.address.x10_housecode, X10_COMMAND_ON)
        off_msg = X10Received.command_msg(self.address.x10_housecode, X10_COMMAND_OFF)
        all_on_msg = X10Received.command_msg(
            self.address.x10_housecode, X10_COMMAND_ALL_LIGHTS_ON
        )
        all_off_msg = X10Received.command_msg(
            self.address.x10_housecode, X10_COMMAND_ALL_LIGHTS_OFF
        )
        all_units_off_msg = X10Received.command_msg(
            self.address.x10_housecode, X10_COMMAND_ALL_UNITS_OFF
        )
        self._message_callbacks.add(on_msg, self._on_message_received)
        self._message_callbacks.add(off_msg, self._off_message_received)
        self._message_callbacks.add(all_on_msg, self._on_message_received)
        self._message_callbacks.add(all_off_msg, self._off_message_received)
        self._message_callbacks.add(all_units_off_msg, self._off_message_received)
class X10DimmableSwitch(X10OnOffSwitch):
    """Dimmable X10 Switch.

    The full 0-255 level range is traversed in ``dim_steps`` discrete
    BRIGHT/DIM commands; set_level() computes how many steps separate the
    current tracked value from the target and issues that many commands.
    """

    def __init__(
        self,
        address,
        statename,
        group,
        send_message_method,
        message_callbacks,
        defaultvalue=0,
        dim_steps=22,
    ):
        """Init the Dimmable state.

        dim_steps: number of discrete BRIGHT/DIM steps between full OFF
        and full ON (default 22).
        """
        super().__init__(
            address,
            statename,
            group,
            send_message_method,
            message_callbacks,
            defaultvalue,
        )
        self._steps = dim_steps

    @property
    def steps(self):
        """Return the number of steps from OFF to full ON."""
        return self._steps

    @steps.setter
    def steps(self, val: int):
        """Set the number of steps from OFF to full ON."""
        self._steps = val

    def set_level(self, val):
        """Set the device ON LEVEL.

        0 and 255 map directly to off()/on().  Other values are reached by
        sending the required number of BRIGHT or DIM commands relative to
        the current tracked level.  Fractional values below 1 are treated
        as a proportion of full scale.
        """
        if val == 0:
            self.off()
        elif val == 255:
            self.on()
        else:
            setlevel = 255
            if val < 1:
                setlevel = val * 255
            elif val <= 0xFF:
                setlevel = val
            change = setlevel - self._value
            increment = 255 / self._steps
            steps = round(abs(change) / increment)
            # Fixed: replaced a stray debug print() with the module logger.
            _LOGGER.debug("Changing X10 dimmer level by %d steps", steps)
            if change > 0:
                method = self.brighten
                self._value += round(steps * increment)
                self._value = min(255, self._value)
            else:
                method = self.dim
                self._value -= round(steps * increment)
                self._value = max(0, self._value)
            # One BRIGHT/DIM command per step; subscriber notification is
            # deferred until all steps have been sent.
            for _ in range(steps):
                method(True)
            self._update_subscribers(self._value)

    def brighten(self, defer_update=False):
        """Brighten the device one step.

        defer_update: when True, skip the per-step subscriber update
        (used by set_level, which notifies once at the end).
        """
        msg = X10Send.unit_code_msg(
            self.address.x10_housecode, self.address.x10_unitcode
        )
        self._send_method(msg)
        msg = X10Send.command_msg(self.address.x10_housecode, X10_COMMAND_BRIGHT)
        self._send_method(msg, False)
        if not defer_update:
            self._update_subscribers(self._value + 255 / self._steps)

    def dim(self, defer_update=False):
        """Dim the device one step.

        defer_update: when True, skip the per-step subscriber update
        (used by set_level, which notifies once at the end).
        """
        msg = X10Send.unit_code_msg(
            self.address.x10_housecode, self.address.x10_unitcode
        )
        self._send_method(msg)
        msg = X10Send.command_msg(self.address.x10_housecode, X10_COMMAND_DIM)
        self._send_method(msg, False)
        if not defer_update:
            self._update_subscribers(self._value - 255 / self._steps)

    # pylint: disable=unused-argument
    def _dim_message_received(self, msg):
        """Drop the tracked level one step on an inbound DIM (floor 0)."""
        val = max(self._value - (255 / self._steps), 0)
        self._update_subscribers(val)

    # pylint: disable=unused-argument
    def _bright_message_received(self, msg):
        """Raise the tracked level one step on an inbound BRIGHT (cap 255)."""
        val = min(self._value + (255 / self._steps), 255)
        self._update_subscribers(val)

    def _register_messages(self):
        """Register DIM/BRIGHT handlers in addition to the base handlers."""
        super()._register_messages()
        dim_msg = X10Received.command_msg(self.address.x10_housecode, X10_COMMAND_DIM)
        bri_msg = X10Received.command_msg(
            self.address.x10_housecode, X10_COMMAND_BRIGHT
        )
        self._message_callbacks.add(dim_msg, self._dim_message_received)
        self._message_callbacks.add(bri_msg, self._bright_message_received)
class X10OnOffSensor(State):
    """On / Off state for an X10 sensor device.

    Tracks inbound X10 traffic on this house code: ON sets the value to
    0xFF; OFF and ALL UNITS OFF set it to 0x00.
    """

    def __init__(
        self,
        address,
        statename,
        group,
        send_message_method,
        message_callbacks,
        defaultvalue=None,
    ):
        """Init the X10OnOff state."""
        super().__init__(
            address,
            statename,
            group,
            send_message_method,
            message_callbacks,
            defaultvalue,
        )
        self._register_messages()

    # pylint: disable=unused-argument
    def _on_message_received(self, msg):
        """Set the state to ON (0xFF)."""
        self._update_subscribers(0xFF)

    # pylint: disable=unused-argument
    def _off_message_received(self, msg):
        """Set the state to OFF (0x00)."""
        self._update_subscribers(0x00)

    def _register_messages(self):
        """Subscribe to ON, OFF and ALL UNITS OFF for this house code."""
        housecode = self.address.x10_housecode
        routing = (
            (X10_COMMAND_ON, self._on_message_received),
            (X10_COMMAND_OFF, self._off_message_received),
            (X10_COMMAND_ALL_UNITS_OFF, self._off_message_received),
        )
        for command, handler in routing:
            self._message_callbacks.add(
                X10Received.command_msg(housecode, command), handler
            )
class X10AllUnitsOffSensor(State):
    """All Units Off state for an X10 device.

    Latches to 0x00 when an ALL UNITS OFF command is seen on this house
    code; reset() re-arms the state back to 0xFF.
    """

    def __init__(
        self,
        address,
        statename,
        group,
        send_message_method,
        message_callbacks,
        defaultvalue=0xFF,
    ):
        """Init the X10AllUnitsOff state."""
        super().__init__(
            address,
            statename,
            group,
            send_message_method,
            message_callbacks,
            defaultvalue,
        )
        self._register_messages()

    def reset(self):
        """Reset the state to ON."""
        self._update_subscribers(0xFF)

    # pylint: disable=unused-argument
    def _off_message_received(self, msg):
        """Latch the state to 0x00 on ALL UNITS OFF."""
        self._update_subscribers(0x00)

    def _register_messages(self):
        """Subscribe to the ALL UNITS OFF command for this house code."""
        trigger = X10Received.command_msg(
            self.address.x10_housecode, X10_COMMAND_ALL_UNITS_OFF
        )
        self._message_callbacks.add(trigger, self._off_message_received)
class X10AllLightsOnSensor(State):
    """All Lights On state for an X10 device.

    Latches to 0xFF when an ALL LIGHTS ON command is seen on this house
    code; reset() returns the state to 0x00.
    """

    def __init__(
        self,
        address,
        statename,
        group,
        send_message_method,
        message_callbacks,
        defaultvalue=0x00,
    ):
        """Init the X10AllLightsOn state."""
        super().__init__(
            address,
            statename,
            group,
            send_message_method,
            message_callbacks,
            defaultvalue,
        )
        self._register_messages()

    def reset(self):
        """Reset the state to OFF."""
        self._update_subscribers(0x00)

    # pylint: disable=unused-argument
    def _on_message_received(self, msg):
        """Latch the state to 0xFF on ALL LIGHTS ON."""
        self._update_subscribers(0xFF)

    def _register_messages(self):
        """Subscribe to the ALL LIGHTS ON command for this house code."""
        trigger = X10Received.command_msg(
            self.address.x10_housecode, X10_COMMAND_ALL_LIGHTS_ON
        )
        self._message_callbacks.add(trigger, self._on_message_received)
class X10AllLightsOffSensor(State):
    """All Lights Off state for an X10 device.

    Latches to 0x00 when an ALL LIGHTS OFF command is seen on this house
    code; reset() re-arms the state back to 0xFF.
    """

    def __init__(
        self,
        address,
        statename,
        group,
        send_message_method,
        message_callbacks,
        defaultvalue=0xFF,
    ):
        """Init the X10AllLightsOff state."""
        super().__init__(
            address,
            statename,
            group,
            send_message_method,
            message_callbacks,
            defaultvalue,
        )
        self._register_messages()

    def reset(self):
        """Reset the state to ON."""
        self._update_subscribers(0xFF)

    # pylint: disable=unused-argument
    def _off_message_received(self, msg):
        """Latch the state to 0x00 on ALL LIGHTS OFF."""
        self._update_subscribers(0x00)

    def _register_messages(self):
        """Subscribe to the ALL LIGHTS OFF command for this house code."""
        trigger = X10Received.command_msg(
            self.address.x10_housecode, X10_COMMAND_ALL_LIGHTS_OFF
        )
        self._message_callbacks.add(trigger, self._off_message_received)
| mit | e796fa26eff7ada0407071a9328dd7a3 | 28.557065 | 86 | 0.575526 | 3.803147 | false | false | false | false |
nugget/python-insteonplm | insteonplm/devices/dimmableLightingControl.py | 1 | 5358 | """INSTEON Device Type Dimmable Lighting Control Module."""
from insteonplm.devices import Device
from insteonplm.states.dimmable import (
DimmableSwitch,
DimmableSwitch_Fan,
DimmableKeypadA,
)
from insteonplm.states.onOff import OnOffKeypad, OnOffKeypadLed
class DimmableLightingControl(Device):
    """Dimmable Lighting Controller.

    INSTEON On/Off switch device class.  Available device control options:

    - light_on(onlevel=0xff)
    - light_on_fast(onlevel=0xff)
    - light_off()
    - light_off_fast()

    To monitor state changes subscribe to the state monitor:
    lightOnLevel.connect(callback), where callback is defined as
    callback(self, device_id, state, state_value).
    """

    def __init__(
        self, plm, address, cat, subcat, product_key=None, description=None, model=None
    ):
        """Init the DimmableLightingControl Class."""
        Device.__init__(
            self, plm, address, cat, subcat, product_key, description, model
        )
        # Group 0x01 holds the single dimmer state for this device.
        dimmer = DimmableSwitch(
            self._address,
            "lightOnLevel",
            0x01,
            self._send_msg,
            self._message_callbacks,
            0x00,
        )
        self._stateList[0x01] = dimmer
class DimmableLightingControl_2475F(DimmableLightingControl):
    """FanLinc model 2475F Dimmable Lighting Control.

    Device Class 0x01 subcat 0x2e

    Two separate INSTEON On/Off switch devices are created with ID

    1) Light
       - ID: xxxxxx (where xxxxxx is the Insteon address of the device)
       - Controls:
           - light_on(onlevel=0xff)
           - light_on_fast(onlevel=0xff)
           - light_off()
           - light_off_fast()
       - Monitor: lightOnLevel.connect(callback)

    2) Fan
       - ID: xxxxxx_2 (where xxxxxx is the Insteon address of the device)
       - Controls:
           - fan_on(onlevel=0xff)
           - fan_off()
           - light_on(onlevel=0xff)  - Same as fan_on(onlevel=0xff)
           - light_off()  - Same as fan_off()
       - Monitor: fanSpeed.connect(callback)

    where callback defined as:
        - callback(self, device_id, state, state_value)
    """

    def __init__(
        self, plm, address, cat, subcat, product_key=None, description=None, model=None
    ):
        """Init the DimmableLightingControl_2475F Class."""
        super().__init__(plm, address, cat, subcat, product_key, description, model)
        # Group 0x01: the dimmable light circuit.
        self._stateList[0x01] = DimmableSwitch(
            self._address,
            "lightOnLevel",
            0x01,
            self._send_msg,
            self._message_callbacks,
            0x00,
        )
        # Group 0x02: the fan motor, exposed as a second dimmable state.
        self._stateList[0x02] = DimmableSwitch_Fan(
            self._address,
            "fanOnLevel",
            0x02,
            self._send_msg,
            self._message_callbacks,
            0x00,
        )
class DimmableLightingControl_2334_222(Device):
    """On/Off KeypadLinc Switched Lighting Control.

    Base class for the 6- and 8-button KeypadLinc dimmers: sets up the LED
    state and the main (group 0x01) dimmable button; subclasses add the
    remaining buttons via _add_buttons().
    """

    def __init__(
        self, plm, address, cat, subcat, product_key=None, description=None, model=None
    ):
        """Init the KeypadLinc dimmable device class."""
        super().__init__(plm, address, cat, subcat, product_key, description, model)
        # Shared LED state used by every button on the keypad.
        self._leds = OnOffKeypadLed(
            self._address,
            "keypadLEDs",
            0x00,
            self._send_msg,
            self._message_callbacks,
            0x00,
            self._plm.loop,
        )
        self._stateList[0x01] = DimmableKeypadA(
            self._address,
            "keypadButtonMain",
            0x01,
            self._send_msg,
            self._message_callbacks,
            0x00,
            self._leds,
        )

    def _add_buttons(self, button_list):
        """Create an OnOffKeypad state for each (group -> letter) entry."""
        for group, letter in button_list.items():
            button = OnOffKeypad(
                self._address,
                "keypadButton{}".format(letter),
                group,
                self._send_msg,
                self._message_callbacks,
                0x00,
                self._plm.loop,
                self._leds,
            )
            self._stateList[group] = button
            # Keep the button state in sync with LED changes.
            self._leds.register_led_updates(button.led_changed, group)
class DimmableLightingControl_2334_222_8(DimmableLightingControl_2334_222):
    """Dimmable 8 Button KeypadLinc Switched Lighting Control."""

    def __init__(
        self, plm, address, cat, subcat, product_key=None, description=None, model=None
    ):
        """Init the DimmableLightingControl_2334_222_8 device class."""
        super().__init__(plm, address, cat, subcat, product_key, description, model)
        # Buttons B-H occupy groups 2-8; the main button (group 1) is
        # created by the base class.
        button_list = {2: "B", 3: "C", 4: "D", 5: "E", 6: "F", 7: "G", 8: "H"}
        self._add_buttons(button_list)
class DimmableLightingControl_2334_222_6(DimmableLightingControl_2334_222):
    """Dimmable 6 Button KeypadLinc Switched Lighting Control."""

    def __init__(
        self, plm, address, cat, subcat, product_key=None, description=None, model=None
    ):
        """Init the DimmableLightingControl_2334_222_6 device class."""
        super().__init__(plm, address, cat, subcat, product_key, description, model)
        # On the 6-button keypad, groups 3-6 map to buttons A-D; the main
        # button (group 1) is created by the base class.
        button_list = {3: "A", 4: "B", 5: "C", 6: "D"}
        self._add_buttons(button_list)
| mit | 40a3b302d6ad9e9f6cd7a8ad2d9dec1b | 31.472727 | 87 | 0.571669 | 3.639946 | false | false | false | false |
nugget/python-insteonplm | insteonplm/messages/messageFlags.py | 1 | 9721 | """Message Flags class."""
import logging
import binascii
from insteonplm.constants import (
MESSAGE_FLAG_EXTENDED_0X10,
MESSAGE_TYPE_ALL_LINK_BROADCAST,
MESSAGE_TYPE_ALL_LINK_CLEANUP,
MESSAGE_TYPE_ALL_LINK_CLEANUP_ACK,
MESSAGE_TYPE_ALL_LINK_CLEANUP_NAK,
MESSAGE_TYPE_BROADCAST_MESSAGE,
MESSAGE_TYPE_DIRECT_MESSAGE_ACK,
MESSAGE_TYPE_DIRECT_MESSAGE_NAK,
)
_LOGGER = logging.getLogger(__name__)
class MessageFlags:
    """Message Flags class use in Standard and Extended messages.

    Wraps the single INSTEON flag byte.  Bit layout (see _set_properties):

        bits 7-5: message type (0-7)
        bit 4:    extended flag (1 = extended message)
        bits 3-2: hops left
        bits 1-0: max hops

    A field left as None acts as a wildcard when the instance is used as a
    match template (see matches_pattern and template()).
    """

    def __init__(self, flags=0x00):
        """Init the MessageFlags class."""
        # Fields default to None (wildcard); they are populated from the
        # flag byte below when one is supplied.
        self._messageType = None
        self._extended = None
        self._hopsLeft = None
        self._hopsMax = None
        if flags is not None:
            self._set_properties(flags)

    def __repr__(self):
        """Representation of the message flags."""
        return self.hex

    def __str__(self):
        """Return a string representation of message flags."""
        return self.hex

    def __eq__(self, other):
        """Test for equality.

        Only message type and the extended flag are compared; the hops
        counters are intentionally ignored.
        """
        if hasattr(other, "messageType"):
            is_eq = self._messageType == other.messageType
            is_eq = is_eq and self._extended == other.extended
            return is_eq
        return False

    def __ne__(self, other):
        """Test for not equals."""
        if hasattr(other, "messageType"):
            return not self.__eq__(other)
        return True

    def matches_pattern(self, other):
        """Test if the current flags match a pattern or template.

        A None message type or extended flag on either side acts as a
        wildcard for that field.
        """
        if hasattr(other, "messageType"):
            messageTypeIsEqual = False
            if self.messageType is None or other.messageType is None:
                messageTypeIsEqual = True
            else:
                messageTypeIsEqual = self.messageType == other.messageType
            extendedIsEqual = False
            if self.extended is None or other.extended is None:
                extendedIsEqual = True
            else:
                extendedIsEqual = self.extended == other.extended
            return messageTypeIsEqual and extendedIsEqual
        return False

    @classmethod
    def get_properties(cls):
        """Get all properties of the MessageFlags class."""
        property_names = [p for p in dir(cls) if isinstance(getattr(cls, p), property)]
        return property_names

    @property
    def isBroadcast(self):
        """Test if the message is a broadcast message type."""
        # NOTE(review): this is a bit-mask test, not an equality test, so
        # any type with the broadcast bit set (e.g. ALL-Link broadcast)
        # also satisfies it -- confirm that is the intended semantics.
        return (
            self._messageType & MESSAGE_TYPE_BROADCAST_MESSAGE
            == MESSAGE_TYPE_BROADCAST_MESSAGE
        )

    @property
    def isDirect(self):
        """Test if the message is a direct message type."""
        direct = self._messageType == 0x00
        # Direct ACK/NAK replies are counted as direct messages as well.
        if self.isDirectACK or self.isDirectNAK:
            direct = True
        return direct

    @property
    def isDirectACK(self):
        """Test if the message is a direct ACK message type."""
        return self._messageType == MESSAGE_TYPE_DIRECT_MESSAGE_ACK

    @property
    def isDirectNAK(self):
        """Test if the message is a direct NAK message type."""
        return self._messageType == MESSAGE_TYPE_DIRECT_MESSAGE_NAK

    @property
    def isAllLinkBroadcast(self):
        """Test if the message is an ALl-Link broadcast message type."""
        return self._messageType == MESSAGE_TYPE_ALL_LINK_BROADCAST

    @property
    def isAllLinkCleanup(self):
        """Test if the message is a All-Link cleanup message type."""
        return self._messageType == MESSAGE_TYPE_ALL_LINK_CLEANUP

    @property
    def isAllLinkCleanupACK(self):
        """Test if the message is a All-LInk cleanup ACK message type."""
        return self._messageType == MESSAGE_TYPE_ALL_LINK_CLEANUP_ACK

    @property
    def isAllLinkCleanupNAK(self):
        """Test if the message is a All-Link cleanup NAK message type."""
        return self._messageType == MESSAGE_TYPE_ALL_LINK_CLEANUP_NAK

    @property
    def isExtended(self):
        """Test if the message is an extended message type."""
        return self._extended == 1

    @property
    def hopsLeft(self):
        """Return the number of hops left in message the trasport."""
        return self._hopsLeft

    @property
    def hopsMax(self):
        """Return the maximum number of hops allowed for this message."""
        return self._hopsMax

    @hopsMax.setter
    def hopsMax(self, val):
        """Set the maximum number of hops allowed for this message."""
        self._hopsMax = val

    @property
    def messageType(self):
        """Return the message type."""
        return self._messageType

    @messageType.setter
    def messageType(self, val):
        """Set the message type.

        Raises ValueError for values outside 0-7.
        """
        if val in range(0, 8):
            self._messageType = val
        else:
            raise ValueError

    @property
    def extended(self):
        """Return the extended flag."""
        return self._extended

    @extended.setter
    def extended(self, val):
        """Set the extended flag.

        Raises ValueError for values other than None, 0 or 1.
        """
        if val in [None, 0, 1]:
            self._extended = val
        else:
            raise ValueError

    # pylint: disable=protected-access
    @classmethod
    def create(cls, messageType, extended, hopsleft=3, hopsmax=3):
        """Create message flags.

        messageType: integter 0 to 7:
            MESSAGE_TYPE_DIRECT_MESSAGE = 0
            MESSAGE_TYPE_DIRECT_MESSAGE_ACK = 1
            MESSAGE_TYPE_ALL_LINK_CLEANUP = 2
            MESSAGE_TYPE_ALL_LINK_CLEANUP_ACK = 3
            MESSAGE_TYPE_BROADCAST_MESSAGE = 4
            MESSAGE_TYPE_DIRECT_MESSAGE_NAK = 5
            MESSAGE_TYPE_ALL_LINK_BROADCAST = 6
            MESSAGE_TYPE_ALL_LINK_CLEANUP_NAK = 7
        extended: 1 for extended, 0 for standard
        hopsleft: int 0 - 3
        hopsmax:  int 0 - 3
        """
        flags = MessageFlags(None)
        # Values >= 8 are assumed to be a full flag byte; shift the type
        # bits (7-5) down to a 0-7 value.
        if messageType < 8:
            flags._messageType = messageType
        else:
            flags._messageType = messageType >> 5
        if extended in [0, 1, True, False]:
            if extended:
                flags._extended = 1
            else:
                flags._extended = 0
        else:
            # Assume a full flag byte; shift the extended bit (4) down.
            flags._extended = extended >> 4
        flags._hopsLeft = hopsleft
        flags._hopsMax = hopsmax
        return flags

    @classmethod
    def template(cls, messageType=None, extended=None, hopsleft=None, hopsmax=None):
        """Create message flags template.

        messageType: integter 0 to 7 or None:
            MESSAGE_TYPE_DIRECT_MESSAGE = 0
            MESSAGE_TYPE_DIRECT_MESSAGE_ACK = 1
            MESSAGE_TYPE_ALL_LINK_CLEANUP = 2
            MESSAGE_TYPE_ALL_LINK_CLEANUP_ACK = 3
            MESSAGE_TYPE_BROADCAST_MESSAGE = 4
            MESSAGE_TYPE_DIRECT_MESSAGE_NAK = 5
            MESSAGE_TYPE_ALL_LINK_BROADCAST = 6
            MESSAGE_TYPE_ALL_LINK_CLEANUP_NAK = 7
        extended: 1 for extended, 0 for standard or None
        hopsleft: int 0 - 3
        hopsmax:  int 0 - 3

        None values act as wildcards in matches_pattern().
        """
        flags = MessageFlags(None)
        if messageType is None:
            flags._messageType = None
        elif messageType < 8:
            flags._messageType = messageType
        else:
            flags._messageType = messageType >> 5
        if extended is None:
            flags._extended = None
        elif extended in [0, 1, True, False]:
            if extended:
                flags._extended = 1
            else:
                flags._extended = 0
        else:
            flags._extended = extended >> 4
        flags._hopsLeft = hopsleft
        flags._hopsMax = hopsmax
        return flags

    @property
    def bytes(self):
        """Return a byte representation of the message flags.

        None fields are encoded as 0.
        """
        flagByte = 0x00
        messageType = 0
        if self._messageType is not None:
            messageType = self._messageType << 5
        extendedBit = 0 if self._extended is None else self._extended << 4
        hopsMax = 0 if self._hopsMax is None else self._hopsMax
        hopsLeft = 0 if self._hopsLeft is None else (self._hopsLeft << 2)
        flagByte = flagByte | messageType | extendedBit | hopsLeft | hopsMax
        return bytes([flagByte])

    @property
    def hex(self):
        """Return a hexadecimal representation of the message flags."""
        return binascii.hexlify(self.bytes).decode()

    # pylint: disable=no-self-use
    def _normalize(self, flags):
        """Take any format of flags and turn it into a hex string.

        Accepts MessageFlags, bytearray, int, bytes, a hex string, or
        None; anything else is logged and treated as None.
        """
        norm = None
        if isinstance(flags, MessageFlags):
            norm = flags.bytes
        elif isinstance(flags, bytearray):
            norm = binascii.hexlify(flags)
        elif isinstance(flags, int):
            norm = bytes([flags])
        elif isinstance(flags, bytes):
            norm = binascii.hexlify(flags)
        elif isinstance(flags, str):
            # Only the first two hex characters (one byte) are used.
            flags = flags[0:2]
            norm = binascii.hexlify(binascii.unhexlify(flags.lower()))
        elif flags is None:
            norm = None
        else:
            _LOGGER.warning("MessageFlags with unknown type %s: %r", type(flags), flags)
        return norm

    def _set_properties(self, flags):
        """Set the properties of the message flags based on a byte input.

        Decodes the bit layout documented on the class; a None input
        leaves every field as a wildcard.
        """
        flagByte = self._normalize(flags)
        if flagByte is not None:
            self._messageType = (flagByte[0] & 0xE0) >> 5
            self._extended = (flagByte[0] & MESSAGE_FLAG_EXTENDED_0X10) >> 4
            self._hopsLeft = (flagByte[0] & 0x0C) >> 2
            self._hopsMax = flagByte[0] & 0x03
        else:
            self._messageType = None
            self._extended = None
            self._hopsLeft = None
            self._hopsMax = None
| mit | d81a254da1ce497e2eb3c460e5a98a98 | 32.405498 | 88 | 0.587285 | 4.120814 | false | false | false | false |
nugget/python-insteonplm | insteonplm/states/cover.py | 1 | 7686 | """Window Coverings states."""
import logging
from insteonplm.constants import (
COMMAND_LIGHT_INSTANT_CHANGE_0X21_NONE,
COMMAND_LIGHT_MANUALLY_TURNED_OFF_0X22_0X00,
COMMAND_LIGHT_MANUALLY_TURNED_ON_0X23_0X00,
COMMAND_LIGHT_OFF_0X13_0X00,
COMMAND_LIGHT_OFF_FAST_0X14_0X00,
COMMAND_LIGHT_ON_0X11_NONE,
COMMAND_LIGHT_ON_FAST_0X12_NONE,
COMMAND_LIGHT_STATUS_REQUEST_0X19_0X00,
COMMAND_LIGHT_STOP_MANUAL_CHANGE_0X18_0X00,
MESSAGE_TYPE_ALL_LINK_BROADCAST,
)
from insteonplm.messages.standardSend import StandardSend
from insteonplm.messages.standardReceive import StandardReceive
from insteonplm.messages.messageFlags import MessageFlags
from insteonplm.states import State
_LOGGER = logging.getLogger(__name__)
class Cover(State):
"""Device state representing cover that is controllable.
Available methods are:
open()
close()
set_position()
async_refresh_state()
"""
def __init__(
self,
address,
statename,
group,
send_message_method,
message_callbacks,
defaultvalue=None,
):
"""Init the Cover Class."""
super().__init__(
address,
statename,
group,
send_message_method,
message_callbacks,
defaultvalue,
)
self._updatemethod = self._send_status_request
self._register_messages()
def _register_messages(self):
_LOGGER.debug("Registering callbacks for Cover device %s", self._address.human)
template_on_broadcast = StandardReceive.template(
commandtuple=COMMAND_LIGHT_ON_0X11_NONE,
address=self._address,
target=bytearray([0x00, 0x00, self._group]),
flags=MessageFlags.template(MESSAGE_TYPE_ALL_LINK_BROADCAST, None),
)
template_on_fast_broadcast = StandardReceive.template(
commandtuple=COMMAND_LIGHT_ON_FAST_0X12_NONE,
address=self._address,
target=bytearray([0x00, 0x00, self._group]),
flags=MessageFlags.template(MESSAGE_TYPE_ALL_LINK_BROADCAST, None),
)
template_off_broadcast = StandardReceive.template(
commandtuple=COMMAND_LIGHT_OFF_0X13_0X00,
address=self._address,
target=bytearray([0x00, 0x00, self._group]),
flags=MessageFlags.template(MESSAGE_TYPE_ALL_LINK_BROADCAST, None),
cmd2=None,
)
template_off_fast_broadcast = StandardReceive.template(
commandtuple=COMMAND_LIGHT_OFF_FAST_0X14_0X00,
address=self._address,
target=bytearray([0x00, 0x00, self._group]),
flags=MessageFlags.template(MESSAGE_TYPE_ALL_LINK_BROADCAST, None),
cmd2=None,
)
template_manual_broadcast = StandardReceive.template(
commandtuple=COMMAND_LIGHT_STOP_MANUAL_CHANGE_0X18_0X00,
address=self._address,
target=bytearray([0x00, 0x00, self._group]),
flags=MessageFlags.template(MESSAGE_TYPE_ALL_LINK_BROADCAST, None),
cmd2=None,
)
template_instant_broadcast = StandardReceive.template(
commandtuple=COMMAND_LIGHT_INSTANT_CHANGE_0X21_NONE,
address=self._address,
target=bytearray([0x00, 0x00, self._group]),
flags=MessageFlags.template(MESSAGE_TYPE_ALL_LINK_BROADCAST, None),
cmd2=None,
)
template_manual_off_broadcast = StandardReceive.template(
commandtuple=COMMAND_LIGHT_MANUALLY_TURNED_OFF_0X22_0X00,
address=self._address,
target=bytearray([0x00, 0x00, self._group]),
flags=MessageFlags.template(MESSAGE_TYPE_ALL_LINK_BROADCAST, None),
cmd2=None,
)
template_manual_on_broadcast = StandardReceive.template(
commandtuple=COMMAND_LIGHT_MANUALLY_TURNED_ON_0X23_0X00,
address=self._address,
target=bytearray([0x00, 0x00, self._group]),
flags=MessageFlags.template(MESSAGE_TYPE_ALL_LINK_BROADCAST, None),
cmd2=None,
)
self._message_callbacks.add(template_on_broadcast, self._open_message_received)
self._message_callbacks.add(
template_on_fast_broadcast, self._open_message_received
)
self._message_callbacks.add(
template_off_broadcast, self._closed_message_received
)
self._message_callbacks.add(
template_off_fast_broadcast, self._closed_message_received
)
self._message_callbacks.add(
template_manual_broadcast, self._manual_change_received
)
self._message_callbacks.add(
template_instant_broadcast, self._manual_change_received
)
self._message_callbacks.add(
template_manual_off_broadcast, self._manual_change_received
)
self._message_callbacks.add(
template_manual_on_broadcast, self._manual_change_received
)
def open(self):
"""Turn the device ON."""
open_command = StandardSend(
self._address, COMMAND_LIGHT_ON_0X11_NONE, cmd2=0xFF
)
self._send_method(open_command, self._open_message_received)
def open_fast(self):
"""Turn the device ON Fast."""
open_command = StandardSend(
self._address, COMMAND_LIGHT_ON_FAST_0X12_NONE, cmd2=0xFF
)
self._send_method(open_command, self._open_message_received)
def close(self):
"""Turn the device off."""
close_command = StandardSend(self._address, COMMAND_LIGHT_OFF_0X13_0X00)
self._send_method(close_command, self._closed_message_received)
def close_fast(self):
"""Turn the device off."""
close_command = StandardSend(self._address, COMMAND_LIGHT_OFF_FAST_0X14_0X00)
self._send_method(close_command, self._closed_message_received)
def set_position(self, val):
"""Set the devive OPEN LEVEL."""
if val == 0:
self.close()
else:
setlevel = 255
if val < 1:
setlevel = val * 100
elif val <= 0xFF:
setlevel = val
set_command = StandardSend(
self._address, COMMAND_LIGHT_ON_0X11_NONE, cmd2=setlevel
)
self._send_method(set_command, self._open_message_received)
def set_position_fast(self, val):
"""Set the devive OPEN LEVEL."""
if val == 0:
self.close_fast()
else:
setlevel = 255
if val < 1:
setlevel = val * 100
elif val <= 0xFF:
setlevel = val
set_command = StandardSend(
self._address, COMMAND_LIGHT_ON_FAST_0X12_NONE, cmd2=setlevel
)
self._send_method(set_command, self._open_message_received)
def _open_message_received(self, msg):
cmd2 = msg.cmd2 if msg.cmd2 else 255
self._update_subscribers(cmd2)
    # pylint: disable=unused-argument
    def _closed_message_received(self, msg):
        """Publish a fully-closed level (0x00) when an OFF message arrives."""
        self._update_subscribers(0x00)
    # pylint: disable=unused-argument
    def _manual_change_received(self, msg):
        """After a manual/instant change, query the device for its real level."""
        self._send_status_request()
def _send_status_request(self):
status_command = StandardSend(
self._address, COMMAND_LIGHT_STATUS_REQUEST_0X19_0X00
)
self._send_method(status_command, self._status_message_received)
    def _status_message_received(self, msg):
        """Handle a status response; cmd2 carries the current open level."""
        _LOGGER.debug("Cover status message received called")
        self._update_subscribers(msg.cmd2)
| mit | f5f882b1b558d1085f3fed73013fc282 | 35.6 | 87 | 0.611632 | 3.808722 | false | false | false | false |
nugget/python-insteonplm | insteonplm/messages/__init__.py | 1 | 7319 | """INSTEON Messages Module."""
import logging
import binascii
from insteonplm.constants import (
MESSAGE_ALL_LINK_CEANUP_FAILURE_REPORT_0X56,
MESSAGE_ALL_LINK_CLEANUP_STATUS_REPORT_0X58,
MESSAGE_ALL_LINK_RECORD_RESPONSE_0X57,
MESSAGE_ALL_LINKING_COMPLETED_0X53,
MESSAGE_BUTTON_EVENT_REPORT_0X54,
MESSAGE_CANCEL_ALL_LINKING_0X65,
MESSAGE_EXTENDED_MESSAGE_RECEIVED_0X51,
MESSAGE_GET_FIRST_ALL_LINK_RECORD_0X69,
MESSAGE_GET_IM_CONFIGURATION_0X73,
MESSAGE_GET_IM_INFO_0X60,
MESSAGE_GET_NEXT_ALL_LINK_RECORD_0X6A,
MESSAGE_RESET_IM_0X67,
MESSAGE_SEND_ALL_LINK_COMMAND_0X61,
MESSAGE_SEND_STANDARD_MESSAGE_0X62,
MESSAGE_STANDARD_MESSAGE_RECEIVED_0X50,
MESSAGE_START_ALL_LINKING_0X64,
MESSAGE_START_CODE_0X02,
MESSAGE_USER_RESET_DETECTED_0X55,
MESSAGE_X10_MESSAGE_RECEIVED_0X52,
MESSAGE_X10_MESSAGE_SEND_0X63,
MESSAGE_SET_IM_CONFIGURATION_0X6B,
MESSAGE_MANAGE_ALL_LINK_RECORD_0X6F,
)
from insteonplm.messages.standardReceive import StandardReceive
from insteonplm.messages.extendedReceive import ExtendedReceive
from insteonplm.messages.x10received import X10Received
from insteonplm.messages.allLinkComplete import AllLinkComplete
from insteonplm.messages.buttonEventReport import ButtonEventReport
from insteonplm.messages.userReset import UserReset
from insteonplm.messages.allLinkCleanupFailureReport import AllLinkCleanupFailureReport
from insteonplm.messages.allLinkRecordResponse import AllLinkRecordResponse
from insteonplm.messages.allLinkCleanupStatusReport import AllLinkCleanupStatusReport
from insteonplm.messages.getIMInfo import GetImInfo
from insteonplm.messages.sendAlllinkCommand import SendAllLinkCommand
from insteonplm.messages.standardSend import StandardSend
from insteonplm.messages.x10send import X10Send
from insteonplm.messages.startAllLinking import StartAllLinking
from insteonplm.messages.cancelAllLinking import CancelAllLinking
from insteonplm.messages.resetIM import ResetIM
from insteonplm.messages.setImConfiguration import SetIMConfiguration
from insteonplm.messages.getFirstAllLinkRecord import GetFirstAllLinkRecord
from insteonplm.messages.getNextAllLinkRecord import GetNextAllLinkRecord
from insteonplm.messages.getImConfiguration import GetImConfiguration
from insteonplm.messages.manageAllLinkRecord import ManageAllLinkRecord
_LOGGER = logging.getLogger(__name__)
def create(rawmessage):
    """Parse one INSTEON message out of ``rawmessage``.

    Returns a ``(msg, remaining_data)`` tuple where ``msg`` is the
    decoded message object (or None when no complete message could be
    parsed yet) and ``remaining_data`` is the unconsumed buffer tail.
    """
    # Drop any leading bytes that are not the 0x02 message-start marker.
    rawmessage = _trim_buffer_garbage(rawmessage)
    if len(rawmessage) < 2:
        # Need at least the start byte plus a message code.
        return (None, rawmessage)
    code = rawmessage[1]
    msgclass = _get_msg_class(code)
    msg = None
    remaining_data = rawmessage
    if msgclass is None:
        # Unknown message code: skip this start byte and retry (recursively)
        # on the rest of the buffer in case a valid message follows.
        _LOGGER.debug("Did not find message class 0x%02x", rawmessage[1])
        rawmessage = rawmessage[1:]
        rawmessage = _trim_buffer_garbage(rawmessage, False)
        if rawmessage:
            _LOGGER.debug("Create: %s", create)
            _LOGGER.debug("rawmessage: %s", binascii.hexlify(rawmessage))
            msg, remaining_data = create(rawmessage)
        else:
            remaining_data = rawmessage
    else:
        # Known code: only decode once the full message has arrived.
        if iscomplete(rawmessage):
            msg = msgclass.from_raw_message(rawmessage)
            if msg:
                remaining_data = rawmessage[len(msg.bytes) :]
    # _LOGGER.debug("Returning msg: %s", msg)
    # _LOGGER.debug('Returning buffer: %s', binascii.hexlify(remaining_data))
    return (msg, remaining_data)
def iscomplete(rawmessage):
    """Return True if ``rawmessage`` contains a complete message.

    Raises ValueError when the buffer does not begin with the 0x02
    start byte, or when no expected size is known for the message code.
    (The previous version *returned* the ``ValueError`` class instead
    of raising it, which callers treated as a truthy "complete" result;
    it also built an unused ``messageBuffer``/``filler`` pair.)
    """
    if len(rawmessage) < 2:
        return False

    if rawmessage[0] != 0x02:
        raise ValueError("message does not start with 0x02")

    msg = _get_msg_class(rawmessage[1])

    if hasattr(msg, "receivedSize") and msg.receivedSize:
        expectedSize = msg.receivedSize
    else:
        _LOGGER.error("Unable to find a receivedSize for code 0x%x", rawmessage[1])
        raise ValueError(
            "Unable to find a receivedSize for code 0x%x" % rawmessage[1]
        )

    return len(rawmessage) >= expectedSize
def _get_msg_class(code):
    """Return the message class registered for ``code`` (None if unknown)."""
    registry = {
        MESSAGE_STANDARD_MESSAGE_RECEIVED_0X50: StandardReceive,
        MESSAGE_EXTENDED_MESSAGE_RECEIVED_0X51: ExtendedReceive,
        MESSAGE_X10_MESSAGE_RECEIVED_0X52: X10Received,
        MESSAGE_ALL_LINKING_COMPLETED_0X53: AllLinkComplete,
        MESSAGE_BUTTON_EVENT_REPORT_0X54: ButtonEventReport,
        MESSAGE_USER_RESET_DETECTED_0X55: UserReset,
        MESSAGE_ALL_LINK_CEANUP_FAILURE_REPORT_0X56: AllLinkCleanupFailureReport,
        MESSAGE_ALL_LINK_RECORD_RESPONSE_0X57: AllLinkRecordResponse,
        MESSAGE_ALL_LINK_CLEANUP_STATUS_REPORT_0X58: AllLinkCleanupStatusReport,
        MESSAGE_GET_IM_INFO_0X60: GetImInfo,
        MESSAGE_SEND_ALL_LINK_COMMAND_0X61: SendAllLinkCommand,
        MESSAGE_SEND_STANDARD_MESSAGE_0X62: StandardSend,
        MESSAGE_X10_MESSAGE_SEND_0X63: X10Send,
        MESSAGE_START_ALL_LINKING_0X64: StartAllLinking,
        MESSAGE_CANCEL_ALL_LINKING_0X65: CancelAllLinking,
        MESSAGE_RESET_IM_0X67: ResetIM,
        MESSAGE_GET_FIRST_ALL_LINK_RECORD_0X69: GetFirstAllLinkRecord,
        MESSAGE_GET_NEXT_ALL_LINK_RECORD_0X6A: GetNextAllLinkRecord,
        MESSAGE_MANAGE_ALL_LINK_RECORD_0X6F: ManageAllLinkRecord,
        MESSAGE_SET_IM_CONFIGURATION_0X6B: SetIMConfiguration,
        MESSAGE_GET_IM_CONFIGURATION_0X73: GetImConfiguration,
    }
    return registry.get(code)
def _add_msg_class(msg_list, code, msg_class):
msg_list[code] = msg_class
return msg_list
def _trim_buffer_garbage(rawmessage, debug=True):
    """Remove leading bytes from a byte stream.

    A proper message byte stream begins with 0x02; everything before
    the first 0x02 is discarded one byte at a time.

    debug: when True, log the buffer content for each dropped byte.
    """
    while rawmessage and rawmessage[0] != MESSAGE_START_CODE_0X02:
        if debug:
            _LOGGER.debug("Buffer content: %s", binascii.hexlify(rawmessage))
            _LOGGER.debug("Trimming leading buffer garbage")
        rawmessage = rawmessage[1:]
    return rawmessage
| mit | 40a675b60aafff5fd37e8e19e485b8e9 | 35.595 | 87 | 0.714169 | 3.239929 | false | false | false | false |
nugget/python-insteonplm | insteonplm/utils.py | 1 | 1774 | """Utility methods."""
from insteonplm.constants import (
HC_LOOKUP,
UC_LOOKUP,
X10_COMMAND_ALL_UNITS_OFF,
X10_COMMAND_ALL_LIGHTS_ON,
X10_COMMAND_ALL_LIGHTS_OFF,
X10CommandType,
)
def housecode_to_byte(housecode):
    """Return the byte value of an X10 housecode (None if unknown)."""
    key = housecode.lower()
    return HC_LOOKUP.get(key)
def unitcode_to_byte(unitcode):
    """Return the byte value of an X10 unitcode (None if unknown)."""
    return UC_LOOKUP.get(unitcode, None)
def byte_to_housecode(bytecode):
    """Return the X10 housecode (upper-case letter) for a byte value.

    Raises ValueError when the byte is not a known housecode encoding.
    The previous implementation materialized both the key list and the
    value list of HC_LOOKUP on every call just to do a reverse lookup;
    a single pass over the items is enough.
    """
    for housecode, value in HC_LOOKUP.items():
        if value == bytecode:
            return housecode.upper()
    raise ValueError("unknown X10 housecode byte: 0x%02x" % bytecode)
def byte_to_unitcode(bytecode):
    """Return the X10 unitcode for a byte value.

    Raises ValueError when the byte is not a known unitcode encoding.
    Replaces the double list materialization of UC_LOOKUP with a single
    pass over its items.
    """
    for unitcode, value in UC_LOOKUP.items():
        if value == bytecode:
            return unitcode
    raise ValueError("unknown X10 unitcode byte: 0x%02x" % bytecode)
def x10_command_type(command):
    """Classify an X10 command as DIRECT or BROADCAST."""
    broadcast_commands = (
        X10_COMMAND_ALL_UNITS_OFF,
        X10_COMMAND_ALL_LIGHTS_ON,
        X10_COMMAND_ALL_LIGHTS_OFF,
    )
    if command in broadcast_commands:
        return X10CommandType.BROADCAST
    return X10CommandType.DIRECT
def rawX10_to_bytes(rawX10):
    """Yield the high nibble then the low nibble of a raw X10 byte."""
    high_nibble = rawX10 >> 4
    low_nibble = rawX10 & 0x0F
    yield high_nibble
    yield low_nibble
def bit_is_set(bitmask, bit):
    """Return True if a specific bit is set in a bitmask.

    Bit numbering: the low bit is 1 and the high bit is 8.
    """
    return bool(bitmask >> (bit - 1) & 1)
def set_bit(bitmask, bit, is_on):
    """Return *bitmask* with one bit turned on or off.

    Bit numbering: the low bit is 1 and the high bit is 8.  The result
    is masked to a single byte when clearing a bit.
    """
    mask = 1 << (bit - 1)
    if is_on:
        return bitmask | mask
    return bitmask & (0xFF & ~mask)
| mit | b5607124d122b5bab936176957bf56ea | 24.342857 | 75 | 0.644307 | 3.196396 | false | false | false | false |
nugget/python-insteonplm | insteonplm/messages/userdata.py | 1 | 5743 | """Extended Message User Data Type."""
import logging
import binascii
_LOGGER = logging.getLogger(__name__)
class Userdata:
    """Container for the 14 user-data bytes (d1..d14) of an INSTEON
    extended message.

    The fields are stored in a dict keyed 'd1'..'d14'.  A value of
    None acts as a wildcard in template/pattern instances.
    """

    def __init__(self, userdata=None):
        """Init the Userdata Class.

        userdata: a Userdata instance, a dict, a 14-byte sequence or
        None; unspecified fields default to 0x00.
        """
        self._userdata = self.normalize(self.create_empty(0x00), userdata)

    def __len__(self):
        """Return the number of user data fields (always 14)."""
        return len(self._userdata)

    def __str__(self):
        """Return string representation of user data."""
        return self.human

    def __iter__(self):
        """Iterate through the user data field names ('d1'..'d14')."""
        for itm in self._userdata:
            yield itm

    def __getitem__(self, key):
        """Return a single byte of the user data (None for unknown keys)."""
        return self._userdata.get(key)

    def __setitem__(self, key, val):
        """Set a user data element."""
        self._userdata[key] = val

    def __eq__(self, other):
        """Test if the current user data equals another user data instance."""
        isequal = False
        if isinstance(other, Userdata):
            for key in self._userdata:
                if self._userdata[key] == other[key]:
                    isequal = True
                else:
                    isequal = False
                    break
        return isequal

    def __ne__(self, other):
        """Test if the current user data is not equal to another instance.

        Delegates to __eq__.  The previous implementation evaluated
        ``self != other`` here, which re-entered __ne__ and recursed
        until RecursionError.
        """
        return not self.__eq__(other)

    @property
    def human(self):
        """Emit the user data in human-readible format (AA.BB.CC...)."""
        strout = ""
        first = True
        for i in range(0, 28, 2):
            if first:
                first = False
            else:
                strout = strout + "."
            strout = strout + self.hex[i : i + 2]
        return strout

    @property
    def hex(self):
        """Emit the user data in bare hex format (28 characters)."""
        return binascii.hexlify(self.bytes).decode()

    @property
    def bytes(self):
        """Emit the user data as a 14-byte bytearray (None becomes 0x00)."""
        byteout = bytearray()
        for i in range(1, 15):
            key = "d" + str(i)
            if self._userdata[key] is not None:
                byteout.append(self._userdata[key])
            else:
                byteout.append(0x00)
        return byteout

    @classmethod
    def from_raw_message(cls, rawmessage):
        """Create a user data instance from a raw byte stream."""
        empty = cls.create_empty(0x00)
        userdata_dict = cls.normalize(empty, rawmessage)
        return Userdata(userdata_dict)

    @classmethod
    def create_pattern(cls, userdata):
        """Create a wildcard (None-filled) instance overlaid with userdata."""
        empty = cls.create_empty(None)
        userdata_dict = cls.normalize(empty, userdata)
        return Userdata(userdata_dict)

    @classmethod
    def create(cls):
        """Create an empty (all 0x00) user data instance."""
        empty = cls.create_empty(0x00)
        return Userdata(empty)

    @classmethod
    def template(cls, userdata):
        """Create a template instance used for message callbacks.

        Fields not present in ``userdata`` stay None (wildcards).
        """
        ud = Userdata(cls.normalize(cls.create_empty(None), userdata))
        return ud

    def matches_pattern(self, other):
        """Test if the current instance matches a template instance.

        A None value on either side of a field acts as a wildcard.
        """
        ismatch = False
        if isinstance(other, Userdata):
            for key in self._userdata:
                if self._userdata[key] is None or other[key] is None:
                    ismatch = True
                elif self._userdata[key] == other[key]:
                    ismatch = True
                else:
                    ismatch = False
                    break
        return ismatch

    def get(self, key):
        """Return a single byte of the user data."""
        return self[key]

    def to_dict(self):
        """Return userdata as a dict object."""
        return self._userdata

    @classmethod
    def _dict_to_dict(cls, empty, userdata):
        """Copy recognized d1..d14 keys from ``userdata`` into ``empty``."""
        if isinstance(userdata, dict):
            for key in userdata:
                if key in [
                    "d1",
                    "d2",
                    "d3",
                    "d4",
                    "d5",
                    "d6",
                    "d7",
                    "d8",
                    "d9",
                    "d10",
                    "d11",
                    "d12",
                    "d13",
                    "d14",
                ]:
                    empty[key] = userdata[key]
        return empty

    @classmethod
    def _bytes_to_dict(cls, empty, userdata):
        """Fill ``empty`` from a 14-byte sequence; raise ValueError otherwise."""
        if len(userdata) == 14:
            for i in range(1, 15):
                key = "d{}".format(i)
                empty[key] = userdata[i - 1]
        else:
            raise ValueError("Userdata requires exactly 14 bytes")
        return empty

    @classmethod
    def create_empty(cls, val=0x00):
        """Create an empty Userdata dict.

        val: value to fill the empty user data fields with (default is 0x00)
        """
        userdata_dict = {}
        for i in range(1, 15):
            key = "d{}".format(i)
            userdata_dict.update({key: val})
        return userdata_dict

    @classmethod
    def normalize(cls, empty, userdata):
        """Return normalized user data as a dictionary.

        empty: an empty dictionary
        userdata: data in the form of Userdata, dict, bytes/bytearray or None
        """
        if isinstance(userdata, Userdata):
            return userdata.to_dict()
        if isinstance(userdata, dict):
            return cls._dict_to_dict(empty, userdata)
        if isinstance(userdata, (bytes, bytearray)):
            return cls._bytes_to_dict(empty, userdata)
        if userdata is None:
            return empty
        raise ValueError("Invalid userdata type: %s" % type(userdata))
| mit | d47b3df8409c2de2e39ae2bba736cf1c | 29.226316 | 78 | 0.517325 | 4.324548 | false | false | false | false |
nugget/python-insteonplm | insteonplm/devices/windowCoverings.py | 1 | 1135 | """INSTEON Device Type Window Coverings Control Module."""
from insteonplm.devices import Device
from insteonplm.states.cover import Cover
class WindowCovering(Device):
    """Window Covering Controller.

    INSTEON Window Covering device class. Available device control options are:
        - open()
        - open_fast()
        - set_position(openlevel=0xff)
        - close()
        - close_fast()

    To monitor changes to the state of the device subscribe to the state
    monitor:
        - _states[0x01].connect(callback)  (state='LightOnLevel')

    where callback defined as:
        - callback(self, device_id, state, state_value)
    """

    def __init__(
        self, plm, address, cat, subcat, product_key=None, description=None, model=None
    ):
        """Init the WindowCovering Class."""
        super().__init__(plm, address, cat, subcat, product_key, description, model)
        # State 0x01 exposes the cover position as "coverOpenLevel".
        self._stateList[0x01] = Cover(
            self._address,
            "coverOpenLevel",
            0x01,
            self._send_msg,
            self._message_callbacks,
            0x00,
        )
| mit | 181544ee7ac4f89d4b8d0c229ec18145 | 28.102564 | 87 | 0.597357 | 3.954704 | false | false | false | false |
nugget/python-insteonplm | insteonplm/messages/manageAllLinkRecord.py | 1 | 3149 | """INSTEON Message Manage All-Link Record."""
from insteonplm.messages.message import Message
from insteonplm.constants import (
MESSAGE_MANAGE_ALL_LINK_RECORD_0X6F,
MESSAGE_MANAGE_ALL_LINK_RECORD_SIZE,
MESSAGE_MANAGE_ALL_LINK_RECORD_RECEIVED_SIZE,
MESSAGE_ACK,
MESSAGE_NAK,
)
from insteonplm.address import Address
# pylint: disable=too-many-instance-attributes
class ManageAllLinkRecord(Message):
    """Insteon Manage All-Link Record Message.

    Message type 0x6F.  (The docstring previously claimed 0x69 and
    described the Get First All-Link Record message.)
    """

    _code = MESSAGE_MANAGE_ALL_LINK_RECORD_0X6F
    _sendSize = MESSAGE_MANAGE_ALL_LINK_RECORD_SIZE
    _receivedSize = MESSAGE_MANAGE_ALL_LINK_RECORD_RECEIVED_SIZE
    _description = "Insteon Manage All Link Record Message"

    def __init__(
        self,
        control_code,
        flags,
        group,
        address,
        linkdata1,
        linkdata2,
        linkdata3,
        acknak=None,
    ):
        """Init the ManageAllLinkRecord Class."""
        self._controlCode = control_code
        self._controlFlags = flags
        self._group = group
        self._address = Address(address)
        self._linkdata1 = linkdata1
        self._linkdata2 = linkdata2
        self._linkdata3 = linkdata3
        self._acknak = self._setacknak(acknak)

    @classmethod
    def from_raw_message(cls, rawmessage):
        """Create message from raw byte stream.

        Wire layout: 0x02, 0x6F, control code, control flags, group,
        3-byte device address, link data 1-3, trailing ACK/NAK.  The
        previous offsets handed three bytes to ``group`` and only one
        byte to ``address`` (which Address cannot represent) and
        dropped the trailing ACK/NAK byte entirely.
        """
        return ManageAllLinkRecord(
            rawmessage[2:3],  # control code
            rawmessage[3:4],  # control flags
            rawmessage[4:5],  # group
            rawmessage[5:8],  # 3-byte device address
            rawmessage[8:9],  # link data 1
            rawmessage[9:10],  # link data 2
            rawmessage[10:11],  # link data 3
            acknak=rawmessage[11:12],
        )

    @property
    def controlCode(self):
        """Return the link record control code."""
        return self._controlCode

    @property
    def controlFlags(self):
        """Return the link record control flags."""
        return self._controlFlags

    @property
    def group(self):
        """Return the link record group."""
        return self._group

    @property
    def address(self):
        """Return the device address."""
        return self._address

    @property
    def linkdata1(self):
        """Return the first link data field."""
        return self._linkdata1

    @property
    def linkdata2(self):
        """Return the second link data field."""
        return self._linkdata2

    @property
    def linkdata3(self):
        """Return the third link data field."""
        return self._linkdata3

    @property
    def acknak(self):
        """Return the ACK/NAK byte."""
        return self._acknak

    @property
    def isack(self):
        """Test if this is an ACK message."""
        return self._acknak is not None and self._acknak == MESSAGE_ACK

    @property
    def isnak(self):
        """Test if this is a NAK message."""
        return self._acknak is not None and self._acknak == MESSAGE_NAK

    def _message_properties(self):
        """Return the ordered properties used to serialize this message."""
        return [
            {"controlCode": self._controlCode},
            {"controlFlags": self._controlFlags},
            {"group": self._group},
            {"address": self._address},
            {"linkdata1": self._linkdata1},
            {"linkdata2": self._linkdata2},
            {"linkdata3": self._linkdata3},
            {"acknak": self._acknak},
        ]
| mit | db84f2dba8890458cc2ad5404008c97d | 26.622807 | 71 | 0.590664 | 3.826245 | false | false | false | false |
okfn/bibserver | bibserver/config.py | 4 | 1076 | import os
import json
'''read the config.json file and make available as a config dict'''
def load_config(path):
    """Read a JSON config file and return it as a dict.

    Lines whose first non-whitespace character is '#' are treated as
    comments and skipped before the remainder is parsed as JSON.  The
    file is opened with a context manager so the handle is always
    closed (the previous version leaked it).
    """
    buf = []
    with open(path) as fileobj:
        for line in fileobj:
            if line.strip().startswith("#"):
                continue
            buf.append(line)
    out = json.loads("".join(buf))
    # add some critical defaults if necessary
    if 'facet_field' not in out:
        out['facet_field'] = ''
    return out
# Import-time bootstrap: load config.json from the repository root (the
# parent of this package directory), then overlay local_config.json on
# top of it when that file exists.
here = os.path.dirname(__file__)
parent = os.path.dirname(here)
config_path = os.path.join(parent, 'config.json')
config = load_config(config_path)
if os.path.exists(os.path.join(parent, 'local_config.json')):
    local_config = load_config(os.path.join(parent, 'local_config.json'))
    config.update(local_config)
__all__ = ['config']
''' wrap a config dict in a class if required'''
class Config(object):
    """Attribute-style access over a configuration dictionary."""
    def __init__(self, confdict=config):
        '''Create Configuration object from a configuration dictionary.'''
        self.cfg = confdict

    def __getattr__(self, attr):
        # Unknown configuration keys resolve to None instead of raising.
        return self.cfg.get(attr)
| mit | cc7e75ef88c2161b40fec569a6ac8d20 | 22.911111 | 74 | 0.6171 | 3.635135 | false | true | false | false |
okfn/bibserver | bibserver/ingest.py | 4 | 12956 | '''
Independent running process.
Handling uploads asynchronously.
See: https://github.com/okfn/bibserver/wiki/AsyncUploadDesign
'''
import os, stat, sys, uuid, time
import subprocess
from cStringIO import StringIO
import requests
import hashlib
import json
from datetime import datetime
import traceback
import bibserver.dao
from bibserver.config import config
from bibserver.importer import Importer
from bibserver.core import app
import bibserver.util as util
from flask import render_template, make_response, abort, send_from_directory, redirect, request
# Registry of installed parser/scraper plugins, keyed by format name.
# Populated by init() and persisted to plugins.json for get_plugins().
PLUGINS = {}
class IngestTicketInvalidOwnerException(Exception):
    """Raised when an IngestTicket is created without a string owner."""
    pass
class IngestTicketInvalidInit(Exception):
    """Raised when a required ticket field (collection/format) is missing."""
    pass
class IngestTicketInvalidId(Exception):
    """Raised when no ticket file exists for the requested ticket id."""
    pass
class IngestTicket(dict):
    """A dict-backed work item for the async upload/ingest pipeline.

    Tickets are persisted as JSON files named '<id>.ticket' in the
    download cache directory and move through the states
    new -> downloading -> downloaded -> parsing -> parsed ->
    populating_index -> done (or 'failed').
    """
    def __init__(self,*args,**kwargs):
        '''Creates a new Ingest Ticket, ready for processing by the ingest pipeline.

        Requires an 'owner' string plus 'collection' and 'format' entries;
        '_id', 'state' and '_created' are filled in when absent.
        '''
        if '_id' not in kwargs:
            kwargs['_id'] = uuid.uuid4().hex
        if 'state' not in kwargs:
            kwargs['state'] = 'new'
        if '_created' not in kwargs:
            kwargs['_created'] = time.time()
        owner = kwargs.get('owner')
        if not type(owner) in (str, unicode):
            raise IngestTicketInvalidOwnerException()
        for x in ('collection', 'format'):
            if not kwargs.get(x):
                raise IngestTicketInvalidInit('You need to supply the parameter %s' % x)
        # Timestamps are stored as epoch floats on disk but kept as
        # datetime objects in memory.
        for x in ('_created', '_last_modified'):
            if x in kwargs:
                kwargs[x] = datetime.fromtimestamp(kwargs[x])
        dict.__init__(self,*args,**kwargs)
    @classmethod
    def load(cls, ticket_id):
        """Load the ticket file for ticket_id from the download cache."""
        filename = os.path.join(config['download_cache_directory'], ticket_id) + '.ticket'
        if not os.path.exists(filename):
            raise IngestTicketInvalidId(ticket_id)
        data = json.loads(open(filename).read())
        return cls(**data)
    def save(self):
        """Serialize the ticket to its '.ticket' file.

        Datetimes are converted to epoch floats for JSON, written out,
        then converted back so the in-memory object keeps datetimes.
        """
        self['_last_modified'] = time.time()
        self['_created'] = time.mktime(self['_created'].timetuple())
        filename = os.path.join(config['download_cache_directory'], self['_id']) + '.ticket'
        open(filename, 'wb').write(json.dumps(self))
        for x in ('_created', '_last_modified'):
            self[x] = datetime.fromtimestamp(self[x])
    def fail(self, msg):
        """Mark the ticket failed, appending (timestamp, msg) to 'exception'."""
        self['state'] = 'failed'
        err = (datetime.now().strftime("%Y%m%d%H%M"), msg)
        self.setdefault('exception', []).append(err)
        self.save()
    def delete(self):
        """Remove the ticket file from disk (the dict itself is untouched)."""
        filename = os.path.join(config['download_cache_directory'], self['_id']) + '.ticket'
        os.remove(filename)
    def __unicode__(self):
        # Falls back to repr() if any of the expected keys is missing.
        try:
            return u'%s/%s,%s [%s] - %s' % (self['owner'], self['collection'], self['format'], self['state'], self['_last_modified'])
        except:
            return repr(self)
    def __str__(self):
        return unicode(self).encode('utf8')
    @property
    def id(self):
        # Convenience accessor for the ticket identifier.
        return self['_id']
def index(ticket):
    """Load the parsed bibjson for *ticket* into the owner's collection."""
    ticket['state'] = 'populating_index'
    ticket.save()

    # The parsed JSON produced by parse() must already be in the cache.
    cache_dir = config['download_cache_directory']
    parsed_path = os.path.join(cache_dir, ticket['data_json'])
    if not os.path.exists(parsed_path):
        ticket.fail('Parsed content for %s not found' % parsed_path)
        return

    raw = open(parsed_path).read()
    if len(raw) < 1:
        raise Exception('The parsed data in this ticket is empty.')

    # TODO check for metadata section to update collection from this?

    owner = bibserver.dao.Account.get(ticket['owner'])
    importer = Importer(owner=owner)
    collection = {
        'label': ticket['collection'],
        'collection': util.slugify(ticket['collection']),
        'description': ticket.get('description'),
        'source': ticket['source_url'],
        'format': ticket['format'],
        'license': ticket.get('license', u"Not specified"),
    }
    collection, records = importer.upload(open(parsed_path), collection)
    ticket['state'] = 'done'
    ticket.save()
def parse(ticket):
    """Run the format plugin over the ticket's downloaded data.

    The plugin executable reads the raw data on stdin and writes parsed
    JSON on stdout; its stderr, if any, is saved on the ticket as
    'parse_feedback'.
    """
    ticket['state'] = 'parsing'
    ticket.save()

    if 'data_md5' not in ticket:
        ticket.fail('Attempt to parse ticket, but no data_md5 found')
        return

    plugin = PLUGINS.get(ticket['format'])
    if not plugin:
        ticket.fail('Parser plugin for format %s not found' % ticket['format'])
        return

    # Make sure the downloaded content is in the cache
    download_cache_directory = config['download_cache_directory']
    in_path = os.path.join(download_cache_directory, ticket['data_md5'])
    if not os.path.exists(in_path):
        ticket.fail('Downloaded content for %s not found' % in_path)
        return

    proc = subprocess.Popen(plugin['_path'], shell=True, stdin=open(in_path),
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # communicate() drains stdout and stderr concurrently; reading the
    # two pipes one after the other (as before) can deadlock when the
    # parser fills the stderr pipe buffer while we block on stdout.
    data, data_stderr = proc.communicate()

    md5sum = hashlib.md5(data).hexdigest()
    out_path = os.path.join(download_cache_directory, md5sum)
    open(out_path, 'wb').write(data)
    ticket['data_json'] = md5sum

    if ticket.get('only_parse') == True:
        ticket['state'] = 'done'
    else:
        ticket['state'] = 'parsed'

    # Any stderr output from the parser is kept as feedback for the user.
    if len(data_stderr) > 0:
        ticket['parse_feedback'] = data_stderr
    ticket.save()
def store_data_in_cache(data):
    """Write *data* into the download cache and return its md5 hex digest.

    The digest doubles as the cache filename, so identical content is
    only written once.
    """
    digest = hashlib.md5(data).hexdigest()
    cached = os.path.join(config['download_cache_directory'], digest)
    if not os.path.exists(cached):
        open(cached, 'wb').write(data)
    return digest
def download(ticket):
    """Fetch the ticket's source data into the download cache.

    When the format plugin declares 'downloads', the plugin does its
    own fetching later, so only the source URL itself is cached here.
    """
    ticket['state'] = 'downloading'
    ticket.save()

    plugin = PLUGINS.get(ticket['format'])
    if plugin and plugin.get('downloads'):
        data = ticket['source_url'].strip()
        content_type = 'text/plain'
    else:
        url = ticket['source_url'].strip()
        r = requests.get(url)
        # Some servers omit Content-Type; the previous indexed access
        # raised KeyError and failed the whole ticket in that case.
        content_type = r.headers.get('content-type', 'application/octet-stream')
        r.raise_for_status()
        data = r.content
        if len(data) < 1:
            ticket.fail('Data is empty, HTTP status code %s ' % r.status_code)
            return

    ticket['data_md5'] = store_data_in_cache(data)
    ticket['data_content_type'] = content_type
    ticket['state'] = 'downloaded'
    ticket.save()
def determine_action(ticket):
    'For the given ticket determine what the next action to take is based on the state'
    # Note: 'state' is read once at entry, so each call performs at most
    # one pipeline stage; the run() loop re-scans to advance further.
    try:
        state = ticket['state']
        print ticket['state'], ticket['_id'],
        if state == 'new':
            download(ticket)
        if state == 'downloaded':
            parse(ticket)
        if state == 'parsed':
            index(ticket)
    except:
        ## TODO
        # For some reason saving the traceback to the ticket here is not saving the exception
        # The ticket does not record the 'failed' state, and remains in eg. a 'downloading' state
        ticket.fail(traceback.format_exc())
    print '...', ticket['state']
def get_tickets(state=None):
    """Return all tickets from the cache, optionally filtered by state."""
    tickets = []
    for entry in os.listdir(config['download_cache_directory']):
        if not entry.endswith('.ticket'):
            continue
        ticket = IngestTicket.load(entry[:-7])
        if not state or state == ticket['state']:
            tickets.append(ticket)
    return tickets
def scan_parserscrapers(directory):
    """Scan *directory* recursively for valid parser/scraper executables.

    An executable qualifies when calling it with '-bibserver' prints a
    JSON config containing a truthy 'bibserver_plugin' key.  Returns a
    list of those configs, each annotated with its '_path'.
    """
    found = []
    for root, dirs, files in os.walk(directory):
        for name in files:
            filename = os.path.join(root, name)
            # Only consider files with the owner-execute bit set.
            is_ex = stat.S_IXUSR & os.stat(filename)[stat.ST_MODE]
            if is_ex:
                # Try and call this executable with a -bibserver to get a config
                try:
                    output = subprocess.check_output(filename+' -bibserver', shell=True)
                    output_json = json.loads(output)
                    if output_json['bibserver_plugin']:
                        output_json['_path'] = filename
                        found.append(output_json)
                except subprocess.CalledProcessError:
                    sys.stderr.write(traceback.format_exc())
                except ValueError:
                    # json.loads failed; 'output' is bound because
                    # check_output succeeded before the parse attempt.
                    sys.stderr.write('Error parsing plugin output:\n')
                    sys.stderr.write(output)
    return found
def get_plugins():
    """Return the plugin registry that init() persisted to plugins.json."""
    plugin_dir = config.get('parserscrapers_plugin_directory')
    registry_file = os.path.join(plugin_dir, 'plugins.json')
    return json.loads(open(registry_file).read())
def init():
    """Create cache/plugin directories and populate the PLUGINS registry."""
    for d in ('download_cache_directory', 'parserscrapers_plugin_directory'):
        dd = config.get(d)
        if not os.path.exists(dd):
            os.mkdir(dd)
    # Scan for available parser/scraper plugins
    parserscrapers_plugin_directory = config.get('parserscrapers_plugin_directory')
    if not parserscrapers_plugin_directory:
        sys.stderr.write('Error: parserscrapers_plugin_directory config entry not found\n')
    plugins = scan_parserscrapers(parserscrapers_plugin_directory)
    if plugins:
        for ps in plugins:
            PLUGINS[ps['format']] = ps
    # Persist the registry so other processes can read it via get_plugins().
    filename = os.path.join(config.get('parserscrapers_plugin_directory'), 'plugins.json')
    open(filename, 'w').write(json.dumps(PLUGINS))
def run():
    """Main ingest loop: advance tickets through the pipeline until the
    ingest.pid lock file disappears or names another process."""
    last_flash = time.time() - 500
    count = 0
    running = True
    while running:
        # ingest.pid acts as a lock: exit if it names another pid or
        # cannot be read at all.
        try:
            pid = open('ingest.pid').read()
            if str(pid) != str(os.getpid()):
                print 'Other ingest process %s detected not %s, exiting' % (pid, os.getpid())
                sys.exit(2)
        except IOError:
            print 'Ingest process exiting: ingest.pid file cound not be read'
            sys.exit(3)
        except:
            traceback.print_exc()
            sys.exit(4)
        # One determine_action() call moves a ticket one stage forward.
        for state in ('new', 'downloaded', 'parsed'):
            for t in get_tickets(state):
                determine_action(t)
                count += 1
        time.sleep(15)
        # Heartbeat to stdout roughly every five minutes.
        if time.time() - last_flash > (5 * 60):
            sys.stdout.write('Ingest pipeline %s %s performed %s actions\n' % (os.getpid(), time.ctime(), count))
            last_flash = time.time()
def reset_all_tickets():
    """Force every ticket back to 'new' so the pipeline reprocesses it."""
    for t in get_tickets():
        print 'Resetting', t['_id']
        t['state'] = 'new'
        t.save()
@app.route('/ticket/')
@app.route('/ticket/<ticket_id>')
def view_ticket(ticket_id=None):
    """Render the ticket overview page, optionally focused on one ticket."""
    ingest_tickets = get_tickets()
    # Optional ?sort=<field> query parameter orders the listing.
    sort_key = request.values.get('sort')
    if sort_key:
        ingest_tickets.sort(key=lambda x: x.get(sort_key))
    if ticket_id:
        try:
            t = IngestTicket.load(ticket_id)
        except bibserver.ingest.IngestTicketInvalidId:
            abort(404)
    else:
        t = None
    return render_template('tickets/view.html', ticket=t, ingest_tickets = ingest_tickets)
@app.route('/ticket/<ticket_id>/<payload>', methods=['GET', 'POST'])
def ticket_serve(ticket_id, payload):
    """Serve a ticket's raw data or bibjson, or reset/delete it via POST.

    NOTE(review): an unrecognized payload falls through to the final
    redirect with `filename` unbound (UnboundLocalError) - confirm all
    callers use only data/bibjson/reset/delete.
    """
    t = IngestTicket.load(ticket_id)
    if payload == 'data':
        filename = t['data_md5']
    elif payload == 'bibjson':
        filename = t['data_json']
    elif (payload == 'reset') and (request.method == 'POST'):
        t['state'] = 'new'
        # Remove stale diagnostic fields so the rerun starts clean.
        for cleanfield in ('failed_index', 'parse_feedback'):
            if cleanfield in t:
                del t[cleanfield]
        t.save()
        return make_response('OK')
    elif (payload == 'delete') and (request.method == 'POST'):
        t.delete()
        return make_response('OK')
    return redirect('/data/'+filename)
@app.route('/data.txt')
def data_list():
    """List the URLs of all parsed data files, one per line, as plain text."""
    paths = ['/data/' + t['data_json'] for t in get_tickets() if 'data_json' in t]
    resp = make_response('\n'.join(paths))
    resp.mimetype = "text/plain"
    return resp
@app.route('/data/<filename>')
def data_serve(filename):
    """Serve one cached raw data file as plain text."""
    cache_dir = config['download_cache_directory']
    # send_from_directory needs an absolute path; anchor relative
    # configurations at the current working directory.
    if not cache_dir.startswith('/'):
        cache_dir = os.path.join(os.getcwd(), cache_dir)
    response = send_from_directory(cache_dir, filename)
    response.headers['Content-Type'] = 'text/plain'
    return response
if __name__ == '__main__':
    init()
    # Command-line switches:
    #   -x   reset all tickets to 'new'
    #   -p   print each ticket (-pp adds a full field dump)
    #   -d   record this pid in ingest.pid and run the pipeline loop
    # With no arguments the pipeline loop runs directly (without the
    # pid file, run() will exit on its first lock check).
    for x in sys.argv[1:]:
        if x == '-x':
            reset_all_tickets()
        elif x.startswith('-p'):
            for t in get_tickets():
                print t
                if x == '-pp':
                    print '-' * 80
                    for k,v in t.items():
                        print ' '*4, k+':', v
        elif x == '-d':
            open('ingest.pid', 'w').write('%s' % os.getpid())
            run()
    if len(sys.argv) == 1:
        run()
| mit | 9d85079a307fd20ce747cbd745907511 | 34.988889 | 133 | 0.589225 | 3.832002 | false | false | false | false |
okfn/bibserver | bibserver/util.py | 4 | 2105 | from urllib import urlopen, urlencode
import md5
import re
from unicodedata import normalize
from functools import wraps
from flask import request, current_app
def jsonp(f):
    """Wraps JSONified output for JSONP.

    When the request carries a ?callback=fn argument, the wrapped
    view's response body is embedded in a fn(...) JavaScript call;
    otherwise the view's response is returned unchanged.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        callback = request.args.get('callback', False)
        if callback:
            content = str(callback) + '(' + str(f(*args,**kwargs).data) + ')'
            return current_app.response_class(content, mimetype='application/javascript')
        else:
            return f(*args, **kwargs)
    return decorated_function
# derived from http://flask.pocoo.org/snippets/45/ (pd) and customised
def request_wants_json():
    """Decide whether the current request should receive a JSON response."""
    best = request.accept_mimetypes.best_match(['application/json', 'text/html'])
    wants_json = (
        best == 'application/json'
        and request.accept_mimetypes[best] > request.accept_mimetypes['text/html']
    )
    # An explicit format=json parameter or a .json path suffix always wins.
    if request.values.get('format', '').lower() == 'json' or request.path.endswith(".json"):
        wants_json = True
    return wants_json
# derived from http://flask.pocoo.org/snippets/5/ (public domain)
# changed delimiter to _ instead of - due to ES search problem on the -
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')

def slugify(text, delim=u'_'):
    """Build an ASCII-only slug from *text*, joining chunks with *delim*."""
    words = []
    for chunk in _punct_re.split(text.lower()):
        chunk = normalize('NFKD', chunk).encode('ascii', 'ignore')
        if chunk:
            words.append(chunk)
    return unicode(delim.join(words))
# get gravatar for email address
def get_gravatar(email, size=None, default=None, border=None):
    """Fetch the gravatar image bytes for an email address.

    :param email: address to look up (lower-cased and stripped before hashing)
    :param size: optional pixel size, only honoured in the 1..512 range
    :param default: optional fallback image keyword/URL
    :param border: optional border colour
    :return: raw image bytes downloaded from gravatar.com
    """
    email = email.lower().strip()
    hash = md5.md5(email).hexdigest()
    args = {'gravatar_id': hash}
    if size and 1 <= int(size) <= 512:
        args['size'] = size
    if default: args['default'] = default
    if border: args['border'] = border
    url = 'http://www.gravatar.com/avatar.php?' + urlencode(args)
    response = urlopen(url)
    try:
        # Ensure the connection is released even if the read fails.
        image = response.read()
    finally:
        response.close()
    return image
| mit | 0d7500ae61a4018ef3da9352d940345d | 31.890625 | 109 | 0.629929 | 3.667247 | false | false | false | false |
siddhantgoel/streaming-form-data | tests/test_finder.py | 1 | 1032 | import pytest
from streaming_form_data._parser import Finder
def test_invalid_init():
    # Non-bytes targets must be rejected with TypeError...
    for value in (None, 'abc', 123, 123.456, [123, 456], (123, 456)):
        with pytest.raises(TypeError):
            Finder(value)
    # ...and an empty bytes target with ValueError.
    with pytest.raises(ValueError):
        Finder(b'')
def test_init():
    # A fresh Finder exposes its target and starts inactive / not found.
    finder = Finder(b'hello')
    assert finder.target == b'hello'
    assert finder.inactive()
    assert not finder.found()
def test_single_byte():
    # 45 is the byte value of '-'; one matching byte completes the match.
    finder = Finder(b'-')
    assert finder.inactive()
    finder.feed(45)
    assert finder.found()
def test_normal():
    finder = Finder(b'hello')
    assert finder.inactive()
    # Feed 'h', 'e', 'l', 'l': a partial match keeps the finder active.
    for byte in [104, 101, 108, 108]:
        finder.feed(byte)
        assert finder.active()
        assert not finder.found()
    # The final 'o' completes the match.
    finder.feed(111)
    assert not finder.active()
    assert finder.found()
def test_wrong_byte():
    finder = Finder(b'hello')
    assert finder.inactive()
    # 'h' starts a match...
    finder.feed(104)
    assert finder.active()
    # ...but '*' mismatches, resetting the finder to inactive.
    finder.feed(42)
    assert finder.inactive()
| mit | fc648f710031b84216338bf2193095fa | 16.793103 | 69 | 0.620155 | 3.646643 | false | true | false | false |
kashifrazzaqui/vyked | tests/integration/test_registry.py | 2 | 2463 |
import requests
import multiprocessing
from vyked import Host, HTTPService, TCPService, TCPServiceClient, Registry
from vyked import request, get, Response, api
from vyked.registry import Repository
processes = []
class ServiceC(HTTPService):
    """Standalone HTTP service that returns a fixed body (declared here but
    not started by these tests)."""
    def __init__(self, host, port):
        super().__init__("ServiceC", 1, host, port)
    @get(path="/{data}")
    def get_echo(self, request):
        # Always answers 'blah' regardless of the path parameter.
        return Response(status=200, body='blah'.encode())
class ServiceA(TCPService):
    """TCP service exposing a single ``echo`` RPC."""
    def __init__(self, host, port):
        super().__init__("ServiceA", 1, host, port)
    @api
    def echo(self, data):
        # Return the payload unchanged to the caller.
        return data
class ServiceClientA(TCPServiceClient):
    """Client stub for ServiceA's ``echo`` RPC."""
    def __init__(self):
        super().__init__("ServiceA", 1)
    @request
    def echo(self, data):
        # vyked's @request serialises the returned locals() as the payload.
        return locals()
class ServiceB(HTTPService):
    """HTTP front-end that proxies ``/<data>`` to ServiceA's ``echo`` RPC."""
    def __init__(self, host, port, client_a):
        self._client_a = client_a
        super().__init__("ServiceB", 1, host, port)
    @get(path="/{data}")
    def get_echo(self, request):
        # Forward the path segment to ServiceA and relay its reply.
        data = request.match_info.get('data')
        d = yield from self._client_a.echo(data)
        return Response(status=200, body=d.encode())
def start_registry():
    """Run a vyked registry on port 4500 (blocking; run in a subprocess)."""
    repository = Repository()
    registry = Registry(None, 4500, repository)
    registry.start()
def start_servicea():
    """Run ServiceA on port 4501 against the local registry (blocking)."""
    service_a = ServiceA(host='0.0.0.0', port=4501)
    Host.configure(registry_host='127.0.0.1', registry_port=4500,
                   pubsub_host='127.0.0.1', pubsub_port=6379, name='service_a')
    Host.attach_tcp_service(service_a)
    Host.run()
def start_serviceb():
    """Run ServiceB (HTTP, port 4503) wired to a ServiceA client (blocking)."""
    client_a = ServiceClientA()
    service_b = ServiceB(host='0.0.0.0', port=4503, client_a=client_a)
    service_b.clients = [client_a]
    Host.configure(registry_host='127.0.0.1', registry_port=4500,
                   pubsub_host='127.0.0.1', pubsub_port=6379, name='service_b')
    Host.attach_http_service(service_b)
    Host.run()
def setup_module():
    """pytest module fixture: spawn registry + both services as subprocesses,
    then wait for them to boot."""
    global processes
    for target in [start_registry, start_servicea, start_serviceb]:
        p = multiprocessing.Process(target=target)
        p.start()
        processes.append(p)
    # allow the subsystems to start up.
    # sleep for awhile
    import time
    time.sleep(5)
def teardown_module():
    """pytest module fixture: kill the subprocesses started in setup_module."""
    for p in processes:
        p.terminate()
def test_service_b():
    # End-to-end: HTTP GET to ServiceB, which RPCs ServiceA's echo over TCP.
    url = 'http://127.0.0.1:4503/blah'
    r = requests.get(url)
    assert r.text == 'blah'
    assert r.status_code == 200
| mit | 2ce62ce807892d7bea7cd41f10d18873 | 22.457143 | 79 | 0.623224 | 3.378601 | false | false | false | false |
fastavro/fastavro | fastavro/_read_py.py | 1 | 35221 | # cython: auto_cpdef=True
"""Python code for reading AVRO files"""
# This code is a modified version of the code at
# http://svn.apache.org/viewvc/avro/trunk/lang/py/src/avro/ which is under
# Apache 2.0 license (http://www.apache.org/licenses/LICENSE-2.0)
import bz2
import json
import lzma
import zlib
from datetime import datetime, timezone
from decimal import Context
from io import BytesIO
from struct import error as StructError
from typing import IO, Union, Optional, Generic, TypeVar, Iterator, Dict
from .io.binary_decoder import BinaryDecoder
from .io.json_decoder import AvroJSONDecoder
from .logical_readers import LOGICAL_READERS
from .schema import (
extract_record_type,
is_nullable_union,
extract_logical_type,
parse_schema,
)
from .types import Schema, AvroMessage, NamedSchemas
from ._read_common import (
SchemaResolutionError,
MAGIC,
SYNC_SIZE,
HEADER_SCHEMA,
missing_codec_lib,
)
from .const import NAMED_TYPES, AVRO_TYPES
T = TypeVar("T")
decimal_context = Context()
epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)
epoch_naive = datetime(1970, 1, 1)
def match_types(writer_type, reader_type):
    """Return truthy when a value written as ``writer_type`` may be read as
    ``reader_type`` under Avro's schema-resolution promotion rules."""
    if isinstance(writer_type, list) or isinstance(reader_type, list):
        # Union involvement is resolved later, in read_union.
        return True
    if isinstance(writer_type, dict) or isinstance(reader_type, dict):
        try:
            return match_schemas(writer_type, reader_type)
        except SchemaResolutionError:
            return False
    if writer_type == reader_type:
        return True
    # Allowed primitive promotions: writer type -> acceptable reader types.
    promotions = {
        "int": ("long", "float", "double"),
        "long": ("float", "double"),
        "float": ("double",),
        "string": ("bytes",),
        "bytes": ("string",),
    }
    return reader_type in promotions.get(writer_type, ())
def match_schemas(w_schema, r_schema):
    """Resolve the writer schema against the reader schema.

    Returns the reader schema (or reader type name) to decode with, or
    raises SchemaResolutionError when the two are incompatible.
    """
    error_msg = f"Schema mismatch: {w_schema} is not {r_schema}"
    if isinstance(w_schema, list):
        # If the writer is a union, checks will happen in read_union after the
        # correct schema is known
        return r_schema
    elif isinstance(r_schema, list):
        # If the reader is a union, ensure one of the new schemas is the same
        # as the writer
        for schema in r_schema:
            if match_types(w_schema, schema):
                return schema
        else:
            # for/else: no compatible branch found in the reader union.
            raise SchemaResolutionError(error_msg)
    else:
        # Check for dicts as primitive types are just strings
        if isinstance(w_schema, dict):
            w_type = w_schema["type"]
        else:
            w_type = w_schema
        if isinstance(r_schema, dict):
            r_type = r_schema["type"]
        else:
            r_type = r_schema
        if w_type == r_type == "map":
            if match_types(w_schema["values"], r_schema["values"]):
                return r_schema
        elif w_type == r_type == "array":
            if match_types(w_schema["items"], r_schema["items"]):
                return r_schema
        elif w_type in NAMED_TYPES and r_type in NAMED_TYPES:
            # fixed types must agree on size exactly.
            if w_type == r_type == "fixed" and w_schema["size"] != r_schema["size"]:
                raise SchemaResolutionError(
                    f"Schema mismatch: {w_schema} size is different than {r_schema} size"
                )
            # Names match on the unqualified part, or via the reader's aliases.
            w_unqual_name = w_schema["name"].split(".")[-1]
            r_unqual_name = r_schema["name"].split(".")[-1]
            if w_unqual_name == r_unqual_name or w_schema["name"] in r_schema.get(
                "aliases", []
            ):
                return r_schema
        elif w_type not in AVRO_TYPES and r_type in NAMED_TYPES:
            # Writer refers to a named type by name; compare against the
            # reader's declared name.
            if match_types(w_type, r_schema["name"]):
                return r_schema["name"]
        elif match_types(w_type, r_type):
            return r_schema
        raise SchemaResolutionError(error_msg)
def read_null(
decoder,
writer_schema=None,
named_schemas=None,
reader_schema=None,
return_record_name=False,
return_record_name_override=False,
):
return decoder.read_null()
def skip_null(decoder, writer_schema=None, named_schemas=None):
decoder.read_null()
def read_boolean(
decoder,
writer_schema=None,
named_schemas=None,
reader_schema=None,
return_record_name=False,
return_record_name_override=False,
):
return decoder.read_boolean()
def skip_boolean(decoder, writer_schema=None, named_schemas=None):
decoder.read_boolean()
def read_int(
decoder,
writer_schema=None,
named_schemas=None,
reader_schema=None,
return_record_name=False,
return_record_name_override=False,
):
return decoder.read_int()
def skip_int(decoder, writer_schema=None, named_schemas=None):
decoder.read_int()
def read_long(
decoder,
writer_schema=None,
named_schemas=None,
reader_schema=None,
return_record_name=False,
return_record_name_override=False,
):
return decoder.read_long()
def skip_long(decoder, writer_schema=None, named_schemas=None):
decoder.read_long()
def read_float(
decoder,
writer_schema=None,
named_schemas=None,
reader_schema=None,
return_record_name=False,
return_record_name_override=False,
):
return decoder.read_float()
def skip_float(decoder, writer_schema=None, named_schemas=None):
decoder.read_float()
def read_double(
decoder,
writer_schema=None,
named_schemas=None,
reader_schema=None,
return_record_name=False,
return_record_name_override=False,
):
return decoder.read_double()
def skip_double(decoder, writer_schema=None, named_schemas=None):
decoder.read_double()
def read_bytes(
decoder,
writer_schema=None,
named_schemas=None,
reader_schema=None,
return_record_name=False,
return_record_name_override=False,
):
return decoder.read_bytes()
def skip_bytes(decoder, writer_schema=None, named_schemas=None):
decoder.read_bytes()
def read_utf8(
decoder,
writer_schema=None,
named_schemas=None,
reader_schema=None,
return_record_name=False,
return_record_name_override=False,
):
return decoder.read_utf8()
def skip_utf8(decoder, writer_schema=None, named_schemas=None):
decoder.read_utf8()
def read_fixed(
    decoder,
    writer_schema,
    named_schemas=None,
    reader_schema=None,
    return_record_name=False,
    return_record_name_override=False,
):
    """Read a fixed: exactly ``size`` bytes, with no length prefix."""
    size = writer_schema["size"]
    return decoder.read_fixed(size)
def skip_fixed(decoder, writer_schema, named_schemas=None):
    """Skip a fixed value: consume exactly ``size`` bytes."""
    size = writer_schema["size"]
    decoder.read_fixed(size)
def read_enum(
    decoder,
    writer_schema,
    named_schemas,
    reader_schema=None,
    return_record_name=False,
    return_record_name_override=False,
):
    """Read an enum: the encoded int indexes into the writer's symbol list.

    With a reader schema, a symbol the reader does not know resolves to the
    reader's default (if declared) or raises SchemaResolutionError.
    """
    symbol = writer_schema["symbols"][decoder.read_enum()]
    if reader_schema and symbol not in reader_schema["symbols"]:
        default = reader_schema.get("default")
        if default:
            return default
        else:
            symlist = reader_schema["symbols"]
            msg = f"{symbol} not found in reader symbol list {symlist}"
            raise SchemaResolutionError(msg)
    return symbol
def skip_enum(decoder, writer_schema, named_schemas):
    """Skip an enum value: consume the symbol index without resolving it."""
    decoder.read_enum()
def read_array(
decoder,
writer_schema,
named_schemas,
reader_schema=None,
return_record_name=False,
return_record_name_override=False,
):
if reader_schema:
def item_reader(
decoder, w_schema, r_schema, return_record_name, return_record_name_override
):
return read_data(
decoder,
w_schema["items"],
named_schemas,
r_schema["items"],
return_record_name,
return_record_name_override,
)
else:
def item_reader(
decoder, w_schema, r_schema, return_record_name, return_record_name_override
):
return read_data(
decoder,
w_schema["items"],
named_schemas,
None,
return_record_name,
return_record_name_override,
)
read_items = []
decoder.read_array_start()
for item in decoder.iter_array():
read_items.append(
item_reader(
decoder,
writer_schema,
reader_schema,
return_record_name,
return_record_name_override,
)
)
decoder.read_array_end()
return read_items
def skip_array(decoder, writer_schema, named_schemas):
    """Skip an array: walk every block/item without building a list."""
    decoder.read_array_start()
    for item in decoder.iter_array():
        skip_data(decoder, writer_schema["items"], named_schemas)
    decoder.read_array_end()
def read_map(
decoder,
writer_schema,
named_schemas,
reader_schema=None,
return_record_name=False,
return_record_name_override=False,
):
if reader_schema:
def item_reader(decoder, w_schema, r_schema):
return read_data(
decoder,
w_schema["values"],
named_schemas,
r_schema["values"],
return_record_name,
return_record_name_override,
)
else:
def item_reader(decoder, w_schema, r_schema):
return read_data(
decoder,
w_schema["values"],
named_schemas,
None,
return_record_name,
return_record_name_override,
)
read_items = {}
decoder.read_map_start()
for item in decoder.iter_map():
key = decoder.read_utf8()
read_items[key] = item_reader(decoder, writer_schema, reader_schema)
decoder.read_map_end()
return read_items
def skip_map(decoder, writer_schema, named_schemas):
    """Skip a map: consume every key (string) and value without building a dict."""
    decoder.read_map_start()
    for item in decoder.iter_map():
        decoder.read_utf8()
        skip_data(decoder, writer_schema["values"], named_schemas)
    decoder.read_map_end()
def read_union(
    decoder,
    writer_schema,
    named_schemas,
    reader_schema=None,
    return_record_name=False,
    return_record_name_override=False,
):
    """Read a union: a long index selects the writer branch, then the value
    is decoded with that branch's schema.

    With ``return_record_name`` set, record (and other named-type) branches
    come back as ``(name, value)`` tuples so the caller can tell branches
    apart; ``return_record_name_override`` suppresses the tuple for
    nullable unions with a single non-null branch.
    """
    # schema resolution
    index = decoder.read_index()
    idx_schema = writer_schema[index]
    if reader_schema:
        # Handle case where the reader schema is just a single type (not union)
        if not isinstance(reader_schema, list):
            if match_types(idx_schema, reader_schema):
                return read_data(
                    decoder,
                    idx_schema,
                    named_schemas,
                    reader_schema,
                    return_record_name,
                    return_record_name_override,
                )
        else:
            # First reader branch compatible with the writer branch wins.
            for schema in reader_schema:
                if match_types(idx_schema, schema):
                    return read_data(
                        decoder,
                        idx_schema,
                        named_schemas,
                        schema,
                        return_record_name,
                        return_record_name_override,
                    )
        msg = f"schema mismatch: {writer_schema} not found in {reader_schema}"
        raise SchemaResolutionError(msg)
    else:
        if return_record_name_override and is_nullable_union(writer_schema):
            # ["null", X]-style union: the branch is unambiguous, so no tuple.
            return read_data(
                decoder,
                idx_schema,
                named_schemas,
                None,
                return_record_name,
                return_record_name_override,
            )
        elif return_record_name and extract_record_type(idx_schema) == "record":
            return (
                idx_schema["name"],
                read_data(
                    decoder,
                    idx_schema,
                    named_schemas,
                    None,
                    return_record_name,
                    return_record_name_override,
                ),
            )
        elif return_record_name and extract_record_type(idx_schema) not in AVRO_TYPES:
            # idx_schema is a named type
            return (
                named_schemas["writer"][idx_schema]["name"],
                read_data(
                    decoder,
                    idx_schema,
                    named_schemas,
                    None,
                    return_record_name,
                    return_record_name_override,
                ),
            )
        else:
            return read_data(decoder, idx_schema, named_schemas)
def skip_union(decoder, writer_schema, named_schemas):
    """Skip a union: read the branch index, then skip that branch's value."""
    # schema resolution
    index = decoder.read_index()
    skip_data(decoder, writer_schema[index], named_schemas)
def read_record(
decoder,
writer_schema,
named_schemas,
reader_schema=None,
return_record_name=False,
return_record_name_override=False,
):
"""A record is encoded by encoding the values of its fields in the order
that they are declared. In other words, a record is encoded as just the
concatenation of the encodings of its fields. Field values are encoded per
their schema.
Schema Resolution:
* the ordering of fields may be different: fields are matched by name.
* schemas for fields with the same name in both records are resolved
recursively.
* if the writer's record contains a field with a name not present in the
reader's record, the writer's value for that field is ignored.
* if the reader's record schema has a field that contains a default value,
and writer's schema does not have a field with the same name, then the
reader should use the default value from its field.
* if the reader's record schema has a field with no default value, and
writer's schema does not have a field with the same name, then the
field's value is unset.
"""
record = {}
if reader_schema is None:
for field in writer_schema["fields"]:
record[field["name"]] = read_data(
decoder,
field["type"],
named_schemas,
None,
return_record_name,
return_record_name_override,
)
else:
readers_field_dict = {}
aliases_field_dict = {}
for f in reader_schema["fields"]:
readers_field_dict[f["name"]] = f
for alias in f.get("aliases", []):
aliases_field_dict[alias] = f
for field in writer_schema["fields"]:
readers_field = readers_field_dict.get(
field["name"],
aliases_field_dict.get(field["name"]),
)
if readers_field:
record[readers_field["name"]] = read_data(
decoder,
field["type"],
named_schemas,
readers_field["type"],
return_record_name,
return_record_name_override,
)
else:
skip_data(decoder, field["type"], named_schemas)
# fill in default values
if len(readers_field_dict) > len(record):
writer_fields = [f["name"] for f in writer_schema["fields"]]
for f_name, field in readers_field_dict.items():
if f_name not in writer_fields and f_name not in record:
if "default" in field:
record[field["name"]] = field["default"]
else:
msg = f'No default value for {field["name"]}'
raise SchemaResolutionError(msg)
return record
def skip_record(decoder, writer_schema, named_schemas):
    """Skip a record: fields are concatenated, so skip each in declared order."""
    for field in writer_schema["fields"]:
        skip_data(decoder, field["type"], named_schemas)
READERS = {
"null": read_null,
"boolean": read_boolean,
"string": read_utf8,
"int": read_int,
"long": read_long,
"float": read_float,
"double": read_double,
"bytes": read_bytes,
"fixed": read_fixed,
"enum": read_enum,
"array": read_array,
"map": read_map,
"union": read_union,
"error_union": read_union,
"record": read_record,
"error": read_record,
"request": read_record,
}
SKIPS = {
"null": skip_null,
"boolean": skip_boolean,
"string": skip_utf8,
"int": skip_int,
"long": skip_long,
"float": skip_float,
"double": skip_double,
"bytes": skip_bytes,
"fixed": skip_fixed,
"enum": skip_enum,
"array": skip_array,
"map": skip_map,
"union": skip_union,
"error_union": skip_union,
"record": skip_record,
"error": skip_record,
"request": skip_record,
}
def maybe_promote(data, writer_type, reader_type):
    """Convert ``data`` from the writer's primitive type to the reader's,
    per Avro promotion rules; return it unchanged when no conversion is
    needed (int -> long is a no-op in Python)."""
    if writer_type in ("int", "long") and reader_type in ("float", "double"):
        return float(data)
    if writer_type == "string" and reader_type == "bytes":
        return data.encode()
    if writer_type == "bytes" and reader_type == "string":
        return data.decode()
    return data
def read_data(
    decoder,
    writer_schema,
    named_schemas,
    reader_schema=None,
    return_record_name=False,
    return_record_name_override=False,
):
    """Read one value from ``decoder`` according to ``writer_schema``,
    optionally resolving against ``reader_schema`` and applying logical-type
    conversion and primitive promotion."""
    record_type = extract_record_type(writer_schema)
    if reader_schema:
        reader_schema = match_schemas(writer_schema, reader_schema)
    reader_fn = READERS.get(record_type)
    if reader_fn:
        try:
            data = reader_fn(
                decoder,
                writer_schema,
                named_schemas,
                reader_schema,
                return_record_name,
                return_record_name_override,
            )
        except StructError:
            # Truncated input surfaces as a struct unpack error; re-raise as EOF.
            raise EOFError(f"cannot read {record_type} from {decoder.fo}")
        # For primitive schemas writer_schema is a plain string, so this is a
        # substring test -- which is False for all primitive type names.
        if "logicalType" in writer_schema:
            logical_type = extract_logical_type(writer_schema)
            fn = LOGICAL_READERS.get(logical_type)
            if fn:
                return fn(data, writer_schema, reader_schema)
        if reader_schema is not None:
            return maybe_promote(data, record_type, extract_record_type(reader_schema))
        else:
            return data
    else:
        # Not a built-in type: record_type names a previously defined schema;
        # resolve it and recurse.
        return read_data(
            decoder,
            named_schemas["writer"][record_type],
            named_schemas,
            named_schemas["reader"].get(reader_schema),
            return_record_name,
            return_record_name_override,
        )
def skip_data(decoder, writer_schema, named_schemas):
    """Advance the decoder past one value of ``writer_schema`` without
    building a Python object (used when the reader drops a field)."""
    record_type = extract_record_type(writer_schema)
    reader_fn = SKIPS.get(record_type)
    if reader_fn:
        reader_fn(decoder, writer_schema, named_schemas)
    else:
        # Named-type reference: resolve it and skip that schema instead.
        skip_data(decoder, named_schemas["writer"][record_type], named_schemas)
def skip_sync(fo, sync_marker):
    """Skip an expected sync marker, complaining if it doesn't match"""
    # The marker separates data blocks; a mismatch indicates a corrupt file.
    if fo.read(SYNC_SIZE) != sync_marker:
        raise ValueError("expected sync marker not found")
def null_read_block(decoder):
    """Read block in "null" codec."""
    # Uncompressed: the length-prefixed payload is used as-is.
    return BytesIO(decoder.read_bytes())
def deflate_read_block(decoder):
    """Read block in "deflate" codec."""
    data = decoder.read_bytes()
    # -15 is the log of the window size; negative indicates "raw" (no
    # zlib headers) decompression. See zlib.h.
    return BytesIO(zlib.decompressobj(-15).decompress(data))
def bzip2_read_block(decoder):
    """Read block in "bzip2" codec."""
    data = decoder.read_bytes()
    return BytesIO(bz2.decompress(data))
def xz_read_block(decoder):
    """Read block in "xz" codec."""
    # Length-prefixed fixed read (this codec does not use read_bytes).
    length = read_long(decoder)
    data = decoder.read_fixed(length)
    return BytesIO(lzma.decompress(data))
BLOCK_READERS = {
"null": null_read_block,
"deflate": deflate_read_block,
"bzip2": bzip2_read_block,
"xz": xz_read_block,
}
def snappy_read_block(decoder):
    """Read block in "snappy" codec."""
    length = read_long(decoder)
    data = decoder.read_fixed(length - 4)
    # Trailing 4 bytes are a CRC of the uncompressed data; consumed but not verified.
    decoder.read_fixed(4)  # CRC
    return BytesIO(snappy.decompress(data))
try:
import snappy
except ImportError:
BLOCK_READERS["snappy"] = missing_codec_lib("snappy", "python-snappy")
else:
BLOCK_READERS["snappy"] = snappy_read_block
def zstandard_read_block(decoder):
    """Read block in "zstandard" codec."""
    length = read_long(decoder)
    data = decoder.read_fixed(length)
    return BytesIO(zstd.ZstdDecompressor().decompressobj().decompress(data))
try:
import zstandard as zstd
except ImportError:
BLOCK_READERS["zstandard"] = missing_codec_lib("zstandard", "zstandard")
else:
BLOCK_READERS["zstandard"] = zstandard_read_block
def lz4_read_block(decoder):
    """Read block in "lz4" codec."""
    length = read_long(decoder)
    data = decoder.read_fixed(length)
    return BytesIO(lz4.block.decompress(data))
try:
import lz4.block
except ImportError:
BLOCK_READERS["lz4"] = missing_codec_lib("lz4", "lz4")
else:
BLOCK_READERS["lz4"] = lz4_read_block
def _iter_avro_records(
decoder,
header,
codec,
writer_schema,
named_schemas,
reader_schema,
return_record_name=False,
return_record_name_override=False,
):
"""Return iterator over avro records."""
sync_marker = header["sync"]
read_block = BLOCK_READERS.get(codec)
if not read_block:
raise ValueError(f"Unrecognized codec: {codec}")
block_count = 0
while True:
try:
block_count = decoder.read_long()
except StopIteration:
return
block_fo = read_block(decoder)
for i in range(block_count):
yield read_data(
BinaryDecoder(block_fo),
writer_schema,
named_schemas,
reader_schema,
return_record_name,
return_record_name_override,
)
skip_sync(decoder.fo, sync_marker)
def _iter_avro_blocks(
decoder,
header,
codec,
writer_schema,
named_schemas,
reader_schema,
return_record_name=False,
return_record_name_override=False,
):
"""Return iterator over avro blocks."""
sync_marker = header["sync"]
read_block = BLOCK_READERS.get(codec)
if not read_block:
raise ValueError(f"Unrecognized codec: {codec}")
while True:
offset = decoder.fo.tell()
try:
num_block_records = decoder.read_long()
except StopIteration:
return
block_bytes = read_block(decoder)
skip_sync(decoder.fo, sync_marker)
size = decoder.fo.tell() - offset
yield Block(
block_bytes,
num_block_records,
codec,
reader_schema,
writer_schema,
named_schemas,
offset,
size,
return_record_name,
return_record_name_override,
)
class Block:
"""An avro block. Will yield records when iterated over
.. attribute:: num_records
Number of records in the block
.. attribute:: writer_schema
The schema used when writing
.. attribute:: reader_schema
The schema used when reading (if provided)
.. attribute:: offset
Offset of the block from the beginning of the avro file
.. attribute:: size
Size of the block in bytes
"""
def __init__(
self,
bytes_,
num_records,
codec,
reader_schema,
writer_schema,
named_schemas,
offset,
size,
return_record_name=False,
return_record_name_override=False,
):
self.bytes_ = bytes_
self.num_records = num_records
self.codec = codec
self.reader_schema = reader_schema
self.writer_schema = writer_schema
self._named_schemas = named_schemas
self.offset = offset
self.size = size
self.return_record_name = return_record_name
self.return_record_name_override = return_record_name_override
def __iter__(self):
for i in range(self.num_records):
yield read_data(
BinaryDecoder(self.bytes_),
self.writer_schema,
self._named_schemas,
self.reader_schema,
self.return_record_name,
self.return_record_name_override,
)
def __str__(self):
return (
f"Avro block: {len(self.bytes_)} bytes, "
+ f"{self.num_records} records, "
+ f"codec: {self.codec}, position {self.offset}+{self.size}"
)
class file_reader(Generic[T]):
def __init__(
self,
fo_or_decoder,
reader_schema=None,
return_record_name=False,
return_record_name_override=False,
):
if isinstance(fo_or_decoder, AvroJSONDecoder):
self.decoder = fo_or_decoder
else:
# If a decoder was not provided, assume binary
self.decoder = BinaryDecoder(fo_or_decoder)
self._named_schemas = {"writer": {}, "reader": {}}
if reader_schema:
self.reader_schema = parse_schema(
reader_schema, self._named_schemas["reader"], _write_hint=False
)
else:
self.reader_schema = None
self.return_record_name = return_record_name
self.return_record_name_override = return_record_name_override
self._elems = None
def _read_header(self):
try:
self._header = read_data(
self.decoder,
HEADER_SCHEMA,
self._named_schemas,
None,
self.return_record_name,
self.return_record_name_override,
)
except (StopIteration, EOFError):
raise ValueError("cannot read header - is it an avro file?")
# `meta` values are bytes. So, the actual decoding has to be external.
self.metadata = {k: v.decode() for k, v in self._header["meta"].items()}
self._schema = json.loads(self.metadata["avro.schema"])
self.codec = self.metadata.get("avro.codec", "null")
# Always parse the writer schema since it might have named types that
# need to be stored in self._named_types
self.writer_schema = parse_schema(
self._schema, self._named_schemas["writer"], _write_hint=False, _force=True
)
@property
def schema(self):
import warnings
warnings.warn(
"The 'schema' attribute is deprecated. Please use 'writer_schema'",
DeprecationWarning,
)
return self._schema
def __iter__(self) -> Iterator[T]:
if not self._elems:
raise NotImplementedError
return self._elems
def __next__(self) -> T:
return next(self._elems)
class reader(file_reader[AvroMessage]):
"""Iterator over records in an avro file.
Parameters
----------
fo
File-like object to read from
reader_schema
Reader schema
return_record_name
If true, when reading a union of records, the result will be a tuple
where the first value is the name of the record and the second value is
the record itself
return_record_name_override
If true, this will modify the behavior of return_record_name so that
the record name is only returned for unions where there is more than
one record. For unions that only have one record, this option will make
it so that the record is returned by itself, not a tuple with the name.
Example::
from fastavro import reader
with open('some-file.avro', 'rb') as fo:
avro_reader = reader(fo)
for record in avro_reader:
process_record(record)
The `fo` argument is a file-like object so another common example usage
would use an `io.BytesIO` object like so::
from io import BytesIO
from fastavro import writer, reader
fo = BytesIO()
writer(fo, schema, records)
fo.seek(0)
for record in reader(fo):
process_record(record)
.. attribute:: metadata
Key-value pairs in the header metadata
.. attribute:: codec
The codec used when writing
.. attribute:: writer_schema
The schema used when writing
.. attribute:: reader_schema
The schema used when reading (if provided)
"""
def __init__(
self,
fo: Union[IO, AvroJSONDecoder],
reader_schema: Optional[Schema] = None,
return_record_name: bool = False,
return_record_name_override: bool = False,
):
super().__init__(
fo, reader_schema, return_record_name, return_record_name_override
)
if isinstance(self.decoder, AvroJSONDecoder):
self.decoder.configure(self.reader_schema, self._named_schemas["reader"])
self.writer_schema = self.reader_schema
self.reader_schema = None
self._named_schemas["writer"] = self._named_schemas["reader"]
self._named_schemas["reader"] = {}
def _elems():
while not self.decoder.done:
yield read_data(
self.decoder,
self.writer_schema,
self._named_schemas,
self.reader_schema,
self.return_record_name,
self.return_record_name_override,
)
self.decoder.drain()
self._elems = _elems()
else:
self._read_header()
self._elems = _iter_avro_records(
self.decoder,
self._header,
self.codec,
self.writer_schema,
self._named_schemas,
self.reader_schema,
self.return_record_name,
self.return_record_name_override,
)
class block_reader(file_reader[Block]):
"""Iterator over :class:`.Block` in an avro file.
Parameters
----------
fo
Input stream
reader_schema
Reader schema
return_record_name
If true, when reading a union of records, the result will be a tuple
where the first value is the name of the record and the second value is
the record itself
return_record_name_override
If true, this will modify the behavior of return_record_name so that
the record name is only returned for unions where there is more than
one record. For unions that only have one record, this option will make
it so that the record is returned by itself, not a tuple with the name.
Example::
from fastavro import block_reader
with open('some-file.avro', 'rb') as fo:
avro_reader = block_reader(fo)
for block in avro_reader:
process_block(block)
.. attribute:: metadata
Key-value pairs in the header metadata
.. attribute:: codec
The codec used when writing
.. attribute:: writer_schema
The schema used when writing
.. attribute:: reader_schema
The schema used when reading (if provided)
"""
def __init__(
self,
fo: IO,
reader_schema: Optional[Schema] = None,
return_record_name: bool = False,
return_record_name_override: bool = False,
):
super().__init__(
fo, reader_schema, return_record_name, return_record_name_override
)
self._read_header()
self._elems = _iter_avro_blocks(
self.decoder,
self._header,
self.codec,
self.writer_schema,
self._named_schemas,
self.reader_schema,
self.return_record_name,
self.return_record_name_override,
)
def schemaless_reader(
fo: IO,
writer_schema: Schema,
reader_schema: Optional[Schema] = None,
return_record_name: bool = False,
return_record_name_override: bool = False,
) -> AvroMessage:
"""Reads a single record written using the
:meth:`~fastavro._write_py.schemaless_writer`
Parameters
----------
fo
Input stream
writer_schema
Schema used when calling schemaless_writer
reader_schema
If the schema has changed since being written then the new schema can
be given to allow for schema migration
return_record_name
If true, when reading a union of records, the result will be a tuple
where the first value is the name of the record and the second value is
the record itself
return_record_name_override
If true, this will modify the behavior of return_record_name so that
the record name is only returned for unions where there is more than
one record. For unions that only have one record, this option will make
it so that the record is returned by itself, not a tuple with the name.
Example::
parsed_schema = fastavro.parse_schema(schema)
with open('file', 'rb') as fp:
record = fastavro.schemaless_reader(fp, parsed_schema)
Note: The ``schemaless_reader`` can only read a single record.
"""
if writer_schema == reader_schema:
# No need for the reader schema if they are the same
reader_schema = None
named_schemas: Dict[str, NamedSchemas] = {"writer": {}, "reader": {}}
writer_schema = parse_schema(writer_schema, named_schemas["writer"])
if reader_schema:
reader_schema = parse_schema(reader_schema, named_schemas["reader"])
decoder = BinaryDecoder(fo)
return read_data(
decoder,
writer_schema,
named_schemas,
reader_schema,
return_record_name,
return_record_name_override,
)
def is_avro(path_or_buffer: Union[str, IO]) -> bool:
    """Return True if path (or buffer) points to an Avro file. This will only
    work for avro files that contain the normal avro schema header like those
    create from :func:`~fastavro._write_py.writer`. This function is not intended
    to be used with binary data created from
    :func:`~fastavro._write_py.schemaless_writer` since that does not include the
    avro header.

    Parameters
    ----------
    path_or_buffer
        Path to file
    """
    if isinstance(path_or_buffer, str):
        fp: IO = open(path_or_buffer, "rb")
        try:
            return fp.read(len(MAGIC)) == MAGIC
        finally:
            # We opened it, so we close it.
            fp.close()
    # Caller-owned buffer: read from the current position and leave it open.
    return path_or_buffer.read(len(MAGIC)) == MAGIC
| mit | 751aa81b4253fd583cede26932c849c2 | 27.335479 | 89 | 0.582948 | 4.074618 | false | false | false | false |
bugsnag/bugsnag-python | example/flask/server.py | 1 | 6176 | # https://docs.bugsnag.com/platforms/python/flask
#
# this example app demonstrates some of the basic syntax to get Bugsnag error
# reporting configured in your Python Flask code. The key steps are:
#
# 1. call `bugsnag.configure(api_key={your api key})`
# 2. attach the bugsnag.flask `handle_exceptions` wrapper to your app
# ***********************************************************
from flask import Flask, request, render_template
import bugsnag
# Import platform specific Bugsnag features
from bugsnag.flask import handle_exceptions
app = Flask(__name__)
# Initialize Bugsnag to begin tracking errors. Only an api key is required, but
# here are some other helpful configuration details:
bugsnag.configure(
# get your own api key at bugsnag.com
api_key='YOUR_API_KEY_HERE',
# if you track deploys or session rates, make sure to set the correct
# version.
app_version='1.2.3',
# By default, requests are sent asynchronously. If you would like to block
# until the request is done, you can set to false
asynchronous=True,
# Defaults to True, this allows you to log each session which will be used
# to calculate crash rates in your dashboard for each release.
auto_capture_sessions=True,
# Sets which exception classes should never be sent to Bugsnag.
ignore_classes=['Http404', 'DontCare'],
# Defines the release stage for all events that occur in this app.
release_stage='development',
# Defines which release stages bugsnag should report. e.g. ignore staging
# errors.
notify_release_stages=['development', 'production'],
# Any param key that contains one of these strings will be filtered out of
# all error reports.
params_filters=["credit_card_number", "password", "ssn"],
# We mark stacktrace lines as inProject if they come from files inside
# root:
# project_root = "/path/to/your/app",
# Useful if you are wrapping bugsnag.notify() in with your own library, to
# ensure errors group properly.
# traceback_exclude_module = [myapp.custom_logging],
)
# Attach Bugsnag to flask's exception handler
handle_exceptions(app)
# A callback function attached to the Bugsnag client runs right before each
# and every report is sent to the api; it can evaluate and modify the report
# data.
def callback(notification):
    """
    This callback will evaluate and modify every exception report, handled and
    unhandled, that occurs within the app, right before it is sent to Bugsnag.
    """
    # Attach user info and metadata to every report. In a real app these
    # details would be pulled from the session.
    notification.user = {
        'name': 'Alan Turing',
        'email': 'turing@code.net',
        'id': '1234567890',
    }
    notification.add_tab('company', {'name': 'Stark Industries'})
    if notification.context == 'GET /crashcallback':
        # The callback evaluates all exceptions, but only errors raised by
        # @app.route('/crashcallback') get the extra diagnostics tab below.
        diagnostics = {
            'message': 'Flask demo: Everything is fine',
            'status': 200,
            'password': 'password1',  # this will be filtered
        }
        notification.add_tab('Diagnostics', diagnostics)
    # Returning False from the callback would cancel the entire error report.


# Attach the callback to Bugsnag. Important to attach AFTER
# 'handle_exceptions(app)' above, so that the function will have full access
# to the exception data.
bugsnag.before_notify(callback)
@app.route('/')
def index():
    """Serve the demo landing page."""
    return render_template('index.html')


@app.route('/crashdict')
def crashdict():
    """
    Deliberately triggers an unhandled KeyError to be reported by the bugsnag
    exception handler, and crash the app.
    """
    custom_header = request.headers['my-custom-header']
    return 'Received your header: ' + custom_header


@app.route('/crashcallback')
def crashcallback():
    """
    Deliberately raises an unhandled error which will have diagnostic data
    attached by the global callback function, and crash the app.
    """
    raise Exception('SomethingBad')


@app.route('/handled')
def handle_zero_div():
    """
    Deliberately triggers a handled exception, and reports it to Bugsnag.
    """
    try:
        x = 1 / 0
        print('{} is x'.format(x))
    except Exception as e:
        bugsnag.notify(e)
    return (
        'The app hasn\'t crashed, but check '
        + '<a href=\"https://app.bugsnag.com\">app.bugsnag.com</a> to view'
        + ' notifications'
    )


@app.route('/notifywithmetadata')
def notifywithmetadata():
    """
    Manually notifies Bugsnag of a handled exception, with some metadata
    locally attached.
    """
    # This app adds some metadata globally, but specific details can also be
    # attached to a particular exception, as done here.
    extra_metadata = {
        'Request info': {
            'route': 'notifywithmetadata',
            'headers': request.headers,
        },
        'Resolve info': {
            'status': 200,
            'message': 'Metadata has been added to this notification',
        },
    }
    bugsnag.notify(
        Exception('Flask demo: Manual notification with metadata'),
        metadata=extra_metadata,
    )
    return (
        'Metadata was added to the notification, check '
        + '<a href=\"bugsnag.com\">bugsnag.com</a> '
        + ' to view on the "Request info" and "Resolve info" tabs'
    )


@app.route('/notifywithcontext')
def notifywithcontext():
    """
    Notifies Bugsnag of a handled exception, which has a modified 'context'
    attribute for the purpose of improving how these exceptions will group
    together in the Bugsnag dashboard, and a severity attribute that has been
    modifed to overwrite the default level (warning).
    """
    bugsnag.notify(
        Exception('Flask demo: Manual notification with context and severity'),
        context='notifywithcontext',
        severity='info',
    )
    return 'The context and severity were changed.'
| mit | 9e3855f13f92395c3b6b24f2b6d3f48a | 33.892655 | 79 | 0.656898 | 4.12008 | false | false | false | false |
flav-io/flavio | flavio/physics/betadecays/common.py | 1 | 1337 | """Common functions for beta decays."""
import flavio
from flavio.physics.edms.common import proton_charges
from flavio.physics.bdecays.wilsoncoefficients import get_wceff_fccc_std, get_CVLSM
from math import sqrt
def wc_eff(par, wc_obj, scale, nu):
    r"""Lee-Yang effective couplings for beta decays.

    See eqs. (2), (9) of arXiv:1803.08732.

    Parameters
    ----------
    - `par`: parameter dictionary
    - `wc_obj`: Wilson coefficient object
    - `scale`: renormalization scale in GeV
    - `nu`: neutrino flavour label passed to the charged-current
      Wilson coefficient function

    Returns a dict with the effective couplings `'V'`, `'A'`, `'S'`,
    `'P'`, `'T'`.
    """
    flavio.citations.register("Gonzalez-Alonso:2018omy")
    # Wilson coefficients of the d->u e nu transition
    wc = get_wceff_fccc_std(wc_obj, par, 'du', 'e', nu, None, scale, nf=3)
    # proton (isovector nucleon) charges
    g = proton_charges(par, scale)
    gV = g['gV_u-d']
    gA = g['gA_u-d']
    gS = g['gS_u-d']
    gP = g['gP_u-d']
    gT = g['gT_u-d']
    # radiative corrections
    # Note: CVLSM is the universal Marciano-Sirlin result that needs to be
    # divided out since it's already contained in the Deltas
    CVLSM = get_CVLSM(par, scale, nf=3)
    DeltaRV = par['DeltaRV']
    # not needed for superallowed, for neutron difference absorbed in lambda
    DeltaRA = DeltaRV
    rV = sqrt(1 + DeltaRV) / CVLSM
    rA = sqrt(1 + DeltaRA) / CVLSM
    # effective couplings; note that C_i' = C_i
    C = {}
    C['V'] = gV * (wc['VL'] * rV + wc['VR'])
    C['A'] = -gA * (wc['VL'] * rA - wc['VR'])
    C['S'] = gS * (wc['SL'] + wc['SR'])
    C['P'] = gP * (wc['SL'] - wc['SR'])
    C['T'] = 4 * gT * (wc['T'])
    return C
| mit | 49d6a1c870496fe972477f2b3db6c6b7 | 33.282051 | 95 | 0.593867 | 2.556405 | false | false | false | false |
flav-io/flavio | flavio/physics/bdecays/formfactors/b_p/bsz_parameters.py | 1 | 3118 | import numpy as np
import json
import pkgutil
from flavio.classes import Parameter
from flavio.statistics.probability import MultivariateNormalDistribution
# Form-factor names and z-expansion coefficient names for the BSZ
# parametrization of B -> P form factors.
FFs = ["f+", "fT", "f0"]
ai = ["a0", "a1", "a2"]
# All (form factor, coefficient) pairs, form factor as the outer loop,
# and the corresponding flat "aN_ff" string labels in the same order.
ff_a = [(ff, a) for ff in FFs for a in ai]
a_ff_string = ['{}_{}'.format(a, ff) for ff, a in ff_a]
# LaTeX names for the coefficients and form factors.
tex_a = {a: 'a_' + a[1] for a in ai}
tex_ff = {ff: 'f_' + ff[1] for ff in FFs}
def get_ffpar(filename):
    """Read central values, uncertainties and the correlation matrix of the
    BSZ coefficients from a JSON file shipped inside `flavio.physics`.

    Returns `[central, unc, corr]` with the dependent `a0_f0` entry removed
    (it is fixed by the exact kinematical relation f0(0) = f+(0))."""
    raw = pkgutil.get_data('flavio.physics', filename)
    data = json.loads(raw.decode('utf-8'))
    central = np.array([data['central'][ff].get(a, np.nan) for ff, a in ff_a])
    unc = np.array([data['uncertainty'][ff].get(a, np.nan) for ff, a in ff_a])
    corr = np.array([[data['correlation'][ff1 + ff2].get(a1 + a2, np.nan)
                      for ff1, a1 in ff_a]
                     for ff2, a2 in ff_a])
    # drop the dependent parameter a0_f0 from the vectors and both axes of
    # the correlation matrix
    idx = ff_a.index(('f0', 'a0'))
    central = np.delete(central, [idx])
    unc = np.delete(unc, [idx])
    corr = np.delete(corr, [idx], axis=0)
    corr = np.delete(corr, [idx], axis=1)
    return [central, unc, corr]
def load_parameters(filename, process, constraints):
    """Create (or refresh) the Parameter objects for the BSZ coefficients of
    `process` and constrain them with the multivariate Gaussian read from
    `filename`."""
    implementation_name = process + ' BSZ'
    parameter_names = [implementation_name + ' ' + coeff_name
                       for coeff_name in a_ff_string]
    # a0_f0 is not treated as independent parameter!
    parameter_names.remove(implementation_name + ' a0_f0')
    for parameter_name in parameter_names:
        try:  # check if parameter object already exists
            p = Parameter[parameter_name]
        except KeyError:  # if not, create a new one
            p = Parameter(parameter_name)
            # get LaTeX representation of coefficient and form factor names
            coeff_label, ff_label = parameter_name.split(' ')[-1].split('_')
            _tex_a = tex_a[coeff_label]
            _tex_ff = tex_ff[ff_label]
            p.tex = r'$' + _tex_a + r'^{' + _tex_ff + r'}$'
            p.description = (r'BSZ form factor parametrization coefficient $'
                             + _tex_a + r'$ of $' + _tex_ff + r'$')
        else:  # if parameter exists, remove existing constraints
            constraints.remove_constraint(parameter_name)
    [central, unc, corr] = get_ffpar(filename)
    constraints.add_constraint(
        parameter_names,
        MultivariateNormalDistribution(central_value=central,
                                       covariance=np.outer(unc, unc) * corr))
# Resonance masses (in GeV) used in arXiv:1811.00983, keyed by transition
# and by the BCL pole label ('m0' for scalar, 'm+' for vector).
resonance_masses_gkvd = {
    'B->K': {'m0': 5.630, 'm+': 5.412},
    'B->pi': {'m0': 5.540, 'm+': 5.325},
    'B->D': {'m0': 6.420, 'm+': 6.330},
}


def gkvd_load(version, fit, transitions, constraints):
    """Load the form factor parameters given in arXiv:1811.00983"""
    for transition in transitions:
        # fix the BCL pole masses to the values used in the paper
        for pole, mass in resonance_masses_gkvd[transition].items():
            constraints.set_constraint('{} BCL {}'.format(transition, pole), mass)
        datafile = 'data/arXiv-1811-00983{}/{}_{}.json'.format(
            version, transition.replace('->', ''), fit)
        load_parameters(datafile, transition, constraints)
| mit | 332442c17f8688e495ab4da7cb3e2c56 | 37.975 | 116 | 0.597178 | 2.963878 | false | false | false | false |
bugsnag/bugsnag-python | setup.py | 1 | 1653 | #!/usr/bin/env python
"""
Bugsnag
=======
The official Python notifier for `Bugsnag <https://bugsnag.com/>`_.
Provides support for automatically capturing and sending exceptions
in your Django and other Python apps to Bugsnag, to help you find
and solve your bugs as fast as possible.
"""
from setuptools import setup, find_packages
setup(
name='bugsnag',
version='4.3.0',
description='Automatic error monitoring for django, flask, etc.',
long_description=__doc__,
author='Simon Maynard',
author_email='simon@bugsnag.com',
url='https://bugsnag.com/',
license='MIT',
python_requires='>=3.5, <4',
packages=find_packages(include=['bugsnag', 'bugsnag.*']),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Topic :: Software Development'
],
package_data={
'bugsnag': ['py.typed'],
},
test_suite='tests',
install_requires=['webob'],
extras_require={
'flask': ['flask', 'blinker']
},
)
| mit | 42a09f1ca3ad393b67c604352acfcd19 | 30.788462 | 69 | 0.611615 | 4.021898 | false | false | false | false |
flav-io/flavio | flavio/statistics/test_functions.py | 1 | 1289 | import unittest
import numpy as np
import flavio
from flavio.statistics.functions import *
class TestFunctions(unittest.TestCase):
    """Numerical checks of the chi2 / pull / confidence-level helpers."""

    def test_deltachi2(self):
        self.assertEqual(delta_chi2(3, 1), 9)
        # reference values from standard chi2 tables
        for nsigma, dof, expected in [(1, 2, 2.30), (2, 3, 8.03), (3, 2, 11.83)]:
            self.assertAlmostEqual(delta_chi2(nsigma, dof), expected, delta=0.006)

    def test_pull(self):
        self.assertEqual(pull(9, 1), 3)
        for chi2, dof, expected in [(2.30, 2, 1), (8.03, 3, 2), (11.83, 2, 3)]:
            self.assertAlmostEqual(pull(chi2, dof), expected, delta=0.006)
        # round trip: pull(delta_chi2(x, dof), dof) == x
        self.assertAlmostEqual(pull(delta_chi2(4.52, 8), 8), 4.52, places=10)

    def test_cl(self):
        for nsigma, cl in [(1, 0.6826894921370859),
                           (2, 0.9544997361036416),
                           (5, 0.9999994266968562)]:
            self.assertAlmostEqual(confidence_level(nsigma), cl, places=10)

    def test_pvalue(self):
        # trivial
        self.assertAlmostEqual(pvalue(1, 1), 1 - 0.6826894921370859, places=10)
        # non-trivial: http://psychclassics.yorku.ca/Fisher/Methods/tabIII.gif
        self.assertAlmostEqual(pvalue(22.362, 13), 0.05, places=6)
flav-io/flavio | flavio/physics/bdecays/formfactors/b_p/bcl_parameters.py | 1 | 1188 | import yaml
import pkgutil
import numpy as np
from flavio.classes import Parameter
from flavio.statistics.probability import MultivariateNormalDistribution
def load_parameters(filename, constraints):
    """Read BCL form factor coefficients from a YAML file inside
    `flavio.physics` and add them to `constraints` as a single
    multivariate Gaussian.

    Fix: the parameter-existence check previously used a bare `except:`,
    which also swallows unrelated errors (even KeyboardInterrupt); it now
    catches only the KeyError raised when the Parameter does not exist,
    consistent with the BSZ loader.
    """
    f = pkgutil.get_data('flavio.physics', filename)
    ff_dict = yaml.safe_load(f)
    for parameter_name in ff_dict['parameters']:
        try:  # check if parameter object already exists
            p = Parameter[parameter_name]
        except KeyError:  # otherwise, create a new one
            p = Parameter(parameter_name)
        else:  # if parameter exists, remove existing constraints
            constraints.remove_constraint(parameter_name)
    covariance = np.outer(ff_dict['uncertainties'],
                          ff_dict['uncertainties']) * ff_dict['correlation']
    if not np.allclose(covariance, covariance.T):
        # if the covariance is not symmetric, it is assumed that only the
        # values above the diagonal are present.
        # then: M -> M + M^T - diag(M)
        covariance = covariance + covariance.T - np.diag(np.diag(covariance))
    constraints.add_constraint(
        ff_dict['parameters'],
        MultivariateNormalDistribution(central_value=ff_dict['central_values'],
                                       covariance=covariance))
| mit | 90f6e5767660350ec25719a9d87a0b09 | 50.652174 | 112 | 0.705387 | 4.242857 | false | false | false | false |
flav-io/flavio | flavio/physics/bdecays/formfactors/hqet.py | 1 | 5822 | """Common functions for HQET form factors."""
from math import sqrt, log, pi
from functools import lru_cache
from flavio.math.functions import li2
from flavio.physics.running import running
from flavio.physics.bdecays.formfactors import common
def get_hqet_parameters(par):
    """Return a dict of HQET expansion parameters derived from `par`:
    alpha_s/pi ('ash'), quark masses ('mb1S', 'mb', 'mc'), 'Lambdabar',
    the expansion parameters 'epsc', 'epsb' and the mass ratio 'zc'."""
    # The scale here is fixed to 2.7 GeV ~ sqrt(m_b^pole * m_c^pole)
    alphas = running.get_alpha(par, scale=2.7, nf_out=5)['alpha_s']
    mb1S = running.get_mb_1S(par)
    mb = mb1S * (1 + 2 * alphas**2 / 9)
    # m_b - m_c = 3.4 GeV fixed — presumably the value used with this
    # scheme in the underlying reference; TODO confirm
    mc = mb - 3.4
    # spin-averaged B meson mass
    mBbar = (par['m_B0'] + 3 * par['m_B*0']) / 4
    # eq. (25); note the comment about the renormalon cancellation thereafter
    Lambdabar = mBbar - mb + par['lambda_1'] / (2 * mb1S)
    return {
        'ash': alphas / pi,
        'mb1S': mb1S,
        'mb': mb,
        'mc': mc,
        'Lambdabar': Lambdabar,
        'epsc': Lambdabar / (2 * mc),
        'epsb': Lambdabar / (2 * mb),
        'zc': mc / mb,
    }
def xi(z, rho2, c, xi3, order_z):
    r"""Leading-order Isgur-Wise function
    $$\xi(z)=1-\rho^2 (w-1) + c (w-1)^2 + \xi^{(3)} (w-1)^3/6$$
    where $w=w(z)$ is expanded in $z$ up to and including terms of order
    `z**order_z`."""
    def wm1_pow(n):
        # (w-1)^n expanded in z to the requested order
        return common.w_minus_1_pow_n(z, n=n, order_z=order_z)
    return 1 - rho2 * wm1_pow(1) + c * wm1_pow(2) + xi3 / 6 * wm1_pow(3)
def Lz(par, w, z, order_z):
    r"""Subleading Isgur-Wise combinations $L_1 \ldots L_6$ as a dict keyed
    by 1..6, built from the chi_2, chi_3 and eta functions expanded in z."""
    wm1 = common.w_minus_1_pow_n(z, n=1, order_z=order_z)
    wm1_sq = common.w_minus_1_pow_n(z, n=2, order_z=order_z)
    chi2 = par['chi_2(1)'] + par['chi_2p(1)'] * wm1 + par['chi_2pp(1)'] / 2 * wm1_sq
    chi3 = par['chi_3p(1)'] * wm1 + par['chi_3pp(1)'] / 2 * wm1_sq
    eta = par['eta(1)'] + par['etap(1)'] * wm1 + par['etapp(1)'] / 2 * wm1_sq
    # w is not expanded in the kinematical factors
    return {
        1: -4 * (w - 1) * chi2 + 12 * chi3,
        2: -4 * chi3,
        3: 4 * chi2,
        4: 2 * eta - 1,
        5: -1,
        6: -2 * (1 + eta) / (w + 1),
    }
def ell_i(i, par, z, order_z):
    r"""Sub-sub-leading power correction $\ell_i(w(z))$, linearized in
    $(w-1)$ with CLN parameters taken from `par`."""
    wm1 = common.w_minus_1_pow_n(z, n=1, order_z=order_z)
    return par['CLN l_{}(1)'.format(i)] + wm1 * par['CLN lp_{}(1)'.format(i)]


def ell(par, z, order_z):
    r"""Sub-sub-leading power corrections $\ell_{i}(w(z))$ for
    $i=1\ldots6$ as a dictionary."""
    return {i: ell_i(i, par, z, order_z) for i in range(1, 7)}
def r(w):
    """Auxiliary function r(w) = log(w + sqrt(w^2-1)) / sqrt(w^2-1),
    with the zero-recoil limit r(1) = 1 taken explicitly."""
    if w == 1:
        return 1
    root = sqrt(w**2 - 1)
    return log(w + root) / root


def omega_plus(w):
    """Larger root w + sqrt(w^2 - 1); omega_plus * omega_minus = 1."""
    return w + sqrt(w**2 - 1)


def omega_minus(w):
    """Smaller root w - sqrt(w^2 - 1); omega_plus * omega_minus = 1."""
    return w - sqrt(w**2 - 1)
@lru_cache(maxsize=32)
def omega(w, z):
    # Dilogarithm-containing auxiliary function omega(w, z) entering the
    # one-loop matching coefficients below; cached since several of them
    # evaluate it at the same (w, z).
    if w == 1:
        # zero-recoil limit
        return -1 + (z + 1) / (z - 1) * log(z)
    return (1 + (w * (2 * li2(1 - z * omega_minus(w))
                      - li2(1 - omega_minus(w)**2) -
                      2 * li2(1 - z * omega_plus(w)) + li2(1 - omega_plus(w)**2))
                 ) / (2. * sqrt(-1 + w**2)) - w * log(z) * r(w))
# ---------------------------------------------------------------------------
# NOTE(review): the functions below appear to be the O(alpha_s) matching
# coefficients of the heavy-quark currents (pseudoscalar, vector, axial
# vector, tensor) as functions of the recoil variable w and the mass ratio
# z = mc/mb — confirm against the reference used for this module.
# wz = (z + 1/z)/2 is w evaluated at q^2 = 0 (maximal recoil), where the
# denominators (w - wz) vanish. They rely on r(w) and omega(w, z) above.
# ---------------------------------------------------------------------------
def CP(w, z):
    # pseudoscalar current coefficient
    wz = 1 / 2 * (z + 1 / z)
    return ((-2 * (-w + wz) * (-1 + z) * z *
             (-1 + z + z * (1 + z) * log(z)) *
             ((-1 + z**2) * log(z) +
              (z * (3 + z**2) +
               w *
               (-1 + z - (3 + 2 * w) * z**2 +
                z**3)) * r(w)) +
             4 * (w - wz)**2 * z**2 * omega(w, z)) /
            (2. * (w - wz)**2 * z**2))


def CV1(w, z):
    # vector current coefficient C_{V_1}
    wz = 1 / 2 * (z + 1 / z)
    return ((12 * (-w + wz) * z -
             (-1 + z**2) * log(z) +
             2 * (1 + w) * (-1 + (-1 + 3 * w) * z - z**2) *
             r(w) + 4 * (w - wz) * z * omega(w, z)) /
            (6. * (w - wz) * z))


def CV2(w, z):
    # vector current coefficient C_{V_2}
    wz = 1 / 2 * (z + 1 / z)
    return (-(z *
              (2 * (-w + wz) * (-1 + z) +
               (3 - 2 * w - (-2 + 4 * w) * z + z**2) *
               log(z)) +
              (2 - (-1 + 5 * w + 2 * w**2) * z +
               (2 * w + 4 * w**2) * z**2 -
               (1 + w) * z**3) * r(w)) /
            (6. * (w - wz)**2 * z**2))


def CV3(w, z):
    # vector current coefficient C_{V_3}
    wz = 1 / 2 * (z + 1 / z)
    return ((2 * (-w + wz) * (-1 + z) * z +
             (1 + (2 - 4 * w) * z + (3 - 2 * w) * z**2) *
             log(z) +
             (1 + w - (2 * w + 4 * w**2) * z +
              (-1 + 5 * w + 2 * w**2) * z**2 - 2 * z**3)
             * r(w)) / (6. * (w - wz)**2 * z))


def CA1(w, z):
    # axial-vector current coefficient C_{A_1}
    wz = 1 / 2 * (z + 1 / z)
    return ((12 * (-w + wz) * z -
             (-1 + z**2) * log(z) +
             2 * (-1 + w) * (-1 + (1 + 3 * w) * z - z**2) *
             r(w) + 4 * (w - wz) * z * omega(w, z)) /
            (6. * (w - wz) * z))


def CA2(w, z):
    # axial-vector current coefficient C_{A_2}
    wz = 1 / 2 * (z + 1 / z)
    return (-(z *
              (2 * (-w + wz) * (1 + z) +
               (3 + 2 * w - (2 + 4 * w) * z + z**2) *
               log(z)) +
              (2 + (-1 - 5 * w + 2 * w**2) * z +
               (-2 * w + 4 * w**2) * z**2 +
               (1 - w) * z**3) * r(w)) /
            (6. * (w - wz)**2 * z**2))


def CA3(w, z):
    # axial-vector current coefficient C_{A_3}
    wz = 1 / 2 * (z + 1 / z)
    return ((2 * (-w + wz) * z * (1 + z) -
             (1 - (2 + 4 * w) * z + (3 + 2 * w) * z**2) *
             log(z) +
             (1 - w + (-2 * w + 4 * w**2) * z +
              (-1 - 5 * w + 2 * w**2) * z**2 + 2 * z**3)
             * r(w)) / (6. * (w - wz)**2 * z))


def CT1(w, z):
    # tensor current coefficient C_{T_1}
    wz = 1 / 2 * (z + 1 / z)
    return (((-1 + w) *
             (-1 + (2 + 4 * w) * z - z**2) * r(w) +
             (6 * (-w + wz) * z -
              (-1 + z**2) * log(z)) +
             2 * (w - wz) * z * omega(w, z)) / (3. * (w - wz) * z))


def CT2(w, z):
    # tensor current coefficient C_{T_2}
    wz = 1 / 2 * (z + 1 / z)
    return (2 * (z * log(z) + (1 - w * z) * r(w))) / (3. * (w - wz) * z)


def CT3(w, z):
    # tensor current coefficient C_{T_3}
    wz = 1 / 2 * (z + 1 / z)
    return (2 * (log(z) + (w - z) * r(w))) / (3. * (w - wz))
| mit | 57e23eec8e5bbbcd746ff8d8f1e49a50 | 30.47027 | 96 | 0.374785 | 2.315831 | false | false | false | false |
flav-io/flavio | flavio/physics/kdecays/kll.py | 1 | 4902 | r"""Functions for $K^0\to \ell^+\ell^-$ decays"""
import flavio
from flavio.config import config
from flavio.physics.bdecays.common import lambda_K
from flavio.physics import ckm
from flavio.physics.kdecays.wilsoncoefficients import wilsoncoefficients_sm_sl
from flavio.physics.common import add_dict
from math import pi, sqrt
def amplitudes(par, wc, l1, l2):
    r"""Amplitudes P and S entering the $K\to\ell_1^+\ell_2^-$ observables.

    Parameters
    ----------
    - `par`: parameter dictionary
    - `wc`: Wilson coefficient dictionary
    - `l1` and `l2`: should be `'e'` or `'mu'`

    Returns the tuple (P, S) including the CKM factor $V_{ts}^* V_{td}$.

    (Fix: the docstring previously documented a parameter `K` that this
    function does not take.)
    """
    # masses
    ml1 = par['m_'+l1]
    ml2 = par['m_'+l2]
    mK = par['m_K0']
    # Wilson coefficients: differences C_i - C_i' are what enters for the
    # pseudoscalar kaon
    qqll = 'sd' + l1 + l2
    # For LFV expressions see arXiv:1602.00881 eq. (5)
    C9m = wc['C9_'+qqll] - wc['C9p_'+qqll]  # only relevant for l1 != l2
    C10m = wc['C10_'+qqll] - wc['C10p_'+qqll]
    CPm = wc['CP_'+qqll] - wc['CPp_'+qqll]
    CSm = wc['CS_'+qqll] - wc['CSp_'+qqll]
    P = (ml2 + ml1)/mK * C10m + mK * CPm  # neglecting mu, md
    S = (ml2 - ml1)/mK * C9m + mK * CSm  # neglecting mu, md
    xi_t = ckm.xi('t', 'sd')(par)
    return xi_t * P, xi_t * S
def amplitudes_LD(par, K, l):
    r"""Long-distance (two-photon) amplitudes entering the
    $K\to\ell^+\ell^-$ observables; returns the tuple (S, P)."""
    ml = par['m_' + l]
    mK = par['m_' + K]
    prefactor = 2 * ml / mK / par['s2w']
    # numbers extracted from arXiv:1711.11030
    flavio.citations.register("Chobanova:2017rkj")
    ASgaga = 2.49e-4 * (-2.821 + 1.216j)
    ALgaga = 2.02e-4 * (par['chi_disp(KL->gammagamma)'] - 5.21j)
    return prefactor * ASgaga, -prefactor * ALgaga
def amplitudes_eff(par, wc, K, l1, l2, ld=True):
    r"""Effective amplitudes entering the $K\to\ell_1^+\ell_2^-$ observables.

    `K` should be `'KS'` or `'KL'`; `ld` toggles the long-distance
    two-photon contribution (only available for l1 == l2).
    """
    P, S = amplitudes(par, wc, l1, l2)
    # long-distance contributions are only known/included for the lepton
    # flavour conserving modes
    if l1 != l2 or not ld:
        SLD = 0
        PLD = 0
    else:
        SLD, PLD = amplitudes_LD(par, K, l1)
    # CP structure of the K_{S,L} states selects the absorptive/dispersive
    # parts of P and S.
    # Fix: the KS branch was previously only entered for l1 == l2, so the
    # registered LFV observable BR(KS->emu,mue) raised an UnboundLocalError;
    # the same projection is now applied for LFV (with the LD pieces already
    # zero above). NOTE(review): confirm this projection for the LFV modes.
    if K == 'KS':
        Peff = P.imag
        Seff = S.real + SLD
    elif K == 'KL':
        Peff = P.real + PLD
        Seff = S.imag
    else:
        raise ValueError("K should be 'KS' or 'KL', not {!r}".format(K))
    return Peff, Seff
def get_wc(wc_obj, par, l1, l2):
    """Return the full `sd l1 l2` Wilson coefficient dictionary (NP plus,
    for lepton flavour conserving modes, the SM contribution to C10) at the
    kaon renormalization scale."""
    scale = config['renormalization scale']['kdecays']
    sector = 'sd' + l1 + l2
    wcnp = wc_obj.get_wc(sector, scale, par)
    wcsm = {}
    if l1 == l2:
        # include SM contributions for LF conserving decay
        _c = wilsoncoefficients_sm_sl(par, scale)
        xi_t = ckm.xi('t', 'sd')(par)
        xi_c = ckm.xi('c', 'sd')(par)
        wcsm['C10_sd' + l1 + l2] = _c['C10_t'] + xi_c / xi_t * _c['C10_c']
    return add_dict((wcsm, wcnp))
def br_kll(par, wc_obj, K, l1, l2, ld=True):
    r"""Branching ratio of $K\to\ell_1^+\ell_2^-$ for `K` in
    `('KS', 'KL')`."""
    wc = get_wc(wc_obj, par, l1, l2)
    # masses and decay constants
    ml1 = par['m_'+l1]
    ml2 = par['m_'+l2]
    mK = par['m_K0']
    # overall normalization (the CKM factor is contained in the amplitudes)
    N = 4 * par['GF'] / sqrt(2) * par['alpha_e'] / (4 * pi)
    # phase-space factors
    beta = sqrt(lambda_K(mK**2, ml1**2, ml2**2)) / mK**2
    beta_p = sqrt(1 - (ml1 + ml2)**2 / mK**2)
    beta_m = sqrt(1 - (ml1 - ml2)**2 / mK**2)
    prefactor = 2 * abs(N)**2 / 32. / pi * mK**3 * par['tau_'+K] * beta * par['f_K0']**2
    Peff, Seff = amplitudes_eff(par, wc, K, l1, l2, ld=ld)
    return prefactor * (beta_m**2 * abs(Peff)**2 + beta_p**2 * abs(Seff)**2)
# Factories returning the functions needed for Prediction instances.
def br_kll_fct(K, l1, l2):
    """Return a (wc_obj, par) -> BR function for a single lepton ordering."""
    def f(wc_obj, par):
        return br_kll(par, wc_obj, K, l1, l2)
    return f


def br_kll_fct_lsum(K, l1, l2):
    """Return a (wc_obj, par) -> BR function summing both lepton orderings
    (used for the LFV modes, e.g. BR(K->emu) + BR(K->mue))."""
    def f(wc_obj, par):
        return br_kll(par, wc_obj, K, l1, l2) + br_kll(par, wc_obj, K, l2, l1)
    return f
_tex = {'e': 'e', 'mu': r'\mu'}
_tex_p = {'KL': r'K_L', 'KS': r'K_S'}


def _register_obs(obs_name, process_tex, fct):
    """Create an Observable with taxonomy/description/tex and attach the
    Prediction function `fct`."""
    obs = flavio.classes.Observable(obs_name)
    obs.add_taxonomy(r'Process :: $s$ hadron decays :: FCNC decays :: $K\to \ell\ell$ :: $' + process_tex + r'$')
    obs.set_description(r"Branching ratio of $" + process_tex + r"$")
    obs.tex = r"$\text{BR}(" + process_tex + r")$"
    flavio.classes.Prediction(obs_name, fct)


# lepton flavour conserving decays
for _l in ['e', 'mu']:
    for _P in _tex_p:
        _register_obs(
            "BR({}->{}{})".format(_P, _l, _l),
            _tex_p[_P] + r"\to " + _tex[_l] + r"^+" + _tex[_l] + r"^-",
            br_kll_fct(_P, _l, _l),
        )

# LFV decay
for _P in _tex_p:
    _register_obs(
        "BR({}->emu,mue)".format(_P),
        _tex_p[_P] + r"\to e^\pm\mu^\mp",
        br_kll_fct_lsum(_P, 'e', 'mu'),
    )
| mit | e749c349ca2d28917d34bfb2515b723c | 32.806897 | 120 | 0.5459 | 2.436382 | false | false | false | false |
pythonindia/wye | wye/base/constants.py | 1 | 2585 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import inspect
def _user_attributes(cls):
# gives all inbuilt attrs
defaults = dir(type(str('defaults'), (object,), {}))
return [
item[0] for item in inspect.getmembers(cls) if item[0] not in defaults]
def choices(cls):
    """
    Class decorator: for every `_NAME = [value, label]` attribute it exposes
    `cls.NAME = value` (underscore stripped) and collects
    `cls.CHOICES = ((value, label), ...)` for use in model fields.
    """
    pairs = []
    for attr in _user_attributes(cls):
        value, label = getattr(cls, attr)[:2]
        setattr(cls, attr[1:], value)
        pairs.append((value, label))
    cls.CHOICES = tuple(pairs)
    return cls
@choices
class WorkshopStatus:
    # Lifecycle states of a workshop request. The `choices` decorator exposes
    # e.g. WorkshopStatus.DRAFT == 1 and a CHOICES tuple for model fields.
    _DRAFT = [1, "Draft"]
    _REQUESTED = [2, "Workshop Requested"]
    _ACCEPTED = [3, "Workshop Accepted "]
    _DECLINED = [4, "Workshop Declined"]
    _FEEDBACK_PENDING = [5, "FeedBack Pending"]
    _HOLD = [6, "Workshop On Hold"]
    _COMPLETED = [7, "Workshop Completed"]
    _UNABLE_TO_COMPLETE = [8, "Workshop unable to complete"]
@choices
class WorkshopLevel:
    # Difficulty levels; the numeric values act as the stored choice keys and
    # must therefore be unique. Fix: _ADVANCE previously duplicated
    # _INTERMEDIATE's value (2), making the two levels indistinguishable.
    _BEGINNER = [1, "Beginner"]
    _INTERMEDIATE = [2, "Intermediate"]
    _ADVANCE = [3, "Advance"]
@choices
class OrganisationType:
    # Kind of organisation hosting/requesting a workshop.
    _COLLEGE = [1, "College"]
    _ORGANISATION = [2, "Free Software Organisation"]
    _STUDENT_GROUP = [3, "Student Group"]
    _OTHERS = [4, "Others"]


@choices
class FeedbackType:
    # Who the feedback concerns.
    _PRESENTER = [1, "Presenter"]
    _ORGANISATION = [2, "Organisation"]


@choices
class WorkshopRatings:
    # Five-point rating scale.
    # NOTE(review): _VERY_BAD (-1) is less negative than _BAD (-2) — these
    # values look swapped; confirm before relying on their ordering.
    _VERY_BAD = [-1, 'Very Bad']
    _BAD = [-2, 'Bad']
    _NEUTRAL = [0, 'Neutral']
    _GOOD = [1, 'Good']
    _VERY_GOOD = [2, 'Very Good']


class WorkshopAction:
    # Toggle verbs used in views/URLs; plain tuples, not run through
    # the `choices` decorator.
    ACTIVE = ('active', 'deactive')
    ASSIGNME = ('opt-in', 'opt-out')


@choices
class ContactFeedbackType:
    # Topic of a contact-form message.
    _WORKSHOP = [1, "Workshop"]
    _ACCOUNT = [2, "Account"]
    _ORGANISATION = [3, "Organisation"]
    _OTHERS = [4, "Others"]


@choices
class WorkshopAudience:
    # Target audience (year of study) for a workshop.
    # NOTE(review): label "Engineering 2ndYear" is missing a space and the
    # attribute _TEN_PLUD_ONE looks like a typo for _TEN_PLUS_ONE; renaming
    # would change the generated attribute name, so both are left untouched.
    _BE_FINAL_YEAR = [1, "Engineering 4th Year"]
    _BE_THIRD_YEAR = [2, "Engineering 3rd Year"]
    _BE_SECOND_YEAR = [3, "Engineering 2ndYear"]
    _BE_FIRST_YEAR = [4, "Engineering 1st Year"]
    _MASTER_FINAL_YEAR = [5, "MCA Final Year"]
    _MASTER_SECOND_YEAR = [6, "MCA Second Year"]
    _MASTER_FIRST_YEAR = [7, "MCA First Year"]
    _DIPLOMA_THIRD_YEAR = [8, "Diploma 3rd Year"]
    _DIPLOMA_SECOND_YEAR = [9, "Diploma 2nd Year"]
    _DIPLOMA_FIRST_YEAR = [10, "Diploma 1st Year"]
    _TEN_PLUS_TWO = [11, "10+2"]
    _TEN_PLUD_ONE = [12, "10+1"]
    _SCHOOL = [13, "School"]
    _OTHERS = [14, "Others"]


@choices
class YesNO:
    # Boolean-style choice pair.
    _YES = [1, "Yes"]
    _NO = [2, "No"]
| mit | bbb61755f436a422427711a240aa7d65 | 24.097087 | 79 | 0.598066 | 2.825137 | false | false | false | false |
flav-io/flavio | flavio/physics/bdecays/formfactors/lambdab_32/QuarkModel_MCN.py | 1 | 5367 | from math import sqrt, exp
import flavio
def lambda_momentum(q2, mL, mLb):
    """Daughter baryon momentum in the Lambda_b rest frame, from the Källén
    function phi. For unphysical q2 (phi < 0) the magnitude sqrt(|phi|) is
    returned, reproducing the original branch behaviour."""
    s = q2 / mLb**2
    r = (mL / mLb)**2
    phi = (1 - r)**2 - 2 * (1 + r) * s + s**2
    return mLb / 2 * sqrt(abs(phi))


def lambda_mass(m_q, m_s):
    """Constituent mass of the light baryon: two light quarks plus one
    strange quark."""
    return 2 * m_q + m_s


def alpha_lambda_lambdaprime(alpha_l1, alpha_l2):
    """Quadratic mean of the two wave-function size parameters."""
    return sqrt((alpha_l1**2 + alpha_l2**2) / 2)


def F(a0, a2, a4, p_L, m_q, m_L, alpha):
    """Form-factor shape: even polynomial in the baryon momentum p_L times a
    Gaussian fall-off."""
    polynomial = a0 + a2 * p_L**2 + a4 * p_L**4
    return polynomial * exp(-3 * (m_q * p_L)**2 / (2 * (m_L * alpha)**2))
# Maps process labels to process-specific metadata; 'X' is the name of the
# daughter baryon, used to build parameter-dictionary keys below.
_process_dict = {}
_process_dict['Lambdab->Lambda(1520)'] = {'X': 'Lambda(1520)'}
def formfactors(process, par, q2):
    r"""Form factors for $\Lambda_b\to X_{3/2}$ (spin-3/2 daughter baryon)
    in the quark model with the MCN approach of arXiv:1108.6129 [nucl-th].

    Returns (ff_dict, mL, mLb) with the F/G/H form factors evaluated at q2.
    """
    flavio.citations.register("Mott:2011cx")
    # Using the PDG mass values instead of the model ones; the difference is
    # covered by the uncertainties attached to the form factors.
    pd = _process_dict[process]
    mL = par['m_Lambda(1520)']
    mLb = par['m_Lambdab']
    m_q = par[process + ' m_q']
    m_s = par[process + ' m_s']
    alpha_l1 = par[process + ' alpha_Lambdab']
    alpha_l2 = par[process + ' alpha_' + pd['X']]
    # kinematic and wave-function inputs shared by all form factors
    p_lambda = lambda_momentum(q2, mL, mLb)
    m_lambda_tilde = lambda_mass(m_q, m_s)
    alpha_ll = alpha_lambda_lambdaprime(alpha_l1, alpha_l2)
    names = ['F1', 'F2', 'F3', 'F4',
             'G1', 'G2', 'G3', 'G4',
             'H1', 'H2', 'H3', 'H4', 'H5', 'H6']
    ff_dict = {}
    for name in names:
        a0, a2, a4 = (par['{} {} {}'.format(process, name, coeff)]
                      for coeff in ('a0', 'a2', 'a4'))
        ff_dict[name] = F(a0, a2, a4, p_lambda, m_q, m_lambda_tilde, alpha_ll)
    return ff_dict, mL, mLb
def ff_equiv(process, q2, par):
    """Translate the quark-model F/G/H form factors into the helicity-basis
    form factors used in arXiv:1903.00448 (fV*, fA*, fT*, fT5*).

    Each output is multiplied by a process-specific relative-uncertainty
    nuisance parameter ('... uncertainty') taken from `par`.
    """
    # transform FormDict in form factors used in arXiv:1903.00448
    ff_dict, mL, mLb = formfactors(process, par, q2)
    # multiplicative uncertainty factors, one per helicity form factor
    e_fVt = par[process+' fVt uncertainty']
    e_fVperp = par[process+' fVperp uncertainty']
    e_fV0 = par[process+' fV0 uncertainty']
    e_fVg = par[process+' fVg uncertainty']
    e_fAt = par[process+' fAt uncertainty']
    e_fAperp = par[process+' fAperp uncertainty']
    e_fA0 = par[process+' fA0 uncertainty']
    e_fAg = par[process+' fAg uncertainty']
    e_fTt = par[process+' fTt uncertainty']
    e_fTperp = par[process+' fTperp uncertainty']
    e_fT0 = par[process+' fT0 uncertainty']
    e_fTg = par[process+' fTg uncertainty']
    e_fT5t = par[process+' fT5t uncertainty']
    e_fT5perp = par[process+' fT5perp uncertainty']
    e_fT50 = par[process+' fT50 uncertainty']
    e_fT5g = par[process+' fT5g uncertainty']
    ff = {}
    # vector form factors
    ff['fVt'] = ( ff_dict['F2']*mL*(mL**2 - mLb**2 - q2) + mLb*(2*ff_dict['F1']*mL*(mL - mLb) - 2*ff_dict['F4']*mL*mLb + ff_dict['F3']*(mL**2 - mLb**2 + q2)) )/( 2*mL*(mL-mLb)*mLb**2 ) * e_fVt
    ff['fVperp'] = ( ff_dict["F1"]/mLb - ff_dict["F4"]*mL/(mL**2 - 2*mL*mLb + mLb**2 - q2) )*e_fVperp
    ff['fV0'] = ( ff_dict["F2"]*mL*(mL**4 + (mLb**2 - q2)**2 - 2*mL**2*(mLb**2 + q2)) + mLb*(2*ff_dict["F1"]*mL*(mL + mLb)*(mL**2 - 2*mL*mLb + mLb**2 - q2) - 2*ff_dict["F4"]*mL*mLb*(mL**2 - mLb**2 + q2) + ff_dict["F3"]*(mL**4 + (mLb**2 - q2)**2 - 2*mL**2*(mLb**2 + q2))) )/( 2*mL*mLb**2*(mL+mLb)*(mL**2 - 2*mL*mLb + mLb**2 - q2) )*e_fV0
    ff['fVg'] = ff_dict["F4"]*e_fVg
    # axial-vector form factors
    ff['fAt'] = ( ff_dict["G2"]*mL*(mL**2 - mLb**2 - q2) + mLb*(-2*ff_dict["G4"]*mL*mLb + 2*ff_dict["G1"]*mL*(mL + mLb) + ff_dict["G3"]*(mL**2 - mLb**2 + q2)) )/( 2*mL*mLb**2*(mL + mLb) )*e_fAt
    ff['fAperp'] = ( ff_dict["G1"]/mLb - (ff_dict["G4"]*mL)/(mL**2 + 2*mL*mLb + mLb**2 - q2) )*e_fAperp
    ff['fA0'] = ( ff_dict["G2"]*mL*(mL**4 + (mLb**2 - q2)**2 - 2*mL**2*(mLb**2 + q2)) + mLb*(2*ff_dict["G1"]*mL*(mL - mLb)*(mL**2 + 2*mL*mLb + mLb**2 - q2) - 2*ff_dict["G4"]*mL*mLb*(mL**2 - mLb**2 + q2) + ff_dict["G3"]*(mL**4 + (mLb**2 - q2)**2 - 2*mL**2*(mLb**2 + q2))) )/( 2*mL*(mL - mLb)*mLb**2*(mL**2 + 2*mL*mLb + mLb**2 - q2) )*e_fA0
    ff['fAg'] = -ff_dict["G4"]*e_fAg
    # tensor form factors (the timelike ones vanish identically)
    ff['fTt'] = 0*e_fTt
    ff['fTperp'] = ( 2*ff_dict["H5"]*mL - ((ff_dict["H3"]+ff_dict["H6"])*mL**2)/mLb + ff_dict["H3"]*mLb + 2*ff_dict["H1"]*mL*(mL + mLb)/mLb - 2*(ff_dict["H5"] + ff_dict["H6"])*mL**2*(mL - mLb)/((mL - mLb)**2 - q2) - ff_dict["H3"]*q2/mLb + ff_dict["H2"]*mL*(-mL**2 + mLb**2 + q2)/mLb**2 )/( 2*mL*(mL + mLb) )*e_fTperp
    ff['fT0'] = ( (ff_dict["H1"] + ff_dict["H2"] - ff_dict["H3"] - ff_dict["H6"])/mLb - 2*((ff_dict["H5"] + ff_dict["H6"])*mL)/((mL - mLb)**2 - q2) + ff_dict["H4"]*((mL + mLb)**2 - q2)/(2*mL*mLb**2) )*e_fT0
    ff['fTg'] = ( ff_dict["H5"]*(mL- mLb) - ff_dict["H6"]*(-mL**2 + mLb**2 + q2)/(2*mLb) )*e_fTg
    # pseudo-tensor form factors
    ff['fT5t'] = 0*e_fT5t
    ff['fT5perp'] = ( -1/(2*mL*(mL-mLb)*mLb**2*(mL**2 + 2*mL*mLb + mLb**2 - q2)) * (ff_dict["H2"]*mL*(mL**4 + (mLb**2 - q2)**2 - 2*mL**2*(mLb**2 + q2)) + mLb*(mL*(2*ff_dict["H5"]*mLb*(mL*mLb + mLb**2 - q2) + ff_dict["H6"]*mL*(mL**2 + 2*mL*mLb + mLb**2 -q2)) - 2*ff_dict["H1"]*mL*(mL - mLb)*(mL**2 + 2*mL*mLb + mLb**2 -q2) + ff_dict["H3"]*(mL**4 + (mLb**2 - q2)**2 - 2*mL**2*(mLb**2 + q2)))) )*e_fT5perp
    ff['fT50'] = ( ff_dict["H1"]/mLb + 2*ff_dict["H5"]*mL/(mL**2 + 2*mL*mLb + mLb**2 - q2) )*e_fT50
    ff['fT5g'] = ( -ff_dict["H5"]*(mL + mLb) - ff_dict["H6"]*(mL**2 + 2*mL*mLb + mLb**2 - q2)/(2*mLb) )*e_fT5g
    return ff
| mit | e655c845e3f4baf425f4bd8d80a8c5c6 | 50.605769 | 402 | 0.529346 | 1.936147 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.